max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
billy2pupa/fl.py | backwardn/billy | 33 | 12759151 | <gh_stars>10-100
from openstatesapi.jurisdiction import make_jurisdiction

# billy-to-pupa migration shim: declare the Florida ('fl') jurisdiction and
# point it at the state portal URL.
J = make_jurisdiction('fl')
J.url = 'http://myflorida.com'
| 1.507813 | 2 |
pcapkit/protocols/data/protocol.py | binref/PyPCAPKit | 3 | 12759152 | # -*- coding: utf-8 -*-
"""data modules for root protocol"""
from typing import TYPE_CHECKING
from pcapkit.corekit.infoclass import Info
__all__ = ['Packet']
class Packet(Info):
    """Header and payload data for the root protocol.

    A :class:`~pcapkit.corekit.infoclass.Info` record splitting a raw packet
    into its header and payload byte strings.
    """
    #: Raw bytes of the packet header.
    header: 'bytes'
    #: Raw bytes of the packet payload (everything after the header).
    payload: 'bytes'

    if TYPE_CHECKING:
        # Signature stub for static type checkers only; the real __init__ is
        # provided by the Info base class at runtime.
        def __init__(self, header: 'bytes', payload: 'bytes') -> 'None': ...  # pylint: disable=unused-argument, super-init-not-called, multiple-statements
| 2.46875 | 2 |
src/torforce/losses.py | leaprovenzano/torforce | 0 | 12759153 | <filename>src/torforce/losses.py
import torch
from torch import nn
class ClippedSurrogateLoss(nn.Module):
    """Clipped surrogate loss as set out by Schulman et al.

    This loss is used in proximal policy optimization to optimize the policy
    network. It is the mean-reduced negation of the clipped surrogate
    objective detailed in the paper:

    .. math::
        L^{CLIP}(\\theta) = min(r(\\theta)\\hat{A}_t, clip(r(\\theta), 1-\\varepsilon, 1+\\varepsilon)\\hat{A}_t)

    with an optional entropy bonus to encourage exploration.

    References:
        - Schulman, Wolski, Dhariwal, Radford and Klimov. `Proximal Policy Optimization Algorithms <https://arxiv.org/pdf/1707.06347.pdf>`_. 2017.

    Attributes:
        clip (float): used to clip :math:`r(\\theta)` in bounds
            :math:`[1 - clip, 1 + clip]`. Recommended values are around .1 - .3.
        entropy_coef (float): strength of the entropy bonus. Good ranges depend
            on the problem and the distribution, but in general this should be
            a very small number (usually max .01). Defaults to 0. (no entropy
            bonus).
    """

    def __init__(self, clip=.2, entropy_coef=0.):
        super().__init__()
        self.clip = clip
        self.entropy_coef = entropy_coef

    def forward(self, dist: torch.distributions.Distribution, action: torch.Tensor, advantage: torch.Tensor, old_logprob: torch.Tensor) -> torch.Tensor:
        """get the clipped surrogate loss for a distribution `dist` generated by the policy network.

        Args:
            dist (torch.distributions.Distribution): action distribution of the policy head being optimized
            action (torch.Tensor): the actual action taken during collection in the enviornment
            advantage (torch.Tensor): the estimated advantage of taking `action`
            old_logprob (torch.Tensor): the original log probability of taking `action` under the old policy.

        Returns:
            torch.Tensor
        """
        new_logprob = dist.log_prob(action)
        # Probability ratio r(theta) = pi_new(a|s) / pi_old(a|s), in log space.
        ratio = torch.exp(new_logprob - old_logprob)
        unclipped = ratio * advantage
        clipped = torch.clamp(ratio, 1.0 - self.clip, 1.0 + self.clip) * advantage
        # Negate: optimizers minimize, but the surrogate objective is maximized.
        err = -torch.min(clipped, unclipped).mean()
        # BUG FIX: the original referenced self.entropy_bonus, an attribute
        # that was never set (__init__ stores entropy_coef), so every forward
        # pass raised AttributeError.
        if self.entropy_coef:
            err = err - dist.entropy().mean() * self.entropy_coef
        return err
| 2.296875 | 2 |
utils/utils.py | sachio222/aha4 | 1 | 12759154 | <reponame>sachio222/aha4
# utils.py
# <NAME>, 2020
#
# Large portions inspired from Stanford CS2300 best practices guidelines
# for deep learning projects. Original repository available below:
# https://github.com/cs230-stanford/cs230-code-examples/blob/master/pytorch/vision/utils.py
import json
import torch
import torchvision
import matplotlib.pyplot as plt
from pathlib2 import Path
class Params():
    """Light-weight container exposing the keys of a JSON file as attributes."""

    def __init__(self, json_path):
        # Construction is just an initial load; share the code path with
        # update() so both behave identically.
        self.update(json_path)

    def save(self, json_path):
        """Write the current parameters out as pretty-printed JSON."""
        with open(json_path, "w") as f:
            json.dump(self.__dict__, f, indent=4)

    def update(self, json_path):
        """Loads parameters from json file."""
        with open(json_path) as f:
            self.__dict__.update(json.load(f))

    @property
    def dict(self):
        """Gives dict-like access to Params instance by 'params.dict['learning_rate']'."""
        return self.__dict__
class RunningAverage():
    """A simple class that maintains the running average of a qty.

    Calling the instance returns the current average. Calling it before any
    ``update`` raises ``ZeroDivisionError`` (no samples yet).

    Example:
        ```
        loss_avg = RunningAverage()
        loss_avg.update(2)
        loss_avg.update(4)
        loss_avg()  # returns 3.0
        ```
    """

    def __init__(self):
        self.steps = 0  # number of samples accumulated
        self.total = 0  # running sum of samples

    def update(self, val):
        """Fold one sample into the running total."""
        self.total += val
        self.steps += 1

    def __call__(self):
        """Return the average of all samples seen so far."""
        return self.total / float(self.steps)

    # Alias so callers can write loss_avg.reset() to zero the counters.
    reset = __init__
def show_sample_img(dataset, idx):
    """Display sample `idx` of `dataset` with matplotlib.

    Assumes ``dataset[idx]`` returns a tuple whose first element is a CHW
    image tensor; only channel 0 is rendered — TODO confirm dataset layout.
    """
    sample = dataset.__getitem__(idx)
    plt.imshow(sample[0].numpy()[0])
    plt.show()
def print_full_tensor(tensor):
    """You know how it only shows part of the tensor when you print?
    Well use this to show the whole thing.

    The "full" print profile is global state; restore it in a ``finally`` so
    an exception while printing cannot leave it switched on for the rest of
    the process (the original leaked the setting on error).
    """
    torch.set_printoptions(profile="full")
    try:
        print(tensor)
    finally:
        torch.set_printoptions(profile="default")
def get_save_state(epoch, model, optimizer):
    """Bundle training progress into a dict suitable for torch.save.

    Args:
        epoch: (int) the epoch just completed; stored as epoch + 1 so a
            resumed run starts on the next epoch.
        model: (torch.nn.Module) model whose weights are captured.
        optimizer: (torch.optim) optimizer whose state is captured.

    Returns:
        dict with keys 'epoch', 'state_dict' and 'optim_dict'.
    """
    return {
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'optim_dict': optimizer.state_dict(),
    }
def save_checkpoint(state, checkpoint, name="last", silent=True):
    """Saves state dict to file.

    Args:
        state: (dict) contains epoch, state dict and optimizer dict
        checkpoint: (Path) directory name to store saved states
        name: (string) prefix to '.pth.tar' eg: name.pth.tar
        silent: (bool) if True, bypass output messages
    """
    def log(msg):
        # Single place implementing the `silent` switch (resolves the old
        # TODO about four separate `if not silent` checks).
        if not silent:
            print(msg)

    filepath = checkpoint / "{}.pth.tar".format(name)
    if not Path(checkpoint).exists():
        log("Creating checkpoint directory {}".format(checkpoint))
        Path(checkpoint).mkdir()
    else:
        log("Getting checkpoint directory...")
    log("Saving file...")
    # Remember to convert filepath to str or it flips out when trying to save
    torch.save(state, str(filepath))
    log("File saved successfully.")
def load_checkpoint(checkpoint, model, optimizer=None, name="last"):
    """Loads parameters dict from checkpoint file to model, and optimizer.

    Args:
        checkpoint: (Path) directory containing the saved checkpoint
        model: (torch.nn.Module) model to load parameters into
        optimizer: (torch.optim) optional: resume optimizer from checkpoint
        name: (string) prefix to '.pth.tar' eg: name.pth.tar

    Returns:
        The deserialized checkpoint dict.

    Raises:
        FileNotFoundError: if the checkpoint file does not exist.
    """
    filepath = checkpoint / "{}.pth.tar".format(name)
    print("Looking for saved files...", end=" ")
    # BUG FIX: the original `raise ("...")` raised a plain string, which is a
    # TypeError in Python 3. Raise a proper exception, and check the actual
    # file rather than just the directory.
    if not Path(filepath).exists():
        raise FileNotFoundError("File does not exist at {}".format(filepath))
    ckpt = torch.load(str(filepath))
    print("Found.")
    # strict=False: tolerate keys in the checkpoint that the model lacks.
    model.load_state_dict(ckpt.get("state_dict"), strict=False)
    if optimizer:
        optimizer.load_state_dict(ckpt.get("optim_dict"))
    print("Loading saved weights complete.")
    return ckpt
def showme(tnsr,
           size_dim0=10,
           size_dim1=10,
           title=None,
           full=False,
           detach=False,
           grid=False):
    """Does all the nasty matplotlib stuff for free.

    Args:
        tnsr: tensor to visualize.
        size_dim0, size_dim1: figure size (width, height) in inches.
        title: optional plot title.
        full: if True (non-grid mode only), also print the full tensor.
        detach: if True, detach from the autograd graph and convert to numpy.
        grid: if True, render as a torchvision image grid instead of a
            flattened matrix plot.

    NOTE(review): with detach=True the value becomes a numpy array, which has
    no .view(); combining detach with non-grid, >2-D input looks like it would
    fail — confirm intended usage.
    """
    if detach:
        tnsr = tnsr.detach().numpy()
    if not grid:
        # Flatten everything past the first dimension so matshow gets 2-D.
        if len(tnsr.shape) > 2:
            tnsr = tnsr.view(tnsr.shape[0], -1)
        fig, ax = plt.subplots(figsize=(size_dim0, size_dim1))
        ax.set_title(title, color="blue", loc="left", pad=20)
        ax.matshow(tnsr)
        plt.show()
        print(tnsr.shape)
        if full:
            print(tnsr)
    else:
        # Tile the tensor as an image grid, 5 images per row.
        grid_img = torchvision.utils.make_grid(tnsr, nrow=5)
        plt.imshow(grid_img.permute(1, 2, 0))
        plt.show()
        print(tnsr.shape)
def animate_weights(t, nrow=11, label=None, auto=False):
    """Animates weights during training. Only works on Mac.

    Press ctrl + C in terminal to escape. Change auto to True if you are
    running on a mac. It is pretty good.

    Usage example:
        >>> animate_weights(conv1_weights, i)

    Args:
        t: (tensor) from "model.layer_name.weight.data" on layer
        nrow: (int) number of images per row in the grid.
        label: (scalar, string) Optional. Shows label for each pass
        auto: (bool) if True, show a non-blocking frame briefly and close
            (animation mode); if False, block until the window is closed.
    """
    grid_img = torchvision.utils.make_grid(t, nrow)
    # plt.ion()
    plt.title(label, color="blue", loc="left", pad=20)
    # Only channel 0 of the assembled grid is rendered.
    plt.imshow(grid_img.numpy()[0])
    if not auto:
        plt.show(block=True)
    else:
        plt.show(block=False)
        plt.pause(0.0001)
        plt.close()
| 2.5625 | 3 |
preproc/conditions.py | dlesbre/preprocessor | 1 | 12759155 | """This module encodes a simple conditional
evaluation system"""
from typing import List
from .preprocessor import Preprocessor
SINGLE_CHAR_OPERATORS = "()"
DOUBLE_CHAR_OPERATORS = ["==", "!="]


def condition_lexer(string : str) -> List[str]:
    """Split the input string into a stream of tokens.

    Double-quoted segments become single tokens with the quotes stripped
    (possibly empty); operators from SINGLE_CHAR_OPERATORS and
    DOUBLE_CHAR_OPERATORS are emitted as their own tokens; whitespace
    separates everything else. An unterminated string yields its partial
    contents as a final token.
    """
    tokens: List[str] = []
    buffer = ""
    inside_quotes = False
    length = len(string)
    pos = 0

    def flush():
        # Emit the pending lexeme, if any, and reset the buffer.
        nonlocal buffer
        if buffer != "":
            tokens.append(buffer)
            buffer = ""

    while pos < length:
        char = string[pos]
        if inside_quotes:
            if char == '"':
                inside_quotes = False
                # A quoted token is emitted even when empty ("").
                tokens.append(buffer)
                buffer = ""
            else:
                buffer += char
        elif char in SINGLE_CHAR_OPERATORS:
            flush()
            tokens.append(char)
        elif string[pos:pos + 2] in DOUBLE_CHAR_OPERATORS:
            flush()
            tokens.append(string[pos:pos + 2])
            pos += 1
        elif char == '"':
            flush()
            inside_quotes = True
        elif char.isspace():
            flush()
        else:
            buffer += char
        pos += 1
    flush()
    return tokens
def find_matching_close_parenthese(tokens: List[str], start_index: int) -> int:
    """Return the index of the ")" matching the "(" at tokens[start_index].

    Returns len(tokens) when no matching closing parenthese exists.
    """
    nesting = 0
    for idx in range(start_index + 1, len(tokens)):
        token = tokens[idx]
        if token == "(":
            nesting += 1
        elif token == ")":
            if nesting == 0:
                return idx
            nesting -= 1
    return len(tokens)
def condition_evaluator(preproc: Preprocessor, tokens: List[str]) -> bool:
    """Evaluate a stream of condition tokens into a boolean.

    Scans for a top-level operator ("and"/"or"/"not") and recurses on the
    operand slices (Python's short-circuiting provides lazy evaluation);
    strips one layer of enclosing parentheses; otherwise delegates to
    simple_condition_evaluator(). Syntax errors are reported through
    preproc.send_error().
    """
    i = 0
    len_tok = len(tokens)
    while i < len_tok:
        tok = tokens[i]
        if tok == "(":
            j = find_matching_close_parenthese(tokens, i)
            if j == len_tok:
                preproc.send_error("invalid-condition",
                    "invalid condition syntax.\n"
                    "Unmatched \"(\". (missing closing parenthese?)"
                )
            # The entire expression is one parenthesized group: unwrap it.
            if i == 0 and j == len_tok-1:
                return condition_evaluator(preproc, tokens[1:-1])
            # Otherwise skip past the group and keep scanning for operators.
            i = j
        elif tok == ")":
            preproc.send_error("invalid-condition",
                "invalid condition syntax.\n"
                "Unmatched \")\". (missing openning parenthese?)"
            )
        elif tok == "and":
            # uses python lazy evaluation
            return condition_evaluator(preproc, tokens[:i])\
                and condition_evaluator(preproc, tokens[i+1:])
        elif tok == "or":
            # uses python lazy evaluation
            return condition_evaluator(preproc, tokens[:i])\
                or condition_evaluator(preproc, tokens[i+1:])
        elif tok == "not":
            # "not" is only valid at the start of a (sub)expression.
            if i != 0:
                preproc.send_error("invalid-condition",
                    'invalid condition syntax.\n'
                    '"not" must be preceeded by "and", "or" or "("\n'
                    'got "{} not"'.format(tokens[i-1])
                )
            return not condition_evaluator(preproc, tokens[1:])
        i += 1
    # No top-level operator found: must be a simple condition.
    return simple_condition_evaluator(preproc, tokens)
def simple_condition_evaluator(preproc: Preprocessor, tokens: List[str]) -> bool:
    """Evaluate a string of tokens into a boolean.

    Assumes the token stream doesn't contain "and", "or" and "not".
    Recognized forms, dispatched on token count:
      1 token : truthiness of the string ("false", "0" and "" are False)
      2 tokens: def/ndef <identifier> — is it a known command or block?
      3 tokens: <str> == <str> / <str> != <str>
    Anything else reports an error via preproc.send_error() and yields False.
    """
    len_tok = len(tokens)
    if len_tok == 1:
        return not(tokens[0] in ["false", "0", ""])
    if len_tok == 2:
        if tokens[0] == "def":
            return tokens[1] in preproc.commands or tokens[1] in preproc.blocks
        if tokens[0] == "ndef":
            return not (tokens[1] in preproc.commands or tokens[1] in preproc.blocks)
    if len_tok == 3:
        if tokens[1] == "==":
            return tokens[0] == tokens[2]
        if tokens[1] == "!=":
            return tokens[0] != tokens[2]
    preproc.send_error("invalid-condition",
        "invalid condition syntax.\n"
        "simple conditions are: \n"
        " | true | false | 1 | 0 | <string>\n"
        " | def <identifier> | ndef <identifier>\n"
        " | <str> == <str> | <str> != <str>"
    )
    return False
def condition_eval(preproc: Preprocessor, string: str) -> bool:
    """Lex and evaluate a condition string, returning its truth value.

    String must follow the condition syntax:
      simple_condition =
        | true | false | 1 | 0 | <string>
        | def <identifier> | ndef <identifier>
        | <str> == <str> | <str> != <str>
      condition =
        | <simple_condition> | not <simple_condition>
        | <condition> and <condition>
        | <condition> or <condition>
        | (<condition>)
    """
    return condition_evaluator(preproc, condition_lexer(string))
| 3.71875 | 4 |
sellerhome/urls.py | Aniketh896/BitWork | 2 | 12759156 | <filename>sellerhome/urls.py<gh_stars>1-10
from django.urls import path

from sellerhome import views

# URL routes for the seller-facing pages.
urlpatterns = [
    # App root: seller home view.
    path('', views.seller_view, name="sellerhome"),
]
from bing_image_downloader import downloader

# Scrape up to 1000 Bing image-search results for the query into ./images.
downloader.download("people from drone view", limit=1000, output_dir='images', adult_filter_off=False)

# First one is the query for search
# Second one is the limit of images to download
# Third one is the output directory
# Fourth one is the adult filter off (False here, i.e. the filter stays enabled)
# Additional notes: the first pictures can be irrelevant, but it will be okay —
# let the program keep running.
codegen_sources/preprocessing/tests/tokenization/test_tokenize_java.py | mxl1n/CodeGen | 0 | 12759158 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
from codegen_sources.preprocessing.lang_processors.java_processor import JavaProcessor
from pathlib import Path
processor = JavaProcessor(root_folder=Path(__file__).parents[4].joinpath("tree-sitter"))
TESTS = []
TESTS.append(
(
r"""
public class HelloWorld
{
public void main(String[] args) {
System.out.println("Hello \n World!");
}
}""",
[
"public",
"class",
"HelloWorld",
"{",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'" Hello ▁ \\n ▁ World ! "',
")",
";",
"}",
"}",
],
)
)
TESTS.append(
(
r"""
overload((byte)1);
overload(1L);
overload(1.0f);""",
[
"overload",
"(",
"(",
"byte",
")",
"1",
")",
";",
"overload",
"(",
"1L",
")",
";",
"overload",
"(",
"1.0f",
")",
";",
],
)
)
TESTS.append(
(
r"""Runnable r = ()-> System.out.print("Run method");""",
[
"Runnable",
"r",
"=",
"(",
")",
"->",
"System",
".",
"out",
".",
"print",
"(",
'" Run ▁ method "',
")",
";",
],
)
)
TESTS.append(
(
r"""String s = "Hi I am\nMarie";""",
["String", "s", "=", '" Hi ▁ I ▁ am \\n Marie "', ";"],
)
)
TESTS2 = []
TESTS2.append(
r"""
import java.util.concurrent.TimeUnit;
public class Mensuration{ //mensuration of a child
private int height;
private int weight;
private String child_name;
public Mensuration(int height, int weight, String name):{
this.height = height;
this.weight = weight;
this.child_name = name;
}
public int get_height(){
return height;
}
public int get_weight(){
return weight;
}
public String get_name(){
String s = "Name:\n" + child_name;
return s;
}
}"""
)
TESTS2.append(
r"""
private enum Answer {
YES {
@Override public String toString() {
return "yes";
}
},
NO,
MAYBE
}"""
)
TESTS2.append(
r"""
return new MyClass() {
@Override public void method() {
if (condition()) {
try {
something();
} catch (ProblemException e) {
recover();
}
} else if (otherCondition()) {
somethingElse();
} else {
lastThing();
}
}
};"""
)
TESTS2.append(
r"""
public boolean equals(Object o_) {
if ( o_ == null ) {
return false;
}
if ( o_.getClass() != this.getClass() ) {
return false;
}
Pair<?, ?> o = (Pair<?, ?>) o_;
return x.equals(o.x) && y.equals(o.y);
}
}
"""
)
TESTS3 = []
TESTS3.append(
(
r"""/*
This is the docstring !!
*/
/* ---------- */
public class HelloWorld
{
public void main(String[] args) {
System.out.println("Hello \n World!");
}
}""",
[
"/* STRNEWLINE This ▁ is ▁ the ▁ docstring ▁ ! ! STRNEWLINE */",
"public",
"class",
"HelloWorld",
"{",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'" Hello ▁ \\n ▁ World ! "',
")",
";",
"}",
"}",
],
)
)
TESTS3.append(
(
r"""
overload((byte)1);
// this is my comfff
// ----- ***
overload(1L); // this is my comfff
overload(1.0f);""",
[
"overload",
"(",
"(",
"byte",
")",
"1",
")",
";",
"// ▁ this ▁ is ▁ my ▁ comfff ENDCOM",
"overload",
"(",
"1L",
")",
";",
"// ▁ this ▁ is ▁ my ▁ comfff ENDCOM",
"overload",
"(",
"1.0f",
")",
";",
],
)
)
TESTS_TOKENIZE_DETOKENIZE_STRING = [
r"""public int read ( ) throws IOException {
int current = super . read ( ) ;
if ( current == '\r' || ( current == '\n' && lastChar != '\r' ) ) {
lineCounter ++ ;
}
lastChar = current ;
return lastChar ;
}""",
r"""public int curly_brackets ( ) throws IOException {
System . out . println ( "This } is the output" ) ;
System . out . println ( "This {} is the output" ) ;
System . out . println ( '}' ) ;
}""",
r"""public int commas ( ) throws IOException {
System . out . println ( "This ; is the output" ) ;
System . out . println ( "This , is the output" ) ;
System . out . println ( ';' ) ;
System . out . println ( ',' ) ;
}""",
r"""public void inException ( ) {
throw new IllegalArgumentException ( "Type \'" + typeToEvaluate + "\' is not a Class, " + "ParameterizedType, GenericArrayType or TypeVariable. Can't extract type." ) ;
}
""",
]
TESTS_DONT_PROCESS_STRINGS = [
(
r"""
public class HelloWorld
{
// This is a comment
public void main(String[] args) {
System.out.println("Hello \n World!");
}
}""",
[
"public",
"class",
"HelloWorld",
"{",
"// This is a comment ENDCOM",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'"Hello \\n World!"',
")",
";",
"}",
"}",
],
),
(
r"""
public class HelloEarth
{
/* This is a
multiline
comment */
public void main(String[] args) {
System.out.println("Hello \nEarth!");
}
}""",
[
"public",
"class",
"HelloEarth",
"{",
"/* This is a\\n multiline\\n comment */",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'"Hello \\nEarth!"',
")",
";",
"}",
"}",
],
),
]
TESTS_BACK_R_CHAR = [
(
"""
public class HelloWorld
{\r
public void main(String[] args) {
System.out.println("Hello \rWorld!");
}
}""",
[
"public",
"class",
"HelloWorld",
"{",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'"Hello World!"',
")",
";",
"}",
"}",
],
)
]
TESTS_IMPORTS = [
(
(
r"""
import java.lang.*;
import javafx.util.Pair;
public class HelloWorld
{
public void main(String[] args) {
System.out.println("Hello \n World!");
}
}""",
[
"import",
"java",
".",
"lang",
".",
"*",
";",
"import",
"javafx",
".",
"util",
".",
"Pair",
";",
"public",
"class",
"HelloWorld",
"{",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'" Hello ▁ \\n ▁ World ! "',
")",
";",
"}",
"}",
],
)
)
]
def test_java_tokenizer_discarding_comments():
    """Tokenizing TESTS inputs (comments discarded) must match expectations."""
    for i, (x, y) in enumerate(TESTS):
        y_ = processor.tokenize_code(x)
        if y_ != y:
            # Index of the last mismatching token pair; -1 when all compared
            # pairs match (zip stops at the shorter stream, so a pure length
            # difference reports -1).
            line_diff = [
                j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
            ]
            line_diff = line_diff[-1] if len(line_diff) > 0 else -1
            raise Exception(
                f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
            )


def test_tokenize_imports():
    """Java import statements must tokenize to the expected token stream."""
    for i, (x, y) in enumerate(TESTS_IMPORTS):
        y_ = processor.tokenize_code(x)
        if y_ != y:
            line_diff = [
                j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
            ]
            line_diff = line_diff[-1] if len(line_diff) > 0 else -1
            raise Exception(
                f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
            )
def test_java_detokenizer_discarding_comments():
    """Tokenize -> detokenize -> re-tokenize (comments discarded) is stable.

    BUG FIX: the original compared ``tokens != tokens`` (always False), so the
    check could never fire and the test was vacuous. Compare against the
    re-tokenized stream, mirroring test_java_detokenizer_keeping_comments.
    """
    for i, x in enumerate([x[0] for x in TESTS] + [x[0] for x in TESTS3] + TESTS2):
        tokens = processor.tokenize_code(x)
        x_ = processor.detokenize_code(tokens)
        tokens_ = processor.tokenize_code(x_)
        if tokens != tokens_:
            line_diff = [
                j
                for j, (line, line_) in enumerate(zip(tokens, tokens_))
                if line != line_
            ]
            raise Exception(
                f"Difference at {line_diff}\n========== Original:\n{x}\n========== Tokenized {tokens} \n Detokenized:\n{x_} \n Retokenized {tokens_}"
            )
def test_java_tokenizer_keeping_comments():
    """Tokenizing TESTS3 inputs with keep_comments=True must match expectations."""
    for i, (x, y) in enumerate(TESTS3):
        y_ = processor.tokenize_code(x, keep_comments=True)
        if y_ != y:
            # Last mismatching token index, or -1 when only lengths differ.
            line_diff = [
                j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
            ]
            line_diff = line_diff[-1] if len(line_diff) > 0 else -1
            raise Exception(
                f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
            )


def test_dont_process_strings():
    """With process_strings=False, string literals pass through unmodified."""
    for i, (x, y) in enumerate(TESTS_DONT_PROCESS_STRINGS):
        y_ = processor.tokenize_code(x, keep_comments=True, process_strings=False)
        print(y_)
        if y_ != y:
            line_diff = [
                j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
            ]
            line_diff = line_diff[-1] if len(line_diff) > 0 else -1
            raise Exception(
                f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
            )


def test_backr_chars():
    """Carriage-return characters must be normalized away by the tokenizer."""
    for i, (x, y) in enumerate(TESTS_BACK_R_CHAR):
        y_ = processor.tokenize_code(x, keep_comments=True, process_strings=False)
        print(y_)
        if y_ != y:
            line_diff = [
                j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
            ]
            line_diff = line_diff[-1] if len(line_diff) > 0 else -1
            raise Exception(
                f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
            )
def test_java_detokenizer_keeping_comments():
    """Tokenize -> detokenize -> re-tokenize (keep_comments=True) is stable."""
    for i, x in enumerate([x[0] for x in TESTS] + [x[0] for x in TESTS3] + TESTS2):
        tokens = processor.tokenize_code(x, keep_comments=True)
        x_ = processor.detokenize_code(tokens)
        tokens_ = processor.tokenize_code(x_, keep_comments=True)
        if tokens != tokens_:
            line_diff = [
                j
                for j, (line, line_) in enumerate(zip(tokens, tokens_))
                if line != line_
            ]
            raise Exception(
                f"Difference at {line_diff}\n========== Original:\n{x}\n========== Tokenized {tokens} \n Detokenized:\n{x_} \n Retokenized {tokens_}"
            )


def test_tokenize_detokenize():
    """Detokenizing already-tokenized source must reproduce it exactly."""
    test_detokenize_invertible(TESTS_TOKENIZE_DETOKENIZE_STRING)


@pytest.mark.skip("Helper function")
def test_detokenize_invertible(test_examples):
    """Helper: assert detokenize(tokenize(x)) round-trips to x for each example."""
    for i, x in enumerate(test_examples):
        x_ = processor.detokenize_code(processor.tokenize_code(x, keep_comments=True))
        if x_.strip() != x.strip():
            raise Exception(
                f"Expected:\n==========\n{x.strip()}\nbut found:\n==========\n{x_.strip()}"
            )
| 2.390625 | 2 |
src/generate_artificial.py | ozansener/egocentric-object | 0 | 12759159 | <gh_stars>0
from artificial_mnist import ArtificialMnist

# Generate an MNIST-sized artificial dataset: 60k train / 10k test samples.
data_generator = ArtificialMnist()
data_generator.get_n_random_samples(60000, './MNIST_a/train_data/')
data_generator.get_n_random_samples(10000, './MNIST_a/test_data/')
| 1.71875 | 2 |
src/simmer/make_triceratops_contrasts.py | arjunsavel/SImMer | 0 | 12759160 | """
Script to find all contrast curve .csv files,
read them, and re-write them in the TRICERATOPS
contrast folder using the required data format.
"""
# CDD
# Created: 5/28/22
# Updated: 5/28/22
import numpy as np
import pandas as pd
import os as os
import glob as glob
# Defaults
verbose = False

# Files and directories: destination folder for TRICERATOPS-format curves.
tridir = '/Users/courtney/Documents/data/toi_paper_data/contrast_curves_for_triceratops/'

# Get list of final images (every reduced contrast-curve CSV).
namestr = '/Users/courtney/Documents/data/shaneAO/*/reduced*/*/*/contrast_curve.csv'
flist = glob.glob(namestr)
print('Files: ', len(flist))

# Loop through these files!
counter = 0
for ff in np.arange(len(flist)):
    file = flist[ff]

    # Use current filename to set final filename.
    parts = file.split('/')
    filt = parts[-2]  # filter is second to last part of filename
    tic = parts[-3]  # TIC is third to last part of filename
    night = parts[-4].split('reduced_')[1]  # reduced_[NIGHT] is fourth to last part of filename

    # Don't include Kepler or K2 targets.
    if tic[0] == 'K':  # Catch Kepler or K2 prefixes
        continue
    if tic[0] == 'E':  # catch EPIC prefixes
        continue

    # Remove T or TIC prefix.
    if 'TIC' in tic:
        if verbose:
            print('TIC name: ', tic)
        tic = tic.split('TIC')[1]
        if verbose:
            print('renamed as : ', tic)
    # NOTE(review): this matches a 'T' anywhere in the name, not just a
    # leading prefix — assumed safe because remaining names are numeric;
    # confirm against the directory naming scheme.
    if 'T' in tic:
        if verbose:
            print('T name: ', tic)
        tic = tic.split('T')[1]
        if verbose:
            print('renamed as : ', tic)

    # Recast to drop leading zeros and spaces.
    tic = str(int(tic))

    # Set output file.
    outname = tic+'_'+filt+'_'+night+'_contrast_curve.csv'

    # Read in the contrast curve.
    c = pd.read_csv(file)
    # Drop the error column: TRICERATOPS expects only separation and delta-mag.
    c = c[['arcsec','dmag']]
    # Don't keep any rows with missing values.
    c = c.dropna()
    # Write TRICERATOPS-friendly output file (no header, no index).
    c.to_csv(tridir+outname,index=False,header=False)
    counter += 1

print('Saved ', counter, ' contrast curves in TRICERATOPS format.')
| 2.953125 | 3 |
setup.py | Sobolev5/django-simple-profiler | 2 | 12759161 | <gh_stars>1-10
import os
import sys
import setuptools
__author__ = '<NAME> <<EMAIL>>'
__version__ = '0.3.2'
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
    name='django-simple-profiler',
    version=__version__,
    # Runtime deps: colored terminal output, table rendering, process stats.
    install_requires=['colorclass', 'terminaltables', 'psutil'],
    author='<NAME>',
    url="https://github.com/Sobolev5/django-simple-profiler",
    author_email='<EMAIL>',
    description='Django simple profiler it is a useful tool for Django framework that allows you to profile your views.',
    long_description=long_description,
    long_description_content_type="text/markdown",
    # Ship every discovered package, excluding VCS metadata.
    packages=setuptools.find_packages(exclude=[".git", ".gitignore"]),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
src/login/bklogin/ee_official_login/oauth/google/views.py | Canway-shiisa/bk-user | 0 | 12759162 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS
Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import urllib.parse
from django.contrib.auth import authenticate
from .utils import gen_oauth_login_url
from bklogin.bkauth import actions
from bklogin.bkauth.constants import REDIRECT_FIELD_NAME
from bklogin.common.log import logger
def login(request):
    """Handle Google OAuth login.

    Two entry modes:
      * No ``code`` query param (or the request came from logout): redirect
        the user to Google's OAuth consent page.
      * ``code`` present: this is Google's callback; verify ``state``,
        exchange the code for a user, and finish the BlueKing login flow.
    """
    # The URL the user actually requested (account.REDIRECT_FIELD_NAME == 'c_url').
    redirect_to = request.GET.get(REDIRECT_FIELD_NAME, "")
    # The BlueKing application the user was visiting.
    app_id = request.GET.get("app_id", "")
    # Whether this request came from a logout redirect.
    is_from_logout = bool(request.GET.get("is_from_logout") or 0)
    # Google appends the `code` parameter to the callback after login.
    code = request.GET.get("code")
    # No `code` yet (or coming from logout): redirect the user to Google login.
    if code is None or is_from_logout:
        # Build the Google OAuth login URL, carrying app_id/redirect in `state`.
        google_oauth_login_url, state = gen_oauth_login_url({"app_id": app_id, REDIRECT_FIELD_NAME: redirect_to})
        # Store `state` in the session — OAuth2's CSRF-protection token.
        request.session["state"] = state
        # Delegate to the standard BlueKing login-redirect handler.
        response = actions.login_redirect_response(request, google_oauth_login_url, is_from_logout)
        logger.debug(
            "custom_login:oauth.google code is None or is_from_logout! code=%s, is_from_logout=%s",
            code,
            is_from_logout,
        )
        return response
    # A `code` (enterprise auth ticket) is present: this is the OAuth callback,
    # or the ticket is still valid.
    # OAuth2-specific handling: validate `state` to defend against CSRF.
    state = request.GET.get("state", "")
    state_dict = dict(urllib.parse.parse_qsl(state))
    app_id = state_dict.get("app_id")
    redirect_to = state_dict.get(REDIRECT_FIELD_NAME, "")
    state_from_session = request.session.get("state")
    # Reject mismatched state (possible CSRF attack).
    if state != state_from_session:
        logger.debug(
            "custom_login:oauth.google state != state_from_session [state=%s, state_from_session=%s]",
            state,
            state_from_session,
        )
        return actions.login_failed_response(request, redirect_to, app_id)
    # Exchange the authorization code for an authenticated user.
    user = authenticate(code=code)
    if user is None:
        logger.debug("custom_login:oauth.google user is None, will redirect_to=%s", redirect_to)
        # Delegate to the standard BlueKing login-failure handler.
        return actions.login_failed_response(request, redirect_to, app_id)
    # Success: finish via the BlueKing login-success handler and respond.
    logger.debug("custom_login:oauth.google login success, will redirect_to=%s", redirect_to)
    return actions.login_success_response(request, user, redirect_to, app_id)
| 2.015625 | 2 |
itracker/common/network/branched_autoenc_network.py | djpetti/isl-gazecapture | 12 | 12759163 | <reponame>djpetti/isl-gazecapture<filename>itracker/common/network/branched_autoenc_network.py<gh_stars>10-100
import cPickle as pickle
import logging
import tensorflow as tf
from .. import utils
from network import Network
import autoencoder
logger = logging.getLogger(__name__)
layers = tf.keras.layers
applications = tf.keras.applications
class BranchedAutoencNetwork(Network):
""" Extension of LargeVggNetwork that uses a branched architecture based on
the appearance of a subject's eye. """
def __init__(self, *args, **kwargs):
    """ Takes the same parameters as Network. Additionally, it requires the
    following parameters:
    Args:
      autoenc_model_file: The saved weights to use for the autoencoder.
      cluster_data: The file containing the saved clustering data. """
    # Both kwargs are mandatory; fail fast before any graph construction.
    self.__autoenc_file = kwargs.get("autoenc_model_file")
    if not self.__autoenc_file:
        raise ValueError("'autoenc_model_file' arg is required.")
    self.__cluster_file = kwargs.get("cluster_data")
    if not self.__cluster_file:
        raise ValueError("'cluster_data' arg is required.")
    # Loaded lazily by __load_autoencoder() on first use.
    self.__autoencoder = None
    self.__clusters = None
    super(BranchedAutoencNetwork, self).__init__(*args, **kwargs)
def __load_autoencoder(self):
    """ Loads the autoencoder model, its saved weights, and the saved
    cluster data, then freezes the model. """
    net = autoencoder.Autoencoder(self._input_shape, eye_shape=self._eye_shape)
    self.__autoencoder = net.build()
    # Load the saved weights.
    logger.info("Loading autoencoder weights from %s." % (self.__autoenc_file))
    self.__autoencoder.load_weights(self.__autoenc_file)
    # Load the cluster data (pickled cluster centers).
    # NOTE: file() is the Python-2 builtin for open() — this module targets
    # Python 2 (it also imports cPickle).
    logger.info("Loading clusters from %s." % (self.__cluster_file))
    cluster_file = file(self.__cluster_file, "rb")
    self.__clusters = pickle.load(cluster_file)
    # Freeze the model so its weights are not updated during training.
    utils.freeze_all(self.__autoencoder)
def __compute_groups(self, encodings):
    """ Computes the groups given encodings based on cluster data.
    Args:
      encodings: The encodings we want to group.
    Returns:
      A vector with the same first dimension as encodings in which each element
      signifies the chosen group for the corresponding encoding. """
    def distance(center):
        """ Computes the euclidean distances between a set of encodings and a
        cluster center.
        Args:
          center: The cluster center.
        Returns:
          A vector of the corresponding distances. """
        return tf.norm(encodings - center, axis=1)
    # Compute distance to all the cluster centroids.
    center_distances = []
    for center in self.__clusters:
        center_dist = distance(center)
        center_distances.append(center_dist)
    # Figure out the closest one for each encoding: the group index is the
    # index of the nearest centroid.
    distances = tf.stack(center_distances, axis=0)
    closest = tf.argmin(distances, axis=0)
    return closest
def __get_appearance_groups(self, to_branch):
    """ Determines which group to put the image in based on the appearance.
    Args:
      to_branch: A vector corresponding to the left eye inputs that we want
                 to separate into groups.
    Returns:
      Tensor containing images belonging to the first group, and a tensor
      containing images belonging to the second group. """
    if not self.__autoencoder:
        # We need to load the autoencoder model if we haven't done so.
        self.__load_autoencoder()
    # Compute encodings for each input (only the encoding output is used).
    _, _, encodings = self.__autoencoder([self._left_eye_node,
                                          self._right_eye_node,
                                          self._face_node,
                                          self._grid_input])
    # Compute the groups based on the encodings.
    groups = self.__compute_groups(encodings)
    # Create boolean masks for each group (group 0 vs. everything else).
    zeros = tf.zeros_like(groups)
    first_mask = tf.equal(groups, zeros)
    second_mask = tf.logical_not(first_mask)
    # Split the batch into the two groups.
    first_group = tf.boolean_mask(to_branch, first_mask)
    second_group = tf.boolean_mask(to_branch, second_mask)
    return [first_group, second_group]
    def _build_custom(self):
        """ Builds the branched gaze-estimation graph.

        The network combines a frozen VGG19 face branch, a shared-weight
        two-eye branch, and a face-grid branch; the fused features are then
        split into two appearance-based branches, each with its own output
        head, and the two heads are concatenated along the batch axis.

        Returns:
          The final concatenated output tensor ("dots"). """
        # Layers created with trainable=False stay frozen during fine-tuning.
        trainable = not self._fine_tune
        # Get pretrained VGG model for use as a base.
        vgg = applications.vgg19.VGG19(include_top=False,
                                       input_shape=self._input_shape)
        vgg_out = vgg(self._face_node)
        # Freeze all layers in VGG.
        for layer in vgg.layers:
            layer.trainable = False
        # Shared eye layers: one set of layer objects applied to both eyes so
        # the two eye stacks share weights.
        conv_e1 = layers.Conv2D(144, (11, 11), strides=(4, 4), activation="relu",
                                kernel_regularizer=self._l2, trainable=trainable)
        pool_e1 = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2))
        norm_e1 = layers.BatchNormalization(trainable=trainable)
        pad_e2 = layers.ZeroPadding2D(padding=(2, 2))
        conv_e2 = layers.Conv2D(384, (5, 5), activation="relu",
                                kernel_regularizer=self._l2, trainable=trainable)
        pool_e2 = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2))
        norm_e2 = layers.BatchNormalization(trainable=trainable)
        pad_e3 = layers.ZeroPadding2D(padding=(1, 1))
        conv_e3 = layers.Conv2D(576, (3, 3), activation="relu",
                                kernel_regularizer=self._l2, trainable=trainable)
        conv_e4 = layers.Conv2D(64, (1, 1), activation="relu",
                                kernel_regularizer=self._l2, trainable=trainable)
        flatten_e4 = layers.Flatten()
        # Left eye stack.
        leye_conv_e1 = conv_e1(self._left_eye_node)
        leye_pool_e1 = pool_e1(leye_conv_e1)
        leye_norm_e1 = norm_e1(leye_pool_e1)
        leye_pad_e2 = pad_e2(leye_norm_e1)
        leye_conv_e2 = conv_e2(leye_pad_e2)
        leye_pool_e2 = pool_e2(leye_conv_e2)
        leye_norm_e2 = norm_e2(leye_pool_e2)
        leye_pad_e3 = pad_e3(leye_norm_e2)
        leye_conv_e3 = conv_e3(leye_pad_e3)
        leye_conv_e4 = conv_e4(leye_conv_e3)
        leye_flatten_e4 = flatten_e4(leye_conv_e4)
        # Right eye stack (same layer objects, so shared weights).
        reye_conv_e1 = conv_e1(self._right_eye_node)
        reye_pool_e1 = pool_e1(reye_conv_e1)
        reye_norm_e1 = norm_e1(reye_pool_e1)
        reye_pad_e2 = pad_e2(reye_norm_e1)
        reye_conv_e2 = conv_e2(reye_pad_e2)
        reye_pool_e2 = pool_e2(reye_conv_e2)
        reye_norm_e2 = norm_e2(reye_pool_e2)
        reye_pad_e3 = pad_e3(reye_norm_e2)
        reye_conv_e3 = conv_e3(reye_pad_e3)
        reye_conv_e4 = conv_e4(reye_conv_e3)
        reye_flatten_e4 = flatten_e4(reye_conv_e4)
        # Concatenate eyes and put through a shared FC layer.
        eye_combined = layers.Concatenate()([reye_flatten_e4, leye_flatten_e4])
        eye_drop = layers.Dropout(0.5)(eye_combined)
        fc_e1 = layers.Dense(128, activation="relu",
                             kernel_regularizer=self._l2)(eye_drop)
        # Face layers on top of the frozen VGG features.
        face_flatten_f4 = layers.Flatten()(vgg_out)
        face_drop = layers.Dropout(0.5)(face_flatten_f4)
        face_fc1 = layers.Dense(128, activation="relu",
                                kernel_regularizer=self._l2,
                                trainable=trainable)(face_drop)
        face_fc2 = layers.Dense(64, activation="relu",
                                kernel_regularizer=self._l2)(face_fc1)
        # Face grid.
        grid_flat = layers.Flatten()(self._grid_input)
        grid_fc1 = layers.Dense(256, activation="relu",
                                kernel_regularizer=self._l2,
                                trainable=trainable)(grid_flat)
        grid_fc2 = layers.Dense(128, activation="relu",
                                kernel_regularizer=self._l2,
                                trainable=trainable)(grid_fc1)
        # Create a special layer for computing the appearance-based branching.
        branch_layer = layers.Lambda(self.__get_appearance_groups)
        grid_fc2_b1, grid_fc2_b2 = branch_layer(grid_fc2)
        # We have to use the branch layer on everything we want to concatenate so
        # they have compatible input shapes.
        fc_e1_b1, fc_e1_b2 = branch_layer(fc_e1)
        face_fc2_b1, face_fc2_b2 = branch_layer(face_fc2)
        # Concat everything and put through final FF layers (first branch).
        all_concat_b1 = layers.Concatenate()([fc_e1_b1, face_fc2_b1, grid_fc2_b1])
        all_fc1_b1 = layers.Dense(128, activation="relu",
                                  kernel_regularizer=self._l2)(all_concat_b1)
        all_fc2_b1 = layers.Dense(2, kernel_regularizer=self._l2)(all_fc1_b1)
        # Same for the second branch.
        all_concat_b2 = layers.Concatenate()([fc_e1_b2, face_fc2_b2, grid_fc2_b2])
        all_fc1_b2 = layers.Dense(128, activation="relu",
                                  kernel_regularizer=self._l2)(all_concat_b2)
        all_fc2_b2 = layers.Dense(2, kernel_regularizer=self._l2)(all_fc1_b2)
        # For training, we can now concatenate the output from the two branches back
        # together along the batch axis. Note this reorders samples relative to
        # the input; prepare_labels() applies the same reordering to labels.
        all_fc2 = layers.Concatenate(axis=0, name="dots")([all_fc2_b1, all_fc2_b2])
        return all_fc2
def prepare_labels(self, labels):
# Since we reorder the output from our model relative to the input, we need
# our labels to be organized similarly.
labels_group1, labels_group2 = \
self.__get_appearance_groups(labels["dots"])
dots = tf.concat([labels_group1, labels_group2], axis=0)
return {"dots": dots}
| 2.25 | 2 |
config_template.py | haizstm/Komet-CL | 1 | 12759164 | <reponame>haizstm/Komet-CL
import datetime

# Template configuration for the bot. Copy this file to the real config
# module and fill in the values marked as placeholders below.

# Basic bot config, insert your token here, update description if you want.
prefixes = [".", "!"]
token = "token-goes-here"
bot_description = "Robocop-NG, the moderation bot of ReSwitched."

# If you forked robocop-ng, put your repo here.
source_url = "https://github.com/reswitched/robocop-ng"
rules_url = "https://reswitched.team/discord/#rules"

# The bot description to be used in .robocop embed.
embed_desc = "Robocop-NG is developed by [Ave](https://github.com/aveao)"\
             " and [tomGER](https://github.com/tumGER), and is a rewrite "\
             "of Robocop.\nRobocop is based on Kurisu by 916253 and ihaveamac."

# Minimum account age required to join the guild.
# If user's account creation is shorter than the time delta given here
# then user will be kicked and informed.
min_age = datetime.timedelta(minutes=15)

# The bot will only work in these guilds.
guild_whitelist = [
    269333940928512010  # ReSwitched discord
]

# Role that gets chosen by default by .approve and .revoke if none was specified.
default_named_role = "community"

# Named roles to be used with .approve and .revoke.
# Example: .approve User hacker
named_roles = {
    "community": 420010997877833731,
    "hacker": 364508795038072833,
    "participant": 434353085926866946
}

# The bot manager and staff roles.
# Bot manager can run eval, exit and other destructive commands.
# Staff can run administrative commands.
bot_manager_role_id = 466447265863696394  # Bot management role in ReSwitched
staff_role_ids = [364647829248933888,  # Team role in ReSwitched
                  360138431524765707,  # Mod role in ReSwitched
                  466447265863696394,  # Bot management role in ReSwitched
                  360138163156549632,  # Admin role in ReSwitched
                  287289529986187266]  # Wizard role in ReSwitched

# Various log channels used to log bot and guild's activity.
# You can use same channel for multiple log types.
# Spylog channel logs suspicious messages or messages by members under watch.
# Invites created with .invite will direct to the welcome channel.
log_channel = 290958160414375946  # server-logs in ReSwitched
botlog_channel = 529070282409771048  # bot-logs channel in ReSwitched
modlog_channel = 542114169244221452  # mod-logs channel in ReSwitched
spylog_channel = 548304839294189579  # spy channel in ReSwitched
welcome_channel = 326416669058662401  # newcomers channel in ReSwitched
# NOTE(review): the following IDs are commented as AtlasNX channels while the
# rest of the config targets ReSwitched — verify these are the intended IDs.
rules_channel = 567353505367719947  # rules channel in AtlasNX
support_faq_channel = 628596855470555167  # support-faq channel in AtlasNX
list_files_channel = 680792242188845079  # Used for uploading raw text files for list editing.

# These channel entries are used to determine which roles will be given
# access when we unmute on them.
general_channels = [420029476634886144,
                    414949821003202562,
                    383368936466546698,
                    343244421044633602,
                    491316901692178432,
                    539212260350885908]  # Channels everyone can access
community_channels = [269333940928512010,
                      438839875970662400,
                      404722395845361668,
                      435687501068501002,
                      286612533757083648]  # Channels requiring community role

# Channels that are lists that are controlled by the lists cog.
list_channels = [680129341958783086,
                 680129375705890839]

# Controls which roles are blocked during lockdown.
lockdown_configs = {
    # Used as a default value for channels without a config.
    "default": {
        "channels": general_channels,
        "roles": [named_roles["participant"]]
    },
    "community": {
        "channels": community_channels,
        "roles": [named_roles["community"], named_roles["hacker"]]
    }
}

# The maximum amount of messages that can be purged before a sanity check is put in place.
purge_warning_limit = 50

# Mute role is applied to users when they're muted.
# As we no longer have mute role on ReSwitched, I set it to 0 here.
mute_role = 0  # Mute role in ReSwitched

# Channels that will be cleaned every minute/hour.
minutely_clean_channels = []
hourly_clean_channels = []

# Edited and deleted messages in these channels will be logged.
spy_channels = general_channels

# Channels and roles where users can pin messages.
allowed_pin_channels = []
allowed_pin_roles = []

# Used for the pinboard. Leave empty if you don't wish for a gist pinboard.
github_oauth_token = ""
| 1.804688 | 2 |
gluon/gluoncv2/models/shakedropresnet_cifar.py | naviocean/imgclsmob | 2,649 | 12759165 | """
ShakeDrop-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375.
"""
__all__ = ['CIFARShakeDropResNet', 'shakedropresnet20_cifar10', 'shakedropresnet20_cifar100', 'shakedropresnet20_svhn']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ShakeDrop(mx.autograd.Function):
    """
    ShakeDrop function (custom autograd op).

    During training the residual branch is multiplied by `b + alpha - b*alpha`
    where `b` is a Bernoulli "life" gate and `alpha` is a per-sample uniform
    draw in [-1, 1); at inference the branch is scaled by `p`, the expected
    value of that gate.

    Parameters:
    ----------
    p : float
        ShakeDrop specific probability (of life) for Bernoulli random variable.
    """
    def __init__(self, p):
        super(ShakeDrop, self).__init__()
        self.p = p

    def forward(self, x):
        if mx.autograd.is_training():
            # b == 1 keeps the branch untouched; b == 0 replaces it with the
            # alpha-scaled version.
            b = np.random.binomial(n=1, p=self.p)
            # One random alpha per sample: shape (N, 1, 1, 1), broadcast over
            # channels and spatial dims.
            alpha = mx.nd.random.uniform_like(x.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=-1.0, high=1.0)
            y = mx.nd.broadcast_mul(b + alpha - b * alpha, x)
            # Keep the gate for the backward pass.
            self.save_for_backward(b)
        else:
            # Inference path: deterministic expected scaling.
            y = self.p * x
        return y

    def backward(self, dy):
        b, = self.saved_tensors
        # Backward uses an independent beta in [0, 1) ("shake" on the
        # backward pass as well), gated by the same b as the forward pass.
        beta = mx.nd.random.uniform_like(dy.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=0.0, high=1.0)
        return mx.nd.broadcast_mul(b + beta - b * beta, dy)
class ShakeDropResUnit(HybridBlock):
    """
    ShakeDrop-ResNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_prob : float
        Residual branch life probability.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 life_prob,
                 **kwargs):
        super(ShakeDropResUnit, self).__init__(**kwargs)
        self.life_prob = life_prob
        # The identity path needs a projection when shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        body_class = ResBottleneck if bottleneck else ResBlock

        with self.name_scope():
            self.body = body_class(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
            # self.shake_drop = ShakeDrop(self.life_prob)

    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # A fresh ShakeDrop Function is constructed per forward call
        # (mx.autograd.Function instances are not reusable across calls,
        # hence the stored-instance variant is left commented out).
        x = ShakeDrop(self.life_prob)(x) + identity
        # x = self.shake_drop(x) + identity
        x = self.activ(x)
        return x
class CIFARShakeDropResNet(HybridBlock):
    """
    ShakeDrop-ResNet model for CIFAR from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_probs : list of float
        Residual branch life probability for each unit (flattened over all
        stages, in unit order).
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 life_probs,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARShakeDropResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            # k indexes the flat life_probs list across all stages/units.
            k = 0
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage but the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ShakeDropResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            life_prob=life_probs[k]))
                        in_channels = out_channels
                        k += 1
                self.features.add(stage)
            # Global average pooling over the final 8x8 feature map.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_shakedropresnet_cifar(classes,
                              blocks,
                              bottleneck,
                              model_name=None,
                              pretrained=False,
                              ctx=cpu(),
                              root=os.path.join("~", ".mxnet", "models"),
                              **kwargs):
    """
    Build a ShakeDrop-ResNet for CIFAR with the requested depth.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks (network depth).
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])
    # Depth must be 9n + 2 for bottleneck units, 6n + 2 for basic units;
    # the network has three stages of equal depth.
    units_per_stage_div = 9 if bottleneck else 6
    assert ((blocks - 2) % units_per_stage_div == 0)
    layers = [(blocks - 2) // units_per_stage_div] * 3

    init_block_channels = 16
    channels = [[width] * depth
                for (width, depth) in zip([16, 32, 64], layers)]
    if bottleneck:
        channels = [[width * 4 for width in stage] for stage in channels]

    # Life probability decays linearly with depth down to 1 - final_death_prob.
    total_layers = sum(layers)
    final_death_prob = 0.5
    life_probs = [1.0 - float(unit + 1) / float(total_layers) * final_death_prob
                  for unit in range(total_layers)]

    net = CIFARShakeDropResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        life_probs=life_probs,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def shakedropresnet20_cifar10(classes=10, **kwargs):
    """
    Construct the 20-layer ShakeDrop-ResNet for CIFAR-10, from
    'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_shakedropresnet_cifar(classes=classes, blocks=20,
                                     bottleneck=False,
                                     model_name="shakedropresnet20_cifar10",
                                     **kwargs)
def shakedropresnet20_cifar100(classes=100, **kwargs):
    """
    Construct the 20-layer ShakeDrop-ResNet for CIFAR-100, from
    'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_shakedropresnet_cifar(classes=classes, blocks=20,
                                     bottleneck=False,
                                     model_name="shakedropresnet20_cifar100",
                                     **kwargs)
def shakedropresnet20_svhn(classes=10, **kwargs):
    """
    Construct the 20-layer ShakeDrop-ResNet for SVHN, from
    'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_shakedropresnet_cifar(classes=classes, blocks=20,
                                     bottleneck=False,
                                     model_name="shakedropresnet20_svhn",
                                     **kwargs)
def _test():
    """Smoke-test: instantiate each model variant, check parameter counts,
    and run one forward/backward pass on a dummy batch."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, number of classes) pairs to exercise.
    models = [
        (shakedropresnet20_cifar10, 10),
        (shakedropresnet20_cifar100, 100),
        (shakedropresnet20_svhn, 10),
    ]

    for model, classes in models:

        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        # Count only trainable parameters with a known shape.
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts for each variant.
        assert (model != shakedropresnet20_cifar10 or weight_count == 272474)
        assert (model != shakedropresnet20_cifar100 or weight_count == 278324)
        assert (model != shakedropresnet20_svhn or weight_count == 272474)

        x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
        # y = net(x)
        # Record so ShakeDrop takes its training path and backward runs.
        with mx.autograd.record():
            y = net(x)
            y.backward()
        assert (y.shape == (14, classes))


if __name__ == "__main__":
    _test()
| 2.9375 | 3 |
test_project/settings.py | pctSW1/django-url-filter | 0 | 12759166 | # Bare ``settings.py`` for running tests for url_filter
import os

from sqlalchemy import create_engine

DEBUG = True
INTERNAL_IPS = ['127.0.0.1']

# Select the database backend via the USE_POSTGRES environment variable;
# the default is a local SQLite file. The SQLAlchemy engine mirrors the
# Django database so the SQLAlchemy integration can be tested too.
if os.environ.get('USE_POSTGRES') == 'True':
    SQLALCHEMY_ENGINE = create_engine('postgresql://postgres:test@localhost:5432', echo=True)
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql',
            'NAME': 'postgres',
            'USER': 'postgres',
            'PASSWORD': '<PASSWORD>',
            'HOST': '127.0.0.1',
            'PORT': '5432',
        }
    }
else:
    SQLALCHEMY_ENGINE = create_engine('sqlite:///url_filter.sqlite', echo=True)
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'url_filter.sqlite'
        }
    }

INSTALLED_APPS = (
    'test_project.generic',
    'test_project.many_to_many',
    'test_project.many_to_one',
    'test_project.one_to_one',
    'url_filter',
    'debug_toolbar',
    'debug_toolbar_alchemy',
    'django_extensions',
    'rest_framework',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
)

STATIC_URL = '/static/'
# Test-only secret key; never use in production.
SECRET_KEY = 'foo'

MIDDLEWARE = [
    'test_project.middleware.SQLAlchemySessionMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
    },
]

ROOT_URLCONF = 'test_project.urls'

REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': [
        'url_filter.integrations.drf.DjangoFilterBackend',
    ],
}

DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar_alchemy.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
]

DEBUG_TOOLBAR_CONFIG = {
    'ALCHEMY_DB_ALIASES': 'test_project.middleware.dbs',
}
| 2.015625 | 2 |
notebooks/_solutions/90_package_numpy10.py | jorisvandenbossche/DS-python-geospatial | 58 | 12759167 | <gh_stars>10-100
# Create the histogram plots: side-by-side log-scaled histograms of the raw
# and filtered band-4 arrays (b4_data / b4_data_f are defined earlier in the
# notebook), sharing the y-axis for direct comparison.
fig, (ax0, ax1) = plt.subplots(1, 2, sharey=True)
ax0.hist(b4_data.flatten(), bins=30, log=True);
ax1.hist(b4_data_f.flatten(), bins=30, log=True);
blog/models.py | mkimartinez/Lyfey | 0 | 12759168 | from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
from django.urls import reverse
import django.db.models.deletion
def upload_location(instance, filename):
    """Return the upload path "<object id>/<filename>" for file fields."""
    return "{}/{}".format(instance.id, filename)
# //blog categories
class Category(models.Model):
    """A blog post category, addressed by a unique slug."""
    name = models.CharField(max_length=100)
    slug = models.SlugField(max_length=100,unique=True)

    class Meta:
        ordering = ('name',)
        verbose_name ='category'
        verbose_name_plural ='categories'

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # URL of the category's post listing page.
        return reverse('blog:post_by_category',args=[self.slug])
class Post(models.Model):
    """A blog post authored by a user, with an image and optional attachment."""
    # Author is kept nullable so posts survive user deletion (SET_NULL).
    user = models.ForeignKey(User,default=None,null=True, on_delete=django.db.models.deletion.SET_NULL)
    title = models.CharField(max_length=100)
    body = RichTextUploadingField(blank=True,null=True)
    date_published = models.DateTimeField(auto_now_add=True)
    tags = models.CharField(max_length=100)
    category = models.ForeignKey(Category, null=True, on_delete=django.db.models.deletion.SET_NULL)
    # Stored under "<post id>/<filename>" (see upload_location).
    image = models.ImageField(upload_to=upload_location)
    # Fixed malformed strftime placeholders: "%Y/m/$D/" created literal
    # "m" and "$D" directories; "%Y/%m/%d/" gives year/month/day folders.
    post_file = models.FileField(blank=True,null=True,upload_to="blog/files/%Y/%m/%d/")

    def __str__(self):
        return self.title
class BlogComment(models.Model):
    """A user comment on a post; hidden until approved by a moderator."""
    # Both post and author are nullable so comments survive deletion of
    # either related object (SET_NULL).
    post = models.ForeignKey(Post,default=None,on_delete=django.db.models.deletion.SET_NULL, null=True, related_name='comments')
    commented_by = models.ForeignKey(User,default=None,on_delete=django.db.models.deletion.SET_NULL,null=True)
    date=models.DateTimeField(auto_now_add=True)
    comment= models.TextField()
    approved_comment = models.BooleanField(default=False)

    def approve(self):
        """Mark the comment as approved and persist the change."""
        self.approved_comment = True
        self.save()

    def __str__(self):
        return self.comment
| 2.078125 | 2 |
conflate/parsers/parse_flickr.py | topomancy/gazetteer | 13 | 12759169 | import demjson #flickr geojson is not quite valid json, suffers from trailing commas
import sys
from shapely.geometry import asShape
import codecs
#python parse_flickr.py /path/to/flickrShapeFile.geojson | psql gaztest
#continents , counties, countries, localities, neighbourhoods, regions
def parse_flickr_geojson(flickr_file):
    """Read a Flickr shapes GeoJSON file and print one tab-separated
    gazetteer row per feature to stdout (COPY-format lines for psql).

    Uses demjson because Flickr's GeoJSON is not strictly valid JSON
    (trailing commas). Features without a label are skipped.
    Returns the input path unchanged (callers do not appear to use it).
    """
    json_data = codecs.open(flickr_file, "r", "utf-8").read()
    data = demjson.decode(json_data)
    features = data['features']
    for feature in features:
        woe_id = str(feature['properties']['woe_id'])
        name = feature['properties']['label']
        if not name:
            # Unnamed features are useless in the gazetteer.
            continue
        feature_type = feature['properties']['place_type']
        feature_code = str(feature['properties']['place_type_id'])
        json_geometry = feature['geometry']
        # Dataset release date, i.e. as from
        # http://code.flickr.com/blog/2011/01/08/flickr-shapefiles-public-dataset-2-0/
        updated = "2011-01-08 00:00:00+00"
        # Convert the GeoJSON geometry to WKT via shapely.
        geometry = asShape(json_geometry).wkt
        # 'F' marks the Flickr source in the gazetteer table.
        out_line = ['F', woe_id, name, feature_type, feature_code, updated, geometry ]
        print "\t".join(out_line)
    return flickr_file
if __name__ == '__main__':
    # Usage: python parse_flickr.py /path/to/flickrShapeFile.geojson | psql db
    flickr_file = sys.argv[1]
    # Force UTF-8 output regardless of the terminal/pipe encoding (Python 2
    # would otherwise raise on non-ASCII place names).
    sys.stdout = codecs.getwriter('utf8')(sys.stdout)
    # Emit a single transaction wrapping a COPY ... FROM stdin block so the
    # output can be piped straight into psql.
    print("BEGIN TRANSACTION;");
    print "COPY gazetteer (source, id, name, feature_type, feature_code, updated, geom) FROM stdin;"
    parse_flickr_geojson(flickr_file)
    print("\\.");
    print("COMMIT;");
| 2.59375 | 3 |
laptimize/lap_model.py | uichathurika/laptimize | 0 | 12759170 | import numpy as np
import pandas as pd
import pulp
from laptimize.curve_approximation import CurveApproximator
from laptimize.log import LogFactory
class LAPModel(object):
    """Builds and solves the piecewise-linearly approximated LP problem
    and its branch-and-bound sub problems using PuLP."""

    def __init__(self, name='nlp_problem'):
        self.logger = LogFactory.get_logger()
        # Weight (interpolation) variables, keyed by decision-variable name.
        self.lp_variables = dict()
        # Breakpoint values of every piecewise segment, indexed by segment key.
        self.segment = pd.DataFrame()
        # Objective/constraint function values evaluated at each breakpoint.
        self.curve = pd.DataFrame()
        # Single non-negative slack variable shared by objective and constraints.
        self.lp_slack = pulp.LpVariable.dict('p_%s', ['p1'], lowBound=0)
        self.model = pulp.LpProblem(name, pulp.LpMinimize)
        self.objective_expressions = []
        self.constraint_expression = []

    def initialize(self, segment=pd.DataFrame(), curve=pd.DataFrame(), name='nlp_sub_problem'):
        """
        Reset the model state for a branching sub problem.

        Parameters
        ----------
        segment: pandas data frame
            updated piecewise segment for decision variables
        curve: pandas data frame
            function values of objective and constraints function for each segment values
        name: string
            problem name

        Returns
        -------
        self
        """
        # NOTE(review): the mutable DataFrame defaults are shared across
        # calls; safe only while callers never mutate the default objects.
        self.lp_variables = dict()
        self.segment = segment
        self.curve = curve
        self.model = pulp.LpProblem(name, pulp.LpMinimize)
        self.objective_expressions = []
        self.constraint_expression = []
        return self

    def generate_variable_names(self, no_of_segments, node_name):
        """
        generate weight variables names for approximated lp problem

        Parameters
        ---------
        no_of_segments: int
            no of piecewise linear segment
        node_name: string
            non-linear decision variable name

        Returns
        ------
        variable_names: list
            weight variable names
            ex:[x1_0,x1_1,x1_2]
        """
        variable_names = []
        for i in range(0, no_of_segments):
            variable_names.append("%s_%s" % (node_name, i))
        return variable_names

    def define_weights_for_segment(self, variable_names, name):
        """
        create linear problem weight variables (bounded to [0, 1]) for the
        given piecewise variable names and register them under `name`.

        Parameters
        ---------
        variable_names: list
            piece wise variable list
        name: string
            decision variable name

        Returns
        -------
        dict
            the newly created weight variables (also stored in
            self.lp_variables[name])
        """
        self.lp_variables[name] = pulp.LpVariable.dict('l_%s', variable_names, lowBound=0, upBound=1)
        return self.lp_variables[name]

    def fill_constraint_objective_arrays(self, lp_allocation, constraint):
        """
        build the objective and constraint expression terms for one
        decision variable's weight variables.

        Parameters
        ---------
        lp_allocation: dict
            linear problem weight variables for one decision variable
        constraint: pandas series
            one row of the problem data frame (objective + constraints)

        Returns
        -------
        weights: list
            the weight variables (for the sum-to-one convexity constraint)
        problem_expressions: pandas data frame
            one column of expression terms per objective/constraint index
        """
        try:
            problem_expressions = pd.DataFrame()
            for index in constraint.index:
                constraint_expression = []
                # NOTE(review): `weights` is rebuilt on every index iteration
                # and the value returned is from the final pass; each pass
                # contains the same variables, so the result is equivalent.
                weights = []
                for key in lp_allocation:
                    # Term: weight * f(breakpoint) for this objective/constraint.
                    constraint_expression.append(lp_allocation[key] * self.curve.loc[key][index])
                    weights.append(lp_allocation[key])
                problem_expressions[index] = list(constraint_expression)
            return weights, problem_expressions
        except Exception as err:
            self.logger.info('fill_constraint_objective_arrays method ended with error ')
            self.logger.error(str(err))
            raise

    def add_sub_problem(self, segment_key, k):
        """
        add the branching constraints for a sub problem: every weight of
        `segment_key` not listed in `k` is fixed to zero and its segment and
        curve rows are dropped.

        Parameters
        ----------
        segment_key: string
            branching variable key
        k: list
            branching sub variables key ex : [x1_1, x1_2]

        Returns
        -------
        None
            self.model, self.segment and self.curve are updated in place
        """
        # adding a sub problem
        for key in self.lp_variables[segment_key]:
            if key in k:
                continue
            else:
                self.model += self.lp_variables[segment_key][key] == 0
                self.segment = self.segment.drop([key])
                self.curve = self.curve.drop([key])

    def add_weights_sum_constraint_to_model(self, weights):
        """Add the convexity constraint: the weights must sum to one."""
        self.model += pulp.lpSum(weights) == 1

    def add_model_constraint_and_objective(self, constraints, values):
        """
        add constraint and objective function to the pulp lp problem;
        the shared slack variable is added to the objective and to every
        constraint's left-hand side.

        Parameters
        ----------
        constraints: pandas data frame
            expression terms per objective/constraint (column 'objective'
            plus one column per constraint)
        values: pandas series
            right side values for the constraints

        Returns
        -------
        None
            self.model is updated in place
        """
        try:
            # Add objective function to model.
            self.model += pulp.lpSum(constraints.objective) + self.lp_slack['p1']
            constraints = constraints.drop(['objective'], axis=1)
            for constraint_expression in constraints:
                self.model += (pulp.lpSum(constraints[constraint_expression]) + self.lp_slack['p1']) <= values[
                    constraint_expression]
        except Exception as err:
            self.logger.info('add_model_constraint_and_objective method ended with error ')
            self.logger.error(str(err))
            raise

    def solve_model(self):
        """
        solve the current pulp model with the CBC solver (silent mode)
        """
        try:
            solver = pulp.PULP_CBC_CMD(msg=0)
            self.model.solve(solver)
        except Exception as err:
            self.logger.info('solve_model method ended with error ')
            self.logger.error(str(err))
            raise

    def model_solver(self, constraints_df, partition_len):
        """
        solve the initial lp problem with piecewise linear variables(weights)

        Parameters
        ----------
        constraints_df: pandas data frame
            which include problem related details,data frame version of problem dictionary
        partition_len: float
            step between consecutive piecewise breakpoints within each
            variable's capacity range

        Returns
        -------
        lp_variables: dict
            pulp solution for the lp weight variables
        segment: pandas data frame
            segment values for each decision variable
        curve: pandas data frame
            function values of objective and constraints function for each segment values
        """
        try:
            constraint_values = pd.DataFrame()
            constraints = constraints_df.drop(['value'])
            # Iterate over constrains and build model.
            for _, constraint in constraints.iterrows():
                # piecewise linear segments: breakpoints from the lower to
                # the upper capacity bound (upper bound always included).
                x_array = np.append(np.arange(constraint.capacity[0], constraint.capacity[1], partition_len),
                                    constraint.capacity[1])
                no_of_segments = len(x_array)
                constraint = constraint.drop(['capacity'])
                variable_names = self.generate_variable_names(no_of_segments, constraint.name)
                # lp variable.
                lp_allocation = self.define_weights_for_segment(variable_names, constraint.name)
                # segment value.
                segment = pd.DataFrame({'key': [constraint.name] * len(x_array), 'segment': x_array})
                segment.index = variable_names
                self.segment = pd.concat([self.segment, segment])
                # curve approximation for each segment.
                curve = pd.DataFrame(CurveApproximator().get_curve_approximation(constraint, x_array))
                curve.index = variable_names
                self.curve = pd.concat([self.curve, curve])
                weights, problem_values = self.fill_constraint_objective_arrays(lp_allocation, constraint)
                constraint_values = pd.concat([constraint_values, problem_values], axis=0)
                self.add_weights_sum_constraint_to_model(weights)
            self.add_model_constraint_and_objective(constraint_values, constraints_df.loc['value'])
            self.solve_model()
            return self.lp_variables, self.segment, self.curve
        except Exception as err:
            self.logger.info('model_solver method ended with error ')
            self.logger.error(str(err))
            raise

    def global_solver(self, segment_key, k, constraints_df):
        """
        solve the given sub lp problem with branching rule

        Parameters
        ----------
        segment_key: str
            branching variable key ex: x1
        k: list
            branching sub variables key ex : [x1_1, x1_2]
        constraints_df: pandas data frame
            which include problem related details,data frame version of problem dictionary

        Returns
        -------
        lp_variables: dict
            pulp solution for the lp weight variables
        segment: pandas data frame
            segment values for each decision variable
        curve: pandas data frame
            function values of objective and constraints functions for each segment values
        """
        # Iterate over constrains and build model.
        try:
            constraint_values = pd.DataFrame()
            constraints = constraints_df.drop(['value'])
            for _, constraint in constraints.iterrows():
                constraint = constraint.drop(['capacity'])
                # Reuse the segment breakpoints retained for this variable.
                segment = self.segment[self.segment.key == constraint.name]['segment'].to_dict()
                variable_names = list(segment.keys())
                lp_allocation = self.define_weights_for_segment(variable_names, constraint.name)
                weights, problem_values = self.fill_constraint_objective_arrays(lp_allocation, constraint)
                constraint_values = pd.concat([constraint_values, problem_values], axis=0)
                self.add_weights_sum_constraint_to_model(weights)
            # adding sub problem (fix non-branch weights to zero).
            self.add_sub_problem(segment_key, k)
            self.add_model_constraint_and_objective(constraint_values, constraints_df.loc['value'])
            self.solve_model()
            return self.lp_variables, self.segment, self.curve
        except Exception as err:
            self.logger.info('global_solver method ended with error ')
            self.logger.error(str(err))
            raise
| 3.046875 | 3 |
x509_pki/migrations/0010_auto_20211017_0936.py | repleo/bounca | 142 | 12759171 | # Generated by Django 3.2.7 on 2021-10-17 07:36
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the serialized ``crl`` field on KeyStore with a dedicated
    CrlStore model linked one-to-one to the certificate."""

    dependencies = [
        ('x509_pki', '0009_auto_20211017_0921'),
    ]

    operations = [
        # Drop the CRL column from KeyStore ...
        migrations.RemoveField(
            model_name='keystore',
            name='crl',
        ),
        # ... and hold the serialized CRL in its own table instead.
        migrations.CreateModel(
            name='CrlStore',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('crl', models.TextField(blank=True, null=True, verbose_name='Serialized CRL certificate')),
                ('certificate', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='x509_pki.certificate')),
            ],
        ),
    ]
| 1.570313 | 2 |
"""
https://leetcode.com/problems/add-strings/
Given two non-negative integers num1 and num2 represented as string, return the sum of num1 and num2.
Note:
The length of both num1 and num2 is < 5100.
Both num1 and num2 contains only digits 0-9.
Both num1 and num2 does not contain any leading zero.
You must not use any built-in BigInteger library or convert the inputs to integer directly.
"""
# Pretty easy.
# time complexity: O(n+m), space complexity: O(max(m,n)), where m and n are the lengths of the two strings respectively.
class Solution:
    def addStrings(self, num1: str, num2: str) -> str:
        """Add two non-negative integers given as decimal strings.

        Classic schoolbook addition from the least significant digit, carrying
        into the next position. No big-integer conversion is used, per the
        problem constraints.

        Time: O(m + n). Space: O(max(m, n)) for the digit buffer.

        Note: the original built the result with ``result = str(r) + result``,
        which is O(n^2) because each prepend copies the whole string; digits
        are now collected in a list and joined once.
        """
        digits = []
        carry = 0
        i, j = len(num1) - 1, len(num2) - 1
        # Keep going while either number has digits left or a carry remains.
        while i >= 0 or j >= 0 or carry:
            d1 = ord(num1[i]) - ord('0') if i >= 0 else 0
            d2 = ord(num2[j]) - ord('0') if j >= 0 else 0
            carry, digit = divmod(d1 + d2 + carry, 10)
            digits.append(str(digit))
            i -= 1
            j -= 1
        # Digits were produced least-significant first; reverse for the answer.
        return ''.join(reversed(digits))
oas_dev/notebooks/eusaari/01-preprocess/x-preprocess-model-output_station_bins_monthly.py | sarambl/OAS-DEV | 0 | 12759173 | <gh_stars>0
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# ### Imports
# %%
# %%
from oas_dev.preprocess.launch_monthly_station_collocation import launch_monthly_station_output
from oas_dev.util.Nd.sizedist_class_v2.SizedistributionBins import SizedistributionStationBins
from oas_dev.util.collocate.collocateLONLAToutput import CollocateLONLATout
from oas_dev.constants import list_sized_vars_nonsec, list_sized_vars_noresm
import useful_scit.util.log as log
log.ger.setLevel(log.log.INFO)
import time
# %% [markdown]
# ### Settings
# %%
# --- Settings ---------------------------------------------------------------
nr_of_bins = 5
maxDiameter = 39.6  # alternative value previously tried: 23.6; unit appears to be nm (e-9 m) -- TODO confirm
minDiameter = 5.0  # unit appears to be nm (e-9 m) -- TODO confirm
history_field='.h1.'
# v11 case lists (immediately overridden by the v21 lists below; kept as in source).
cases_sec = ['SECTv11_noresm2_adj','SECTv11_noresm2_eq18']
cases_orig =['noSECTv11_noresm2_ricc', 'noSECTv11_noresm2_ctrl']
from_t = '2007-04-01'
to_t = '2007-05-01'
# v21 case lists actually used by the run below.
cases_sec = ['SECTv21_ctrl_koagD']
cases_orig =['noSECTv21_ox_ricc_dd']  # overridden on the next line
cases_orig = ['noSECTv21_default_dd', 'noSECTv21_ox_ricc_dd']
# other historical case-list variants omitted; see version control history
from_t = '2009-01-01'
to_t = '2010-01-01'
log.ger.info(f'TIMES:****: {from_t} {to_t}')
# %% [markdown]
# ## Compute collocated datasets from latlon specified output
# %% jupyter={"outputs_hidden": true}
# %%
# %%
# Launch per-month station-output jobs; second argument flags sectional cases.
for case_name in cases_sec:
    launch_monthly_station_output(case_name, True, from_time=from_t, to_time=to_t)
for case_name in cases_orig:
    launch_monthly_station_output(case_name, False, from_time=from_t, to_time=to_t)
# %%
for case_name in cases_sec:
    varlist = list_sized_vars_noresm
    c = CollocateLONLATout(case_name, from_t, to_t,
                           True,
                           'hour',
                           history_field=history_field)
    # Only merge monthly station data when raw output has not been processed yet.
    if c.check_if_load_raw_necessary(varlist ):
        time1 = time.time()
        a = c.make_station_data_merge_monthly(varlist)
        print(a)
        time2 = time.time()
        print('DONE : took {:.3f} s'.format( (time2-time1)))
    else:
        print('UUUPS')
for case_name in cases_orig:
    varlist = list_sized_vars_nonsec  # non-sectional cases use the reduced variable list
    c = CollocateLONLATout(case_name, from_t, to_t,
                           False,
                           'hour',
                           history_field=history_field)
    if c.check_if_load_raw_necessary(varlist ):
        time1 = time.time()
        a = c.make_station_data_merge_monthly(varlist)
        print(a)
        time2 = time.time()
        print('DONE : took {:.3f} s'.format( (time2-time1)))
    else:
        print('UUUPS')
# %%
# %% [markdown]
# ## Compute binned dataset
# %%
# Make station N50 etc. (number concentrations per size bin).
for case_name in cases_sec:
    s = SizedistributionStationBins(case_name, from_t, to_t, [minDiameter, maxDiameter], True, 'hour',
                                    nr_bins=nr_of_bins, history_field=history_field)
    s.compute_Nd_vars()
for case_name in cases_orig:
    s = SizedistributionStationBins(case_name, from_t, to_t, [minDiameter, maxDiameter], False, 'hour',
                                    nr_bins=nr_of_bins, history_field=history_field)
    s.compute_Nd_vars()
| 1.460938 | 1 |
import websockets
import asyncio
import time
import json
from normalise.phemex_normalisation import NormalisePhemex
from helpers.read_config import get_symbols
from sink_connector.kafka_producer import KafkaProducer
from sink_connector.ws_to_kafka import produce_messages
from source_connector.websocket_connector import connect
url = 'wss://phemex.com/ws'
async def main():
    """Create one Kafka producer per Phemex topic and run the websocket feed."""
    raw_producer = KafkaProducer("phemex-raw")
    normalised_producer = KafkaProducer("phemex-normalised")
    trades_producer = KafkaProducer("phemex-trades")
    symbols = get_symbols('phemex')
    # connect() manages the websocket lifecycle and calls handle_phemex on it.
    await connect(url, handle_phemex, raw_producer, normalised_producer, trades_producer, symbols)
async def handle_phemex(ws, raw_producer, normalised_producer, trades_producer, symbols):
    """Subscribe to the order-book and trade channels for every symbol, then
    forward all incoming websocket messages to the Kafka producers."""
    for symbol in symbols:
        for method in ("orderbook.subscribe", "trade.subscribe"):
            request = {
                "id": 1234,  # random id
                "method": method,
                "params": [symbol]
            }
            await ws.send(json.dumps(request))
    await produce_messages(ws, raw_producer, normalised_producer, trades_producer, NormalisePhemex().normalise)
if __name__ == "__main__":
    asyncio.run(main())
splices2npz.py | gcunhase/AnnotatedMV-PreProcessing | 3 | 12759175 | <reponame>gcunhase/AnnotatedMV-PreProcessing<gh_stars>1-10
import numpy as np
from moviepy.editor import VideoFileClip
import utils
from skimage import color
import librosa
import glob
from natsort import natsorted
import pandas as pd
import math
from scipy.io.wavfile import read
"""
Pre-processes data considering already spliced video and audio only
Synchronize video and audio
TODO: add emotion and text
"""
__author__ = "<NAME>"
params = {
'fps': 10,
# 'root': '/media/ceslea/DATA/VideoEmotion/DataWithEmotionTags_noText_correctedAudio_hsio/',
'root': utils.project_dir_name() + 'data/cognimuse_10secs/',
'new_size': 100,
'sr': 16000,
'results_dir': utils.project_dir_name() + 'data/cognimuse_10secs/',
'seconds': 10,
'audio_len': 10*16000, # seconds*sr
}
def load_video(filename, params_substitute=None):
    """Load a video, resize it to new_size x new_size, downsample to the
    configured fps and convert each frame from RGB to HSV.

    :param filename: path of the video file to load
    :param params_substitute: optional settings dict overriding the
        module-level ``params`` (used by external scripts)
    :return: list of downsampled frames in HSV colour space
    """
    # Bind the effective settings to a *new* local name. The old code rebound
    # `params = params_substitute`, which made `params` local to the function
    # and raised UnboundLocalError whenever params_substitute was None.
    cfg = params if params_substitute is None else params_substitute
    # Load videos (fps = 30)
    clip = VideoFileClip(filename)
    # Resize to new_size x new_size (100 x 100 by default)
    clip_resized = clip.resize(newsize=(cfg['new_size'], cfg['new_size']))
    # Downsample to the target fps
    downsampled_frames, _ = utils.downsample_video(clip_resized, cfg, save_downsampled=False)
    # Frames colour conversion RGB -> HSV
    frame_hsv_arr = []
    for frame in downsampled_frames:
        frame_hsv = color.rgb2hsv(frame)
        frame_hsv_arr.append(frame_hsv)
    return frame_hsv_arr
def process_audio(audio_arr, pad_size=48000):
    """Force every audio clip to exactly pad_size samples.

    Clips shorter than pad_size are zero-padded at the end (into a float
    buffer); longer clips are truncated, keeping their original dtype.

    :param audio_arr: iterable of 1-D audio sample arrays
    :param pad_size: target number of samples per clip
    :return: list of arrays, each of length pad_size
    """
    result = []
    for clip in audio_arr:
        n = len(clip)
        if n >= pad_size:
            result.append(clip[:pad_size])
        else:
            buffer = np.zeros([pad_size])
            buffer[:n] = clip
            result.append(buffer)
    return result
def save_npz(videos, type='train', audio_type='instrumental', emotion_dim='1D', emotion_root='emotion/',
             text_root='text/', data_type='with_incomplete', include_audio=True):
    """Collect video frames, audio, emotion and text splices for the given
    movies and save them as compressed .npz archives.

    :param videos: list of movie identifiers (sub-directories under params['root'])
    :param type: dataset split tag used in the output file name ('train'/'test')
    :param audio_type: 'orig' for the original soundtrack, anything else for
        the wav2mid2wav instrumental version
    :param emotion_dim: emotion annotation dimensionality tag ('1D' or '2D')
    :param emotion_root: sub-directory holding the emotion CSVs
    :param text_root: sub-directory holding the text CSVs
    :param data_type: whether to include samples with empty text
        ('with_incomplete') or only samples containing text ('only_complete')
    :param include_audio: if False, the saved archives omit the audio arrays
    """
    print(videos)
    seconds = params['seconds']
    frame_hsv_arr, audio_arr, emotion_arr, text_arr = [], [], [], []
    for v in videos:
        # data_path = params['root'] + "Video_emotion_" + v + "_noText/"
        data_path = params['root'] + v + "/"
        # Load video and corresponding audio
        # video_path = data_path + "selected_avi/*.avi"
        video_path = data_path + "video_splices_{}secs/*.mp4".format(seconds)
        video_filenames = glob.glob(video_path)
        # natsorted keeps splice_2 before splice_10 so video/audio stay aligned.
        video_filenames = natsorted(video_filenames)
        # Load corresponding audio
        # audio_path = data_path + "selected_wav_eq/*.wav"
        if audio_type == 'orig':
            audio_path = data_path + "audio_splices_{}secs_16000_c1_16bits/*.wav".format(seconds)
        else:
            audio_path = data_path + "audio_splices_{}secs_wav2mid2wav_16000_c1_16bits/*.wav".format(seconds)
        audio_filenames = glob.glob(audio_path)
        audio_filenames = natsorted(audio_filenames)
        # Load corresponding emotion
        emotion_csv = pd.read_csv(data_path + "{}intended_1_{}_splices_{}secs.csv".format(emotion_root, emotion_dim, seconds))
        emotion_data = emotion_csv['emotion']
        # Load corresponding text
        text_csv = pd.read_csv(data_path + "{}text_splices_{}secs.csv".format(text_root, seconds))
        text_data = text_csv['text']
        for v_filename, a_filename, emotion, text in zip(video_filenames, audio_filenames, emotion_data, text_data):
            # Missing text cells are parsed by pandas as float NaN -> treat as "".
            text = "" if isinstance(text, float) else text
            if data_type == 'with_incomplete' or (data_type == 'only_complete' and len(text) != 0):
                print('Video {}: {}, audio: {}, emotion: {}, text: {}'.
                      format(v, v_filename.split('/')[-1], a_filename.split('/')[-1], emotion, text))
                frame_hsv_arr.append(load_video(v_filename, params_substitute=params))
                # audio, _ = librosa.load(a_filename, sr=params['sr']) # float numbers
                rate, audio = read(a_filename)  # int numbers -> necessary for SAMPLERNN and CNNSEQ2SEQ models
                # print(rate) # 16000 OKAY
                audio_arr.append(audio)
                emotion_arr.append(emotion)
                text_arr.append(text)
    # Transpose from (N, 30, 100, 100, 3) to (N, 30, 3, 100, 100)
    frame_hsv_arr_transpose = np.transpose(frame_hsv_arr, (0, 1, 4, 2, 3))
    # Pad audio to audio_len if not already
    audio_arr_padded = process_audio(audio_arr, pad_size=params['audio_len'])
    print("Shapes - video: {}/{}, audio: {}/{}".format(np.shape(frame_hsv_arr), np.shape(frame_hsv_arr_transpose),
                                                       np.shape(audio_arr), np.shape(audio_arr_padded)))
    # Save in .npz
    utils.ensure_dir(params['results_dir'])
    data_type_prefix = '_' + data_type
    save_npz_filename_root = '{}video_feats_HSL_{}fps_{}secs'.format(params['results_dir'], params['fps'], seconds)
    if include_audio:
        # Archive with the raw (unpadded) audio arrays.
        if audio_type == 'orig':
            save_npz_filename = save_npz_filename_root + '_origAudio_intAudio_{}_{}{}.npz'.format(type, emotion_dim, data_type_prefix)
        else:
            save_npz_filename = save_npz_filename_root + '_intAudio_{}_{}{}.npz'.format(type, emotion_dim, data_type_prefix)
        np.savez_compressed(save_npz_filename, HSL_data=frame_hsv_arr_transpose, audio=audio_arr, emotion=emotion_arr,
                            text=text_arr)
    else:
        save_npz_filename = save_npz_filename_root + '_{}_{}{}_noAudio.npz'.format(type, emotion_dim, data_type_prefix)
        np.savez_compressed(save_npz_filename, HSL_data=frame_hsv_arr_transpose, emotion=emotion_arr, text=text_arr)
    # Padded audio: second archive where every clip has exactly audio_len samples.
    if include_audio:
        if audio_type == 'orig':
            save_npz_filename = save_npz_filename_root + '_origAudio_intAudio_pad_{}_{}{}.npz'.format(type, emotion_dim, data_type_prefix)
        else:
            save_npz_filename = save_npz_filename_root + '_intAudio_pad_{}_{}{}.npz'.format(type, emotion_dim, data_type_prefix)
        np.savez_compressed(save_npz_filename, HSL_data=frame_hsv_arr_transpose, audio=audio_arr_padded,
                            emotion=emotion_arr, text=text_arr)
if __name__ == '__main__':
    # Choose which audio track to pair with the video: the original soundtrack
    # ('orig') or the MIDI-rendered instrumental version ('instrumental').
    # audio_type = 'orig'
    audio_type = 'instrumental'
    print(audio_type)
    # Training movies (COGNIMUSE-style identifiers).
    videos = ['BMI', 'CHI', 'FNE', 'GLA', 'LOR']
    save_npz(videos, type='train', audio_type=audio_type, emotion_dim='1D', data_type='with_incomplete', include_audio=True)
    # Alternative runs kept for reference (other emotion dims / completeness
    # filters, and the test split with videos = ['CRA', 'DEP']):
    # save_npz(videos, type='train', audio_type=audio_type, emotion_dim='1D', data_type='only_complete', include_audio=False)
    # save_npz(videos, type='train', audio_type=audio_type, emotion_dim='2D', data_type='only_complete', include_audio=False)
    # save_npz(videos, type='train', audio_type=audio_type, emotion_dim='2D', data_type='with_incomplete', include_audio=False)
    # videos = ['CRA', 'DEP']
    # save_npz(videos, type='test', audio_type=audio_type, emotion_dim='1D', data_type='only_complete', include_audio=False)
    # save_npz(videos, type='test', audio_type=audio_type, emotion_dim='1D', data_type='with_incomplete', include_audio=True)
    # save_npz(videos, type='test', audio_type=audio_type, emotion_dim='2D', data_type='only_complete', include_audio=False)
    # save_npz(videos, type='test', audio_type=audio_type, emotion_dim='2D', data_type='with_incomplete', include_audio=False)
| 2.453125 | 2 |
data-api/EDR/provider/xmlConfig.py | ShaneMill1/NCPP_EDR_API | 0 | 12759176 | <reponame>ShaneMill1/NCPP_EDR_API<gh_stars>0
#
# Name: xmlConfig.py
# Purpose: To provide TAC->IWXXM and TAC->USWX decoders & encoders with
# site-specific information and customized settings
#
# Author: <NAME>
# Organization: NOAA/NWS/OSTI/Meteorological Development Laboratory
# Contact Info: <EMAIL>
# Date: 16 January 2020
#
import os
#
_author = '<NAME> - NOAA/NWS/OSTI/MDL/WIAB'
_email = '<EMAIL>'
#
# IWXXM versioning
_iwxxm = '3.0'
_release = '3.0'
#
IWXXM_URI = 'http://icao.int/iwxxm/%s' % _iwxxm
IWXXM_URL = 'https://schemas.wmo.int/iwxxm/%s/iwxxm.xsd' % _release
#
# IWXXM-US versioning
_us_iwxxm = '3.0'
_us_iwxxm_release = '3.0'
#
# USWX versioning
_uswx = '1.0'
_uswx_release = '1.0'
#
# The full name and indentifier of entity running this software
#
TranslationCentreName = 'NCEP Central Operations'
TranslationCentreDesignator = 'KWNO'
TRANSLATOR = True
#
# Path to file containing codes obtained from WMO Code Registry in
# RDF/XML format
#
CodesFilePath = os.path.join('/EDR/provider', 'Codes.rdf')
#CodesFilePath = os.path.join('/home/shane.mill/WIAB-NDFD-data-api-edr/data-api/EDR/provider', 'Codes.rdf')
#
# WMO Code Registry Vocabularies contained in 'CodesFilePath' file
PRSRTNDCY_CONTAINER_ID = '0-10-063'
SEACND_CONTAINER_ID = '0-22-061'
RWYDEPST_CONTAINER_ID = '0-20-086'
RWYCNTMS_CONTAINER_ID = '0-20-087'
WEATHER_CONTAINER_ID = '4678'
COLOUR_CODES = 'AviationColourCode'
#
# If prevailing horizontal visibility falls below this value (metres), RVR information should be supplied
RVR_MaximumDistance = 1500
#
# How many elements in the aerodrome's ARP geo-location, either 2 or 3. Shall be set
# to two (2) for the indefinite future.
#
srsDimension = '2'
srsName = 'https://www.opengis.net/def/crs/EPSG/0/4326'
axisLabels = 'Lat Long'
#
# If srsDimensions is equal to 3, then vertical datum must be set correctly for the elevation used
#
# Allowed values are: 'EGM_96', 'AHD', 'NAVD88', or string matching regular expression pattern
# 'OTHER:(\w|_){1,58}'
#
verticalDatum = 'EGM_96'
#
# Elevation value unit of measure (UOM). Either 'FT' or 'M' or string matching regular expression
# pattern OTHER:(\w|_){1,58}
#
elevationUOM = 'M'
#
# URLs to miscellaneous WMO Code Registry tables and entries
#
# NIL reasons
NIL_NOSIGC_URL = 'http://codes.wmo.int/common/nil/noSignificantChange'
NIL_NOOBSV_URL = 'http://codes.wmo.int/common/nil/notObservable'
NIL_NOOPRSIG_URL = 'http://codes.wmo.int/common/nil/nothingOfOperationalSignificance'
NIL_NOAUTODEC_URL = 'http://codes.wmo.int/common/nil/notDetectedByAutoSystem'
NIL_NA_URL = 'http://codes.wmo.int/common/nil/inapplicable'
NIL_MSSG_URL = 'http://codes.wmo.int/common/nil/missing'
NIL_UNKNWN_URL = 'http://codes.wmo.int/commmon/nil/unknown'
NIL_WTHLD_URL = 'http://codes.wmo.int/common/nil/withheld'
NIL_SNOCLO_URL = 'http://codes.wmo.int/bufr4/codeflag/0-20-085/1'
#
CLDCVR_URL = 'http://codes.wmo.int/49-2/CloudAmountReportedAtAerodrome/'
RWYFRCTN_URL = 'http://codes.wmo.int/bufr4/codeflag/0-20-089/'
#
CUMULONIMBUS = 'http://codes.wmo.int/49-2/SigConvectiveCloudType/CB'
TWRNGCUMULUS = 'http://codes.wmo.int/49-2/SigConvectiveCloudType/TCU'
#
PRSRTNDCY = 'http://codes.wmo.int/bufr4/codeflag/%s' % PRSRTNDCY_CONTAINER_ID
ACCUMLTN = 'http://codes.wmo.int/grib2/codeflag/4.10/1'
MAXIMUM = 'http://codes.wmo.int/grib2/codeflag/4.10/2'
MINIMUM = 'http://codes.wmo.int/grib2/codeflag/4.10/3'
AERONAUTICALVIS = 'http://codes.wmo.int/common/quantity-kind/aeronauticalVisibility'
SKYCATALOG = 'http://codes.wmo.int/bufr4/codeflag/0-20-012'
#
# Bit masks
Weather = 1 << 0
CloudAmt = 1 << 1
CloudType = 1 << 2
SeaCondition = 1 << 3
RunwayDeposit = 1 << 4
AffectedRunwayCoverage = 1 << 5
RunwayFriction = 1 << 6
#
# xlink:title attributes are optional in IWXXM XML documents. TITLES determines
# whether they are displayed.
#
# If no xlink:title attributes (with rare exceptions) are wanted in IWXXM XML documents,
# set TITLES to 0 (zero). Otherwise, set bits appropriately.
#
TITLES = 0
# TITLES=(CloudAmt|CloudType|SeaCondition|RunwayDeposit|AffectedRunwayCoverage|RunwayFriction)
#
# If xlink:titles are to appear in the document, set preferred language. English, 'en', is
# the default if the desired language is not found in the WMO Code Registry.
#
PreferredLanguageForTitles = 'en'
#
# The following dictionaries are defined here because the resulting dictionaries constructed
# from the WMO code registry are 'inadequate.'
#
# Therefore..."Thou shalt not change these dictionaries' keys!" However, the plain text in them
# can be changed (i.e. different language, consistent with preferred language above), if desired.
#
RunwayFrictionValues = {'91': 'Braking action poor', '92': 'Braking action medium to poor',
'93': 'Braking action medium', '94': 'Braking action medium to good',
'95': 'Braking action good', '99': 'Unreliable'}
#
CldCvr = {'CLR': 'Sky clear within limits', 'SKC': 'Sky clear', 'FEW': 'Few',
'SCT': 'Scattered', 'BKN': 'Broken', 'OVC': 'Overcast'}
#
# US Code Registry for Meteorological Services
OFCM_CODE_REGISTRY_URL = 'https://codes.nws.noaa.gov'
#
# IWXXM-US URI and URLs
IWXXM_US_URI = 'http://www.weather.gov/iwxxm-us/%s' % _us_iwxxm
IWXXM_US_URL = 'https://nws.weather.gov/schemas/iwxxm-us/%s/' % _us_iwxxm_release
IWXXM_US_METAR_URL = 'https://nws.weather.gov/schemas/iwxxm-us/%s/metarSpeci.xsd' % _us_iwxxm_release
#
# USWX_URI and URLs
USWX_URI = 'http://www.weather.gov/uswx/%s' % _uswx
USWX_URL = 'https://nws.weather.gov/schemas/uswx/%s/' % _uswx_release
| 1.53125 | 2 |
# Lists always stay in the same order, so you can get
# information out very easily. List indexing works very
# similarly to string indexing.
intList = [1, 2, 3, 4, 5]
# Get the first item
print(intList[0])
# Get the last item
print(intList[-1])
# Alternatively:
print(intList[len(intList) - 1])
# Get the 2nd through 5th items (the slice end index is exclusive,
# so [1:5] returns the elements at indexes 1, 2, 3 and 4)
print(intList[1:5])
# Instead of find, lists have "index"
print(intList.index(2))
#!/usr/bin/env python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: trashy_rm [OPTION].. [FILE]...
Interactive file remover for safe deletion and recycling
Options:
-f, --force Ignore nonexistent files and arguments, never prompt
-r, --recursive Remove directories and their contents recursively
-d, --dir Remove empty directories
-c, --recycle Move the files to the trash / recycle bin
--direct Standard rm files even if they're in the trash path
-s, --shred Shred the files for secure deletion
-i Prompt before removal for every file
-I Prompt once before removing more than CUTOFF files, or when
removing recursively. This is the default mode.
--interactive[=WHEN] Prompt according to WHEN: never, once (-I), or
always (-i). Without WHEN, prompt always.
-v, --verbose Explain what is being done
-h, --help Display this text and exit
--version Output version information and exit
-- Treats all following arguments as file names
- Read file names from stdin
Trashy rm defaults to '--interactive=once' (-I) mode, prompting the user if
more than CUTOFF (defaults to 3) files and directories will be removed. An
attempt is made to provide a useful overview of what files and directories will
be removed before prompting y/n.
Trashy rm also supports different removal modes.
Recycle mode (-c or --recycle) will move the given files to the best matching
trash / recycle bin for the device and OS.
Normal mode will unlink the files via standard rm. If a file resides in the
configured trash path, the file will instead be moved to the trash.
Direct mode (--direct) will unlink files via standard rm, ignoring the trash
path.
Shred mode (-s, --shred) will use shred / gshred to more securely delete a
file, bypassing the other removal modes. Shred mode will attempt to
repeatedly overwrite the file bytes for more secure deletion than rm, which
leaves the data intact but unlinked. Shred's effectiveness is limited by
filesystem type (e.g. journaling or Copy On Write) and device behavior; see
GNU shred's documentation for more information
See man trashy_rm(1) for configuration information
"""
# TODO: Might be useful in a man page / README later
# General purpose file remover wrapping standard rm, mv, and shred commands to
# handle unlinking, recycling, and safe deletion of files and directories. Also
# provides different interactive modes to get a preview of what files will be
# affected.
import configparser
import os
import platform
import sys
from enum import Enum
from subprocess import call
from typing import List
DEFAULT_CUTOFF = 3
class InteractiveMode(Enum):
    """When to prompt the user before removals (see --interactive)."""
    NEVER = 1   # never prompt (--interactive=never)
    NORMAL = 2  # prompt once above the cutoff or when recursive (-I, the default)
    ALWAYS = 3  # prompt before every file (-i)
class TrashMode(Enum):
    """Mode for trashing / recycling files.

    NORMAL will move items to trash if they are in a trash dir specified in the
    config, otherwise normal rm will be used.
    """
    NEVER = 1   # always unlink directly (--direct)
    NORMAL = 2  # trash items under the configured trash paths, rm elsewhere
    ALWAYS = 3  # always move items to the trash / recycle bin (-c/--recycle)
class AppConfig:
    """Holds trashy_rm's own settings as loaded from config files."""

    def __init__(self, cutoff=DEFAULT_CUTOFF, trashy_dirs=None):
        # Number of files / dirs that may be removed before a prompt is given.
        self.cutoff = cutoff
        # Directories in which removal defaults to trashing instead of rm.
        self.trashy_dirs = [] if trashy_dirs is None else trashy_dirs
class ExecConfig:
    """Defines the configuration for this trashy_rm execution"""
    # Show help prompt and exit
    help = False
    # Return the current trash dir and exit
    get_trash = False
    # get version and exit
    version = False
    # Never prompt, ignore non-existent files and arguments
    force = False
    # Prompt mode (never, once / normal, or always)
    interactive_mode = InteractiveMode.NORMAL
    # handle empty directories
    handle_dirs = False
    # Recurse though directories
    recursive = False
    # Root preservation (off, /, all) not supported at this time
    # preservation = None
    # Verbosity
    verbose = False
    # Enable trash / recycle mode (default is to recycle items in the configured dirs, rm elsewhere)
    trash_mode = TrashMode.NORMAL
    # Shred (overrides recycle)
    shred = False
    # Dry run, do not modify the file system
    dry_run = False
    # Read targets from the input stream
    read_targets = False

    def __init__(self):
        # Target files / dirs. This must be a per-instance attribute: the old
        # class-level `targets = []` was a single shared list, so parse_opts'
        # `conf.targets += ...` / `.append(...)` mutated the same list across
        # every ExecConfig instance.
        self.targets = []
def load_app_config(file_names: List[str]) -> AppConfig:
    """Build an AppConfig from the given INI config files, read in order.

    Later files override the prompt cutoff; trash paths are accumulated.
    Missing files are silently skipped by configparser.
    """
    parser = configparser.ConfigParser()
    conf = AppConfig()
    for file_name in file_names:
        parser.read(file_name)
        # [prompt] cutoff: files/dirs removable before prompting.
        cutoff = parser.getint('prompt', 'cutoff', fallback=None)
        if cutoff is not None:
            conf.cutoff = cutoff
        # [trash_path] entries: dirs whose contents default to trashing.
        # NOTE(review): parser state accumulates across read() calls, so
        # trash_path entries from earlier files are re-appended on every
        # later iteration -- possible duplicates; confirm intended.
        if parser.has_section('trash_path'):
            conf.trashy_dirs += [os.path.expanduser(os.path.expandvars(path)) for _, path in parser.items('trash_path')]
    return conf
class OptParseError(Exception):
    """Raised by parse_opts for an invalid or unknown command line argument."""
def parse_opts(opts: List[str]) -> ExecConfig:
    """Parse command line arguments into an ExecConfig.

    Short option clusters (e.g. ``-rf``) are split into individual letters;
    long options keep one leading dash after the first is stripped (so
    ``--force`` is matched as ``-force``). ``--`` treats the rest as file
    names, and a lone ``-`` requests reading targets from stdin. ``-h`` /
    ``--get-trash`` stop parsing immediately.
    """
    conf = ExecConfig()
    it = iter(opts)
    while True:
        try:
            opt = next(it)
            if opt == '--':
                # Everything after this is a file
                conf.targets += it
            elif opt == '-':
                # We should read targets from the input stream as well
                conf.read_targets = True
            elif not opt.startswith('-'):
                conf.targets.append(opt)
            else:
                # Start by removing the leading '-' and splitting short opts into a list of characters
                for o in [opt[1:]] if opt.startswith('--') else list(opt[1:]):
                    # Handle the standard rm args first
                    if o == 'f' or o == '-force':
                        conf.force = True
                    elif o == 'd' or o == '-dir':
                        conf.handle_dirs = True
                    elif o == 'r' or o == '-recursive':
                        conf.recursive = True
                    elif o == 'v' or o == '-verbose':
                        conf.verbose = True
                    elif o == 'i' or o == '-interactive' or o == '-interactive=always' or o == '-interactive=yes':
                        conf.interactive_mode = InteractiveMode.ALWAYS
                    elif o == 'I' or o == '-interactive=once':
                        conf.interactive_mode = InteractiveMode.NORMAL
                    elif o == '-interactive=never' or o == '-interactive=no' or o == '-interactive=none':
                        conf.interactive_mode = InteractiveMode.NEVER
                    # TODO: preserve-root flags?
                    # Handle trashy_rm args
                    elif o == 'h' or o == '-help':
                        # Stop processing if we see a help flag
                        conf.help = True
                        raise StopIteration()
                    elif o == '-get-trash':
                        # Stop processing if we see a get-trash flag
                        conf.get_trash = True
                        raise StopIteration()
                    elif o == 'c' or o == '-recycle':
                        conf.trash_mode = TrashMode.ALWAYS
                    elif o == '-direct':
                        conf.trash_mode = TrashMode.NEVER
                    elif o == 's' or o == '-shred':
                        conf.shred = True
                    elif o == '-dryrun':
                        conf.dry_run = True
                        # sys.stderr.write('Dry Run. Not actually removing files.\n\n')
                    else:
                        # NOTE(review): message says 'careful_rm' while the tool
                        # is trashy_rm -- likely a leftover name; confirm.
                        raise OptParseError('careful_rm: invalid option -- \'' + o + '\'')
        except StopIteration:
            # Raised by next(it) at end of input, or deliberately above to
            # stop early on -h / --get-trash.
            break
    return conf
class UnsupportedSystemError(Exception):
    """Raised when the host operating system is not supported by trashy_rm."""
class LinuxSystemInfo:
    """Linux-specific environment info: uid, config files, user trash dir and
    the available shred binary."""

    def __init__(self):
        # NOTE: this was previously misspelled `__int__`, so it never ran and
        # instances were created without any of these attributes.
        self.uid = os.getuid()
        self.configs = LinuxSystemInfo.get_configs()
        self.user_trash = LinuxSystemInfo.get_user_trash()
        self.shredder = LinuxSystemInfo.get_shredder()

    @classmethod
    def get_user_trash(cls) -> str:
        """Return the XDG user trash directory ($XDG_DATA_HOME/Trash)."""
        xdg_data_home = os.path.expanduser(os.path.expandvars(os.getenv('XDG_DATA_HOME', '~/.local/share')))
        user_trash = os.path.join(xdg_data_home, 'Trash')
        return user_trash

    @classmethod
    def get_configs(cls) -> List[str]:
        """Return existing trashy_rm config file paths (user config only)."""
        xdg_config_home = os.path.expanduser(os.path.expandvars(os.getenv('XDG_CONFIG_HOME', '~/.config')))
        user_config = os.path.join(xdg_config_home, 'trashy_rm', 'config')
        return [user_config] if os.path.isfile(user_config) else []

    @classmethod
    def get_shredder(cls) -> str:
        """Locate a shred binary: prefer `shred`, fall back to `gshred`,
        return None when neither is on PATH (checked via the shell)."""
        if call('hash shred 2>/dev/null', shell=True) == 0:
            return 'shred'
        if call('hash gshred 2>/dev/null', shell=True) == 0:
            return 'gshred'
        return None

    def get_trash_dir(self, target):
        """Get the best matching trash directory for the target file / dir"""
        raise NotImplementedError()
def get_system_info():
    """Return the platform-specific system info object, or raise
    UnsupportedSystemError on platforms trashy_rm does not support."""
    os_name = platform.system()
    if os_name == 'Darwin':
        # TODO: get support from someone with an Apple Macintosh
        raise UnsupportedSystemError("Support for Darwin systems not implemented")
    if os_name != 'Linux':
        # TODO: BSD should be usable as well
        raise UnsupportedSystemError("Unsupported system: " + os_name)
    return LinuxSystemInfo()
def run(sys_info, conf: AppConfig, opts: ExecConfig,
        inp=sys.stdin, out=sys.stdout, err=sys.stderr) -> int:
    """Run trashy rm with the given configurations.

    Only the --help path is implemented so far; everything else raises
    NotImplementedError. Returns the process exit code (0 on success).
    """
    if opts.help:
        # Print the module usage text (the module docstring) and exit cleanly.
        out.write(__doc__)
        return 0
    else:
        raise NotImplementedError()
def main() -> int:
    """trashy_rm run harness: parse CLI args, load config, dispatch to run()."""
    # TODO: handle and test exceptions
    sys_info = get_system_info()
    # argv[0] is the program name, not an operand: passing the full argv made
    # the script's own name a removal target.
    opts = parse_opts(sys.argv[1:])
    conf = load_app_config(sys_info.configs)
    return run(sys_info, conf, opts)
| 3.5 | 4 |
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from mpa.utils.logger import get_logger
logger = get_logger()
class CDLIterator:
    """Iterator over a ComposedDL: yields the first loader's batch dict,
    augmented with 'extra_<i>' entries holding batches from the remaining
    loaders (which are restarted when exhausted)."""

    def __init__(self, cdl):
        self._cdl = cdl
        self._index = 0
        self._cdl_iter = [iter(dl) for dl in self._cdl.loaders]

    def __next__(self):
        if self._index < self._cdl.max_iter:
            batches = {}
            for i, it in enumerate(self._cdl_iter):
                if i == 0:
                    # The primary loader provides the base batch dict and
                    # bounds the epoch length (cdl.max_iter).
                    batches = next(it)
                else:
                    try:
                        batches[f'extra_{i-1}'] = next(it)
                    except StopIteration:
                        # Restart the exhausted loader and take its first
                        # batch. The old code always restarted loader index 1
                        # regardless of i, which broke setups with more than
                        # two loaders.
                        self._cdl_iter[i] = iter(self._cdl.loaders[i])
                        batches[f'extra_{i-1}'] = next(self._cdl_iter[i])
            self._index += 1
            return batches
        raise StopIteration
class ComposedDL(object):
    """Compose several data loaders; iteration yields the first loader's batch
    dicts augmented with batches from the other loaders (see CDLIterator)."""

    class DummySampler(object):
        """Dummy sampler class to relay set_epoch() calls to the samplers of
        all data loaders wrapped by the ComposedDL."""

        def __init__(self, cdl):
            self.cdl = cdl

        def set_epoch(self, epoch):
            loaders = self.cdl.loaders
            for loader in loaders:
                loader.sampler.set_epoch(epoch)

    def __init__(self, loaders=None):
        # Avoid the mutable default argument (`loaders=[]`): the old default
        # was a single list object shared by every no-arg ComposedDL.
        self.loaders = loaders if loaders is not None else []
        # The first loader bounds the number of iterations per epoch.
        self.max_iter = len(self.loaders[0])
        logger.info(f'possible max iterations = {self.max_iter}')
        self._sampler = ComposedDL.DummySampler(self)

    def __len__(self):
        return self.max_iter

    def __iter__(self):
        return CDLIterator(self)

    @property
    def sampler(self):
        return self._sampler
| 2.25 | 2 |
#!/usr/bin/env python
"""
Command line tool for distance 2 self calculation
"""
import sys
import argparse
import csv
import logging
import os
import pandas
import itertools as itr
from Distance2SelfBinding import Distance2Self
from DistanceMatrix import DistanceMatrix
from Fred2.Core import Allele
def read_hla_input(input, hla_header):
    """
    Read the unique HLA alleles from a tab-separated input file.

    :param input: path to the tab-separated file containing an HLA column
    :param hla_header: name of the column holding the HLA allele strings
    :return: map of Fred2 Allele objects, one per distinct allele string
    """
    # NOTE(review): pandas.DataFrame.from_csv was removed in modern pandas;
    # pd.read_csv is the replacement -- confirm the pinned pandas version.
    return map(Allele, set(pandas.DataFrame.from_csv(input, sep="\t", index_col=False)[hla_header]))
def load_blossum(blos):
    """
    Load a BLOSUM distance matrix by name from the DistanceMatrices module.

    :param str blos: specifies the BLOSUM matrix to load (e.g. 'BLOSUM50_distance')
    :return: the requested matrix attribute, or a DistanceMatrix built from
             BLOSUM50_distances when the requested one cannot be loaded
    """
    try:
        mod = __import__('DistanceMatrices', fromlist=[blos])
        return getattr(mod, blos)
    except (ImportError, AttributeError):
        # Narrowed from a bare `except:` so that unrelated errors (e.g.
        # KeyboardInterrupt) are no longer swallowed by the fallback.
        # NOTE(review): the fallback wraps the table in DistanceMatrix while
        # the success path returns it unwrapped -- confirm which type the
        # Distance2Self constructor actually expects.
        mod = __import__('DistanceMatrices', fromlist=["BLOSUM50_distances"])
        return DistanceMatrix(getattr(mod, "BLOSUM50_distances"))
def main():
    """Command line entry point: builds the argument parser for the
    'generate' and 'predict' sub-commands and dispatches accordingly."""
    parser = argparse.ArgumentParser(
        description="Distance to self calculation",
    )
    subparsers = parser.add_subparsers(help='Distance2Self offers two sub-command', dest="sub_command")
    # Trie-generation sub-command
    parser_gen = subparsers.add_parser('generate',
                                       help='Command lets you generate an distance trie based on a provided peptide list')
    parser_gen.add_argument("-i", "--input",
                            required=True,
                            type=str,
                            help="Peptide with immunogenicity file (from epitopeprediction)",
                            )
    parser_gen.add_argument("-s", "--sequence",
                            required=False,
                            default="neopeptide",
                            type=str,
                            help="The columns name of the peptide sequences",
                            )
    parser_gen.add_argument("-o", "--output",
                            required=True,
                            type=str,
                            help="Specifies the output path. Results will be written to CSV",
                            )
    parser_gen.add_argument("-b", "--blosum",
                            required=False,
                            default="BLOSUM50",
                            type=str,
                            help="Specifies BLOSUM distance matrix (default BLOSUM50; available BLOSUM45, BLOSUM90)",
                            )
    #Prediction sub-command
    parser_pred = subparsers.add_parser('predict',
                                        help='Command calculates the distance to self for a provided list of peptides')
    parser_pred.add_argument("-t", "--trie",
                             required=False,
                             default=None,
                             type=str,
                             help="Specifies a custom distance trie to use"
                             )
    parser_pred.add_argument("-s", "--sequence",
                             required=False,
                             default="neopeptide",
                             type=str,
                             help="The columns name of the peptide sequences",
                             )
    parser_pred.add_argument("-k", "--k",
                             required=False,
                             default=1,
                             type=int,
                             help="Specifies the number of closest self-peptides to find"
                             )
    parser_pred.add_argument("-b", "--blosum",
                             required=False,
                             default="BLOSUM50",
                             type=str,
                             help="Specifies BLOSUm distance matrix (default BLOSUM50; available BLOSUM45, BLOSUM90)",
                             )
    parser_pred.add_argument("-a", "--alleles",
                             required=False,
                             default="HLA",
                             type=str,
                             help="Specifies the HLA allele column header of the peptide input file",
                             )
    parser_pred.add_argument("-i", "--input",
                             required=True,
                             type=str,
                             help="Peptide with immunogenicity file (from epitopeprediction)",
                             )
    parser_pred.add_argument("-o", "--output",
                             required=True,
                             type=str,
                             help="Specifies the output path. Results will be written to CSV",
                             )
    args = parser.parse_args()
    # NOTE(review): DistanceMatrices appears to define names ending in
    # "_distances" (see load_blossum's fallback) but this builds a name ending
    # in "_distance" — so the fallback BLOSUM50 matrix is likely always used.
    # TODO confirm against the DistanceMatrices module before changing.
    blos = load_blossum("{blos}_distance".format(blos=args.blosum.strip().upper()))
    dist2self = Distance2Self(blos,saveTrieFile=True)
    df = pandas.DataFrame.from_csv(args.input, sep="\t", index_col=False)
    peps = list(set(df[args.sequence]))
    if args.sub_command == "generate":
        # groupby requires the input sorted by the same key (length here).
        peps.sort(key=len)
        for plength, peps in itr.groupby(peps, key=len):
            dist2self.generate_trie(peps, peptideLength=plength, outfile="{path}_l{peplength}.trie".format(
                path=os.path.splitext(args.output)[0],
                peplength=plength))
    else:
        peps.sort(key=len)
        for plength, peps in itr.groupby(peps, key=len):
            alleles = read_hla_input(args.input, args.alleles)
            # Only use a custom trie if the path was given and actually exists.
            pathToTrie = args.trie if args.trie is not None and os.path.isfile(args.trie) else None
            res = dist2self.calculate_distances(peps, alleles=alleles, hla_header=args.alleles, pep_header=args.sequence,
                                                pathToTrie=pathToTrie, n=args.k)
            merged = pandas.merge(df, res, how="outer",on=[args.sequence,args.alleles])
            merged.to_csv(args.output, sep="\t",index=False)
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
| 3.234375 | 3 |
packages/w3af/w3af/core/ui/api/resources/index.py | ZooAtmosphereGroup/HelloPackages | 3 | 12759181 | """
index.py
Copyright 2015 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from w3af.core.ui.api import app
from w3af.core.ui.api.utils.auth import requires_auth
from flask import jsonify
@app.route('/', methods=['GET'])
@requires_auth
def index():
    # Authenticated REST API root: returns a pointer to the online docs.
    return jsonify({'docs': 'http://docs.w3af.org/en/latest/api/index.html'})
| 1.46875 | 1 |
src/oci/log_analytics/models/log_analytics_em_bridge_summary_report.py | Manny27nyc/oci-python-sdk | 249 | 12759182 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LogAnalyticsEmBridgeSummaryReport(object):
    """
    Log-Analytics EM Bridge counts summary.
    """
    def __init__(self, **kwargs):
        """
        Initializes a new LogAnalyticsEmBridgeSummaryReport object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param compartment_id:
            The value to assign to the compartment_id property of this LogAnalyticsEmBridgeSummaryReport.
        :type compartment_id: str
        :param active_em_bridge_count:
            The value to assign to the active_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
        :type active_em_bridge_count: int
        :param creating_em_bridge_count:
            The value to assign to the creating_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
        :type creating_em_bridge_count: int
        :param needs_attention_em_bridge_count:
            The value to assign to the needs_attention_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
        :type needs_attention_em_bridge_count: int
        :param deleted_em_bridge_count:
            The value to assign to the deleted_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
        :type deleted_em_bridge_count: int
        :param total_em_bridge_count:
            The value to assign to the total_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
        :type total_em_bridge_count: int
        """
        # Attribute name -> swagger type, used by the SDK (de)serializer.
        self.swagger_types = {
            'compartment_id': 'str',
            'active_em_bridge_count': 'int',
            'creating_em_bridge_count': 'int',
            'needs_attention_em_bridge_count': 'int',
            'deleted_em_bridge_count': 'int',
            'total_em_bridge_count': 'int'
        }
        # Attribute name -> JSON field name on the wire.
        self.attribute_map = {
            'compartment_id': 'compartmentId',
            'active_em_bridge_count': 'activeEmBridgeCount',
            'creating_em_bridge_count': 'creatingEmBridgeCount',
            'needs_attention_em_bridge_count': 'needsAttentionEmBridgeCount',
            'deleted_em_bridge_count': 'deletedEmBridgeCount',
            'total_em_bridge_count': 'totalEmBridgeCount'
        }
        self._compartment_id = None
        self._active_em_bridge_count = None
        self._creating_em_bridge_count = None
        self._needs_attention_em_bridge_count = None
        self._deleted_em_bridge_count = None
        self._total_em_bridge_count = None
    @property
    def compartment_id(self):
        """
        **[Required]** Gets the compartment_id of this LogAnalyticsEmBridgeSummaryReport.
        Compartment Identifier `[OCID]`__.
        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
        :return: The compartment_id of this LogAnalyticsEmBridgeSummaryReport.
        :rtype: str
        """
        return self._compartment_id
    @compartment_id.setter
    def compartment_id(self, compartment_id):
        """
        Sets the compartment_id of this LogAnalyticsEmBridgeSummaryReport.
        Compartment Identifier `[OCID]`__.
        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
        :param compartment_id: The compartment_id of this LogAnalyticsEmBridgeSummaryReport.
        :type: str
        """
        self._compartment_id = compartment_id
    @property
    def active_em_bridge_count(self):
        """
        **[Required]** Gets the active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Total number of ACTIVE enterprise manager bridges.
        :return: The active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :rtype: int
        """
        return self._active_em_bridge_count
    @active_em_bridge_count.setter
    def active_em_bridge_count(self, active_em_bridge_count):
        """
        Sets the active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Total number of ACTIVE enterprise manager bridges.
        :param active_em_bridge_count: The active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :type: int
        """
        self._active_em_bridge_count = active_em_bridge_count
    @property
    def creating_em_bridge_count(self):
        """
        **[Required]** Gets the creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Number of enterprise manager bridges in CREATING state.
        :return: The creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :rtype: int
        """
        return self._creating_em_bridge_count
    @creating_em_bridge_count.setter
    def creating_em_bridge_count(self, creating_em_bridge_count):
        """
        Sets the creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Number of enterprise manager bridges in CREATING state.
        :param creating_em_bridge_count: The creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :type: int
        """
        self._creating_em_bridge_count = creating_em_bridge_count
    @property
    def needs_attention_em_bridge_count(self):
        """
        **[Required]** Gets the needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Number of enterprise manager bridges in NEEDS_ATTENTION state.
        :return: The needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :rtype: int
        """
        return self._needs_attention_em_bridge_count
    @needs_attention_em_bridge_count.setter
    def needs_attention_em_bridge_count(self, needs_attention_em_bridge_count):
        """
        Sets the needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Number of enterprise manager bridges in NEEDS_ATTENTION state.
        :param needs_attention_em_bridge_count: The needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :type: int
        """
        self._needs_attention_em_bridge_count = needs_attention_em_bridge_count
    @property
    def deleted_em_bridge_count(self):
        """
        **[Required]** Gets the deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Number of enterprise manager bridges in DELETED state.
        :return: The deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :rtype: int
        """
        return self._deleted_em_bridge_count
    @deleted_em_bridge_count.setter
    def deleted_em_bridge_count(self, deleted_em_bridge_count):
        """
        Sets the deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Number of enterprise manager bridges in DELETED state.
        :param deleted_em_bridge_count: The deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :type: int
        """
        self._deleted_em_bridge_count = deleted_em_bridge_count
    @property
    def total_em_bridge_count(self):
        """
        **[Required]** Gets the total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Total number of enterprise manager bridges.
        :return: The total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :rtype: int
        """
        return self._total_em_bridge_count
    @total_em_bridge_count.setter
    def total_em_bridge_count(self, total_em_bridge_count):
        """
        Sets the total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        Total number of enterprise manager bridges.
        :param total_em_bridge_count: The total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
        :type: int
        """
        self._total_em_bridge_count = total_em_bridge_count
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Generated models compare by full attribute dict equality.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| 2.109375 | 2 |
tpcx_bb/xbb_tools/cupy_metrics.py | williamBlazing/gpu-bdb | 0 | 12759183 | <filename>tpcx_bb/xbb_tools/cupy_metrics.py
#
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
def cupy_conf_mat(y, y_pred):
    """
    Fast two-class confusion matrix on the GPU, laid out to match sklearn
    (rows = true label, columns = predicted label).

    Assumes the classes are one of [0, 1]; many edge cases are not handled.
    Implementation taken from rapidsai/cuml#1524
    """
    n_classes = len(cp.unique(y))
    assert n_classes == 2
    predicted_pos = cp.where(y_pred == 1)
    predicted_neg = cp.where(y_pred != 1)
    true_neg = (y[predicted_neg] == 0).sum()
    false_neg = (y[predicted_neg] == 1).sum()
    true_pos = (y[predicted_pos] == 1).sum()
    false_pos = (y[predicted_pos] == 0).sum()
    mat = cp.zeros((2, 2))
    mat[0, 0] = true_neg
    mat[0, 1] = false_pos
    mat[1, 0] = false_neg
    mat[1, 1] = true_pos
    return mat
def cupy_precision_score(y, y_pred):
    """
    Precision for a two-class model where the positive class is labeled 1.
    Implementation taken from rapidsai/cuml#1522
    """
    predicted_pos = cp.where(y_pred == 1)
    correct = (y_pred[predicted_pos] == y[predicted_pos]).sum()
    wrong = (y_pred[predicted_pos] != y[predicted_pos]).sum()
    precision = correct / (correct + wrong)
    return precision.item()
| 2.0625 | 2 |
mt940/models.py | pretix/mt940 | 0 | 12759184 | import re
import decimal
import datetime
import collections
from . import processors
from . import _compat
import mt940
class Model(object):
    """Common (empty) base class for all parsed MT940 model objects."""
    pass
class Date(datetime.date, Model):
    '''Just a regular date object which supports dates given as strings
    Args:
        year (str): The year (0-100), will automatically add 2000 when needed
        month (str): The month
        day (str): The day
    '''
    def __new__(cls, *args, **kwargs):
        if kwargs:
            # String-based construction path: all fields parsed base-10.
            year = kwargs.get('year')
            month = kwargs.get('month')
            day = kwargs.get('day')
            year = int(year, 10)
            # Two-digit years (e.g. '16') are interpreted as 20xx.
            if year < 1000:
                year += 2000
            month = int(month, 10)
            day = int(day, 10)
            return datetime.date.__new__(cls, year, month, day)
        else:
            # For pickling the date object uses its own binary format
            # (positional args), so no special handling is needed here.
            return datetime.date.__new__(cls, *args, **kwargs)
class Amount(Model):
    '''Monetary amount with an optional currency.

    Args:
        amount (str): Amount using either a , or a . as decimal separator
        status (str): Either C or D for credit or debit respectively
        currency (str): A 3 letter currency (e.g. EUR)
    >>> Amount('123.45', 'C', 'EUR')
    <123.45 EUR>
    >>> Amount('123.45', 'D', 'EUR')
    <-123.45 EUR>
    '''
    def __init__(self, amount, status, currency=None, **kwargs):
        value = decimal.Decimal(amount.replace(',', '.'))
        # Debit ('D') amounts are stored negated; credit ('C') stays positive.
        self.amount = -value if status == 'D' else value
        self.currency = currency
    def __repr__(self):
        return '<%s %s>' % (self.amount, self.currency)
class Balance(Model):
    '''Account balance at a given date.

    Args:
        status (str): Either C or D for credit or debit respectively
        amount (Amount): Object containing the amount and currency
        date (date): The balance date
    >>> balance = Balance('C', '0.00', Date(2010, 7, 22))
    >>> balance.status
    'C'
    >>> balance.amount.amount
    Decimal('0.00')
    >>> isinstance(balance.date, Date)
    True
    >>> balance.date.year, balance.date.month, balance.date.day
    (2010, 7, 22)
    >>> Balance()
    <None @ None>
    '''
    def __init__(self, status=None, amount=None, date=None, **kwargs):
        # Coerce plain strings into Amount objects; falsy amounts pass through.
        if amount and not isinstance(amount, Amount):
            amount = Amount(amount, status, kwargs.get('currency'))
        self.status = status
        self.amount = amount
        self.date = date
    def __str__(self):
        return '{0.amount} @ {0.date}'.format(self)
    def __repr__(self):
        return '<%s>' % self
class Transactions(collections.Sequence):
    '''
    Collection of :py:class:`Transaction` objects with global properties such
    as begin and end balance
    '''
    # NOTE(review): collections.Sequence moved to collections.abc in Py3.3 and
    # was removed in Py3.10 — confirm the supported Python versions.
    #: Using the processors you can pre-process data before creating objects
    #: and modify them after creating the objects
    DEFAULT_PROCESSORS = dict(
        pre_account_identification=[],
        post_account_identification=[],
        pre_available_balance=[],
        post_available_balance=[],
        pre_closing_balance=[],
        post_closing_balance=[],
        pre_intermediate_closing_balance=[],
        post_intermediate_closing_balance=[],
        pre_final_closing_balance=[],
        post_final_closing_balance=[],
        pre_forward_available_balance=[],
        post_forward_available_balance=[],
        pre_opening_balance=[],
        post_opening_balance=[],
        pre_intermediate_opening_balance=[],
        post_intermediate_opening_balance=[],
        pre_final_opening_balance=[],
        post_final_opening_balance=[],
        pre_related_reference=[],
        post_related_reference=[],
        pre_statement=[],
        post_statement=[processors.date_cleanup_post_processor],
        pre_statement_number=[],
        post_statement_number=[],
        pre_transaction_details=[],
        post_transaction_details=[],
        pre_transaction_reference_number=[],
        post_transaction_reference_number=[],
    )
    def __init__(self, processors=None):
        # Copy so per-instance processor overrides don't mutate the defaults.
        self.processors = self.DEFAULT_PROCESSORS.copy()
        if processors:
            self.processors.update(processors)
        self.transactions = []
        self.data = {}
    @property
    def currency(self):
        # Returns the currency of the first balance found, checked in a
        # fixed priority order; implicitly None when no balance is present.
        balance = mt940.utils.coalesce(
            self.data.get('final_opening_balance'),
            self.data.get('opening_balance'),
            self.data.get('intermediate_opening_balance'),
            self.data.get('available_balance'),
            self.data.get('forward_available_balance'),
            self.data.get('final_closing_balance'),
            self.data.get('closing_balance'),
            self.data.get('intermediate_closing_balance'),
        )
        if balance:
            return balance.amount.currency
    def parse(self, data):
        '''Parses mt940 data, expects a string with data
        Args:
            data (str): The MT940 data
        Returns: :py:class:`list` of :py:class:`Transaction`
        '''
        # We don't like carriage returns in case of Windows files so let's just
        # replace them with nothing
        data = data.replace('\r', '')
        # The pattern is a bit annoying to match by regex, even with a greedy
        # match it's difficult to get both the beginning and the end so we're
        # working around it in a safer way to get everything.
        tag_re = re.compile(
            r'^:(?P<full_tag>(?P<tag>[0-9]{2})(?P<sub_tag>[A-Z])?):',
            re.MULTILINE)
        matches = list(tag_re.finditer(data))
        transaction = Transaction(self)
        self.transactions.append(transaction)
        for i, match in enumerate(matches):
            tag_id = int(match.group('tag'))
            assert tag_id in mt940.tags.TAG_BY_ID, 'Unknown tag %r' \
                'in line: %r' % (tag_id, match.group(0))
            # Prefer the full tag (with sub-tag letter) when registered,
            # fall back to the numeric tag.
            tag = mt940.tags.TAG_BY_ID.get(match.group('full_tag')) \
                or mt940.tags.TAG_BY_ID[tag_id]
            # Nice trick to get all the text that is part of this tag, python
            # regex matches have a `end()` and `start()` to indicate the start
            # and end index of the match.
            if matches[i + 1:]:
                tag_data = data[match.end():matches[i + 1].start()].strip()
            else:
                tag_data = data[match.end():].strip()
            tag_dict = tag.parse(self, tag_data)
            # Preprocess data before creating the object
            for processor in self.processors.get('pre_%s' % tag.slug):
                tag_dict = processor(self, tag, tag_dict)
            result = tag(self, tag_dict)
            # Postprocess the object
            for processor in self.processors.get('post_%s' % tag.slug):
                result = processor(self, tag, tag_dict, result)
            if isinstance(tag, mt940.tags.Statement):
                # A new statement tag starts a new transaction once the
                # current one has an id.
                if transaction.data.get('id'):
                    transaction = Transaction(self, result)
                    self.transactions.append(transaction)
                else:
                    transaction.data.update(result)
            elif tag.scope is Transaction:
                # Combine multiple results together as one string, Rabobank has
                # multiple :86: tags for a single transaction
                for k, v in _compat.iteritems(result):
                    if k in transaction.data:
                        transaction.data[k] += '\n%s' % v.strip()
                    else:
                        transaction.data[k] = v
            elif tag.scope is Transactions:  # pragma: no branch
                self.data.update(result)
        return self.transactions
    def __getitem__(self, key):
        return self.transactions[key]
    def __len__(self):
        return len(self.transactions)
    def __repr__(self):
        return '<%s[%s]>' % (
            self.__class__.__name__,
            ']['.join('%s: %s' % (k.replace('_balance', ''), v)
                      for k, v in _compat.iteritems(self.data)
                      if k.endswith('balance'))
        )
class Transaction(Model):
    '''A single MT940 transaction holding its parsed tag data.'''
    def __init__(self, transactions, data=None):
        # Back-reference to the owning Transactions collection.
        self.transactions = transactions
        self.data = {}
        self.update(data)
    def update(self, data):
        # Merge new tag data into this transaction; None is ignored.
        if data:
            self.data.update(data)
    def __repr__(self):
        return '<{0}[{1}] {2}>'.format(
            self.__class__.__name__,
            self.data.get('date'),
            self.data.get('amount'),
        )
| 3.390625 | 3 |
stressTest/testManager.py | bhill-slac/epics-stress-tests | 0 | 12759185 | #!/usr/bin/env python
# Name: testManager.py
# Abstract:
# A python tool to launch and manage EPICS CA and PVA stress tests
# Uses threading and paramiko ssh transport to run needed clients and servers on
# each host machine which will be used in the test.
#
# Example:
# stressTest/testManager.py --testDir /path/to/test/top --testName yourTestName
#
# Requested features to be added:
#
#==============================================================
from __future__ import print_function
import argparse
import concurrent.futures
import io
import datetime
import glob
import locale
import os
import re
import pprint
#import paramiko
#import procServUtils
import signal
import socket
import string
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
# Child ssh subprocess handles spawned by runRemote (kept for cleanup).
procList = []
# StressTest instances currently considered running.
activeTests = []
# presumably maps client names to concurrent.futures futures — TODO confirm
testFutures = {}
# Executor / test directory; set up elsewhere — TODO confirm usage
testExecutor = None
testDir = None
def makePrintable( rawOutput ):
    """Convert raw subprocess output into printable text.

    Handles bytes (decoded), a bytes repr captured as text ("b'...'"),
    and lists (converted element-wise).  Strings are reduced to printable
    characters with carriage returns removed; any other type is returned
    as str(...) unfiltered (matching prior behavior).
    """
    import ast
    if isinstance( rawOutput, str ) and rawOutput.startswith( "b'" ):
        # A bytes repr captured as text; parse it back into real bytes.
        # ast.literal_eval is a safe replacement for eval() on output data.
        try:
            rawOutput = ast.literal_eval( rawOutput )
        except ( ValueError, SyntaxError ):
            # Not actually a bytes literal; fall through to string filtering.
            pass
    if isinstance( rawOutput, bytes ):
        rawOutput = rawOutput.decode()
    if isinstance( rawOutput, list ):
        return [ makePrintable( line ) for line in rawOutput ]
    if not isinstance( rawOutput, str ):
        return str( rawOutput )
    # Filter string for printable characters, dropping carriage returns.
    printable = string.printable.replace( '\r', '' )
    return ''.join( c for c in rawOutput if c in printable )
def getDateTimeFromFile( filePath ):
    """Read a unix ``date``-style timestamp from the first non-blank line
    of a file, e.g. "Tue Jun 04 10:00:00 UTC 2024".

    :param filePath: path of the file to read
    :return: datetime.datetime, or None if the file is missing, empty,
             or the first non-blank line does not parse.
    """
    try:
        with open( filePath, 'r' ) as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                # Only the first non-blank line is considered.
                return datetime.datetime.strptime( line, "%a %b %d %H:%M:%S %Z %Y" )
    except ( OSError, ValueError ):
        # Missing/unreadable file or unparsable timestamp: treat as "no time".
        pass
    return None
class StressTest(object):
    '''class StressTest( pathToTestTop )
    Tracks one running stress test rooted at pathToTestTop.  The test is
    started/stopped via marker files ("startTest"/"stopTest") in that
    directory; instances register themselves in the module-level
    activeTests list.
    '''
    def __init__( self, pathToTestTop ):
        self._pathToTestTop = pathToTestTop
        self._clientList = []
        # Optional max runtime in seconds; None means no scheduled stop.
        self._testDuration = None
        self._startTest = None
    def startTest( self ):
        # Record the start time and announce the test.
        self._startTest = datetime.datetime.now()
        print( "Start: %s at %s" % ( self._pathToTestTop, self._startTest.strftime("%c") ) )
        try:
            # Remove any stale stopTest file
            os.remove( os.path.join( self._pathToTestTop, "stopTest" ) )
        except:
            pass
    def stopTest( self ):
        # Announce and deregister from the global active test list.
        print( "Stop: %s" % self._pathToTestTop )
        activeTests.remove( self )
    def monitorTest( self ):
        # Stop the test once its stop time (file- or duration-based) passes.
        print( "Monitor: %s" % self._pathToTestTop )
        stopTime = self.getStopTime()
        if stopTime:
            currentTime = datetime.datetime.now()
            if currentTime > stopTime:
                self.stopTest()
    def getTestTop( self ):
        return self._pathToTestTop
    def getTestDuration( self ):
        return self._testDuration
    def getStopTime( self ):
        # Stop time is the earlier of the stopTest file's timestamp and the
        # scheduled stop derived from the configured duration (if any).
        stopTime = getDateTimeFromFile( os.path.join( self._pathToTestTop, "stopTest" ) )
        if self._testDuration is not None:
            schedStop = self._startTest + datetime.timedelta( seconds=self._testDuration )
            if not stopTime or stopTime > schedStop:
                stopTime = schedStop
        #if stopTime:
        #    print( "test %s stopTime: %s" % ( self._pathToTestTop, stopTime ) )
        return stopTime
def isActiveTest( pathToTestTop ):
    """Return True if a StressTest for the given test top is already active."""
    return any( activeTest.getTestTop() == pathToTestTop for activeTest in activeTests )
def checkStartTest( startTestPath, options ):
    """Start a StressTest if the startTest marker file was touched recently.

    :param startTestPath: path to a "startTest" marker file; its directory
                          is the stress test top.
    :param options: currently unused — TODO confirm intended use.
    """
    stressTestTop = os.path.split( startTestPath )[0]
    # Already running: nothing to do.
    if isActiveTest( stressTestTop ):
        return
    #print( "checkStartTime( %s )" % ( startTestPath ) )
    currentTime = datetime.datetime.now()
    startTime = getDateTimeFromFile( startTestPath )
    if startTime is None:
        return
    # Only honor marker files written within the last 2 seconds.
    timeSinceStart = currentTime - startTime
    if timeSinceStart.total_seconds() > 2:
        #print( "checkStartTime( %s ) was %d seconds ago." % ( startTestPath, timeSinceStart.total_seconds() ) )
        return
    stressTest = StressTest( stressTestTop )
    activeTests.append( stressTest )
    stressTest.startTest()
    return
# Pre-compile regular expressions for speed
# NAME=value (unquoted, no whitespace in value)
macroDefRegExp = re.compile( r"^\s*([a-zA-Z0-9_]*)\s*=\s*(\S*)\s*$" )
# NAME='value' (single-quoted)
macroDefQuotedRegExp = re.compile( r"^\s*([a-zA-Z0-9_]*)\s*=\s*'([^']*)'\s*$" )
# NAME="value" (double-quoted)
macroDefDQuotedRegExp = re.compile( r'^\s*([a-zA-Z0-9_]*)\s*=\s*"([^"]*)"\s*$' )
# Splits a string around its first $MACRO reference: (prefix, name, rest)
macroRefRegExp = re.compile( r"^([^\$]*)\$([a-zA-Z0-9_]+)(.*)$" )
def expandMacros( strWithMacros, macroDict ):
    """Recursively expand $MACRO references using macroDict.

    Lists are expanded element-wise.  Unknown macros are left in place
    (with their $ prefix) while expansion continues on the remainder of
    the string.  Returns the expanded string or list.
    """
    #print( "expandMacros(%s)" % strWithMacros )
    global macroRefRegExp
    if type(strWithMacros) is list:
        expandedStrList = []
        for unexpandedStr in strWithMacros:
            expandedStr = expandMacros( unexpandedStr, macroDict )
            expandedStrList += [ expandedStr ]
        return expandedStrList
    while True:
        macroMatch = macroRefRegExp.search( strWithMacros )
        if not macroMatch:
            break
        macroName = macroMatch.group(2)
        if macroName in macroDict:
            # Expand this macro and continue
            # (re-scans from the start, so nested expansions are handled)
            strWithMacros = macroMatch.group(1) + macroDict[macroName] + macroMatch.group(3)
            #print( "expandMacros: Expanded %s in %s ..." % ( macroName, strWithMacros ) )
            continue
        # Unknown macro: keep it verbatim and expand the rest of the string
        return macroMatch.group(1) + '$' + macroMatch.group(2) + expandMacros( macroMatch.group(3), macroDict )
    return strWithMacros
def hasMacros( strWithMacros ):
    """Return True if the string (or any string in a list) still contains
    a $MACRO reference."""
    if type( strWithMacros ) is list:
        # Evaluate every element (no short-circuit), matching prior behavior.
        return any( [ hasMacros( element ) for element in strWithMacros ] )
    return macroRefRegExp.search( strWithMacros ) is not None
def getClientConfig( config, clientName ):
    """Find the server or client config dict with the given CLIENT_NAME.

    Searches the 'servers' list first, then 'clients'.

    :param config: top-level test config dict with optional 'servers' and
                   'clients' lists of per-client dicts
    :param clientName: CLIENT_NAME value to look for
    :return: the matching dict, or None if not found
    """
    # "or []" tolerates a missing 'servers'/'clients' key instead of
    # raising TypeError when iterating None.
    for c in config.get('servers') or []:
        if c.get('CLIENT_NAME') == clientName:
            return c
    for c in config.get('clients') or []:
        if c.get('CLIENT_NAME') == clientName:
            return c
    return None
def getEnvFromFile( fileName, env, verbose=False ):
    """Merge NAME=value macro definitions from a file into env.

    Values may be unquoted, single-quoted or double-quoted; lines starting
    with '#' are comments.  Missing or unreadable files are silently
    ignored because the layered env files are optional by design.

    :param fileName: path to the env file
    :param env: dict updated in place with the parsed macros
    :param verbose: print each macro as it is read
    :return: env (the same dict, for chaining)
    """
    if verbose:
        print( "getEnvFromFile: %s" % fileName )
    try:
        with open( fileName, 'r' ) as f:
            for line in f:
                line = line.strip()
                if line.startswith('#'):
                    continue
                match = ( macroDefRegExp.search(line) or \
                          macroDefQuotedRegExp.search(line) or \
                          macroDefDQuotedRegExp.search(line) )
                if not match:
                    continue
                macroName = match.group(1)
                macroValue = match.group(2)
                env[macroName] = macroValue
                if verbose:
                    print( "getEnvFromFile: %s = %s" % ( macroName, macroValue ) )
    except OSError:
        # Env files are optional; a missing/unreadable file is not an error.
        pass
    return env
def readClientConfig( clientConfig, clientName, verbose=False ):
    '''Duplicates the readIfFound env handling in launch_client.sh.
    Layers env files from most-generic to most-specific (later files
    override earlier ones), then expands $MACRO references in place.
    Returns the same clientConfig dict, updated in place.
    '''
    clientConfig[ 'CLIENT_NAME' ] = clientName
    SCRIPTDIR = clientConfig[ 'SCRIPTDIR' ]
    testTop = clientConfig[ 'TEST_TOP' ]
    # Layered defaults: script defaults, local overrides, site, then test.
    getEnvFromFile( os.path.join( SCRIPTDIR, 'stressTestDefault.env' ), clientConfig, verbose=verbose )
    getEnvFromFile( os.path.join( SCRIPTDIR, 'stressTestDefault.env.local' ), clientConfig, verbose=verbose )
    getEnvFromFile( os.path.join( testTop, '..', 'siteDefault.env' ), clientConfig, verbose=verbose )
    getEnvFromFile( os.path.join( testTop, 'siteDefault.env' ), clientConfig, verbose=verbose )
    #getEnvFromFile( os.path.join( TEST_HOST_DIR, 'host.env' ), clientConfig, verbose=verbose )
    getEnvFromFile( os.path.join( testTop, 'test.env' ), clientConfig, verbose=verbose )
    # Read env from clientName.env to get TEST_APPTYPE
    if 'TEST_APPTYPE' in clientConfig:
        print( "TODO: TEST_APPTYPE %s already defined in %s clientConfig!" % ( clientConfig['TEST_APPTYPE'], clientName ) )
    else:
        getEnvFromFile( os.path.join( testTop, clientName + '.env' ), clientConfig, verbose=verbose )
    if 'TEST_APPTYPE' in clientConfig:
        getEnvFromFile( os.path.join( SCRIPTDIR, clientConfig['TEST_APPTYPE'] + 'Default.env' ), clientConfig, verbose=verbose )
        # Reread env from clientName.env to override ${TEST_APPTYPE}Default.env
        getEnvFromFile( os.path.join( testTop, clientName + '.env' ), clientConfig, verbose=verbose )
    # Make sure PYPROC_ID isn't in the clientConfig so it doesn't get expanded
    if 'PYPROC_ID' in clientConfig:
        del clientConfig['PYPROC_ID']
    # Expand macros in clientConfig
    for key in clientConfig:
        clientConfig[key] = expandMacros( clientConfig[key], clientConfig )
    return clientConfig
def runRemote( *args, **kws ):
    """Launch a test client on a remote host via ssh and collect its output.

    args[0] is the top-level test config dict, args[1] the client name.
    Sleeps for TEST_START_DELAY before launch, runs the client's
    TEST_LAUNCHER for TEST_DURATION seconds, terminates the ssh session,
    and returns the printable-filtered stdout (or None on config errors).
    """
    config = args[0]
    clientName = args[1]
    # testTop currently unused — presumably kept for future use; TODO confirm.
    testTop = config[ 'TEST_TOP' ]
    verbose = kws.get( 'verbose', False )
    if verbose:
        print( "runRemote client %s:" % clientName )
    clientConfig = getClientConfig( config, clientName )
    if not clientConfig:
        print( "runRemote client %s unable to read test config!" % clientName )
        return None
    # Optional staggered start so clients don't all launch at once.
    TEST_START_DELAY = clientConfig.get( 'TEST_START_DELAY', 0 )
    if TEST_START_DELAY:
        try:
            TEST_START_DELAY = float(TEST_START_DELAY)
            time.sleep( TEST_START_DELAY )
        except ValueError:
            print( "client %s config has invalid TEST_START_DELAY: %s" % ( clientName, TEST_START_DELAY ) )
    else:
        TEST_START_DELAY = 0.0
    TEST_LAUNCHER = clientConfig.get('TEST_LAUNCHER')
    TEST_LAUNCHER = expandMacros( TEST_LAUNCHER, clientConfig )
    # Refuse to launch if any $MACRO could not be resolved.
    if hasMacros( TEST_LAUNCHER ):
        print( "runRemote Error: TEST_LAUNCHER has unexpanded macros!\n\t%s\n" % TEST_LAUNCHER )
        return
    hostName = clientConfig.get('TEST_HOST')
    if not hostName:
        print( "runRemote Error: client %s TEST_HOST not specified!\n" % clientName )
        return
    # -t -t forces a remote tty so the launcher dies when ssh is terminated.
    cmdList = [ 'ssh', '-t', '-t', hostName ]
    cmdList += TEST_LAUNCHER.split()
    sshRemote = subprocess.Popen( cmdList, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE )
    #sshRemote = subprocess.Popen( cmdList, stdin=None, stdout=subprocess.PIPE )
    procList.append( sshRemote )
    # Let the remote client run for the configured duration.
    TEST_DURATION = clientConfig.get( 'TEST_DURATION' )
    if TEST_DURATION:
        try:
            TEST_DURATION = float(TEST_DURATION)
            print( "client %s sleeping for TEST_DURATION %f" % ( clientName, TEST_DURATION ), flush=True )
            time.sleep( TEST_DURATION )
        except ValueError:
            print( "client %s config has invalid TEST_DURATION: %s" % ( clientName, TEST_DURATION ) )
    print( "client %s terminate remote" % ( clientName ), flush=True )
    #testRemote.stop()
    sshRemote.terminate()
    # Poll with a short timeout until all buffered output is drained.
    while True:
        if verbose:
            print( "client %s fetching output ...\r" % ( clientName ), flush=True )
        try:
            (out,err) = sshRemote.communicate( timeout=1 )
            break
        except subprocess.TimeoutExpired:
            pass
    print( "ssh client %s done." % ( clientName ), flush=True )
    #print( "ssh output type is %s." % ( type(out) ), flush=True )
    return makePrintable( out )
def generateGatewayPVLists( clientConfig, verbose=False ):
    """Write the pvs.list file of gateway status PVs for a gateway client.

    The PV set depends on TEST_PROVIDER:
      - 'pva': p4p/pva gateway statistics PVs
      - 'ca' : cagateway statistics PVs
    The list is written to
    ``$TEST_TOP/$TEST_HOST/clients/${CLIENT_NAME}00/pvs.list``.

    :param clientConfig: expanded client configuration dict
    :param verbose: currently unused, kept for signature compatibility
    """
    gwPrefix = clientConfig['TEST_GW_PREFIX']
    testTop = clientConfig['TEST_TOP']
    provider = clientConfig['TEST_PROVIDER']
    # Status PV suffixes published by a pva (p4p) gateway.
    pvaSuffixes = [
        'cache', 'clients',
        'ds:byhost:rx', 'ds:byhost:tx', 'ds:bypv:rx', 'ds:bypv:tx',
        'refs', 'stats',
        'us:byhost:rx', 'us:byhost:tx', 'us:bypv:rx', 'us:bypv:tx',
    ]
    # Status PV suffixes published by a CA gateway.
    caSuffixes = [
        'vctotal', 'pvtotal', 'connected', 'active', 'inactive',
        'unconnected', 'connecting', 'disconnected', 'dead',
        'clientEventRate', 'clientPostRate', 'existTestRate',
        'loopRate', 'cpuFract', 'load',
        'serverEventRate', 'serverPostRate',
    ]
    if provider == 'pva':
        suffixes = pvaSuffixes
    elif provider == 'ca':
        suffixes = caSuffixes
    else:
        print( "generateGatewayPVLists: Invalid TEST_PROVIDER: %s" % provider )
        return
    gwPvList = [ gwPrefix + suffix for suffix in suffixes ]
    clientHost = clientConfig.get( 'TEST_HOST' )
    clientName = clientConfig.get( 'CLIENT_NAME' )
    # NOTE: the unused TEST_N_CLIENTS lookup was removed; it could only
    # raise when the key was missing and its value was never used here.
    clientPvFileName = os.path.join( testTop, clientHost, 'clients', '%s00' % ( clientName ), "pvs.list" )
    os.makedirs( os.path.dirname( clientPvFileName ), mode=0o775, exist_ok=True )
    print( "generateGatewayPVLists: Writing %d pvs to %s" % ( len(gwPvList), clientPvFileName ) )
    with open( clientPvFileName, 'w' ) as f:
        for pv in gwPvList:
            f.write( "%s\n" % pv )
def generateClientPVLists( testTop, config, verbose=False ):
    '''Create PV Lists for clients.

    For every server instance, generate its Count/CircBuff/Rate PV names and
    write them to that server instance's pvs.list file.  The combined server
    PV lists are then striped across each client's instances (round-robin by
    client index) and one pvs.list is written per client instance.

    testTop - top level test directory; files are written under
              $testTop/$host/clients/$name$NN/pvs.list
    config  - full test configuration dict with 'servers' and 'clients' lists.
    '''
    allCounterPvs = []
    allCircBuffPvs = []
    allRatePvs = []
    servers = config.get( 'servers' )
    for s in servers:
        serverConfig = getClientConfig( config, s.get('CLIENT_NAME') )
        pvPrefix = serverConfig[ 'TEST_PV_PREFIX' ]
        serverHost = serverConfig[ 'TEST_HOST' ]
        serverName = serverConfig[ 'CLIENT_NAME' ]
        nCounters = int( serverConfig[ 'TEST_N_COUNTERS' ] )
        nServers = int( serverConfig[ 'TEST_N_SERVERS' ] )
        for iServer in range( nServers ):
            # Generate list of Count and CircBuff PVs for each server
            CounterPvs = [ "%s%02u:Count%02u" % ( pvPrefix, iServer, n ) for n in range( nCounters ) ]
            CircBuffPvs = [ "%s%02u:CircBuff%02u" % ( pvPrefix, iServer, n ) for n in range( nCounters ) ]
            RatePvs = [ "%s%02u:Rate%02u" % ( pvPrefix, iServer, n ) for n in range( nCounters ) ]
            allCounterPvs += CounterPvs
            allCircBuffPvs += CircBuffPvs
            allRatePvs += RatePvs
            # Write server pvs.list (not read by loadServer)
            # Each loadServer instance gets it's PV's via $TEST_DB
            serverPvFileName = os.path.join( testTop, serverHost, 'clients', '%s%02u' % ( serverName, iServer ), "pvs.list" )
            os.makedirs( os.path.dirname( serverPvFileName ), mode=0o775, exist_ok=True )
            if verbose:
                print( "generateClientPVLists: Writing %d pvs to\n%s" % ( len(CounterPvs) *3, serverPvFileName ) )
            with open( serverPvFileName, 'w' ) as f:
                for pv in CounterPvs:
                    f.write( "%s\n" % pv )
                for pv in CircBuffPvs:
                    f.write( "%s\n" % pv )
                for pv in RatePvs:
                    f.write( "%s\n" % pv )
    clients = config.get( 'clients' )
    nPvs = len(allCounterPvs)
    for clientConfig in clients:
        appType = clientConfig.get( 'TEST_APPTYPE' )
        if appType == 'pvGetGateway':
            # Gateway clients monitor the gateway's own status PVs instead
            # of the server counter PVs.
            generateGatewayPVLists( clientConfig, verbose=False )
            continue
        clientHost = clientConfig[ 'TEST_HOST' ]
        clientName = clientConfig[ 'CLIENT_NAME' ]
        nClients = int( clientConfig[ 'TEST_N_CLIENTS' ] )
        nClientsTotal = nClients * len(clients)  # NOTE(review): computed but unused
        nPvPerClient = int( len(allCounterPvs) / nClients )  # NOTE(review): computed but unused
        for iClient in range( nClients ):
            # Stripe the PVs across client instances: instance i takes every
            # nClients-th PV starting at offset i.
            if appType == 'pvGetArray':
                clientPvList = allCircBuffPvs[ iClient : len(allCircBuffPvs) : nClients ]
            else:
                clientPvList = allCounterPvs[ iClient : len(allCounterPvs) : nClients ]
                clientPvList += allRatePvs[ iClient : len(allRatePvs) : nClients ]
            clientPvFileName = os.path.join( testTop, clientHost, 'clients', '%s%02u' % ( clientName, iClient ), "pvs.list" )
            os.makedirs( os.path.dirname( clientPvFileName ), mode=0o775, exist_ok=True )
            if verbose:
                print( "generateClientPVLists: Writing %d of %d pvs to\n%s" % ( len(clientPvList), nPvs, clientPvFileName ) )
            with open( clientPvFileName, 'w' ) as f:
                for pv in clientPvList:
                    f.write( "%s\n" % pv )
    return
def clientFetchResult( future ):
    '''Done-callback for a client/server future: print its result or error.

    future - a concurrent.futures.Future previously stored as a key in the
             module-global testFutures dict, which maps it back to the
             client name for labelling the output.
    '''
    clientName = testFutures[future]
    try:
        clientResult = future.result()
    except Exception as e:
        print( "%s: Exception: %s" % ( clientName, e ) )
    else:
        print( "clientResult for %s:" % ( clientName ) )
        if clientResult:
            #print( "clientResult type is %s." % ( type(clientResult) ), flush=True )
            #if isinstance( clientResult, str ) and clientResult.startswith( "b'" ):
            #    clientResult = eval(clientResult)
            #    print( "eval clientResult type is %s." % ( type(clientResult) ), flush=True )
            #if isinstance( clientResult, bytes ):
            #    clientResult = clientResult.decode()
            #    print( "decoded clientResult type is %s." % ( type(clientResult) ), flush=True )
            # makePrintable is defined elsewhere in this file; presumably it
            # filters the result down to printable text -- TODO confirm.
            clientResult = makePrintable( clientResult )
            #print( "filtered clientResult type is %s." % ( type(clientResult) ), flush=True )
            if isinstance( clientResult, list ):
                # Print list results one item per line
                for line in clientResult:
                    print( "%s" % line )
            else:
                #if isinstance( clientResult, str ):
                #    clientResult = clientResult.splitlines()
                #    print( "split clientResult type is %s." % ( type(clientResult) ), flush=True )
                print( clientResult )
        else:
            # Empty/falsy results (None, '', []) are still shown for the record
            print( clientResult )
def runTest( testTop, config, verbose=False ):
    '''Launch all configured servers and clients and wait for completion.

    Writes an updated testConfig.json snapshot, regenerates the per-client
    PV lists, then submits one runRemote() job per server and per client to
    a thread pool (stored in the module-global testExecutor/testFutures so
    the signal handler can cancel them).  Blocks until every future is done.
    '''
    servers = config.get( 'servers' )
    clients = config.get( 'clients' )
    TEST_NAME = config[ 'TEST_NAME' ]
    if verbose:
        print( "runTest %s for %d servers and %d clients:" % ( TEST_NAME, len(servers), len(clients) ) )
        for s in servers:
            print( "%20s: host %16s, TEST_LAUNCHER: %s" % ( s.get('CLIENT_NAME'), s.get('TEST_HOST'), s.get('TEST_LAUNCHER') ) )
        for c in clients:
            print( "%20s: host %16s, TEST_LAUNCHER: %s" % ( c.get('CLIENT_NAME'), c.get('TEST_HOST'), c.get('TEST_LAUNCHER') ) )
    # Update test configuration
    # NOTE(review): despite the .json name the file holds a pprint repr of
    # the dict plus a '#' header, which is not valid JSON.
    with open( os.path.join( testTop, 'testConfig.json' ), 'w' ) as f:
        f.write( '# Generated file: Updated on each test run from $TEST_TOP/*.env\n' )
        pprint.pprint( config, stream = f )
    # Create PV lists
    generateClientPVLists( testTop, config, verbose=verbose )
    global testExecutor
    global testFutures
    testExecutor = concurrent.futures.ThreadPoolExecutor( max_workers=None )
    testFutures = {}
    # Servers are submitted first so they come up before their clients
    for c in servers:
        clientName = c.get('CLIENT_NAME')
        testFutures[ testExecutor.submit( runRemote, config, clientName, verbose=verbose ) ] = clientName
    for c in clients:
        clientName = c.get('CLIENT_NAME')
        testFutures[ testExecutor.submit( runRemote, config, clientName, verbose=verbose ) ] = clientName
    print( "Launched %d testFutures ..." % len(testFutures), flush=True )
    for future in testFutures:
        future.add_done_callback( clientFetchResult )
    # Poll instead of a single blocking wait so progress can be reported
    while True:
        ( done, not_done ) = concurrent.futures.wait( testFutures, timeout=1.0 )
        if len(not_done) == 0:
            break
        if verbose:
            print( "Waiting on %d futures ...\r" % len(not_done) )
    print( "shutdown testExecutor...", flush=True )
    testExecutor.shutdown( wait=True )
    return
def killProcesses( ):
    '''Best-effort teardown of all test processes.

    Runs any per-host *.killer scripts found under the test directory,
    kills any locally launched subprocesses still running, then cancels the
    outstanding client/server futures (clients first, then servers) and
    shuts down the executor.  Relies on the module globals procList,
    testDir, testFutures and testExecutor.  Safe to call from the signal
    handler; errors from individual killer scripts are ignored.
    '''
    global procList
    global testDir
    global testFutures
    if testDir:
        # Killer scripts may live either under each host's clients subdir or
        # directly under the host dir, so check both patterns.
        # (The original code printed the first pattern but globbed only the
        # second.)
        killGlobs = [ os.path.join( testDir, "*", "clients", "*.killer" ),
                      os.path.join( testDir, "*", "*.killer" ) ]
        print( 'killProcesses: Checking for killFiles: %s' % killGlobs[0] )
        for killGlob in killGlobs:
            for killFile in glob.glob( killGlob ):
                hostName = os.path.split( os.path.split( os.path.split(killFile)[0] )[0] )[1]
                print( 'killProcesses: run %s (host %s)' % ( killFile, hostName ), flush=True )
                # killFile is a script whose contents already include the
                # "ssh $host kill $pid" command.  subprocess.check_status()
                # does not exist (it raised AttributeError here); use call()
                # with shell=True and ignore the exit status since this is
                # best-effort cleanup.
                subprocess.call( killFile, shell=True )
                time.sleep(0.5)
        time.sleep(1.0)
    # Kill any locally launched processes that are still running
    for proc in procList:
        if proc is not None and proc.returncode is None:
            print( 'killProcesses: kill process %d' % ( proc.pid ), flush=True )
            proc.kill()
            #proc.terminate()
    time.sleep(1.0)
    print( 'killProcesses: Checking %d testFutures ...' % ( len(testFutures) ), flush=True )
    # First cancel client futures so servers stay up until their clients stop
    for future in testFutures:
        if not future.done():
            clientName = testFutures[future]
            if clientName.find('Server') < 0:
                print( 'killProcesses: Cancel future for %s' % ( clientName ), flush=True )
                time.sleep(0.5)
                future.cancel()
    time.sleep(1.0)
    # Then cancel whatever remains (the servers)
    for future in testFutures:
        if not future.done():
            clientName = testFutures[future]
            print( 'killProcesses: Cancel future for %s' % ( clientName ), flush=True )
            time.sleep(0.5)
            future.cancel()
    print( 'killProcesses: Shutdown testExecutor', flush=True )
    time.sleep(0.5)
    testExecutor.shutdown( wait=True )
def stressTest_signal_handler( signum, frame ):
    '''SIGINT/SIGTERM handler: tear down all test processes before exit.'''
    print( "\nstressTest_signal_handler: Received signal %d" % signum, flush=True )
    killProcesses()
    print( 'stressTest_signal_handler: done.', flush=True )
    # Brief pause to let cancelled futures and killed children settle
    time.sleep(0.5)
# Install signal handler at import time so Ctrl-C / kill both trigger
# killProcesses() and clean up remote killer scripts and local children.
signal.signal( signal.SIGINT, stressTest_signal_handler )
signal.signal( signal.SIGTERM, stressTest_signal_handler )
# Can't catch SIGKILL
#signal.signal( signal.SIGKILL, stressTest_signal_handler )
def process_options():
    '''Parse the command line and return the argparse options namespace.

    Supports -v/--verbose and the required -t/--testDir path (which may
    contain shell glob syntax).
    '''
    description = 'stressTest/testManager.py manages launching one or more remote stressTest clients and/or servers.\n'
    epilog_fmt = '\nExamples:\n' \
        'stressTest/testManager.py -t "/path/to/testTop/*"\n'
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent( epilog_fmt ) )
    parser.add_argument( '-v', '--verbose', action="store_true", help='show more verbose output.' )
    parser.add_argument( '-t', '--testDir', action="store", required=True, help='Path to test directory. Can contain * and other glob syntax.' )
    return parser.parse_args()
def main( options, argv=None):
    '''Build the test configuration from $TEST_TOP/*.env files and run it.

    Reads test.env for shared settings, then one <name>.env per client;
    files whose name contains "Server" become server configs, all others
    become client configs.  Delegates to runTest().
    argv is accepted but unused.
    '''
    #if options.verbose:
    #    print( "logDir=%s\n" % options.logDir )
    if options.verbose:
        print( "testDir=%s\n" % options.testDir )
    global testDir
    testDir = options.testDir
    testConfig = {}
    servers = []
    clients = []
    # Read test.env
    SCRIPTDIR = os.path.abspath( os.path.dirname( __file__ ) )
    TEST_NAME = os.path.split(testDir)[1]
    testConfig[ 'SCRIPTDIR'] = SCRIPTDIR
    testConfig[ 'TEST_NAME'] = TEST_NAME
    testConfig[ 'TEST_TOP' ] = testDir
    getEnvFromFile( os.path.join( options.testDir, "test.env" ), testConfig, verbose=options.verbose )
    for envFile in glob.glob( os.path.join( options.testDir, "*.env" ) ):
        baseName = os.path.split( envFile )[1]
        if baseName == "test.env":
            # Shared settings were already merged above
            continue
        # Client configuration: start from the shared settings, then overlay
        # the per-client .env values.
        clientConfig = testConfig.copy()
        clientName = baseName.replace( ".env", "" )
        readClientConfig( clientConfig, clientName, verbose=options.verbose )
        if baseName.find( "Server" ) >= 0:
            servers.append( clientConfig.copy() )
        else:
            clients.append( clientConfig.copy() )
    testConfig[ 'servers' ] = servers
    testConfig[ 'clients' ] = clients
    return runTest( options.testDir, testConfig, verbose=options.verbose )
if __name__ == '__main__':
    status = 0
    options = process_options()
    debug = 1
    # NOTE(review): with debug truthy, main() runs OUTSIDE the try block, so
    # an exception here propagates uncaught and killProcesses() below is
    # skipped; the try body then does nothing because of the "if not debug".
    if debug:
        status = main( options )
    try:
        if not debug:
            status = main( options )
        print( "main() status=" , status )
    except BaseException as e:
        print( e )
        print( "Caught exception during main!" )
        pass
    # Kill any processes still running
    killProcesses()
    sys.exit(status)
| 2.34375 | 2 |
tests/test_argument_resolver.py | EvgenySmekalin/winter | 1 | 12759186 | <filename>tests/test_argument_resolver.py
import pytest
from mock import Mock
from winter import ArgumentsResolver
from winter import GenericArgumentResolver
from winter.argument_resolver import ArgumentNotSupported
from winter.core import ComponentMethod
from winter.core import ComponentMethodArgument
@pytest.mark.parametrize('arg_name, arg_type, resolver_arg_name, resolver_arg_type, expected_supported', [
    ('a', int, 'a', int, True),
    ('a', int, 'b', int, False),
    ('a', int, 'a', str, False),
    ('a', int, 'b', str, False),
])
def test_generic_argument_resolver_is_supported(arg_name, arg_type, resolver_arg_name, resolver_arg_type,
                                                expected_supported):
    """is_supported is True only when both argument name and type match."""
    resolve_argument_mock = Mock()
    generic_argument_resolver = GenericArgumentResolver(resolver_arg_name, resolver_arg_type, resolve_argument_mock)
    argument = ComponentMethodArgument(Mock(), arg_name, arg_type)
    # Act
    is_supported = generic_argument_resolver.is_supported(argument)
    # Assert
    assert is_supported is expected_supported
    # The support check alone must never invoke the resolve callback
    resolve_argument_mock.assert_not_called()
def test_generic_argument_resolver_resolve_argument():
    """resolve_argument must delegate to the wrapped resolve function unchanged."""
    resolve_func = Mock()
    resolver = GenericArgumentResolver('a', int, resolve_func)
    argument = ComponentMethodArgument(Mock(), 'a', int)
    request, response_headers = Mock(), Mock()
    # Act
    resolver.resolve_argument(argument, request, response_headers)
    # Assert
    resolve_func.assert_called_once_with(argument, request, response_headers)
def test_resolve_arguments_returns_empty_dict_for_empty_arguments():
    """A method without parameters resolves to an empty argument mapping."""
    def func():
        pass
    arguments_resolver = ArgumentsResolver()
    # Act
    resolved = arguments_resolver.resolve_arguments(ComponentMethod(func), request=Mock(), response_headers={})
    # Assert
    assert resolved == {}
def test_resolve_arguments_resolves_argument_with_the_first_resolver():
    """A single supporting resolver supplies the resolved argument value."""
    def func(a: int):
        pass
    resolver = Mock()
    resolver.is_supported.return_value = True
    resolver.resolve_argument.return_value = 1
    arguments_resolver = ArgumentsResolver()
    arguments_resolver.add_argument_resolver(resolver)
    # Act
    resolved = arguments_resolver.resolve_arguments(ComponentMethod(func), request=Mock(), response_headers={})
    # Assert
    assert resolved == {'a': 1}
def test_resolve_arguments_resolves_argument_with_the_second_resolver():
    """When the first resolver declines, the second supporting one is used."""
    def func(a: int):
        pass
    declining_resolver = Mock()
    declining_resolver.is_supported.return_value = False
    supporting_resolver = Mock()
    supporting_resolver.is_supported.return_value = True
    supporting_resolver.resolve_argument.return_value = 1
    arguments_resolver = ArgumentsResolver()
    arguments_resolver.add_argument_resolver(declining_resolver)
    arguments_resolver.add_argument_resolver(supporting_resolver)
    # Act
    resolved = arguments_resolver.resolve_arguments(ComponentMethod(func), request=Mock(), response_headers={})
    # Assert
    assert resolved == {'a': 1}
def test_resolve_arguments_fails():
    """Without any registered resolver, resolution raises ArgumentNotSupported."""
    def func(a: int):
        pass
    arguments_resolver = ArgumentsResolver()
    # Assert
    with pytest.raises(ArgumentNotSupported, match='Unable to resolve argument a: int'):
        # Act
        arguments_resolver.resolve_arguments(ComponentMethod(func), request=Mock(), response_headers={})
| 2.375 | 2 |
run.py | doncat99/FinanceAnalysis | 6 | 12759187 | <filename>run.py
import warnings
warnings.filterwarnings("ignore")
import logging
import time
import pandas as pd
import plotly.graph_objects as go
from zvt.api.data_type import Region, Provider
from zvt.factors.squeeze_factor import SqueezeFactor
# from zvt.contract.reader import DataReader
# from zvt.domain import Stock1dKdata, Stock
import zvt.stats as qs
logger = logging.getLogger(__name__)
def chart(dfs):
    """Show one plotly candlestick chart per symbol.

    dfs - dict mapping a symbol code to a DataFrame that must provide the
          columns timestamp/open/high/low/close plus the precomputed
          upper_band/lower_band (Bollinger) and upper_keltner/lower_keltner
          (Keltner) channel columns.
    Opens each figure in the default renderer; returns None.
    """
    def sub_chart(title, df):
        # Build candlesticks plus the two volatility channels for one symbol
        candlestick = go.Candlestick(x=df['timestamp'], open=df['open'], high=df['high'], low=df['low'], close=df['close'])
        upper_band = go.Scatter(x=df['timestamp'], y=df['upper_band'], name='Upper Bollinger Band', line={'color': 'red'})
        lower_band = go.Scatter(x=df['timestamp'], y=df['lower_band'], name='Lower Bollinger Band', line={'color': 'red'})
        upper_keltner = go.Scatter(x=df['timestamp'], y=df['upper_keltner'], name='Upper Keltner Channel', line={'color': 'blue'})
        lower_keltner = go.Scatter(x=df['timestamp'], y=df['lower_keltner'], name='Lower Keltner Channel', line={'color': 'blue'})
        data = [candlestick, upper_band, lower_band, upper_keltner, lower_keltner]
        # Layout: date x-axis with a range selector (1m/6m/YTD/1y/all) and a
        # range slider beneath the chart.
        layout = go.Layout(
            title='Stock Market Data Analysis - ' + title,
            xaxis=dict(
                title='Date',
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f'),
                rangeselector=dict(
                    buttons=list([
                        dict(count=1,
                             label="1m",
                             step="month",
                             stepmode="backward"),
                        dict(count=6,
                             label="6m",
                             step="month",
                             stepmode="backward"),
                        dict(count=1,
                             label="YTD",
                             step="year",
                             stepmode="todate"),
                        dict(count=1,
                             label="1y",
                             step="year",
                             stepmode="backward"),
                        dict(step="all")
                    ])
                ),
                rangeslider=dict(
                    visible=True
                ),
                type="date"
            ),
            yaxis=dict(
                title='Stock market price',
                titlefont=dict(
                    family='Courier New, monospace',
                    size=18,
                    color='#7f7f7f')
            )
        )
        return go.Figure(data=data, layout=layout)
    total = len(dfs)
    if total > 0:
        for key, df in dfs.items():
            fig = sub_chart(key, df)
            fig.show()
if __name__ == '__main__':
    # Ad-hoc driver: compute the squeeze factor for FB and AMD, build a
    # quantstats tearsheet comparing FB returns against AMD, then chart both.
    from datetime import datetime
    import time
    now = time.time()  # wall-clock reference for the timing printouts below
    # reader = DataReader(region=Region.US,
    #                     codes=['FB', 'AMD'],
    #                     data_schema=Stock1dKdata,
    #                     entity_schema=Stock,
    #                     provider=Provider.Yahoo)
    # gb = reader.data_df.groupby('code')
    # dfs = {x : gb.get_group(x) for x in gb.groups}
    factor = SqueezeFactor(region=Region.US,
                           codes=['FB', 'AMD'],
                           start_timestamp='2015-01-01',
                           end_timestamp=datetime.now().strftime("%Y-%m-%d"),
                           kdata_overlap=4,
                           provider=Provider.Yahoo)
    # Split the factor result into one DataFrame per symbol code
    gb = factor.result_df.groupby('code')
    dfs = {x: gb.get_group(x) for x in gb.groups}
    print("1", time.time() - now)
    # Daily percent-change return series indexed by timestamp
    target = pd.Series(dfs['FB'].close.pct_change().tolist(), index=dfs['FB'].timestamp)
    bench = pd.Series(dfs['AMD'].close.pct_change().tolist(), index=dfs['AMD'].timestamp)
    target_len = len(target)
    bench_len = len(bench)
    # Align the benchmark to the target's length by keeping its tail
    if bench_len > target_len:
        bench = bench[-target_len:]
    print("2", time.time() - now)
    qs.reports.html(returns=target, benchmark=bench, output='b.html')
    print("3", time.time() - now)
    chart(dfs)
    print("4", time.time() - now)
samplescripts/tgs.py | pavan2004it/bash-script | 0 | 12759188 | import boto3
import click
elbList = boto3.client('elbv2')
rgapi = boto3.client('resourcegroupstaggingapi')
"""
def lst_targets(lbar):
tgs = []
tgroups = elbList.describe_target_groups(LoadBalancerArn=lbar)
for tg in tgroups['TargetGroups']:
targetgps = tg['TargetGroupArn']
print(targetgps)
return tgs
@click.group()
def cli():
""awssnapelb manages snapshots""
@cli.group('tgroups')
def tgroups():
""Commands for listing target groups based on loadbalancers""
@tgroups.command('listtgs')
@click.option('--lbar', default=None,help="only the elb's for the project (tag Project:<name>)")
def lst_tgroups(lbar):
target_groups = lst_targets(lbar)
for tg in target_groups:
print(tg)
if __name__ == '__main__':
cli()
"""
"""
def lst_tg(project):
tgs = []
resources = rgapi.get_resources(TagFilters=[{'Key':'Project','Values':[project]}],ResourceTypeFilters=['elasticloadbalancing:targetgroup'])
for tg in resources['ResourceTagMappingList']:
print(tg['ResourceARN'])
return tgs
@click.group()
def cli():
""list the target groups""
@cli.group('tgroups')
def tgroups():
"" Command for listing targetgroups""
@tgroups.command('list')
@click.option('--project', default=None,help="only the elb's for the project (tag Project:<name>)")
def list_tg(project):
target_groups = lst_tg(project)
for tg in target_groups:
print(tg)
if __name__ == '__main__':
cli()
"""
import boto3
import click
elbList = boto3.client('elbv2')
rgapi = boto3.client('resourcegroupstaggingapi')
def list_tg(lbname):
    """Collect the target group ARNs attached to the named load balancer.

    lbname - load balancer name passed to describe_load_balancers.
    Returns the list of target group ARNs.  (Previously the result list was
    never populated: the ARNs were printed here and an empty list returned,
    so the CLI command's own print loop produced no output.)
    """
    target = []
    loadbalancers = elbList.describe_load_balancers(Names=[lbname])
    for lb in loadbalancers['LoadBalancers']:
        lbalancerarn = lb['LoadBalancerArn']
        tgs = elbList.describe_target_groups(LoadBalancerArn=lbalancerarn)
        for tg in tgs['TargetGroups']:
            # Accumulate instead of printing so the caller controls output
            target.append(tg['TargetGroupArn'])
    return target
@click.group()
def cli():
    """list the target groups"""
    # Top-level click command group; subcommands attach via @cli.group().
@cli.group('tgroups')
def tgroups():
    """ Command for listing targetgroups"""
    # Nested group so the CLI reads: tgs.py tgroups list --lbname <name>
@tgroups.command('list')
@click.option('--lbname', default=None,help="only the elb's for the project (tag Project:<name>)")
def lst_tg(lbname):
    # Print each target group ARN found for the named load balancer
    target_groups = list_tg(lbname)
    for tg in target_groups:
        print(tg)
if __name__ == '__main__':
cli()
| 2.234375 | 2 |
test_keypad_pin_lcd.py | amusarra/raspberry-pi-access-via-ts-cns | 4 | 12759189 | <reponame>amusarra/raspberry-pi-access-via-ts-cns<filename>test_keypad_pin_lcd.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This Python script test_keypad_pin_lcd.py check if the the PIN code that inserted with the key pad
it's correct and display the instructions on the LCD 16x2 display.
MIT License
Raspberry Pi - Access via Smart Card TS-CNS
Copyright (c) 2020 <NAME>'s Blog - https://www.dontesta.it
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020 Antonio Musarra's Blog"
__credits__ = ["<NAME>"]
__version__ = "1.0.0"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from pad4pi import rpi_gpio
from modules.PCF8574 import PCF8574_GPIO
from modules.Adafruit_LCD1602 import Adafruit_CharLCD
import time
import sys
# Check I2C address via command i2cdetect -y 1
PCF8574_address = 0x27  # I2C address of the PCF8574 chip.
PCF8574A_address = 0x3F  # I2C address of the PCF8574A chip.
# Create PCF8574 GPIO adapter: probe the PCF8574 address first, then fall
# back to the PCF8574A address before giving up.
try:
    mcp = PCF8574_GPIO(PCF8574_address)
except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
    try:
        mcp = PCF8574_GPIO(PCF8574A_address)
    except Exception:
        print("I2C Address Error !")
        exit(1)
# Create LCD, passing in MCP GPIO adapter.
lcd = Adafruit_CharLCD(pin_rs=0, pin_e=2, pins_db=[4, 5, 6, 7], GPIO=mcp)
# 4x4 matrix keypad layout: digits plus A-D, '*' (delete) and '#' (confirm)
KEYPAD = [
    [1, 2, 3, "A"],
    [4, 5, 6, "B"],
    [7, 8, 9, "C"],
    ["*", 0, "#", "D"]
]
ROW_PINS = [18, 12, 20, 21]  # BCM numbering
COL_PINS = [10, 22, 27, 17]  # BCM numbering
# PIN-entry state.  NOTE(review): the PIN is hard coded in clear text --
# acceptable for this test script, not for real access control.
entered_pin = ""
correct_pin = "1234"
# CleanUp the resources
def cleanup():
    """Show a goodbye message, switch off the backlight and release the keypad GPIOs."""
    global keypad
    lcd.clear()
    lcd.message("Goodbye...\n")
    lcd.backlight = False
    keypad.cleanup()
# Check entered PIN code
def check_pin(key):
    """Validate the accumulated PIN.

    Runs when the entered PIN has reached the expected length, or when the
    confirm key '#' is pressed; branches to the granted/denied handlers.
    """
    global entered_pin, correct_pin
    if len(entered_pin) == len(correct_pin) or key == "#":
        if entered_pin == correct_pin:
            correct_pin_entered()
        else:
            incorrect_pin_entered()
# Display info on corrected PIN code and exit
def correct_pin_entered():
    """Report access granted on LCD and console, then clean up and exit."""
    lcd.clear()
    lcd.message("Access granted\n")
    lcd.message("Accepted PIN\n")
    print("PIN accepted. Access granted.")
    # Leave the message visible for a moment before shutting down
    time.sleep(5)
    cleanup()
    sys.exit()
# Construct the entered PIN code
def digit_entered(key):
    """Append a digit to the PIN, refresh the display and auto-check length."""
    global entered_pin, correct_pin
    entered_pin += str(key)
    print(entered_pin)
    lcd.clear()
    lcd.message("PIN: " + entered_pin + "\n")
    lcd.message("# to confirm")
    # check_pin only acts once the PIN reaches the expected length
    check_pin(key)
# Display info on in-corrected PIN code and exit
def incorrect_pin_entered():
    """Report access denied on LCD and console, then clean up and exit."""
    lcd.clear()
    lcd.message("Access denied\n")
    lcd.message("Incorrect PIN\n")
    print("Incorrect PIN. Access denied.")
    # Leave the message visible for a moment before shutting down
    time.sleep(5)
    cleanup()
    sys.exit()
# Initialize the I2C LCD 1602 Display
def initialize_lcd():
    """Turn on the backlight and show the initial PIN-entry instructions."""
    mcp.output(3, 1)  # turn on LCD backlight
    lcd.begin(16, 2)  # set number of LCD lines and columns
    lcd.message("Enter your PIN\n")
    lcd.message("Press * to clear")
# Manage no PIN code key
def non_digit_entered(key):
    """Handle non-digit keys: '*' deletes the last digit, '#' confirms early."""
    global entered_pin
    if key == "*" and len(entered_pin) > 0:
        entered_pin = entered_pin[:-1]
        lcd.clear()
        lcd.message("PIN: " + entered_pin + "\n")
        lcd.message("# to confirm")
        print(entered_pin)
    if key == "#" and len(entered_pin) > 0:
        check_pin(key)
# Press handler key
def key_pressed(key):
    """Keypad callback: digits (ints 0-9) go to PIN entry, the rest to non_digit_entered."""
    try:
        int_key = int(key)
        if 0 <= int_key <= 9:
            digit_entered(key)
    except ValueError:
        # key is a non-numeric string ('A'-'D', '*', '#')
        non_digit_entered(key)
try:
    # Wire the physical keypad to key_pressed and show the initial prompt,
    # then idle: all PIN handling happens in the keypad callback thread.
    factory = rpi_gpio.KeypadFactory()
    keypad = factory.create_keypad(keypad=KEYPAD, row_pins=ROW_PINS, col_pins=COL_PINS)
    keypad.registerKeyPressHandler(key_pressed)
    initialize_lcd()
    print("Enter your PIN:")
    print("Press * to clear previous digit.")
    print("Press # to confirm.")
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    print("Goodbye")
finally:
    # Always release GPIOs and blank the LCD, even on Ctrl-C or sys.exit()
    cleanup()
| 2.8125 | 3 |
rosters/apps.py | Drazerr/roster-wizard | 0 | 12759190 | from django.apps import AppConfig
class RostersConfig(AppConfig):
    """Django AppConfig for the rosters application."""
    name = "rosters"  # dotted module path Django uses to locate the app
| 1.164063 | 1 |
API/Project.py | Gabicolombo/Python-exercicios | 0 | 12759191 | <reponame>Gabicolombo/Python-exercicios
import json
import requests
'''
Define a function, called get_movies_from_tastedive. It should take one input parameter, a string that is the name of a movie or music artist.
The function should return the 5 TasteDive results that are associated with that string; be sure to only get movies, not other kinds of media.
It will be a python dictionary with just one key, ‘Similar’.
'''
def get_movies_from_tastedive(query):
    """Query the TasteDive 'similar' API for up to 5 movies related to *query*.

    Only movie results are requested (type=movies).  Returns the decoded
    JSON response: a dict with a single 'Similar' key.
    """
    params = {'q': query, 'type': 'movies', 'limit': 5}
    response = requests.get("https://tastedive.com/api/similar", params=params)
    return json.loads(response.text)
'''
Please copy the completed function from above into this active code window. Next, you will need to write a function that extracts just the
list of movie titles from a dictionary returned by get_movies_from_tastedive. Call it extract_movie_titles.
'''
def extract_movie_titles(d):
    """Return the list of 'Name' values from a TasteDive similarity response dict."""
    titles = []
    for entry in d["Similar"]["Results"]:
        titles.append(entry["Name"])
    return titles
'''
Please copy the completed functions from the two code windows above into this active code window. Next, you’ll write a function,
called get_related_titles. It takes a list of movie titles as input. It gets five related movies for each from TasteDive,
extracts the titles for all of them, and combines them all into a single list. Don’t include the same movie twice.
'''
def get_related_titles(lst):
    """Collect related movie titles for every title in *lst*.

    Queries TasteDive for up to five related movies per input title,
    flattens the results, and de-duplicates while preserving first-seen
    order.  A suggestion is also skipped when it equals the input title it
    was suggested for.
    """
    related = []
    # Iterate titles directly instead of the old range(len(lst)) indexing
    for title in lst:
        response = get_movies_from_tastedive(title)
        for entry in response["Similar"]["Results"]:
            name = entry["Name"]
            if name not in related and name != title:
                related.append(name)
    return related
'''
Your next task will be to fetch data from OMDB. The documentation for the API is at https://www.omdbapi.com/
Define a function called get_movie_data. It takes in one parameter which is a string that should represent the title of a movie you want to search.
The function should return a dictionary with information about that movie.
Again, use requests_with_caching.get(). For the queries on movies that are already in the cache, you won’t need an api key.
You will need to provide the following keys: t and r. As with the TasteDive cache, be sure to only include those two parameters in order
to extract existing data from the cache.
'''
def get_movie_data(title):
    """Fetch the OMDB record for *title* and return it as a dict (JSON response)."""
    params = {"t": title, "r": "json"}
    response = requests.get("http://www.omdbapi.com/", params=params)
    return response.json()
print(get_movie_data("Venom"))
'''
Please copy the completed function from above into this active code window. Now write a function called get_movie_rating.
It takes an OMDB dictionary result for one movie and extracts the Rotten Tomatoes rating as an integer. For example, if given the OMDB dictionary
for “Black Panther”, it would return 97. If there is no Rotten Tomatoes rating, return 0.
'''
def get_movie_rating(d):
    """Extract the Rotten Tomatoes rating from an OMDB movie dict as an int.

    d - OMDB response dict; its 'Ratings' entry (if present) is a list of
        {'Source': ..., 'Value': ...} dicts, with Rotten Tomatoes values
        formatted like '97%'.
    Returns the integer percentage, or 0 when there is no 'Ratings' list,
    no Rotten Tomatoes entry, or a malformed value.  (The old version used
    a bare except and left debug print statements in.)
    """
    try:
        for source in d['Ratings']:
            if source['Source'] == 'Rotten Tomatoes':
                # Value looks like '97%'; strip the trailing percent sign
                return int(source['Value'][:-1])
    except (KeyError, TypeError, ValueError):
        return 0
    return 0
'''
Now, you’ll put it all together. Don’t forget to copy all of the functions that you have previously defined into this code window.
Define a function get_sorted_recommendations. It takes a list of movie titles as an input. It returns a sorted list of related movie
titles as output, up to five related movies for each input movie title. The movies should be sorted in descending order by their Rotten
Tomatoes rating, as returned by the get_movie_rating function. Break ties in reverse alphabetic order, so that ‘Yahşi Batı’ comes before ‘Eyyvah Eyvah’.
'''
def get_sorted_recommendations(lst):
    """Return related movie titles sorted by Rotten Tomatoes rating, descending.

    Ties break in reverse alphabetical order: the title is the secondary
    element of the sort key and the whole sort is reversed.
    """
    unsorted_list = get_related_titles(lst)
    sorted_list = sorted(unsorted_list, key=lambda x: (get_movie_rating(get_movie_data(x)), x), reverse=True)
    return sorted_list
| 3.65625 | 4 |
api/coreapp/consumers.py | NeuronQ/histo-bot | 2 | 12759192 | <reponame>NeuronQ/histo-bot<gh_stars>1-10
import json
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
from .models import MLModel
from utils.utils import pp
class TrainingProgressConsumer(WebsocketConsumer):
    """WebSocket consumer that runs model training and reports progress.

    Each (model, training session) pair has its own channel-layer group so
    progress can keep being broadcast even after a connection drop or a
    page refresh; a reconnecting socket simply rejoins the same group.
    Messages sent by this consumer go both directly to its own socket and
    to the group; the group handlers use _origin_consumer_id_ to avoid
    echoing a message back to the consumer that originated it.
    """
    def connect(self):
        """Join the per-session broadcast group and accept the socket."""
        self.model_id = int(self.scope['url_route']['kwargs']['model_id'])
        self.ts_id = int(self.scope['url_route']['kwargs']['training_session_id'])
        self.group_name = f'training.model-{self.model_id}.ts-{self.ts_id}'
        # join group:
        # a group that progress can be broascast to is created so that progress
        # reporting keeps working after connection drops or page refreshes
        async_to_sync(self.channel_layer.group_add)(
            self.group_name,
            self.channel_name
        )
        self.accept()
    def disconnect(self, close_code):
        """Leave the broadcast group when the socket closes."""
        # leave group
        async_to_sync(self.channel_layer.group_discard)(
            self.group_name,
            self.channel_name
        )
    # receive message from channel/WebSocket
    def receive(self, text_data):
        """Dispatch an incoming JSON message by its 'type' field."""
        msg = json.loads(text_data)
        if msg['type'] == 'train':
            self.handle_train(msg)
    def handle_train(self, msg):
        """Run training for this consumer's model, streaming per-epoch progress.

        NOTE: training runs synchronously inside the consumer; the socket is
        occupied for the duration of MLModel.train().
        """
        # send message to WebSocket
        self.send_to_channel_and_group({
            'type': 'training_started',
            'training_session_id': self.ts_id
        })
        model = MLModel.objects.get(pk=self.model_id)
        def on_epoch_done(epoch, *args, **kwargs):
            # Progress callback invoked by model.train() after each epoch
            self.send_to_channel_and_group({
                'type': 'training_progress',
                'training_session_id': self.ts_id,
                'epoch': epoch + 1,
            })
        # __fake__ lets the client request a dry run with no real training
        fake = msg.get('__fake__', False)
        params = dict(
            dataset_ids=list(map(int, msg.get('dataset_ids'))),
            training_hparams=msg.get('training_hparams'),
            on_epoch_done=on_epoch_done,
            _fake=fake,
        )
        print(f"\n====== training:\n", pp(params))
        # import pdb; pdb.set_trace()
        res = model.train(**params)
        if not fake:
            # Coerce the training result into JSON-serializable primitives
            json_safe_res = {
                'scores': res['scores'],
                'results': [
                    (str(k), list(map(str, v))) for k, v in res['results'].items()
                ],
                'new_parameters_path': (
                    str(res['new_parameters_path'])
                    if res.get('new_parameters_path', None) else
                    None),
                'new_learner_path': (
                    str(res['new_learner_path'])
                    if res.get('new_learner_path', None) else
                    None),
            }
        else:
            json_safe_res = {}
        self.send_to_channel_and_group({
            'type': 'training_done',
            'result': json_safe_res,
        })
    # receive message from room group
    def broadcast_training_progress(self, msg):
        """Group handler: relay progress to this socket unless we originated it."""
        # for current consumer the message has already been sent to socket
        if msg['_origin_consumer_id_'] == id(self):
            return
        # send message to WebSocket
        self.send_json({**msg, 'type': 'training_progress'})
    def broadcast_training_started(self, msg):
        """Group handler: relay 'training started' unless we originated it."""
        if msg['_origin_consumer_id_'] == id(self):
            return
        self.send_json({**msg, 'type': 'training_started'})
    def broadcast_training_done(self, msg):
        """Group handler: relay the final result unless we originated it."""
        if msg['_origin_consumer_id_'] == id(self):
            return
        self.send_json({**msg, 'type': 'training_done'})
    def send_json(self, msg):
        """Serialize *msg* and send it down this consumer's WebSocket."""
        self.send(text_data=json.dumps(msg))
    def send_to_group(self, msg):
        """Publish *msg* to this session's channel-layer group."""
        # self.channel_layer.group_send(self.group_name, msg)
        async_to_sync(self.channel_layer.group_send)(self.group_name, msg)
    def send_to_channel_and_group(self, msg):
        """Send *msg* to our own socket and broadcast it to the group.

        The group copy gets a 'broadcast_*' type (routing it to the
        broadcast_* handlers above) and carries _origin_consumer_id_ so the
        originating consumer can skip the echo.
        """
        self.send_json(msg)
        self.send_to_group({
            **msg,
            'type': 'broadcast_' + msg['type'],
            # use _origin_consumer_id_ so current consumer can ignore this
            '_origin_consumer_id_': id(self)
        })
| 2.1875 | 2 |
Python/Learning/Language/variables.py | prynix/learning-programming | 2 | 12759193 | <gh_stars>1-10
# Simple carpool arithmetic exercise (variables and expressions).
cars = 100
space_in_car = 4
drivers = 30
passengers = 90
# Only cars that have a driver are actually driven
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_car
# True division: average number of passengers per driven car (a float)
average_passengers_per_car = passengers / cars_driven
print("Cars: ", cars)
print(drivers)
print(cars_not_driven)
print(carpool_capacity)
print(passengers)
print(average_passengers_per_car)
| 3.4375 | 3 |
openstack-congress-9.0.0/congress/z3/z3types.py | scottwedge/OpenStack-Stein | 50 | 12759194 | <reponame>scottwedge/OpenStack-Stein
# Copyright 2018 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Type translators between Congress and Z3."""
import abc
import six
from congress import data_types
from congress import exception
try:
import z3
Z3_AVAILABLE = True
except ImportError:
Z3_AVAILABLE = False
z3 = None
MYPY = False
if MYPY:
# pylint: disable = unused-import
from typing import Any, Union, List, Optional # noqa
Z3OPT = z3
@six.add_metaclass(abc.ABCMeta)
class Z3Type(object):
    """Translate Openstack values to Z3"""

    def __init__(self, name, type_instance):
        # name: the Congress-facing type name; type_instance: the Z3 sort
        # used to represent values of this type in the solver.
        self.name = name
        self.type_instance = type_instance

    @abc.abstractmethod
    def to_z3(self, val, strict=False):
        # type: (Any, bool) -> z3.ExprRef
        """Transforms a value from OpenStack in a Z3 value"""
        raise NotImplementedError

    @abc.abstractmethod
    def to_os(self, val):
        # type: (z3.ExprRef) -> Any
        """Transforms a value from Z3 back to python"""
        raise NotImplementedError

    def type(self):
        # type: () -> z3.SortRef
        """Gives back the Z3 type"""
        return self.type_instance

    def reset(self):
        """Reset internal state of type transformer"""
        # No-op by default; subclasses holding lookup tables override this.
        pass
class BoolType(Z3Type):
    """Z3 translator for Python booleans."""

    def __init__(self):
        super(BoolType, self).__init__(u'Bool', z3.BoolSort())

    def to_z3(self, val, strict=False):
        """Wrap ``val`` as a Z3 boolean constant."""
        return z3.BoolVal(val)

    def to_os(self, val):
        """A Z3 boolean is true iff its declaration is the TRUE operator."""
        is_true = (val.decl().kind() == z3.Z3_OP_TRUE)
        return is_true
class StringType(Z3Type):
    """Z3 translator for strings, encoded as bit-vector constants.

    Each distinct value is assigned the next free integer code; ``map``
    and ``back`` hold the two directions of that bijection.
    """

    def __init__(self, name, size=16):
        super(StringType, self).__init__(name, z3.BitVecSort(size))
        self.map = {}
        self.back = {}

    def to_z3(self, val, strict=False):
        try:
            return self.map[val]
        except KeyError:
            code = len(self.map)
            bvect = z3.BitVecVal(code, self.type_instance)
            self.map[val] = bvect
            self.back[code] = val
            return bvect

    def to_os(self, val):
        return self.back[val.as_long()]

    def reset(self):
        """Forget every code assignment made so far."""
        self.map = {}
        self.back = {}
class FiniteType(StringType):
    """Z3 Coding for data_types with a finite number of elements

    This is the counterpart to data_types.CongressTypeFiniteDomain.
    """

    def __init__(self, name, domain):
        # One extra code is reserved for the __OTHER__ catch-all value.
        size = (len(domain) + 1).bit_length()
        super(FiniteType, self).__init__(name, size)
        self.domain = domain

    def to_z3(self, val, strict=False):
        """Return the bit-vector constant coding ``val``.

        Out-of-domain values either raise (when ``strict``) or all collapse
        onto the single reserved ``__OTHER__`` code.  ``None`` is accepted
        and coded like a regular value.
        """
        if val in self.map:
            return self.map[val]
        if val not in self.domain and val is not None:
            if strict:
                raise exception.PolicyRuntimeException(
                    "Z3 Finite type: {} is not a value of {}".format(
                        val, self.name))
            val = '__OTHER__'
            if val in self.map:
                # Reuse the existing catch-all code instead of minting a
                # fresh one: equal OS values must map to equal Z3 constants
                # (the original code re-coded __OTHER__ on every new
                # out-of-domain value, leaving stale codes in self.back).
                return self.map[val]
        code = len(self.map)
        bvect = z3.BitVecVal(code, self.type_instance)
        self.map[val] = bvect
        self.back[code] = val
        return bvect
class IntType(Z3Type):
    """Z3 translator for integers, encoded directly as bit-vector values.

    Unlike StringType, no lookup table is needed: the integer itself is
    the code.  (The original carried unused ``map``/``back`` dicts copied
    from StringType; they are dead state and have been removed.)
    """

    def __init__(self, name, size=32):
        super(IntType, self).__init__(name, z3.BitVecSort(size))

    def to_z3(self, val, strict=False):
        return z3.BitVecVal(val, self.type_instance)

    def to_os(self, val):
        return val.as_long()
class DummyType(Z3Type):
    """Inert translator used when the z3 package is not importable."""

    def to_z3(self, val, strict=False):
        return None

    def to_os(self, val):
        return None
class TypeRegistry(object):
    """A registry of Z3 types and their translators"""

    def __init__(self):
        self.type_dict = {}  # type: Dict[Str, Z3Type]
        self.top_type = DummyType('dummy', None)
        self.init()

    def register(self, typ):
        # type: (Z3Type) -> None
        """Registers a new Z3 type"""
        self.type_dict[typ.name] = typ

    def init(self):
        """Populate the registry with the built-in translators."""
        if not Z3_AVAILABLE:
            return
        self.top_type = StringType(u'Scalar', 34)
        builtins = [
            self.top_type,
            StringType(u'Str', 32),
            IntType(u'Int', 32),
            BoolType(),
            StringType('IPAddress', 32),
            StringType('IPNetwork', 32),
            StringType('UUID', 32),
        ]
        for translator in builtins:
            self.register(translator)

    def get_type(self, name):
        # type: (str) -> z3.SortRef
        """Return a Z3 type given a type name"""
        return self.get_translator(name).type()

    def get_translator(self, name):
        # type: (str) -> Z3Type
        """Return the translator for a given type name.

        Unknown names are resolved through the Congress type registry:
        finite-domain Congress types get a FiniteType translator created
        and registered on the fly; anything else is an error.
        """
        translator = self.type_dict.get(name)
        if translator is not None:
            return translator
        try:
            congress_type = data_types.TypesRegistry.type_class(name)
        except KeyError:
            raise exception.PolicyRuntimeException(
                "Z3 typechecker: Unknown congress type {}".format(name))
        if not issubclass(congress_type, data_types.CongressTypeFiniteDomain):
            raise exception.PolicyRuntimeException(
                "Z3 typechecker: cannot handle type {}".format(name))
        translator = FiniteType(name, congress_type.DOMAIN)
        self.register(translator)
        return translator

    def reset(self):
        # type: () -> None
        """Reset the internal tables of all types"""
        for translator in six.itervalues(self.type_dict):
            translator.reset()
def z3_to_array(expr):
    # type: (z3.BoolRef) -> Union[bool, List[List[Any]]]
    """Compiles back a Z3 result to a matrix of values"""

    def fail(kind):
        """Abort on an expression shape we do not know how to translate."""
        raise exception.PolicyRuntimeException(
            "Bad Z3 result not translatable {}: {}".format(expr, kind))

    def extract(item):
        """Extract a row"""
        kind = item.decl().kind()
        if kind == z3.Z3_OP_AND:
            return [conj.children()[1] for conj in item.children()]
        if kind == z3.Z3_OP_EQ:
            return [item.children()[1]]
        fail(kind)

    kind = expr.decl().kind()
    if kind == z3.Z3_OP_OR:
        return [extract(disj) for disj in expr.children()]
    if kind == z3.Z3_OP_AND:
        return [[conj.children()[1] for conj in expr.children()]]
    if kind == z3.Z3_OP_EQ:
        return [[expr.children()[1]]]
    if kind == z3.Z3_OP_FALSE:
        return False
    if kind == z3.Z3_OP_TRUE:
        return True
    fail(kind)
| 1.953125 | 2 |
peppermynt/processors.py | valrus/mynt | 0 | 12759195 | <reponame>valrus/mynt<filename>peppermynt/processors.py
# -*- coding: utf-8 -*-
from calendar import timegm
from collections import defaultdict
from datetime import datetime
from importlib import import_module
from os import path as op
import re
from pkg_resources import DistributionNotFound, iter_entry_points, load_entry_point
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from peppermynt.containers import Config, Container, Item, Items, Posts, SiteContent, Page
from peppermynt.exceptions import ConfigException, ContentException, ParserException, RendererException
from peppermynt.fs import File
from peppermynt.utils import get_logger, dest_path, Timer, unescape, Url
logger = get_logger('peppermynt')
class Reader:
    """Discovers parser plugins and turns source files into content items."""

    def __init__(self, src, temp, dest, site, writer):
        self._writer = writer
        self._parsers = {}                    # parser name -> Parser class
        self._extensions = defaultdict(list)  # file extension -> parser names
        self._cache = {}                      # parser name -> Parser instance

        self.src = src
        self.temp = temp
        self.dest = dest
        self.site = site

        self._find_parsers()

    def _find_parsers(self):
        """Load ``peppermynt.parsers`` entry points and index them by extension."""
        for parser in iter_entry_points('peppermynt.parsers'):
            name = parser.name

            try:
                Parser = parser.load()
            except DistributionNotFound as e:
                logger.debug('@@ The %s parser could not be loaded due to a missing requirement: %s.', name, str(e))
                continue

            for extension in Parser.accepts:
                # A parser explicitly configured for this extension in the
                # site config takes priority over any other parser.
                if 'parsers' in self.site and self.site['parsers'].get(extension.lstrip('.')) == name:
                    self._extensions[extension].insert(0, name)
                else:
                    self._extensions[extension].append(name)

            self._parsers[name] = Parser

    def _get_date(self, mtime, date):
        """Combine a ``YYYY-MM-DD[-HH[-MM]]`` filename date with the mtime.

        Missing hour/minute components are filled in from ``mtime``.
        """
        if not date:
            return mtime

        d = [None, None, None, 0, 0]

        for i, v in enumerate(date.split('-')):
            d[i] = v

        if not d[3]:
            d[3], d[4] = mtime.strftime('%H %M').split()
        elif not d[4]:
            # d[4] is falsy (0) here, so this always formats to '00'.
            d[4] = '{0:02d}'.format(d[4])

        return datetime.strptime('-'.join(d), '%Y-%m-%d-%H-%M')

    def _get_parser(self, item, parser = None):
        """Return (and cache) the parser instance to use for ``item``."""
        if not parser:
            try:
                parser = self._extensions[item.extension()][0]
            except KeyError:
                raise ParserException('No parser found that accepts \'{0}\' files.'.format(item.extension()),
                    'src: {0}'.format(item))

        if parser in self._cache:
            return self._cache[parser]

        options = self.site.get(parser, None)

        if parser in self._parsers:
            Parser = self._parsers[parser](options)
        else:
            try:
                Parser = import_module('peppermynt.parsers.{0}'.format(parser)).Parser(options)
            except ImportError:
                raise ParserException('The {0} parser could not be found.'.format(parser))

        self._cache[parser] = Parser

        return Parser

    def _parse_filename(self, f):
        """Split a filename into (text, date); date defaults to the mtime."""
        date, text = re.match(r'(?:(\d{4}(?:-\d{2}-\d{2}){1,2})-)?(.+)', f.name).groups()

        return (text, self._get_date(f.mtime, date))

    def _init_container(self, container):
        """Fill, sort, tag and archive a content container."""
        for f in container.path:
            container.add(self._init_item(container.config, f))

        container.sort()
        container.tag()
        container.archive()

        return container

    def _init_item(self, config, f, simple = False):
        """Build an Item from a source file without rendering its body yet."""
        Timer.start()

        frontmatter, bodymatter = self._parse_item_frontmatter(f)
        item = Item(f.path)

        text, date = self._parse_filename(f)

        item['date'] = date.strftime(self.site['date_format'])
        item['timestamp'] = timegm(date.utctimetuple())

        if simple:
            item['url'] = Url.from_path(f.root.path.replace(self.src.path, ''), text)
        else:
            item['tags'] = []
            item['url'] = Url.from_format(config['url'], text, date, frontmatter)
            item['dest'] = dest_path(self.dest.path, item['url'])

        item.update(frontmatter)
        item['raw_content'] = bodymatter

        return item

    def parse_item(self, config, item, simple = False):
        """Render an item's raw content and (unless simple) take its excerpt."""
        bodymatter = item.pop('raw_content')
        parser = self._get_parser(item, item.get('parser', config.get('parser', None)))
        content = parser.parse(self._writer.from_string(bodymatter, item))
        item['content'] = content

        if not simple:
            # Excerpt is the first <p>...</p> of the rendered content.
            item['excerpt'] = re.search(r'\A.*?(?:<p>(.+?)</p>)?', content, re.M | re.S).group(1)

        logger.debug('.. (%.3fs) %s', Timer.stop(), str(item).replace(self.src.path, ''))

        return item

    def _parse_item_frontmatter(self, f):
        """Split a source file into (frontmatter, bodymatter), validating both."""
        try:
            frontmatter, bodymatter = re.search(r'\A---\s+^(.+?)$\s+---\s*(.*)\Z', f.content, re.M | re.S).groups()
            frontmatter = Config(frontmatter)
        except AttributeError:
            raise ContentException('Invalid frontmatter.',
                'src: {0}'.format(f.path),
                'frontmatter must not be empty')
        except ConfigException:
            # Fixed typo in the user-facing detail ('fontmatter').
            raise ConfigException('Invalid frontmatter.',
                'src: {0}'.format(f.path),
                'frontmatter contains invalid YAML')

        if 'layout' not in frontmatter:
            raise ContentException('Invalid frontmatter.',
                'src: {0}'.format(f.path),
                'layout must be set')

        frontmatter.pop('url', None)

        return frontmatter, bodymatter

    def init_parse(self):
        """Walk the source tree and return the site's content inventory."""
        posts = self._init_container(Posts(self.src, self.site))
        containers = {}
        miscellany = Container('miscellany', self.src, None)
        pages = posts.pages
        feeds = []

        for name, config in self.site['containers'].items():
            container = self._init_container(Items(name, self.src, config))

            containers[name] = container
            pages.extend(container.pages)

        for f in miscellany.path:
            if f.extension in self._extensions:
                miscellany.add(self._init_item(miscellany.config, f, True))
            elif f.extension == '.xml':
                # Assume for now that the only xml files are feeds
                feeds.append(Page(f.path.replace(self.src.path, ''), None, None))
            elif f.extension in ('.html', '.htm'):
                pages.append(Page(f.path.replace(self.src.path, ''), None, None))

        pages.extend(miscellany.pages)

        return SiteContent(posts, containers, pages, feeds)
class Writer:
    """Renders templates to destination files, with optional highlighting."""

    def __init__(self, src, temp, dest, site):
        self.src = src
        self.temp = temp
        self.dest = dest
        self.site = site

        self._renderer = self._get_renderer()

    def _get_renderer(self):
        """Instantiate the renderer named in the site config.

        A packaged ``peppermynt.renderers`` entry point is tried first,
        then the bundled ``peppermynt.renderers.<name>`` module.
        """
        renderer = self.site['renderer']
        options = self.site.get(renderer, None)

        try:
            Renderer = load_entry_point('peppermynt', 'peppermynt.renderers', renderer)
        except DistributionNotFound as e:
            raise RendererException('The {0} renderer requires {1}.'.format(renderer, str(e)))
        except ImportError:
            try:
                Renderer = import_module('peppermynt.renderers.{0}'.format(renderer)).Renderer
            except ImportError:
                raise RendererException('The {0} renderer could not be found.'.format(renderer))

        return Renderer(self.src.path, options)

    def _highlight(self, match):
        """Regex callback: replace one fenced code block with Pygments HTML."""
        language, code = match.groups()
        formatter = HtmlFormatter(linenos = 'table')
        code = unescape(code)

        try:
            code = highlight(code, get_lexer_by_name(language), formatter)
        except ClassNotFound:
            # Unknown language: fall back to plain-text highlighting.
            code = highlight(code, get_lexer_by_name('text'), formatter)

        return '<div class="code"><div>{0}</div></div>'.format(code)

    def _pygmentize(self, html):
        """Highlight every ``data-lang``-tagged code block in the HTML."""
        return re.sub(r'<pre><code[^>]+data-lang="([^>]+)"[^>]*>(.+?)</code></pre>', self._highlight, html, flags = re.S)

    def from_string(self, string, data = None):
        """Render a template given as a string."""
        return self._renderer.from_string(string, data)

    def register(self, data):
        """Expose ``data`` globally to the underlying renderer."""
        self._renderer.register(data)

    def render_path(self, template, _data = None, url = None):
        """Compute the destination path for a template/url pair."""
        return dest_path(self.dest.path, url or template)

    def render(self, template, data = None, url = None):
        """Render ``template`` and return the destination File (not written)."""
        path = self.render_path(template, data, url)

        try:
            Timer.start()
            content = self._renderer.render(template, data)

            if self.site['pygmentize']:
                content = self._pygmentize(content)

            logger.debug('.. (%.3fs) %s', Timer.stop(), path.replace(self.dest.path, ''))
        except RendererException as e:
            raise RendererException(
                e.message,
                '{0} in container item {1}'.format(template, data.get('item', url or template))
            )

        return File(path, content)
| 2.1875 | 2 |
base/views.py | Antonio-Neves/My-Portfolio | 1 | 12759196 | <reponame>Antonio-Neves/My-Portfolio<gh_stars>1-10
from django.urls import reverse_lazy
from django.views.generic import FormView
from principal.forms import ContactForm
class ContactFormView(FormView):
    """Contact page: on a valid submission, e-mail the form contents.

    Redirects to the 'index' URL after a successful send.
    """
    form_class = ContactForm
    success_url = reverse_lazy('index')

    def form_valid(self, form, *args, **kwargs):
        # Deliver the message before redirecting to success_url.
        form.send_mail()
        return super().form_valid(form)

    # NOTE: the previous form_invalid override was a pure pass-through to
    # super() and has been removed as dead code.
| 2.15625 | 2 |
tests/test_serialize.py | glucoseinc/CircleCore | 3 | 12759197 | import uuid
import pytest
from circle_core.message import ModuleMessage
from circle_core.serialize import serialize
from circle_core.types import BlobMetadata
from circle_core.workers.blobstore import StoredBlob
# Both blob representations (BlobMetadata and StoredBlob) carry the same
# (source, type, data) triple and are expected to serialize identically.
@pytest.mark.parametrize( # noqa: F811
    ('payload', 'expected'),
    [
        (
            {'data': BlobMetadata('text/plain', 'deadbeafdeadbeafdeadbeafdeadbeaf', None)},
            '''\
{"boxId": "539ce356a7cb4bfc853ec1a8147f021f", "counter": 0, "payload": {"data": {"$data": null, \
"$source": "text/plain", "$type": "deadbeafdeadbeafdeadbeafdeadbeaf"}}, "timestamp": "1545895047.000"}\
'''
        ),
        (
            {'data': StoredBlob('text/plain', 'deadbeafdeadbeafdeadbeafdeadbeaf', None)},
            '''\
{"boxId": "539ce356a7cb4bfc853ec1a8147f021f", "counter": 0, "payload": {"data": {"$data": null, \
"$source": "text/plain", "$type": "deadbeafdeadbeafdeadbeafdeadbeaf"}}, "timestamp": "1545895047.000"}\
'''
        )
    ]
)
def test_message_jsonize(payload, expected):
    """serialize() must render a ModuleMessage to the expected JSON string."""
    message = ModuleMessage(uuid.UUID('539CE356-A7CB-4BFC-853E-C1A8147F021F'), '1545895047.000', 0, payload)
    serialized = serialize(message)
    assert serialized == expected
| 2.25 | 2 |
tests/plugintest/test_plugin.py | andy-maier/python-garbagetracker | 0 | 12759198 | """
Testcases for the pytest plugin of Yagot.
Note: 'testdir' is a fixture provided by the pytester plugin of pytest.
See https://docs.pytest.org/en/latest/reference.html#testdir for details.
"""
import pytest
def test_help_message(testdir):
    """
    The pytest help text must advertise every Yagot command-line option.
    """
    result = testdir.runpytest('--help')
    expected_lines = [
        '*Yagot:',
        '* --yagot*',
        '* --yagot-leaks-only*',
        '* --yagot-ignore-types=*',
    ]
    result.stdout.fnmatch_lines(expected_lines)
    assert result.ret == 0
def test_disabled(testdir):
    """
    Without any Yagot options enabled, a trivial clean test must pass.
    """
    source = """
def test_clean():
    _ = dict()
"""
    testdir.makepyfile(source)
    outcome = testdir.runpytest()
    assert outcome.ret == 0
def test_collected_clean(testdir):
    """
    `--yagot` enabled, but the test creates no collected objects: pass.
    """
    source = """
def test_clean():
    _ = dict()
"""
    testdir.makepyfile(source)
    outcome = testdir.runpytest('--yagot')
    assert outcome.ret == 0
def test_uncollectable_clean(testdir):
    """
    `--yagot --yagot-leaks-only` enabled, but nothing uncollectable is
    produced: the run must pass.
    """
    source = """
def test_clean():
    _ = dict()
"""
    testdir.makepyfile(source)
    outcome = testdir.runpytest('--yagot', '--yagot-leaks-only')
    assert outcome.ret == 0
def test_collected_selfref(testdir):
    """
    `--yagot` enabled and a self-referencing dict created: the plugin
    must report the collected object and fail the run.
    """
    source = """
def test_clean():
    d1 = dict()
    d1['self'] = d1
"""
    testdir.makepyfile(source)
    outcome = testdir.runpytest('--yagot')
    outcome.stdout.fnmatch_lines([
        '*There were 1 collected or uncollectable object(s) '
        'caused by function test_collected_selfref.py::test_clean*',
    ])
    assert outcome.ret == 1
def test_collected_selfref_ignored(testdir):
    """
    A self-referencing dict is created, but dict is on the ignore list,
    so the run must pass.
    """
    source = """
def test_clean():
    d1 = dict()
    d1['self'] = d1
"""
    testdir.makepyfile(source)
    outcome = testdir.runpytest('--yagot', '--yagot-ignore-types=dict,list')
    assert outcome.ret == 0
def test_collected_selfref_failed(testdir):
    """
    The testcase itself fails; the assertion failure must be reported
    even though garbage was also produced.
    """
    source = """
def test_fail():
    d1 = dict()
    d1['self'] = d1
    assert False
"""
    testdir.makepyfile(source)
    outcome = testdir.runpytest('--yagot')
    outcome.stdout.fnmatch_lines([
        '*test_collected_selfref_failed.py:4: AssertionError*',
    ])
    assert outcome.ret == 1
@pytest.mark.xfail(reason="Increased reference count is not detected")
def test_uncollectable_incref(testdir):
    """
    `--yagot --yagot-leaks-only` with a C extension (test_leaky) that
    bumps a refcount: one uncollectable object should be reported.
    """
    source = """
import sys
import gc

import yagot
import test_leaky

def test_leak():
    l1 = [1, 2]
    assert gc.is_tracked(l1)
    assert sys.getrefcount(l1) == 2
    test_leaky.incref(l1)
    assert sys.getrefcount(l1) == 3
"""
    testdir.makepyfile(source)
    outcome = testdir.runpytest('--yagot', '--yagot-leaks-only')
    outcome.stdout.fnmatch_lines([
        '*There were 1 uncollectable object(s) '
        'caused by function test_leak.py::test_leak*',
    ])
    assert outcome.ret == 1
| 2.328125 | 2 |
nextgen/bcbio/distributed/lsf.py | bgruening/bcbb | 339 | 12759199 | <gh_stars>100-1000
"""Commandline interaction with LSF schedulers.
"""
import re
import subprocess
_jobid_pat = re.compile("Job <(?P<jobid>\d+)> is")
def submit_job(scheduler_args, command):
    """Submit a job to the scheduler, returning the supplied job ID.

    Raises ValueError if bsub's output does not contain a job ID.
    """
    cl = ["bsub"] + scheduler_args + command
    status = subprocess.check_output(cl)
    match = _jobid_pat.search(status)
    if match is None:
        raise ValueError("Could not find job ID in bsub output: %r" % status)
    # Note: the original used match.groups("jobid")[0]; groups() takes a
    # *default* for unmatched groups, not a group name -- group() is the
    # correct named lookup.
    return match.group("jobid")
def stop_job(jobid):
    """Ask LSF to kill the job with the given ID."""
    subprocess.check_call(["bkill", jobid])
def are_running(jobids):
    """Check if all of the submitted job IDs are running.
    """
    running = set()
    for line in subprocess.check_output(["bjobs"]).split("\n"):
        fields = line.split()
        # bjobs rows are "JOBID USER STAT ..."; keep IDs in RUN state.
        if len(fields) >= 3 and fields[2].lower() == "run":
            running.add(fields[0])
    return len(running.intersection(jobids)) == len(jobids)
| 2.734375 | 3 |
03_picnic/picnic.py | Fleid/tiny_python_projects | 0 | 12759200 | #!/usr/bin/env python3
"""
Author : fleide <<EMAIL>>
Date : 2020-12-04
Purpose: Picnic
"""
import argparse
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Rock the Casbah',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('foods',
                        metavar='foods',
                        nargs='+',  # at least one item is required
                        help='Item(s) to bring')

    parser.add_argument('-s',
                        '--sorted',
                        action='store_true',
                        help='Sort the items (default: False)')

    return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
sorted = args.sorted
foods = args.foods
if sorted:
foods.sort()
bringing = ''
if (len(foods) == 1):
bringing = foods[0]
elif (len(foods) == 2):
bringing = f'{foods[0]} and {foods[1]}'
else:
last_food = foods.pop(-1)
bringing = ', '.join(foods) + f', and {last_food}'
print(f'You are bringing {bringing}.')
# --------------------------------------------------
# Script entry point.
if __name__ == '__main__':
    main()
| 3.390625 | 3 |
eth2_sim/simulation/sim_config.py | hwwhww/trinity | 2 | 12759201 |
class Config(object):
    """Static configuration constants for the eth2 simulation.

    NOTE(review): several units (ticks vs. seconds) are inferred from the
    arithmetic below -- confirm against the simulation loop.
    """

    # Measurement Parameters
    TOTAL_TICKS = 80            # how many ticks the simulation runs
    PRECISION = 1               # tick granularity scale factor -- TODO confirm unit
    INITIAL_TIMESTAMP = 1

    # Acceleration Parameters (trade checking/observability for speed)
    MINIMIZE_CHECKING = True
    GENERATE_STATE = False
    LOGGING_NETWORK = False

    # SPEC Parameters
    SLOTS_PER_EPOCH = 8

    # System Parameters
    NUM_VALIDATORS = 8

    # Network Parameters
    LATENCY = 1.5 / PRECISION   # message latency, scaled by PRECISION
    RELIABILITY = 1.0           # presumably a delivery probability; verify
    NUM_PEERS = 10
    SHARD_NUM_PEERS = 5
    TARGET_TOTAL_TPS = 1
    # Mean transaction inter-arrival time per validator, derived from the
    # target total TPS (order of evaluation matters for the names above).
    MEAN_TX_ARRIVAL_TIME = ((1 / TARGET_TOTAL_TPS) * PRECISION) * NUM_VALIDATORS

    # Validator Parameters
    TIME_OFFSET = 1
    PROB_CREATE_BLOCK_SUCCESS = 0.999
    DISCONNECT_THRESHOLD = 5
| 1.695313 | 2 |
plays/add_test_data.py | shchigi/lfl | 0 | 12759202 | <filename>plays/add_test_data.py
# -*- coding: utf-8 -*-
__author__ = 'rakot'
import datetime
from models import Person, Team, Match, Card, Goal
def add_test_data():
    """Insert a small fixture data set: two teams, four players, two
    matches, four goals and one card.

    NOTE: Python 2 module (print statements).  Saves hit the database in
    order; the prints after each goal suggest Goal.save() updates the
    match score -- verify against the Goal model.
    """
    # -- Teams ------------------------------------------------------------
    mipt_team = Team(name=u"Физтех", start_date=datetime.date.today())
    mipt_team.save()

    anytime_team = Team(name=u"Энитайм", start_date=datetime.date.today())
    anytime_team.save()

    # -- Players ----------------------------------------------------------
    den = Person(first_name=u"Денис",
                 last_name=u"Щигельский",
                 position=Person.BACK,
                 is_active=True,
                 is_captain=False,
                 start_date=datetime.date.today(),
                 cell_phone=u"+79151164158",
                 email=u"<EMAIL>",
                 team=mipt_team)
    den.save()

    stan = Person(first_name=u"Илья",
                  last_name=u"Станиславский",
                  position=Person.HALFBACK,
                  is_active=True,
                  is_captain=False,
                  start_date=datetime.date.today(),
                  cell_phone=u"+79670614948",
                  team=mipt_team)
    stan.save()

    burov = Person(first_name=u"Александр",
                   last_name=u"Буров",
                   position=Person.FORWARD,
                   is_active=True,
                   is_captain=True,
                   start_date=datetime.date.today(),
                   cell_phone=u"89197711249",
                   team=mipt_team)
    burov.save()

    ahyan = Person(first_name=u"Ара",
                   last_name=u"Ахян",
                   position=Person.FORWARD,
                   is_active=True,
                   is_captain=True,
                   start_date=datetime.date.today(),
                   cell_phone=u"89123711249",
                   team=anytime_team)
    ahyan.save()

    # -- Matches (scores start at 0:0) ------------------------------------
    mipt_anytime = Match(date=datetime.date.today(),
                         time=datetime.datetime.now(),
                         home_team=mipt_team,
                         guest_team=anytime_team,
                         guest_team_score=0,
                         home_team_score=0)
    mipt_anytime.save()

    anytime_mipt = Match(date=datetime.date.today().replace(day=30),
                         time=datetime.datetime.now(),
                         home_team=anytime_team,
                         guest_team=mipt_team,
                         guest_team_score=0,
                         home_team_score=0)
    anytime_mipt.save()

    # -- Goals (score is printed after each save) --------------------------
    g1 = Goal(player_scored=stan,
              player_assisted=burov,
              own_goal=False,
              match=mipt_anytime,
              minute=11,
              is_penalty=False)
    g1.save()
    print mipt_anytime.home_team_score, mipt_anytime.guest_team_score

    g2 = Goal(player_scored=stan,
              player_assisted=den,
              own_goal=False,
              match=mipt_anytime,
              minute=15,
              is_penalty=False)
    g2.save()
    print mipt_anytime.home_team_score, mipt_anytime.guest_team_score

    g3 = Goal(player_scored=burov,
              own_goal=True,
              match=mipt_anytime,
              minute=58,
              is_penalty=False)
    g3.save()
    print mipt_anytime.home_team_score, mipt_anytime.guest_team_score

    g4 = Goal(player_scored=ahyan,
              own_goal=False,
              match=mipt_anytime,
              minute=59,
              is_penalty=False)
    g4.save()
    print mipt_anytime.home_team_score, mipt_anytime.guest_team_score

    # -- Cards -------------------------------------------------------------
    card1 = Card(type='Y',
                 person=den,
                 minute=24)
    card1.save()
tests/test_find_recipes.py | y4izus/recipe-manager | 0 | 12759203 | <gh_stars>0
import unittest
from bs4 import BeautifulSoup
from find_recipes import get_recipes_with, get_recipes_info
class TestFindRecipesMethods(unittest.TestCase):
    """Tests for find_recipes.get_recipes_with / get_recipes_info.

    NOTE(review): the expected values look like they mirror live site
    content -- confirm whether these functions are mocked, otherwise the
    tests will drift.
    """

    def test_get_recipes_with_food(self):
        '''should return the expected recipe <a> links for berenjenas'''
        # (Original docstring claimed "the first 10" links, but six are
        # expected below.)
        food = 'berenjenas'
        recipes_anchors = get_recipes_with(food)
        recipes_anchors_bs4 = set(map(lambda a: BeautifulSoup(a).a, [
            '<a href="/robot-cocina/recetas/verduras/berenjenas-la-crema-con-gambas-y-jamon">Berenjenas a la crema con gambas y jamón</a>',
            '<a href="/robot-cocina/recetas/verduras/berenjenas-rellenas">Berenjenas rellenas</a>',
            '<a href="/robot-cocina/recetas/verduras/berenjenas-rellenas-la-mallorquina">Berenjenas rellenas a la mallorquina</a>',
            '<a href="/robot-cocina/recetas/sopas-y-cremas/crema-de-berenjenas-y-puerro">Crema de berenjenas y puerro</a>',
            '<a href="/robot-cocina/recetas/pates/pate-de-berenjenas">Paté de berenjenas</a>',
            '<a href="/robot-cocina/recetas/huevos/tortilla-de-berenjenas">Tortilla de berenjenas</a>',
        ]))
        self.assertEqual(recipes_anchors, recipes_anchors_bs4)

    def test_get_recipes_info(self):
        '''should get the nutritial information of the recipes'''
        recipe_tag_str = '<a href="/robot-cocina/recetas/huevos/tortilla-de-berenjenas">Tortilla de berenjenas</a>'
        recipe_tag = BeautifulSoup(recipe_tag_str, "html.parser")
        recipes_info = get_recipes_info(recipe_tag)
        self.assertEqual(
            recipes_info,
            [
                {'title': 'Tortilla de berenjenas',
                 'url': '/robot-cocina/recetas/huevos/tortilla-de-berenjenas',
                 'kcal': '293.00',
                 'fats': '25.50',
                 'proteins': '8.63',
                 'carbohydrates': '5.20',
                 'fiber': '3.80'}
            ]
        )
# Script entry point: run the test suite directly.
if __name__ == '__main__':
    unittest.main()
| 2.796875 | 3 |
vendor-local/lib/python/billiard/synchronize.py | Mozilla-GitHub-Standards/6f0d85288b5b0ef8beecb60345173dc14c98e40f48e1307a444ab1e08231e695 | 1 | 12759204 | <filename>vendor-local/lib/python/billiard/synchronize.py
#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, <NAME>
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event',
]
import itertools
import os
import signal
import sys
import threading
from time import time as _time
from ._ext import _billiard, ensure_SemLock
from .process import current_process
from .util import Finalize, register_after_fork, debug
from .forking import assert_spawning, Popen
from .compat import bytes, closerange
# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
ensure_SemLock()
#
# Constants
#
RECURSIVE_MUTEX, SEMAPHORE = range(2)
SEM_VALUE_MAX = _billiard.SemLock.SEM_VALUE_MAX
try:
sem_unlink = _billiard.SemLock.sem_unlink
except AttributeError:
sem_unlink = None
#
# Base class for semaphores and mutexes; wraps `_billiard.SemLock`
#
def _semname(sl):
try:
return sl.name
except AttributeError:
pass
class SemLock(object):
    # Monotonically increasing counter used to build unique semaphore names.
    _counter = itertools.count()

    def __init__(self, kind, value, maxvalue):
        # Imported lazily to avoid a circular import with .forking.
        from .forking import _forking_is_enabled
        unlink_immediately = _forking_is_enabled or sys.platform == 'win32'
        if sem_unlink:
            sl = self._semlock = _billiard.SemLock(
                kind, value, maxvalue, self._make_name(), unlink_immediately)
        else:
            sl = self._semlock = _billiard.SemLock(kind, value, maxvalue)
        debug('created semlock with handle %s', sl.handle)
        self._make_methods()

        if sem_unlink:
            if sys.platform != 'win32':
                def _after_fork(obj):
                    obj._semlock._after_fork()
                register_after_fork(self, _after_fork)

            if _semname(self._semlock) is not None:
                # We only get here if we are on Unix with forking
                # disabled.  When the object is garbage collected or the
                # process shuts down we unlink the semaphore name
                Finalize(self, sem_unlink, (self._semlock.name,),
                         exitpriority=0)
                # In case of abnormal termination unlink semaphore name
                _cleanup_semaphore_if_leaked(self._semlock.name)

    def _make_methods(self):
        # Bind the C-level primitives directly onto the instance for speed.
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.__enter__()

    def __exit__(self, *args):
        return self._semlock.__exit__(*args)

    def __getstate__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        sl = self._semlock
        state = (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)
        try:
            # Named semaphores also carry their name across to the child.
            state += (sl.name, )
        except AttributeError:
            pass
        return state

    def __setstate__(self, state):
        self._semlock = _billiard.SemLock._rebuild(*state)
        debug('recreated blocker with handle %r', state[0])
        self._make_methods()

    @staticmethod
    def _make_name():
        # NOTE: ``_counter.next()`` is the Python 2 iterator API.
        return '/%s-%s-%s' % (current_process()._semprefix,
                              os.getpid(), SemLock._counter.next())
class Semaphore(SemLock):
    """Counting semaphore backed by the raw _billiard SemLock."""

    def __init__(self, value=1):
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)

    def get_value(self):
        """Return the semaphore's current counter value."""
        return self._semlock._get_value()

    def __repr__(self):
        try:
            value = self.get_value()
        except Exception:
            value = 'unknown'
        return '<Semaphore(value=%s)>' % value
class BoundedSemaphore(Semaphore):
    """Semaphore whose counter may never exceed its initial value."""

    def __init__(self, value=1):
        # maxvalue == value enforces the bound at the C level.
        SemLock.__init__(self, SEMAPHORE, value, value)

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = 'unknown'
        return '<BoundedSemaphore(value=%s, maxvalue=%s)>' % (
            value, self._semlock.maxvalue)
class Lock(SemLock):
    '''
    Non-recursive lock.
    '''

    def __init__(self):
        SemLock.__init__(self, SEMAPHORE, 1, 1)

    def __repr__(self):
        """Show a best-effort description of the current owner."""
        try:
            sl = self._semlock
            if sl._is_mine():
                name = current_process().name
                thread = threading.currentThread().name
                if thread != 'MainThread':
                    name = '%s|%s' % (name, thread)
            elif sl._get_value() == 1:
                name = 'None'
            elif sl._count() > 0:
                name = 'SomeOtherThread'
            else:
                name = 'SomeOtherProcess'
        except Exception:
            name = 'unknown'
        return '<Lock(owner=%s)>' % name
class RLock(SemLock):
    '''
    Recursive lock
    '''

    def __init__(self):
        SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)

    def __repr__(self):
        """Show the owning process/thread and the recursion count."""
        try:
            sl = self._semlock
            if sl._is_mine():
                name = current_process().name
                thread = threading.currentThread().name
                if thread != 'MainThread':
                    name = '%s|%s' % (name, thread)
                count = sl._count()
            elif sl._get_value() == 1:
                name, count = 'None', 0
            elif sl._count() > 0:
                name, count = 'SomeOtherThread', 'nonzero'
            else:
                name, count = 'SomeOtherProcess', 'nonzero'
        except Exception:
            name, count = 'unknown', 'unknown'
        return '<RLock(%s, %s)>' % (name, count)
class Condition(object):
    '''
    Condition variable
    '''

    def __init__(self, lock=None):
        self._lock = lock or RLock()
        # Bookkeeping semaphores: how many waiters went to sleep, how
        # many have woken, and the semaphore waiters actually block on.
        self._sleeping_count = Semaphore(0)
        self._woken_count = Semaphore(0)
        self._wait_semaphore = Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        return (self._lock, self._sleeping_count,
                self._woken_count, self._wait_semaphore)

    def __setstate__(self, state):
        (self._lock, self._sleeping_count,
         self._woken_count, self._wait_semaphore) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            num_waiters = (self._sleeping_count._semlock._get_value() -
                           self._woken_count._semlock._get_value())
        except Exception:
            # Fixed typo in the repr output ('unkown').
            num_waiters = 'unknown'
        return '<Condition(%s, %s)>' % (self._lock, num_waiters)

    def wait(self, timeout=None):
        assert self._lock._semlock._is_mine(), \
            'must acquire() condition before using wait()'

        # indicate that this thread is going to sleep
        self._sleeping_count.release()

        # release lock
        count = self._lock._semlock._count()
        for i in xrange(count):
            self._lock.release()

        try:
            # wait for notification or timeout
            ret = self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()

            # reacquire lock
            for i in xrange(count):
                self._lock.acquire()
        return ret

    def notify(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        if self._sleeping_count.acquire(False):    # try grabbing a sleeper
            self._wait_semaphore.release()         # wake up one sleeper
            self._woken_count.acquire()            # wait for sleeper to wake

            # rezero _wait_semaphore in case a timeout just happened
            self._wait_semaphore.acquire(False)

    def notify_all(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        sleepers = 0
        while self._sleeping_count.acquire(False):
            self._wait_semaphore.release()         # wake up one sleeper
            sleepers += 1

        if sleepers:
            for i in xrange(sleepers):
                self._woken_count.acquire()        # wait for a sleeper to wake

            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass

    def wait_for(self, predicate, timeout=None):
        """Repeatedly wait() until ``predicate()`` is truthy or ``timeout``."""
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = _time() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - _time()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result
class Event(object):
    # A manual-reset event: ``_flag`` (a semaphore holding 0 or 1 token)
    # stores the state, and ``_cond`` serializes access and wakes waiters.

    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = Semaphore(0)

    def is_set(self):
        """Return True if the event is set (non-destructive check)."""
        self._cond.acquire()
        try:
            if self._flag.acquire(False):
                # Took the token; put it straight back so the state sticks.
                self._flag.release()
                return True
            return False
        finally:
            self._cond.release()

    def set(self):
        """Set the event and wake every current waiter."""
        self._cond.acquire()
        try:
            # Drain first so the token count never exceeds one.
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()
        finally:
            self._cond.release()

    def clear(self):
        """Reset the event to unset (remove the token if present)."""
        self._cond.acquire()
        try:
            self._flag.acquire(False)
        finally:
            self._cond.release()

    def wait(self, timeout=None):
        """Block until the event is set or ``timeout`` elapses.

        Returns True if the event was set, False on timeout.
        """
        self._cond.acquire()
        try:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            # Re-check: the condition wait may have timed out.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
        finally:
            self._cond.release()
if sys.platform != 'win32':
    #
    # Protection against unlinked semaphores if the program ends abnormally
    # and forking has been disabled.
    #

    def _cleanup_semaphore_if_leaked(name):
        """Register *name* with the cleanup helper process so the named
        semaphore is unlinked even if this process dies abnormally."""
        payload = name.encode('ascii') + bytes('\0', 'ascii')
        if len(payload) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError('name too long')
        written = os.write(_get_unlinkfd(), payload)
        assert written == len(payload)
    def _get_unlinkfd():
        """Return the write end of the cleanup pipe, forking the helper
        process on first use.

        The fd is cached on the current process object (``_unlinkfd``),
        so the helper is forked at most once per process.
        """
        cp = current_process()
        if cp._unlinkfd is None:
            r, w = os.pipe()
            pid = os.fork()
            if pid == 0:
                try:
                    from setproctitle import setproctitle
                    setproctitle("[sem_cleanup for %r]" % cp.pid)
                except:
                    # setproctitle is optional; failure only costs a
                    # nicer process title
                    pass
                # Fork a process which will survive until all other processes
                # which have a copy of the write end of the pipe have exited.
                # The forked process just collects names of semaphores until
                # EOF is indicated.  Then it tries unlinking all the names it
                # has collected.
                _collect_names_then_unlink(r)
                os._exit(0)
            os.close(r)
            cp._unlinkfd = w
        return cp._unlinkfd
    def _collect_names_then_unlink(r):
        """Helper-process main loop.

        Reads NUL-separated semaphore names from pipe *r* until EOF (i.e.
        until every writer has exited), then best-effort unlinks each
        collected name.
        """
        # protect the process from ^C and "killall python" etc
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        # close all fds except r
        try:
            MAXFD = os.sysconf("SC_OPEN_MAX")
        except:
            MAXFD = 256
        closerange(0, r)
        closerange(r + 1, MAXFD)
        # collect data written to pipe
        data = []
        while 1:
            try:
                s = os.read(r, 512)
            except:
                # XXX IO lock might be held at fork, so don't try
                # printing unexpected exception - see issue 6721
                # NOTE(review): a persistently failing read would loop
                # forever here -- confirm this matches upstream intent
                pass
            else:
                if not s:
                    break
                data.append(s)
        # attempt to unlink each collected name
        for name in bytes('', 'ascii').join(data).split(bytes('\0', 'ascii')):
            try:
                sem_unlink(name.decode('ascii'))
            except:
                # XXX IO lock might be held at fork, so don't try
                # printing unexpected exception - see issue 6721
                pass
| 2.09375 | 2 |
pronto/xref.py | flying-sheep/pronto | 182 | 12759205 | """Cross-reference object definition.
"""
import typing
import fastobo
from .utils.meta import roundrepr, typechecked
__all__ = ["Xref"]
@roundrepr
class Xref(object):
    """A cross-reference to another document or resource.

    Cross-references (xrefs for short) can be used to back-up definitions
    of entities, synonyms, or to link ontological entities to other
    resources they may have been derived from.  The OBO format version
    1.4 expects references to be proper OBO identifiers that can be
    translated to actual IRIs, so cross-references are encouraged to be
    given as plain IRIs or as prefixed IDs using an ID from the IDspace
    mapping defined in the header.

    Example:
        >>> mp = pronto.Ontology.from_obo_library("mp.obo")
        >>> mp["MP:0030151"].xrefs
        frozenset({Xref('https://en.wikipedia.org/wiki/Buccinator_muscle')})

    Caution:
        `Xref` instances compare (and hash) only by their identifier, so
        a set cannot hold two cross-references with the same identifier
        and different descriptions.
    """

    #: the cross-reference identifier (URL, prefixed or unprefixed ID)
    id: str
    #: optional human-readable description of the cross-reference
    description: typing.Optional[str]

    __slots__ = ("__weakref__", "id", "description")  # noqa: E0602

    @typechecked()
    def __init__(self, id: str, description: typing.Optional[str] = None):
        """Create a new cross-reference.

        Arguments:
            id (str): the identifier of the cross-reference, either as a
                URL, a prefixed identifier, or an unprefixed identifier.
            description (str or None): a human-readable description of
                the cross-reference, if any.

        Raises:
            ValueError: if *id* is not a valid OBO identifier.
        """
        # check the id is valid using fastobo
        if not fastobo.id.is_valid(id):
            raise ValueError("invalid identifier: {}".format(id))
        self.id: str = id
        self.description = description

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Xref):
            return self.id == other.id
        # Return NotImplemented (not False) so the other operand may try
        # the comparison, consistently with __lt__/__le__/__gt__/__ge__.
        # Python's identity fallback keeps `xref == non_xref` False.
        return NotImplemented

    def __gt__(self, other: object) -> bool:
        if isinstance(other, Xref):
            return self.id > other.id
        return NotImplemented

    def __ge__(self, other: object) -> bool:
        if isinstance(other, Xref):
            return self.id >= other.id
        return NotImplemented

    def __lt__(self, other: object) -> bool:
        if isinstance(other, Xref):
            return self.id < other.id
        return NotImplemented

    def __le__(self, other: object) -> bool:
        if isinstance(other, Xref):
            return self.id <= other.id
        return NotImplemented

    def __hash__(self):
        # hash must agree with __eq__, which compares only `id`
        return hash(self.id)
| 2.796875 | 3 |
hyperplan/hpcmd.py | hyperplan-io/cli | 1 | 12759206 | <filename>hyperplan/hpcmd.py
from cmd import Cmd
from hyperplan.api import Api
from hyperplan.features_descriptors import create_features, describe_feature, list_features
from hyperplan.labels_descriptors import create_labels, describe_label, list_labels
from hyperplan.project import create_project, list_projects, update_project, describe_project, delete_project
from hyperplan.algorithm import create_algorithm
from hyperplan.predict import predict
import logging
class HyperplanPrompt(Cmd):
    """Interactive shell for the Hyperplan API.

    Follows the ``cmd.Cmd`` conventions: ``do_<name>`` implements a
    command, ``help_<name>`` prints its usage and ``complete_<name>``
    supplies tab-completion candidates.
    """

    prompt = 'hyperplan> '
    intro = "hyperplan-cli, Type ? to list commands"

    def __init__(self, api, logger):
        """Store the API client and logger used by all commands."""
        Cmd.__init__(self)
        self.api = api
        self.logger = logger

    def do_exit(self, inp):
        """Leave the shell (also reachable via 'x', 'q' and Ctrl-D)."""
        print("Bye")
        # Raising propagates out of Cmd.cmdloop(); the caller treats this
        # as the exit signal.
        raise Exception('')

    def do_login(self, inp):
        """Authenticate against the Hyperplan API."""
        self.api.authenticate(self.logger)

    def help_list(self):
        print('list requires an argument: features, labels, algorithms, projects')

    def complete_list(self, text, line, begidx, endidx):
        return [i
                for i in ('features', 'labels', 'algorithms', 'projects')
                if i.startswith(text)]

    def help_debug(self):
        print('debug requires true or false')

    def do_loglevel(self, inp):
        """Set the CLI log level: loglevel [error|warn|info|debug]."""
        args = inp.split(' ')
        if len(args) > 0 and args[0] != '':
            arg = args[0]
            levels = {
                'error': logging.ERROR,
                'warn': logging.WARN,
                'info': logging.INFO,
                'debug': logging.DEBUG,
            }
            level = levels.get(arg.lower())
            if level is not None:
                self.logger.setLevel(level=level)
                logging.basicConfig(level=level)
                print('log level updated')
            else:
                print('{} is not a valid log level'.format(arg))
        else:
            self.help_debug()

    def do_list(self, inp):
        """List features, labels, algorithms or projects."""
        args = inp.split(' ')
        if len(args) > 0 and args[0] != '':
            arg = args[0]
            if arg == 'features':
                list_features(self.api, self.logger)
            elif arg == 'labels':
                list_labels(self.api, self.logger)
            elif arg == 'algorithms':
                # listing algorithms is not implemented yet
                pass
            elif arg == 'projects':
                list_projects(self.api, self.logger)
            else:
                print('Unknown argument {}'.format(arg))
        else:
            self.help_list()

    def help_describe(self):
        print('describe requires the entity type (feature, label, algorithm, project) and the entity id')

    def help_create(self, example=None):
        if example is None:
            print('create requires an argument: feature, label, algorithm, project and the id')
            print('example: create feature myFeature')
        else:
            print('create requires an argument: feature, label, algorithm, project and the id')
            print('example: create {} my{}'.format(example, example.capitalize()))

    def help_update(self):
        print('update requires an argument: project and the id')
        print('example: update project myProject')

    def help_delete(self):
        # this helper was missing in the original: do_delete raised
        # AttributeError when called without arguments
        print('delete requires an argument: project and the id')
        print('example: delete project myProject')

    def complete_describe(self, text, line, begidx, endidx):
        return [i
                for i in ('feature', 'label', 'algorithm', 'project')
                if i.startswith(text)]

    def complete_create(self, text, line, begidx, endidx):
        return [i
                for i in ('feature', 'label', 'algorithm', 'project')
                if i.startswith(text)]

    def complete_update(self, text, line, begidx, endidx):
        return [i
                for i in (['project'])
                if i.startswith(text)]

    def do_create(self, inp):
        """Create a feature, label, algorithm or project by id."""
        args = inp.split(' ')
        if len(args) > 1 and args[0] != '' and args[1] != '':
            arg = args[0]
            entity_id = args[1]
            if arg == 'feature':
                create_features(self.api, self.logger, entity_id)
            elif arg == 'label':
                create_labels(self.api, self.logger, entity_id)
            elif arg == 'algorithm':
                create_algorithm(self.api, self.logger, entity_id)
            elif arg == 'project':
                create_project(self.api, self.logger, entity_id)
            else:
                print('Unknown argument {}'.format(arg))
        elif len(args) == 1 and args[0] != '':
            # a lone entity type: show a tailored example
            self.help_create(example=args[0])
        else:
            self.help_create(example=None)

    def do_update(self, inp):
        """Update a project: update project <id>."""
        args = inp.split(' ')
        if len(args) > 1 and args[0] != '' and args[1] != '':
            entity_id = args[1]
            update_project(self.api, self.logger, entity_id)
        else:
            self.help_update()

    def do_delete(self, inp):
        """Delete a project: delete project <id>."""
        args = inp.split(' ')
        if len(args) > 1 and args[0] != '' and args[1] != '':
            entity_id = args[1]
            delete_project(self.api, self.logger, entity_id)
        else:
            self.help_delete()

    def do_describe(self, inp):
        """Describe a feature, label, algorithm or project by id."""
        args = inp.split(' ')
        if len(args) > 1 and args[0] != '' and args[1] != '':
            arg = args[0]
            entity_id = args[1]
            if arg == 'feature':
                describe_feature(self.api, self.logger, entity_id)
            elif arg == 'label':
                describe_label(self.api, self.logger, entity_id)
            elif arg == 'algorithm':
                # describing algorithms is not implemented yet
                pass
            elif arg == 'project':
                describe_project(self.api, self.logger, entity_id)
            else:
                print('Unknown argument {}'.format(arg))
        else:
            self.help_describe()

    def help_predict(self):
        print('predict requires a project id as argument')

    def do_predict(self, inp):
        """Request a prediction for the given project id."""
        args = inp.split(' ')
        if len(args) > 0 and args[0] != '':
            project_id = args[0]
            predict(self.api, self.logger, project_id, log=True)
        else:
            self.help_predict()

    def help_exit(self):
        print('exit the application. Shorthand: x q Ctrl-D.')

    def default(self, inp):
        # treat the common shorthands as exit; everything else is ignored
        if inp == 'x' or inp == 'q':
            return self.do_exit(inp)

    do_EOF = do_exit
    help_EOF = help_exit
| 2.359375 | 2 |
bin/old/mapview2tiling.py | PapenfussLab/Srtools | 0 | 12759207 | <gh_stars>0
#!/usr/bin/env python
"""
mapview2tiling.py
Convert mapview format to a tiling
Author: <NAME>
Date: Mon Jun 16 15:17:20 EST 2008
"""
import os, sys, copy
from useful import progressMessage
from gff import Feature
# Print the module docstring as usage text and exit when -h is passed.
if '-h' in sys.argv:
    sys.exit(__doc__)
def loadChrSizes(filename):
    """Load chromosome sizes from a tab-delimited file.

    Each line is "<chrom>\\t<size>[...]"; returns {chrom: int(size)}.
    """
    data = {}
    # use a context manager so the file handle is closed (the original
    # iterated an anonymous open() and leaked the handle)
    with open(filename) as f:
        for line in f:
            tokens = line.strip().split('\t')
            data[tokens[0]] = int(tokens[1])
    return data
iFilename = '/Users/papenfuss/databases/platypus/venom/solexa/mapview_filtered.txt' # sys.argv[1]
oFilename = 'tiling.txt'
tileSize = 35
chrSizeFilename = '/Users/papenfuss/databases/chromSizes/ornAna5.txt'
chrSizes = loadChrSizes(chrSizeFilename)
iFile = open(iFilename)
headers = iFile.readline().strip().split('\t')
oFile = open(oFilename, 'w')
format = "%s\t%i\t%i"
chrom = None
lastChrom = None
countDict = {}
for i,line in enumerate(iFile):
if (i % 1000)==0:
progressMessage('# reads %s', i, 28000000)
tokens = line.strip().split('\t')
d = dict(zip(headers, tokens))
chrom = d['chrom']
if chrom=='MT':
continue
elif 'Ultra' in chrom or 'Contig' in chrom:
pass
else:
chrom = 'chr%s' % chrom
start = int(d['start'])
if chrom!=lastChrom and countDict:
print chrom
for _wStart in xrange(1, chrSizes[lastChrom], tileSize):
counts = countDict.get((lastChrom, _wStart), 0)
print >> oFile, format % (lastChrom, _wStart, counts)
oFile.flush()
countDict = {}
wStart = 1+tileSize*int((start-1)/tileSize)
key = (chrom, wStart)
try:
countDict[key] += 1
except KeyError:
countDict[key] = 1
lastChrom = copy.copy(chrom)
if countDict:
for _wStart in xrange(1, chrSizes[chrom], 35):
counts = countDict.get((chrom, _wStart), 0)
print >> oFile, format % (chrom, _wStart, counts)
oFile.close()
| 2.671875 | 3 |
blesuite/connection_manager.py | decidedlygray/BLESuite | 198 | 12759208 | from blesuite.pybt.roles import LECentral, LEPeripheral
from blesuite.pybt.core import Connection
from blesuite.pybt.gatt import UUID, AttributeDatabase, Server
from blesuite.pybt.gap import GAP
from blesuite.gatt_procedures import gatt_procedure_write_handle, gatt_procedure_write_handle_async, \
gatt_procedure_read_handle, gatt_procedure_read_handle_async, \
gatt_procedure_read_uuid, gatt_procedure_read_uuid_async, \
gatt_procedure_discover_primary_services, \
gatt_procedure_discover_secondary_services, \
gatt_procedure_discover_characteristics, \
gatt_procedure_discover_includes, \
gatt_procedure_discover_descriptors, gatt_procedure_prepare_write_handle, \
gatt_procedure_prepare_write_handle_async, gatt_procedure_execute_write, \
gatt_procedure_execute_write_async, gatt_procedure_write_command_handle, \
gatt_procedure_read_multiple_handles, \
gatt_procedure_read_multiple_handles_async, \
gatt_procedure_read_blob_handle, gatt_procedure_read_blob_handle_async
from blesuite.smart_scan import blesuite_smart_scan
from blesuite.entities.gatt_device import BLEDevice
from blesuite.event_handler import BTEventHandler
import logging
import gevent
import os
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# HCI role selectors used by BLEConnectionManager / pybt Connection
ROLE_CENTRAL = 0x00
ROLE_PERIPHERAL = 0x01

# Bluetooth device address types (BD_ADDR kind)
PUBLIC_DEVICE_ADDRESS = 0x00
RANDOM_DEVICE_ADDRESS = 0x01
class BLEConnection(object):
    """
    BLEConnection is used to represent a connection between the BLEConnection manager
    and a BLE device. This object is commonly returned to the user to represent a connection and is passed
    to further BLEConnectionManager functions to interact with the connections.

    :param address: The address of the peer BLEDevice that the HCI device is connected to.
    :param address_type: The address type of the peer BLEDevice [public = 0x00 | random = 0x01]
    :param connection_handle: The connection handle used to interact with the associated peer BLE device.
    :type address: str
    :type address_type: int
    :type connection_handle: int
    """
    def __init__(self, address, address_type, connection_handle=None):
        self.address = address
        self.address_type = address_type
        self.connection_handle = connection_handle
        # connection interval bounds are unknown until negotiated
        self.interval_min = None
        self.interval_max = None
        self.mtu = 23  # default ATT_MTU per spec (Vol 3, Part G, 5.2.1)

    def __repr__(self):
        # 0x00 is a public address and 0x01 a random one, matching the
        # module-level *_DEVICE_ADDRESS constants (the original mapping
        # here was inverted).
        return '<{} address={}, type={}>'.format(
            self.__class__.__name__,
            self.address,
            {0: "public", 1: "random"}.get(self.address_type, "Unknown")
        )
class BLEConnectionManager(object):
"""
BLEConnectionManager is used to manage connections to Bluetooth Low Energy Devices.
The connection manager is associated with an HCI device, such as a Bluetooth USB adapter,
and is responsible for creating the BLE stack and providing a user-friendly interface for
interacting with the BLE stack in order to send and receive packets.
:param adapter: BTLE adapter on host machine to use for connection (defaults to first found adapter). If left blank, the host's default adapter is used.
:param role: Type of role to create for the HCI device [central | peripheral]
:param our_address_type: Type of address for our Bluetooth Adapter. [public | random] (default: "public"). Note: We currently only support static random addresses, not resolvable or non-resolvable private addresses.
:param random_address: If our address type is set to random, supply a random address or one will be randomly generated ("AA:BB:CC:DD:EE:FF") (default: None)
:param psm: Specific PSM (default: 0)
:param mtu: Specific MTU (default: 23 as per spec BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part G] 5.2.1)
:param gatt_server: GATT Server from pybt. Used to assign a custom blesuite.pybt.gatt Server object as the server for a peripheral. Alternatively, by default if the peripheral role is configured, a GATT Server object will be created with no services or characteristics that the user can add to through BLEConnectionManager class methods.
:param event_handler: BTEventHandler class instance that will be called when packets are received by the blesuite.pybt.core packet routing class (SocketHandler).
:param att_operation_event_hook: ATT operation hook functions triggered when the ATT server receives an ATT request
:param att_security_event_hook: ATT security hook functions triggered when the ATT server receives an ATT request and security checks are made
:type att_security_event_hook: blesuite.event_handler.ATTSecurityHook
:type att_operation_event_hook: blesuite.event_handler.ATTEventHook
:type adapter: int
:type role: str
:type our_address_type: str
:type random_address: str
:type psm: int
:type mtu: int
:type gatt_server: Server
:type event_handler: BTEventHandler
"""
def __init__(self, adapter, role, our_address_type="public", random_address=None,
psm=0, mtu=23, gatt_server=None, event_handler=None, att_operation_event_hook=None,
att_security_event_hook=None):
self.role_name = role
self.adapter = adapter
self.requester = None
self.responses = []
self.response_counter = 0
self.psm = psm
self.mtu = mtu
self.gatt_server = gatt_server
self.event_handler = event_handler
self.att_operation_event_hook = att_operation_event_hook
self.att_security_event_hook = att_security_event_hook
self.address = None
self.our_address_type_name = our_address_type
if self.our_address_type_name.lower() == "random":
self.our_address_type = RANDOM_DEVICE_ADDRESS
else:
self.our_address_type = PUBLIC_DEVICE_ADDRESS
if self.our_address_type == RANDOM_DEVICE_ADDRESS and random_address is None:
self.random_address = ':'.join(map(lambda x: x.encode('hex'), os.urandom(6)))
elif self.our_address_type == RANDOM_DEVICE_ADDRESS:
self.random_address = random_address
else:
self.random_address = None
self.central = None
self.stack_connection = None
self.connections = []
if role is 'central':
logger.debug("creating central")
self._create_central()
logger.debug("creating PyBT connection")
self._create_stack_connection(ROLE_CENTRAL)
logger.debug("creating listeners")
self._start_listeners()
elif role is 'peripheral':
logger.debug("creating peripheral role")
self._create_peripheral()
logger.debug("creating PyBT connection")
self._create_stack_connection(ROLE_PERIPHERAL)
logger.debug("creating listeners")
self._start_listeners()
else:
logger.error("Unknown role: %s" % role)
raise RuntimeError("Unknown role: %s" % role)
self.address = self.role.stack.addr
    def __enter__(self):
        """Context-manager entry point; returns this manager unchanged."""
        return self
    def __del__(self):
        # Best-effort teardown on garbage collection: cleanly disconnect
        # every live connection (reason 0x16 = terminated by local host)
        # before destroying the stack connection.
        if self.stack_connection is not None:
            for connection in self.connections:
                if self.stack_connection.is_connected(connection.connection_handle):
                    self.stack_connection.disconnect(connection.connection_handle, 0x16)
            self.stack_connection.destroy()
            self.stack_connection = None
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit: log the (possibly None) exception info and
        # tear down both the stack connection and the HCI role object.
        # Exceptions are not suppressed (implicit None return).
        logger.debug("Exiting bleConnectionManager. exc_type:%s exc_val:%s exc_tb:%s" % (exc_type, exc_val, exc_tb))
        if self.stack_connection is not None:
            self.stack_connection.destroy()
            self.stack_connection = None
        if self.role is not None:
            self.role.destroy()
            self.role = None
def _create_central(self):
if self.adapter is None:
self.role = LECentral(address_type=self.our_address_type, random=self.random_address,
att_operation_event_hook=self.att_operation_event_hook)
else:
self.role = LECentral(adapter=self.adapter, address_type=self.our_address_type, random=self.random_address,
att_operation_event_hook=self.att_operation_event_hook)
    def _create_peripheral(self):
        """Build a default GATT server if none was supplied, then
        instantiate the LEPeripheral role (honouring an explicit adapter)."""
        if self.gatt_server is None:
            # no user-supplied server: create an empty attribute database
            # wired to the security event hook
            self.attribute_db = AttributeDatabase(event_handler=self.att_security_event_hook)
            self.gatt_server = Server(self.attribute_db)
            # NOTE(review): MTU appears to be applied only to the server
            # created here, not to a user-supplied one -- confirm intent
            self.gatt_server.set_mtu(self.mtu)
        if self.adapter is None:
            self.role = LEPeripheral(self.gatt_server, mtu=self.mtu, address_type=self.our_address_type,
                                     random=self.random_address,
                                     att_operation_event_hook=self.att_operation_event_hook)
        else:
            self.role = LEPeripheral(self.gatt_server, adapter=self.adapter, mtu=self.mtu,
                                     address_type=self.our_address_type, random=self.random_address,
                                     att_operation_event_hook=self.att_operation_event_hook)
def _create_stack_connection(self, role_type):
if self.event_handler is None:
self.event_handler = BTEventHandler(self)
self.stack_connection = Connection(self.role, role_type, self.event_handler)
    def _start_listeners(self):
        # Start the stack connection's receive/dispatch machinery
        # (presumably gevent greenlets, given the gevent usage elsewhere
        # in this module -- confirm against pybt.core).
        self.stack_connection.start()
    def get_address(self):
        """ Get the address of the HCI device represented by the BLEConnectionManager.

        The value is captured from the role's stack at construction time.

        :return: The HCI device address
        :rtype: str
        """
        return self.address
    def get_discovered_devices(self):
        """
        Get a dictionary of addresses seen during a scan and the associated advertising data.

        Note: returns the stack's live dictionary, not a copy.

        :return: Dictionary of seen addresses and advertising data
        :rtype: dict {"<address>":(<addressTypeInt>, "<advertisingData>")}
        """
        return self.stack_connection.seen
def set_event_handler(self, event_class):
"""
Set the BTEventHandler for the pybt.core.SocketHandler class that will trigger when a Bluetooth Event
is received by the stack.
:param event_class: Event handler class instance.
:type event_class: BTEventHandler
:return: Success state
:rtype: bool
"""
logger.debug("Trying to set event handler")
self.event_handler = event_class
if self.stack_connection.socket_handler is not None:
logger.debug("Stack connection found, setting event handler")
self.stack_connection.set_event_handler(event_class)
return True
return False
def set_att_operation_hook(self, event_class):
"""
Set the ATTEventHook for the pybt.att.AttributeProtocol class that will trigger when an ATT operation
against the ATT database running locally is received.
:param event_class: ATT event class hook instance.
:type event_class: ATTEventHook
:return: Success state
:rtype: bool
"""
logger.debug("Trying to set ATT operation hook")
self.att_operation_event_hook = event_class
self.role.att.event_handler = self.att_operation_event_hook
return True
def set_att_security_hook(self, event_class):
"""
Set the ATTSecurityHook for the pybt.gatt.AttributeDatabase class that will trigger when a security
check against an ATT operation acting on the ATT database occurs. These checks cover encryption,
authentication, and authorization.
:param event_class: ATT security event hook class instance.
:type event_class: ATTSecurityHook
:return: Success state
:rtype: bool
"""
logger.debug("Trying to set ATT security hook")
self.att_security_event_hook = event_class
if self.gatt_server is None:
logger.debug("No GATT server running, setting security hook failed.")
return False
self.gatt_server.db.att_security_hooks = self.att_security_event_hook
return True
    def is_connected(self, connection):
        """ Return whether the specified connection is connected to the peer device.

        :param connection: connection to query
        :type connection: BLEConnection
        :return: Return connection status
        :rtype: bool
        """
        return self.stack_connection.is_connected(connection.connection_handle)
def init_connection(self, address, address_type):
"""
Create BLEConnection object that represents the host's connection to a BLE peripheral.
:param address: BD_ADDR of target BLE Peripheral
:param address_type: Address type of target BLE Peripheral [public | random]
:type address: string
:type address_type: string
:return: Return BLEConnection object that is used in any communication function.
:rtype: BLEConnection
"""
address = address.upper()
if address_type == "public":
address_type = PUBLIC_DEVICE_ADDRESS
elif address_type == "private":
address_type = RANDOM_DEVICE_ADDRESS
ble_connection = BLEConnection(address, address_type)
self.connections.append(ble_connection)
return ble_connection
def get_bleconnection_from_connection_handle(self, connection_handle):
"""
Lookup a BLEConnection based on a supplied connection handle value.
:param connection_handle: Connection handle used to look up an existing BLEConnection
:type connection_handle: int
:return: BLEConnection or None
:rtype: BLEConnection or None
"""
for connection in self.connections:
if connection.connection_handle is not None and connection.connection_handle == connection_handle:
return connection
return None
    def connect(self, ble_connection, timeout=15):
        """
        Initiate a connection with a peer BLEDevice.  Blocks, polling once
        per second, until the stack reports a connection response or the
        timeout elapses.

        :param ble_connection: BLEConnection that represents the connection between our HCI device and the peer
        :type ble_connection: BLEConnection
        :param timeout: Connection timeout in seconds (default: 15); pass None to wait forever
        :type timeout: int
        :return: Connected status
        :rtype: bool
        """
        import time
        start = time.time()
        if not self.stack_connection.is_connected(ble_connection.connection_handle):
            request = self.stack_connection.connect(ble_connection.connection_handle, ble_connection.address,
                                                    kind=ble_connection.address_type)
            while not request.has_response():
                if timeout is not None and time.time() - start >= timeout:
                    logger.debug("Connection failed: Connection timeout reached.")
                    return False
                logger.debug("Is not connected")
                # yield to the gevent loop so the stack can make progress
                gevent.sleep(1)
            # record the handle assigned by the controller for later calls
            ble_connection.connection_handle = request.response.conn_handle
        logger.debug("Connected")
        return True
    def disconnect(self, connection, reason=0x16):
        """
        Disconnect from a peer BLE device.  Fire-and-forget: no
        confirmation of the disconnection is awaited here.

        :param connection: BLEConnection to disconnect
        :type connection: BLEConnection
        :param reason: The reason for the disconnection (default: 0x16 - Connection terminated by local host). Reasons defined in BLUETOOTH SPECIFICATION Version 5.0 | Vol 2, Part E page 777
        :type reason: int
        """
        self.stack_connection.disconnect(connection.connection_handle, reason)
    def pair(self, ble_connection, timeout=15):
        """
        Initiate pairing with a peer BLE device. This method is blocking and will wait
        until a paired connection is received, pairing fails, or the timeout is reached.
        If custom pairing request parameters are required, configure
        the parameters prior to calling this function.

        :param ble_connection: The BLEConnection to initiate pairing on
        :type ble_connection: BLEConnection
        :param timeout: Pairing timeout in seconds (default: 15); pass None to wait forever
        :type timeout: int
        :return: Pairing status
        :rtype: bool
        """
        import time
        self.initiate_pairing(ble_connection)
        start = time.time()
        # success criterion: the SMP layer reports the link as encrypted
        while not self.role.smp.get_connection_encryption_status(ble_connection.connection_handle):
            if self.role.smp.did_pairing_fail(ble_connection.address):
                logger.debug("Pairing Failed")
                return False
            if timeout is not None and time.time() - start >= timeout:
                return False
            logger.debug("Pairing in progress. Pairing Failed: %s " % self.role.smp.did_pairing_fail(ble_connection.address))
            # yield to the gevent loop while the SMP exchange proceeds
            gevent.sleep(1)
        logger.debug("Paired")
        return True
    def initiate_pairing(self, ble_connection):
        """
        Send pairing request to peer device. This is meant as an asynchronous way for a user to initiate pairing
        and manage the connection while waiting for the pairing process to complete. Use BLEConnectionManager.pair
        for a synchronous pairing procedure.

        :param ble_connection: The BLEConnection to initiate pairing on
        :type ble_connection: BLEConnection
        :return: None
        """
        # a live link is required before SMP packets can be exchanged
        if not self.is_connected(ble_connection):
            self.connect(ble_connection)
        self.role.smp.send_pairing_request(ble_connection.address, ble_connection.connection_handle)
    def is_pairing_in_progress(self, ble_connection):
        """
        Retrieve pairing status of BLEConnection (delegates to the SMP
        layer, keyed by the peer address).

        :param ble_connection: The BLEConnection to view the pairing status of
        :type ble_connection: BLEConnection
        :return: Status of BLE pairing
        :rtype: bool
        """
        return self.role.smp.is_pairing_in_progress(ble_connection.address)
    def did_pairing_fail(self, ble_connection):
        """
        Lookup whether a pairing failed status was triggered (delegates to
        the SMP layer, keyed by the peer address).

        :param ble_connection: The BLEConnection to check for a pairing failure
        :type ble_connection: BLEConnection
        :return: Pairing failure status (True means failure was triggered)
        :rtype: bool
        """
        return self.role.smp.did_pairing_fail(ble_connection.address)
    def is_connection_encrypted(self, ble_connection):
        """
        Retrieve BLEConnection encryption status (delegates to the SMP
        layer, keyed by the connection handle).

        :param ble_connection: The BLEConnection to check the encryption status of
        :type ble_connection: BLEConnection
        :return: Encryption status
        :rtype: bool
        """
        return self.role.smp.get_connection_encryption_status(ble_connection.connection_handle)
def resume_connection_encryption(self, ble_connection):
"""
Initiate BLEConnection encryption with encryption keys present in the Security Manager's LongTermKeyDatabase.
Encryption key look-up is done based on the address of the peer device's address.
:param ble_connection: The BLEConnection to resume encryption on
:type ble_connection: BLEConnection
:return: Result of encryption initiation with existing keys (True if encryption initiation was successfully start, False if encryption keys were not found)
:rtype: bool
"""
result = self.role.smp.initiate_encryption_with_existing_keys(ble_connection.address,
ble_connection.address_type,
ble_connection.connection_handle, self.address,
self.our_address_type, self.role)
return result
    def get_security_manager_long_term_key_database(self):
        """
        Retrieve the LongTermKeyDatabase from the Security Manager
        (the live object, not a copy).

        :return: LongTermKeyDatabase from the Security Manager
        :rtype: blesuite.pybt.sm.LongTermKeyDatabase
        """
        return self.role.smp.long_term_key_db
def add_key_to_security_manager_long_term_key_database(self, address, address_type, ltk, ediv, rand, irk, csrk, security_mode,
security_level):
"""
Add an entry to the LongTermKeyDatabase that will be used for encryption key lookups when encryption
on a BLEConnection is initiated
:param address: Address of peer device (byte form, big-endian)
:type address: str
:param address_type: Address type of peer device
:type address_type: int
:param ltk: Long term key for peer (big-endian)
:type ltk: str
:param ediv: EDIV for peer. Required for LE Legacy encryption resumption
:type ediv: int
:param rand: Encryption Random for peer (big-endian). Required for LE Legacy encryption resumption
:type rand: str
:param irk: IRK for peer (big-endian)
:type irk: str
:param csrk: CSRK for peer
:type csrk: str
:param security_mode: Security mode associated with encryption keys. This mode will be applied to a connection encrypted with these keys.
:type security_mode: int
:param security_level: Security level associated with encryption keys. This level will be applied to a connection encrypted with these keys.
:type security_level: int
:return:
:rtype:
"""
self.role.smp.long_term_key_db.add_long_term_key_entry(address, address_type,
ltk, ediv, rand, irk, csrk, security_mode,
security_level)
def export_security_manager_long_term_key_database_for_storage(self):
"""
Export Security Manager LongTermKeyDatabase as a list of dictionary containing BLE
encryption properties (LTK, EDIV, random,
CSRK, IRK, security mode, security level) with integers and hex encoded strings
:return: LongTermKeyDatabase as a list of dictionaries with integers and hex encoded strings (user-friendly exportable version)
:rtype: dict
"""
ltk_db = self.role.smp.long_term_key_db.get_long_term_key_database()
for entry in ltk_db:
temp = entry['address']
if temp is not None:
temp = temp.encode('hex')
entry['address'] = temp
temp = entry['ltk']
if temp is not None:
temp = temp.encode('hex')
entry['ltk'] = temp
temp = entry['rand']
if temp is not None:
temp = temp.encode('hex')
entry['rand'] = temp
temp = entry['irk']
if temp is not None:
temp = temp.encode('hex')
entry['irk'] = temp
temp = entry['csrk']
if temp is not None:
temp = temp.encode('hex')
entry['csrk'] = temp
return ltk_db
def import_long_term_key_database_to_security_manager(self, long_term_key_database):
"""
Import LongTermKeyDatabase and apply it to the Security Manager. Import database format is identical
to the LongTermKeyDatabase export format with integers and hex encoded strings. The function will perform
some input validation to ensure proper encoding and value types.
:param long_term_key_database: List of dictionaries of LongTermKeyDatabase entries with integers and hex encoded strings
:type long_term_key_database: list of dict
:return:
:rtype:
"""
import blesuite.utils.validators as validator
for entry in long_term_key_database:
keys = entry.keys()
if 'address' in keys:
peer_address = entry['address'].decode('hex')
else:
peer_address = "00" * 6
if 'address_type' in keys:
peer_address_type = entry['address_type']
else:
peer_address_type = 0
if 'ltk' in keys:
ltk = validator.validate_ltk(entry['ltk']).decode('hex')
else:
raise validator.InvalidSMLTK(None)
if 'ediv' in keys:
ediv = entry['ediv']
else:
ediv = 0
if 'rand' in keys:
rand = validator.validate_rand(entry['rand']).decode('hex')
else:
rand = '\x00' * 8
if 'irk' in keys:
irk = validator.validate_irk(entry['irk']).decode('hex')
else:
irk = '\x00' * 16
if 'csrk' in keys:
csrk = validator.validate_csrk(entry['csrk']).decode('hex')
else:
csrk = '\x00' * 16
if 'security_mode' in keys:
mode = entry['security_mode']
else:
mode = 1
if 'security_level' in keys:
level = entry['security_level']
else:
level = 1
mode, level = validator.validate_att_security_mode(mode, level)
self.role.smp.long_term_key_db.add_long_term_key_entry(peer_address, peer_address_type, ltk, ediv, rand,
irk, csrk, mode, level)
def get_security_manager_protocol_default_pairing_parameters(self):
"""
Get the default pairing parameters that will be applied to Security Managers by default.
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
(Security Managers are created per BLE connection and can be modified independently)
:return: {io_cap, oob, mitm, bond, lesc, keypress, ct2, rfu, max_key_size, initiator_key_distribution, responder_key_distribution}
:rtype: dict
"""
return self.role.smp.get_default_pairing_parameters()
def set_security_manager_protocol_default_pairing_parameters(self, default_io_cap=0x03, default_oob=0x00,
default_mitm=0x00,
default_bond=0x01, default_lesc=0x00,
default_keypress=0x00,
default_ct2=0x01, default_rfu=0x00,
default_max_key_size=16,
default_initiator_key_distribution=0x01,
default_responder_key_distribution=0x01):
"""
Set the default pairing parameters that will be applied to Security Managers by default.
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
(Security Managers are created per BLE connection and can be modified independently)
:param default_io_cap: IO Capabilities (default: 0x03 - No Input, No Output)
:type default_io_cap: int
:param default_oob: Out-of-band Data present and available (default: 0x00)
:type default_oob: int
:param default_mitm: Request man-in-the-middle pairing protections (default: 0x01)
:type default_mitm: int
:param default_bond: Request bonding (default: 0x01)
:type default_bond: int
:param default_lesc: LE Secure Connections supported (default: 0x00)
:type default_lesc: int
:param default_keypress: Keypress notifications (default: 0x00)
:type default_keypress: int
:param default_ct2: CT2 (default: 0x01)
:type default_ct2: int
:param default_rfu: Reserved for future use bits (default: 0x00)
:type default_rfu: int
:param default_max_key_size: Max encryption key size (default: 16)
:type default_max_key_size: int
:param default_initiator_key_distribution: Requested keys to be sent by the initiator (central) (default: 0x01)
:type default_initiator_key_distribution: int
:param default_responder_key_distribution: Requested keys to be sent by the responder (peripheral) (default: 0x01)
:type default_responder_key_distribution: int
:return:
:rtype:
"""
self.role.smp.set_default_pairing_parameters(default_io_cap, default_oob, default_mitm, default_bond,
default_lesc, default_keypress, default_ct2, default_rfu,
default_max_key_size, default_initiator_key_distribution,
default_responder_key_distribution)
def get_security_manager_protocol_pairing_parameters_for_connection(self, ble_connection):
"""
Get the default pairing parameters for the Security Manager associated with a BLEConnection (based on the
peer address).
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
:param ble_connection: BLEConnection to modify Security Manager pairing parameters of
:type ble_connection: BLEConnection
:return: {io_cap, oob, mitm, bond, lesc, keypress, ct2, rfu, max_key_size, initiator_key_distribution, responder_key_distribution}
:rtype: dict
"""
return self.role.smp.get_pairing_parameters_for_connection(ble_connection.address)
def set_security_manager_protocol_pairing_parameters_for_connection(self, ble_connection, io_cap=0x03, oob=0x00,
mitm=0x00,
bond=0x01, lesc=0x00, keypress=0x0, ct2=0x01,
rfu=0x00, max_key_size=16,
initiator_key_distribution=0x01,
responder_key_distribution=0x01):
"""
Set the default pairing parameters for the Security Manager associated with a BLEConnection (based on the
peer address).
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
:param ble_connection: BLEConnection to modify Security Manager pairing parameters of
:type ble_connection: BLEConnection
:param io_cap: IO Capabilities (default: 0x03 - No Input, No Output)
:type io_cap: int
:param oob: Out-of-band Data present and available (default: 0x00)
:type oob: int
:param mitm: Request man-in-the-middle pairing protections (default: 0x01)
:type mitm: int
:param bond: Request bonding (default: 0x01)
:type bond: int
:param lesc: LE Secure Connections supported (default: 0x00)
:type lesc: int
:param keypress: Keypress notifications (default: 0x00)
:type keypress: int
:param ct2: CT2 (default: 0x01)
:type ct2: int
:param rfu: Reserved for future use bits (default: 0x00)
:type rfu: int
:param max_key_size: Max encryption key size (default: 16)
:type max_key_size: int
:param initiator_key_distribution: Requested keys to be sent by the initiator (central) (default: 0x01)
:type initiator_key_distribution: int
:param responder_key_distribution: Requested keys to be sent by the responder (peripheral) (default: 0x01)
:type responder_key_distribution: int
:return: Success status of pairing parameter configuration (False is returned if BLEConnection does not have a valid connection or a security manager set)
:rtype: bool
"""
return self.role.smp.set_pairing_parameters_for_connection(ble_connection.address, io_cap, oob, mitm,
bond, lesc, keypress, ct2, rfu, max_key_size,
initiator_key_distribution,
responder_key_distribution)
def decode_gap_data(self, data):
"""
Decode GAP data into GAP class object
:param data: GAP binary data
:type data: str
:return: GAP object containing the GAP data that has been parsed
:rtype: blesuite.pybt.gap.GAP
"""
gap = GAP()
try:
gap.decode(data)
except Exception as e:
if "Data too short" in str(e):
logger.debug("Data too short, leaving off malformed data")
else:
raise e
return gap
def generate_gap_data_dict(self, gap):
"""
Generates a dictionary of user-friendly strings that describe the GAP data in the supplied GAP object.
:param gap: GAP object to retrieve data from
:type gap: blesuite.pybt.gap.GAP
:return: Dictionary of readable strings that represent the GAP data stored in the object
:rtype: dict
"""
return gap.gap_dict()
# Scanning/Discovery Functions
def scan(self, timeout):
"""
Carry-out BLE scan for the specified timeout and return discovered devices.
:param timeout: Scan timeout in seconds
:type timeout: int
:return: Discovered devices
:rtype: dict
"""
import time
self.start_scan()
start = time.time() * 1000
logger.debug("Starting sleep loop")
# comparing time in ms
while ((time.time() * 1000) - start) < timeout:
logger.debug("Scanning...")
gevent.sleep(1)
self.stop_scan()
logger.debug("Done scanning!")
discovered_devices = self.get_discovered_devices()
return discovered_devices
def start_scan(self):
"""
Enable scanning on HCI device.
:return:
:rtype:
"""
self.stack_connection.scan("on")
def stop_scan(self):
"""
Stop scanning on HCI device
:return:
:rtype:
"""
self.stack_connection.scan("off")
def advertise_and_wait_for_connection(self):
"""
Begin advertising with the HCI device and wait for a connection to be established.
:return: Status of connection with a peer device and the BLEConnection
:rtype: tuple - bool, (BLEConnection | None)
"""
self.start_advertising()
while self.is_advertising():
gevent.sleep(1)
if len(self.stack_connection.connection_statuses.keys()) > 0:
connection_handle = self.stack_connection.connection_statuses.keys()[0]
peer_address = self.stack_connection.peer_addresses_by_connection_handle[connection_handle]
peer_address_type = self.stack_connection.connected_addr_type_by_connection_handle[connection_handle]
return True, BLEConnection(peer_address, peer_address_type, connection_handle=connection_handle)
else:
logger.error("Advertising stopped and no connections are present. Something went wrong.")
return False, None
def start_advertising(self):
"""
Enable advertising on HCI device.
:return:
:rtype:
"""
self.stack_connection.start_advertising()
def stop_advertising(self):
"""
Disable advertising on HCI device.
:return:
:rtype:
"""
self.stack_connection.stop_advertising()
def is_advertising(self):
"""
Retrieve advertising status of HCI device.
:return: Status of advertising
:rtype: bool
"""
return self.stack_connection.is_advertising()
def set_advertising_data(self, data):
"""
Set advertising data.
:param data: Data to include in advertising packets
:type data: str
:return:
:rtype:
"""
self.stack_connection.set_advertising_data(data)
def set_scan_response_data(self, data):
"""
Set scan response data.
:param data: Data to return when a scan packet is received.
:type data: str
:return:
:rtype:
"""
self.stack_connection.set_scan_response_data(data)
def set_advertising_parameters(self, advertisement_type, channel_map, interval_min, interval_max,
destination_addr, destination_addr_type):
"""
Set advertising parameters. See: BLUETOOTH SPECIFICATION Version 5.0 | Vol 2, Part E page 1251
:param advertisement_type: Advertising packet type (see blesuite.utils.GAP_ADV_TYPES)
:type advertisement_type: int
:param channel_map: Bit field that indicates the advertising channels to use. (Channel 37 - 0x01, Channel 38 - 0x02, Channel 39 - 0x04, all channels - 0x07)
:type channel_map: int
:param interval_min: Minimum advertising interval for undirected and low duty cycle directed advertising. (Range 0x00020 - 0x4000, default 0x0800 or 1.28 seconds. Time conversion = interval * 0.625ms)
:type interval_min: int
:param interval_max: Maximum advertising interval for undirected and low duty cycle directed advertising. (Range 0x00020 - 0x4000, default 0x0800 or 1.28 seconds. Time conversion = interval * 0.625ms)
:type interval_max: int
:param destination_addr: Destination address for directed advertising (set to 00:00:00:00:00:00 if using undirected advertising)
:type destination_addr: str
:param destination_addr_type: Destination address type (set to 0x00 if using undirected advertising)
:type destination_addr_type: int
:return:
:rtype:
"""
self.stack_connection.set_advertising_parameters(advertisement_type, channel_map, interval_min, interval_max,
destination_addr, destination_addr_type)
def set_local_name(self, name, enforce_null_termination=True):
"""
Set the local name of the HCI device. (Bluetooth Spec says the value needs to be null terminated. If it is
intended to write a string that is not null terminated, then set the enforcement flag to False).
:param name: Local name to write to HCI device
:type name: str
:param enforce_null_termination: Flag to enforce null termination (default: True)
:type enforce_null_termination: bool
:return:
:rtype:
"""
if enforce_null_termination:
if len(name) != 248:
padding = 248 - len(name)
name = name + ('\0' * padding)
self.stack_connection.set_local_name(name)
def get_gatt_server(self):
"""
Retrieve the GATT server for the BLEConnectionManager instance.
:return: GATT Server
:rtype: blesuite.pybt.gatt.Server
"""
return self.gatt_server
def set_server_mtu(self, mtu):
"""
Configures the MTU (max transmission unit) on the GATT server and ATT class instance. MTU is used
to restrict the size of data the stack returns in ATT packets. Note: The MTU used by the class
is determined by the MTUs exchanged by both connected BLE devices (uses the minimum value of the
exchanged MTUs).
:param mtu: MTU size in bytes (Bluetooth Spec default is 23 bytes)
:type mtu: int
:return:
:rtype:
"""
self.mtu = mtu
self.role.att.set_mtu(mtu)
def get_server_mtu(self):
"""
Returns the MTU size from the GATT server.
:return: GATT server MTU (bytes)
:rtype: int
"""
if self.role.att.gatt_server is not None:
return self.role.att.gatt_server.mtu
    def initialize_gatt_server_from_ble_device(self, ble_device, use_handles_from_ble_device=False):
        """
        Initializes the GATT server based on a supplied BLEDevice entity. All services, includes, characteristics,
        and descriptors are retrieved from the BLEDevice entity and added to the GATT server using the
        properties and permissions configured in the BLEDevice object.

        :param ble_device: BLEDevice object to replicate with the GATT server
        :type ble_device: BLEDevice
        :param use_handles_from_ble_device: Flag to indicate that the GATT server should use the attribute handles specified in each BLE entity within the BLEDevice. If set to false (default), then the GATT server will automatically assign handles in the order that entities are added to the server.
        :type use_handles_from_ble_device: bool
        :return:
        :rtype:
        """
        from pybt.gatt import GATTService, GATTCharacteristic, GATTCharacteristicDescriptorDeclaration,\
            GATTInclude, UUID
        # Lazily create the backing attribute database / server on first use.
        if self.gatt_server is None:
            att_db = AttributeDatabase()
            self.gatt_server = Server(att_db)
            self.gatt_server.set_mtu(self.mtu)
        for service in ble_device.get_services():
            gatt_service = GATTService(UUID(service.attribute_type), UUID(service.uuid))
            # Carry over the handle range recorded on the BLEDevice entity;
            # these are only authoritative when use_handles_from_ble_device is True.
            gatt_service.start = service.start
            gatt_service.end = service.end
            gatt_service.handle = service.start
            for incl in service.get_includes():
                include_1 = GATTInclude(incl.included_service_att_handle, incl.included_service_end_group_handle,
                                        UUID(incl.included_service_uuid),
                                        incl.include_definition_attribute_properties,
                                        incl.include_definition_attribute_read_permission,
                                        incl.include_definition_attribute_write_permission,
                                        incl.include_definition_attribute_require_authorization)
                include_1.handle = incl.handle
                gatt_service.add_include(include_1)
            for characteristic in service.get_characteristics():
                # create general characteristic (note: this method doesn't apply permissions and properties to the
                # characteristic declaration descriptor)
                characteristic_1 = GATTCharacteristic(characteristic.value, characteristic.gatt_properties,
                                                      UUID(characteristic.uuid),
                                                      characteristic.characteristic_value_attribute_properties,
                                                      characteristic.characteristic_value_attribute_read_permission,
                                                      characteristic.characteristic_value_attribute_write_permission,
                                                      characteristic.characteristic_value_attribute_require_authorization)
                # update characteristic declaration descriptor with configured permissions and authz
                characteristic_1.declaration.attribute_properties = characteristic.characteristic_definition_attribute_properties
                characteristic_1.declaration.attribute_read_permission = characteristic.characteristic_definition_attribute_read_permission
                characteristic_1.declaration.attribute_write_permission = characteristic.characteristic_definition_attribute_write_permission
                characteristic_1.declaration.require_authorization = characteristic.characteristic_definition_attribute_require_authorization
                characteristic_1.declaration.handle = characteristic.handle
                characteristic_1.declaration.value_attribute_handle = characteristic.value_handle
                characteristic_1.value_declaration.handle = characteristic.value_handle
                for descriptor in characteristic.get_descriptors():
                    # characteristic declaration is already created when we created the characteristic attribute
                    # NOTE(review): this `pass` makes the 0x2803 check a no-op, so
                    # characteristic-declaration descriptors still fall through and
                    # are re-added below. A `continue` looks intended — confirm
                    # whether duplicate 0x2803 entries are handled downstream.
                    if descriptor.type == 0x2803:
                        pass
                    descriptor_1 = GATTCharacteristicDescriptorDeclaration(UUID(descriptor.uuid),
                                                                           descriptor.value,
                                                                           descriptor.characteristic_descriptor_attribute_properties,
                                                                           descriptor.characteristic_descriptor_attribute_read_permission,
                                                                           descriptor.characteristic_descriptor_attribute_write_permission,
                                                                           descriptor.characteristic_descriptor_attribute_require_authorization)
                    descriptor_1.handle = descriptor.handle
                    characteristic_1.add_descriptor(descriptor_1)
                gatt_service.add_characteristic(characteristic_1)
            self.gatt_server.add_service(gatt_service)
        # Rebuild the attribute table; handles are recalculated unless the caller
        # asked to keep the handles carried on the BLEDevice entities.
        self.gatt_server.refresh_database(calculate_handles=(not use_handles_from_ble_device))
def set_extended_inquiry_response(self, fec_required=0, formatted_eir_data=None):
"""
Set the extended inquiry response on the HCI device.
:param fec_required: FEC required (default: 0)
:type fec_required: 0
:param formatted_eir_data: Formatted extended inquiry response data (default: None)
:type formatted_eir_data: str
:return:
:rtype:
"""
self.stack_connection.set_eir_response(fec_required=fec_required, formatted_eir_data=formatted_eir_data)
def read_remote_used_features(self, connection):
"""
Issues a read remote used features command to the connected peer device.
:param connection: BLEConnection of target connection
:type connection: BLEConnection
:return:
:rtype:
"""
self.stack_connection.read_remote_used_features(connection.connection_handle)
return
# ATT Packets / GATT Procedures
def exchange_mtu(self, connection, mtu, timeout=15 * 1000):
"""
Sends Exchange MTU packet using the supplied BLEConnection object
and returns a GATTRequest object containing the request or any received errors.
Synchronous method. Note: Sending this packet as a peripheral will not
change the MTU configured on the GATT server.
:param connection: BLEConnection with connection to target device
:param mtu: Desired MTU (bytes)
:param timeout: Timeout for exhange MTU response (in milliseconds)
:type connection: BLEConnection
:type mtu: int
:rtype: blesuite.pybt.core.GATTRequest
"""
request = self.stack_connection.exchange_mtu_sync(mtu, connection.connection_handle, timeout=timeout)
if request.has_error():
logger.debug("Exchange MTU Response Error")
else:
logger.debug("Exchange MTU Response Data(str): %s" % request.response.data)
if not request.has_error() and request.has_response():
connection.mtu = mtu
return request
def gatt_discover_primary_services(self, connection, device=None):
"""
Discover primary GATT services of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return gatt_procedure_discover_primary_services(self, connection, device)
def gatt_discover_secondary_services(self, connection, device=None):
"""
Discover secondary GATT services of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return gatt_procedure_discover_secondary_services(self, connection, device)
def gatt_discover_characteristics(self, connection, device=None):
"""
Discover GATT characteristics of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return gatt_procedure_discover_characteristics(self, connection, device)
def gatt_discover_includes(self, connection, device=None):
"""
Discover GATT service includes of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return gatt_procedure_discover_includes(self, connection, device)
def gatt_discover_descriptors(self, connection, device):
"""
Discover GATT characteristic descriptors of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""
return gatt_procedure_discover_descriptors(self, connection, device)
def smart_scan(self, connection, device=None, look_for_device_info=True, attempt_desc_read=False,
timeout=15 * 1000):
"""
Initiate a BLE Smart Scan, which is an all inclusive way to scan a BLE peripheral for all
services, includes, characteristics, and descriptors. The scan can also attempt to reach from each
attribute handle discovered during the scan (regardless of GATT properties returned by the server) in
order to quickly view data exposed by the device.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default) a new BLEDevice object with the discovered entities will be added.
:type device: BLEDevice
:param look_for_device_info: Flag to indicate the scan should scan for several basic types of information based on UUIDs defined by the Bluetooth Special Interest Group (default: True)
:type look_for_device_info: bool
:param attempt_desc_read: Flag to indicate the scan should attempt to read from each attribute discovered during the scan (default: False). Note: This may significantly slow down the scan. If the target peripheral disconnects, the scan will attempt to reconnect to the server.
:type attempt_desc_read: bool
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: Populated BLEDevice
:rtype: BLEDevice
"""
if device is None:
device = BLEDevice(connection.address)
return blesuite_smart_scan(self, connection, device, look_for_device_info=look_for_device_info,
attempt_desc_read=attempt_desc_read, timeout=timeout)
def gatt_write_handle(self, connection, handle, data, timeout=15 * 1000):
"""
Send an ATT Write request to the peer device associated with the supplied BLEConnection, attribute
handle, and data. This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_write_handle(self.stack_connection, connection.connection_handle, handle,
data, timeout=timeout)
def gatt_write_handle_async(self, connection, handle, data, timeout=15 * 1000):
"""
Send an ATT Write request to the peer device associated with the supplied BLEConnection, attribute
handle, and data. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_write_handle_async(self.stack_connection, connection.connection_handle, handle, data,
timeout=timeout)
def gatt_write_command_handle(self, connection, handle, data):
"""
Send an ATT Write Command request to the peer device associated with the supplied BLEConnection, attribute
handle, and data. This is an asynchronous call that will send the request to the peer device. No GATTRequest
will be generated since this command should not ever receive a response from the peer.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
"""
gatt_procedure_write_command_handle(self.stack_connection, connection.connection_handle, handle, data)
def gatt_prepare_write_handle(self, connection, handle, data, offset, timeout=15 * 1000):
"""
Send an ATT Prepare Write request to the peer device associated with the supplied BLEConnection, attribute
handle, offset, and data. This is a synchronous call that will wait for either a successful response,
error response,
or the specified timeout (milliseconds) to be reached.
Note: Prepare write is used in conjunction with execute write to write a large set of data.
The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param offset: Offset to write the data
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_prepare_write_handle(self.stack_connection, connection.connection_handle, handle,
data, offset, timeout=timeout)
def gatt_prepare_write_handle_async(self, connection, handle, data, offset, timeout=15 * 1000):
"""
Send an ATT Prepare Write request to the peer device associated with the supplied BLEConnection, attribute
handle, offset, and data. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
Note: Prepare write is used in conjunction with execute write to write a large set of data.
The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param offset: Offset to write the data
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_prepare_write_handle_async(self.stack_connection, connection.connection_handle,
handle, data, offset, timeout=timeout)
def gatt_execute_write(self, connection, flags, timeout=15 * 1000):
"""
Send an ATT Execute Write request to the peer device associated with the supplied BLEConnection and flag.
This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
Note: Execute write is used in conjunction with prepare write
to write a large set of data. The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param flags: Specifies which execute write operation should be performed (0x00 - Cancel all prepared writes, 0x01 - Immediately write all pending prepared values.
:type flags: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_execute_write(self.stack_connection, connection.connection_handle, flags, timeout=timeout)
def gatt_execute_write_async(self, connection, flags, timeout=15 * 1000):
"""
Send an ATT Execute Write request to the peer device associated with the supplied BLEConnection and flag.
This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
Note: Execute write is used in conjunction with prepare write
to write a large set of data. The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param flags: Specifies which execute write operation should be performed (0x00 - Cancel all prepared writes, 0x01 - Immediately write all pending prepared values.
:type flags: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_execute_write_async(self.stack_connection, connection.connection_handle, flags,
timeout=timeout)
def gatt_read_handle(self, connection, handle, timeout=15 * 1000):
"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and attribute
handle. This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_handle(self.stack_connection, connection.connection_handle, handle, timeout=timeout)
def gatt_read_handle_async(self, connection, handle, timeout=15 * 1000):
"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and attribute
handle. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_handle_async(self.stack_connection, connection.connection_handle, handle,
timeout=timeout)
def gatt_read_multiple_handles(self, connection, handles, timeout=15 * 1000):
"""
Send an ATT Read Multiple request to the peer device associated with the supplied BLEConnection and
a set of attribute handles.
This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handles: A list of attribute handles for target attributes (0x01 - 0xFFFF)
:type handles: list of int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_multiple_handles(self.stack_connection, connection.connection_handle,
handles, timeout=timeout)
def gatt_read_multiple_handles_async(self, connection, handles, timeout=15 * 1000):
"""
Send an ATT Read Multiple request to the peer device associated with the supplied BLEConnection and
a set of attribute handles.
This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitorged for a GATTResponse or GATTError (either through a valid
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handles: A list of attribute handles for target attributes (0x01 - 0xFFFF)
:type handles: list of int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_multiple_handles_async(self.stack_connection, connection.connection_handle, handles,
timeout=timeout)
def gatt_read_blob_handle(self, connection, handle, offset, timeout=15 * 1000):
"""
Send an ATT Blob Read request to the peer device associated with the supplied BLEConnection, attribute
handle, and an offset. This is a synchronous call that will wait for either a successful response,
error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param offset: Offset to begin reading attribute value
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_blob_handle(self.stack_connection, connection.connection_handle, handle, offset,
timeout=timeout)
def gatt_read_blob_handle_async(self, connection, handle, offset, timeout=15 * 1000):
"""
Send an ATT Blob Read request to the peer device associated with the supplied BLEConnection, attribute
handle, and an offset. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param offset: Offset to begin reading attribute value
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_blob_handle_async(self.stack_connection, connection.connection_handle, handle,
offset, timeout=timeout)
def gatt_read_uuid(self, connection, uuid, timeout=15 * 1000):
"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and GATT UUID.
This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param uuid: UUID of target GATT entity (16-bit and 128-bit UUIDs are accepted)
:type uuid: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_uuid(self.stack_connection, connection.connection_handle, UUID(uuid),
timeout=timeout)
def gatt_read_uuid_async(self, connection, uuid, timeout=15 * 1000):
"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and GATT UUID.
This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param uuid: UUID of target GATT entity (16-bit and 128-bit UUIDs are accepted)
:type uuid: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""
return gatt_procedure_read_uuid_async(self.stack_connection, connection.connection_handle, UUID(uuid),
timeout=timeout)
def att_send_raw(self, connection, body):
"""
Sends a raw ATT packet using the supplied BLEConnection object
and data supplied. The function does not apply a standard ATT header the supplied body, but L2CAP
and HCI encapsulation is handled.
Note: Valid ATT packets can be constructed using
packets defined in scapy.layers.bluetooth
or using random data for fuzzing.
:param connection: BLEConnection to target device
:param body: ATT request body
:rtype: GATTRequest
"""
request = self.stack_connection.send_raw_att(body, connection.connection_handle)
return request
def l2cap_send_raw(self, connection, body):
"""
Sends a raw L2CAP packet using the supplied BLEConnection object
and data supplied. The function does not apply a standard L2CAP header to the user supplied value,
but HCI encapsulation is applied.
Note: Valid L2CAP packets can be constructed using packets defined in scapy.layers.bluetooth
or using random data for fuzzing.
:param connection: BLEConnection to target device
:param body: L2CAP request body
:rtype: GATTRequest
"""
request = self.stack_connection.send_raw_l2cap(body, connection.connection_handle)
return request
| 2.28125 | 2 |
pluginsinterface/PlugAsyncio.py | lonelyion/TweetToBot-Docker | 0 | 12759209 | <reponame>lonelyion/TweetToBot-Docker<filename>pluginsinterface/PlugAsyncio.py
# -*- coding: UTF-8 -*-
import asyncio
import threading
import time
import traceback

from pluginsinterface.EventHandling import StandEven
from pluginsinterface.Plugmanagement import async_send_even
from helper import getlogger
logger = getlogger(__name__)

# Shared state of the plugin event loop:
#   'run'       -- True once RunLoop() has started the worker thread
#   'threading' -- the daemon Thread that runs the asyncio loop
#   'loop'      -- the event loop owned by that thread
#   'queue'     -- asyncio.Queue of pending events (created by the worker thread)
runinfo = {
    'run': False,
    'threading': None,
    'loop': asyncio.new_event_loop(),
    'queue': None
}
async def __even_put(runinfo, even: StandEven):
    # Coroutine wrapper around the queue put so it can be scheduled onto the
    # loop thread via asyncio.run_coroutine_threadsafe.
    return await runinfo['queue'].put(even)
def even_put(even: StandEven):
    """Thread-safely hand *even* to the plugin event loop.

    Does nothing (and returns None) until RunLoop() has marked the loop as
    running.
    """
    global runinfo
    if not runinfo['run']:
        return
    asyncio.run_coroutine_threadsafe(__even_put(runinfo, even),
                                     runinfo['loop'])
async def __evendeal(queue):
    """Consumer loop: pull events off *queue* and dispatch them to plugins.

    Runs forever on the loop thread. A failing plugin is logged and the loop
    keeps processing subsequent events.
    """
    while True:
        even = await queue.get()
        try:
            await async_send_even(even)
        except Exception:
            # Was a bare ``except:``, which would also swallow SystemExit /
            # KeyboardInterrupt (and task cancellation on older Pythons);
            # only plugin errors should be caught and logged here.
            s = traceback.format_exc(limit=10)
            logger.error(s)
            logger.error('出现这条消息表明模块出现异常')
        queue.task_done()
def __runAsyncioTask(runinfo):
    """Thread target: adopt the shared event loop and run it forever."""
    # Set the event loop for this thread (original comment: 设置事件循环).
    asyncio.set_event_loop(runinfo['loop'])
    # Bounded queue of 128 pending events; created on the loop thread —
    # presumably so it binds to this loop. TODO(review): confirm on the
    # targeted Python version.
    runinfo['queue'] = asyncio.Queue(128)
    runinfo['loop'].run_forever()
def RunLoop():
    """
    启动插件处理循环

    Start the plugin event-processing loop: spawn a daemon thread that owns
    the shared asyncio loop, wait for that thread to create the event queue,
    then schedule the consumer coroutine and mark the loop as running.
    """
    global runinfo
    runinfo['threading'] = threading.Thread(group=None,
                                            target=__runAsyncioTask,
                                            args=(runinfo, ),
                                            name='PlugAsyncio_thread',
                                            daemon=True)
    runinfo['threading'].start()
    # Bug fix: the original scheduled __evendeal(runinfo['queue']) right away,
    # racing the worker thread that creates the queue — the consumer could be
    # started with None. Wait until the queue exists before scheduling.
    while runinfo['queue'] is None:
        time.sleep(0.001)
    logger.info('插件事件处理循环启动...')
    asyncio.run_coroutine_threadsafe(__evendeal(runinfo['queue']),
                                     runinfo['loop'])
    runinfo['run'] = True
| 2.203125 | 2 |
trikit/__init__.py | lazysloth3/trikit | 0 | 12759210 | <gh_stars>0
"""
-------------------------------------------------------------------------------
| _ |
| | |_ _ __(_) | _(_) |_ |
| | __| '__| | |/ / | __| |
| | |_| | | | <| | |_ |
| \__|_| |_|_|\_\_|\__| |
| |
| A Pythonic Approach to Actuarial Reserving |
| Copyright 2018 <NAME> |
-------------------------------------------------------------------------------
"""
import os
import os.path
import sys
import collections
import datetime
import pathlib
import os.path
import warnings
import numpy as np
import pandas as pd
import scipy
from .datasets import dataref
from .triangle import totri
from .utils import (
_load, _get_datasets, _get_lrdb_lobs, _get_lrdb_groups, _get_lrdb_specs,
)
# Initialize dataset loading utility and lrdb-related functions.
# These module-level callables are built once from the packaged ``dataref``
# mapping and form trikit's public data-access API.
lrdb_path = dataref["lrdb"]  # "lrdb" entry of dataref — presumably a path to the loss reserving database; confirm
load = _load(dataref=dataref)
get_datasets = _get_datasets(dataref=dataref)
get_lrdb_lobs = _get_lrdb_lobs(lrdb_path=lrdb_path)
get_lrdb_groups = _get_lrdb_groups(lrdb_path=lrdb_path)
get_lrdb_specs = _get_lrdb_specs(lrdb_path=lrdb_path)

__version__ = "0.2.10"
| 2.125 | 2 |
configs/mask_rcnn/mask_rcnn_se_x101_64x4d_fpn_mstrain_1x_coco.py | tianchiVideoSeg/mmdetection | 1 | 12759211 | <reponame>tianchiVideoSeg/mmdetection<gh_stars>1-10
# Inherit model / dataset / schedule / runtime defaults from the base configs.
_base_ = [
    '../_base_/models/mask_rcnn_se_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# Override the base backbone with SE-ResNeXt-101 64x4d (pretrained weights,
# bottleneck block type, per-stage layer counts, and cardinality).
model = dict(
    pretrained='checkpoints/se_resnext101_64x4d-f9926f93.pth',
    backbone=dict(block='SEResNeXtBottleneck', layers=[3, 4, 23, 3], groups=64))
| 1.304688 | 1 |
two-pointers/0345-reverse-vowels-of-a-string(双指针).py | ZHUANGHP/LeetCode-Solution-Python | 1 | 12759212 | class Solution:
def reverseVowels(self, s: str) -> str:
vowels = {'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'}
s = list(s)
left = 0
right = len(s) - 1
while left < right:
if s[left] not in vowels:
left += 1
elif s[right] not in vowels:
right -= 1
else:
s[left], s[right] = s[right], s[left]
left += 1
right -= 1
return ''.join(s)
| 3.3125 | 3 |
polyaxon_client/tracking/experiment.py | yu-iskw/polyaxon-client | 0 | 12759213 | <filename>polyaxon_client/tracking/experiment.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import atexit
import json
import os
import sys
import time
from datetime import datetime
from polyaxon_client import settings
from polyaxon_client.exceptions import AuthenticationError, PolyaxonClientException
from polyaxon_client.handlers.conf import setup_logging
from polyaxon_client.handlers.handler import PolyaxonHandler
from polyaxon_client.logger import logger
from polyaxon_client.tracking.base import BaseTracker
from polyaxon_client.tracking.in_cluster import ensure_in_custer
from polyaxon_client.tracking.paths import get_base_outputs_path, get_outputs_path
from polyaxon_client.tracking.utils.code_reference import get_code_reference
from polyaxon_client.tracking.utils.env import get_run_env
from polyaxon_client.tracking.utils.tags import validate_tags
class Experiment(BaseTracker):
    """Client-side tracker for a single Polyaxon experiment.

    Wraps the Polyaxon API client to create the experiment, stream logs,
    and record statuses, metrics, params, tags and code/data references.
    Inside a cluster, project/experiment identity and cluster metadata are
    resolved from Polyaxon-provided environment variables. Every method
    short-circuits when ``settings.NO_OP`` is set.
    """

    def __init__(self,
                 project=None,
                 experiment_id=None,
                 group_id=None,
                 client=None,
                 track_logs=True,
                 track_code=True,
                 track_env=True,
                 outputs_store=None):
        """Initialize the tracker; in-cluster, missing project/experiment_id
        are resolved from the Polyaxon environment."""
        if settings.NO_OP:
            return

        if project is None and settings.IN_CLUSTER:
            experiment_info = self.get_experiment_info()
            project = experiment_info['project_name']
            # experiment_name is a dotted name; the last component is the id.
            experiment_id = experiment_info['experiment_name'].split('.')[-1]
        super(Experiment, self).__init__(project=project,
                                         client=client,
                                         track_logs=track_logs,
                                         track_code=track_code,
                                         track_env=track_env,
                                         outputs_store=outputs_store)

        self.experiment_id = experiment_id
        self.group_id = group_id
        self.experiment = None
        self.last_status = None

        # Check if there's an ephemeral token
        check_ephemeral_token = (settings.IN_CLUSTER and
                                 hasattr(settings, 'SECRET_EPHEMERAL_TOKEN') and
                                 settings.SECRET_EPHEMERAL_TOKEN)
        if check_ephemeral_token:
            try:
                self.client.auth.login_experiment_ephemeral_token(
                    username=self.username,
                    project_name=self.project_name,
                    experiment_id=self.experiment_id,
                    ephemeral_token=settings.SECRET_EPHEMERAL_TOKEN,
                    set_token=True,
                    persist_token=True)
            except AuthenticationError:
                logger.debug('Could not log with ephemeral token.')

        if settings.IN_CLUSTER:
            self._set_health_url()

        # Track run env
        if settings.IN_CLUSTER and self.track_env:
            self.log_run_env()

    def create(self, name=None, tags=None, description=None, config=None, base_outputs_path=None):
        """Create the experiment server-side, wire up log handling and the
        outputs store, and (outside a cluster) register lifecycle hooks.

        Returns ``self`` for chaining; raises PolyaxonClientException when
        the API does not return an experiment.
        """
        if settings.NO_OP:
            return None

        experiment_config = {'run_env': get_run_env()} if self.track_env else {}
        if name:
            experiment_config['name'] = name
        if tags:
            experiment_config['tags'] = tags
        if description:
            experiment_config['description'] = description
        if config:
            experiment_config['config'] = config
        experiment = self.client.project.create_experiment(
            username=self.username,
            project_name=self.project_name,
            experiment_config=experiment_config,
            group=self.group_id,
        )
        if not experiment:
            raise PolyaxonClientException('Could not create experiment.')
        if not settings.IN_CLUSTER and self.track_logs:
            setup_logging(PolyaxonHandler(send_logs=self._send_logs))
        # Response shape depends on the client config (object vs dict).
        self.experiment_id = (experiment.id
                              if self.client.api_config.schema_response
                              else experiment.get('id'))
        self.experiment = experiment
        self.last_status = 'created'

        # Setup the outputs store
        base_outputs_path = base_outputs_path or get_base_outputs_path()
        if self.outputs_store is None and base_outputs_path:
            if self.group_id:
                outputs_path = '{}/{}/{}/{}/{}'.format(
                    base_outputs_path,
                    self.username,
                    self.project_name,
                    self.group_id,
                    self.experiment_id)
            else:
                outputs_path = '{}/{}/{}/{}'.format(
                    base_outputs_path, self.username, self.project_name, self.experiment_id)
            self.set_outputs_store(outputs_path=outputs_path)

        if self.track_code:
            self.log_code_ref()

        if not settings.IN_CLUSTER:
            self._start()
            self._set_health_url()

        return self

    def _set_health_url(self):
        """Point the client's periodic health check at this experiment."""
        if settings.NO_OP:
            return

        health_url = self.client.experiment.get_heartbeat_url(
            username=self.username,
            project_name=self.project_name,
            experiment_id=self.experiment_id)
        self.client.set_health_check(url=health_url)

    def _start(self):
        """Mark the run as started and hook process exit / uncaught exceptions
        so the experiment status is finalized either way."""
        if settings.NO_OP:
            return

        atexit.register(self._end)
        self.start()

        def excepthook(exception, value, tb):
            self.failed(message='Type: {}, Value: {}'.format(exception, value))
            # Resume normal work
            sys.__excepthook__(exception, value, tb)

        sys.excepthook = excepthook

    def _send_logs(self, log_line):
        """Handler callback: ship a captured log line to the API (batched)."""
        if settings.NO_OP:
            return

        self.client.experiment.send_logs(username=self.username,
                                         project_name=self.project_name,
                                         experiment_id=self.experiment_id,
                                         log_lines=log_line,
                                         periodic=True)

    def _end(self):
        """atexit hook: mark the run succeeded (no-op if already finalized)."""
        if settings.NO_OP:
            return

        self.succeeded()

    def end(self, status, message=None):
        """Finalize the run with *status* unless it already reached a
        terminal state ('succeeded', 'failed' or 'stopped')."""
        if settings.NO_OP:
            return

        if self.last_status in ['succeeded', 'failed', 'stopped']:
            return
        self.log_status(status, message)
        self.last_status = status
        time.sleep(0.1)  # Just to give the opportunity to the worker to pick the message

    def start(self):
        """Mark the run as 'running'."""
        if settings.NO_OP:
            return

        self.log_status('running')
        self.last_status = 'running'

    def succeeded(self):
        """Finalize the run as 'succeeded'."""
        if settings.NO_OP:
            return

        self.end('succeeded')

    def stop(self):
        """Finalize the run as 'stopped'."""
        if settings.NO_OP:
            return

        self.end('stopped')

    def failed(self, message=None):
        """Finalize the run as 'failed', with an optional message."""
        if settings.NO_OP:
            return

        self.end(status='failed', message=message)

    def log_run_env(self):
        """Record the current run environment on the experiment (background)."""
        if settings.NO_OP:
            return

        patch_dict = {'run_env': get_run_env()}
        self.client.experiment.update_experiment(username=self.username,
                                                 project_name=self.project_name,
                                                 experiment_id=self.experiment_id,
                                                 patch_dict=patch_dict,
                                                 background=True)

    def log_code_ref(self):
        """Record the current code reference (e.g. VCS state) in the background."""
        if settings.NO_OP:
            return

        self.client.experiment.create_code_reference(username=self.username,
                                                     project_name=self.project_name,
                                                     experiment_id=self.experiment_id,
                                                     coderef=get_code_reference(),
                                                     background=True)

    def log_status(self, status, message=None):
        """Record a status transition for the experiment (background)."""
        if settings.NO_OP:
            return

        self.client.experiment.create_status(username=self.username,
                                             project_name=self.project_name,
                                             experiment_id=self.experiment_id,
                                             status=status,
                                             message=message,
                                             background=True)

    def log_metrics(self, **metrics):
        """Record metric values (keyword args) stamped with the current UTC time."""
        if settings.NO_OP:
            return

        self.client.experiment.create_metric(username=self.username,
                                             project_name=self.project_name,
                                             experiment_id=self.experiment_id,
                                             values=metrics,
                                             created_at=datetime.utcnow(),
                                             periodic=True)

    def log_tags(self, tags, reset=False):
        """Add *tags* to the experiment; with ``reset=True`` replace them."""
        if settings.NO_OP:
            return

        patch_dict = {'tags': validate_tags(tags)}
        if reset is False:
            patch_dict['merge'] = True
        self.client.experiment.update_experiment(username=self.username,
                                                 project_name=self.project_name,
                                                 experiment_id=self.experiment_id,
                                                 patch_dict=patch_dict,
                                                 background=True)

    def log_params(self, reset=False, **params):
        """Record declaration params; with ``reset=True`` replace instead of merge."""
        if settings.NO_OP:
            return

        patch_dict = {'declarations': params}
        if reset is False:
            patch_dict['merge'] = True
        self.client.experiment.update_experiment(username=self.username,
                                                 project_name=self.project_name,
                                                 experiment_id=self.experiment_id,
                                                 patch_dict=patch_dict,
                                                 background=True)

    def set_description(self, description):
        """Update the experiment's description (background)."""
        if settings.NO_OP:
            return

        self.client.experiment.update_experiment(username=self.username,
                                                 project_name=self.project_name,
                                                 experiment_id=self.experiment_id,
                                                 patch_dict={'description': description},
                                                 background=True)

    def set_name(self, name):
        """Update the experiment's name (background)."""
        if settings.NO_OP:
            return

        self.client.experiment.update_experiment(username=self.username,
                                                 project_name=self.project_name,
                                                 experiment_id=self.experiment_id,
                                                 patch_dict={'name': name},
                                                 background=True)

    def log_data_ref(self, data, data_name='data', reset=False):
        """Record a short MD5 fingerprint of *data* under *data_name* so the
        data version used by the run can be tracked; failures only warn."""
        if settings.NO_OP:
            return

        try:
            import hashlib
            params = {
                data_name: hashlib.md5(str(data).encode("utf-8")).hexdigest()[:settings.HASH_LENGTH]
            }
            patch_dict = {'data_refs': params}
            if reset is False:
                patch_dict['merge'] = True
            self.client.experiment.update_experiment(username=self.username,
                                                     project_name=self.project_name,
                                                     experiment_id=self.experiment_id,
                                                     patch_dict=patch_dict,
                                                     background=True)
        except Exception as e:
            logger.warning('Could create data hash %s', e)

    @staticmethod
    def get_cluster_def():
        """Returns cluster definition created by polyaxon.
        {
            "master": ["plxjob-master0-8eefb7a1146f476ca66e3bee9b88c1de:2000"],
            "worker": ["plxjob-worker1-8eefb7a1146f476ca66e3bee9b88c1de:2000",
                       "plxjob-worker2-8eefb7a1146f476ca66e3bee9b88c1de:2000"],
            "ps": ["plxjob-ps3-8eefb7a1146f476ca66e3bee9b88c1de:2000"],
        }
        :return: dict
        """
        if settings.NO_OP:
            return None

        ensure_in_custer()

        cluster = os.getenv('POLYAXON_CLUSTER', None)
        try:
            return json.loads(cluster) if cluster else None
        except (ValueError, TypeError):
            print('Could get cluster definition, '
                  'please make sure this is running inside a polyaxon job.')
            return None

    @staticmethod
    def get_task_info():
        """Returns the task info: {"type": str, "index": int}."""
        if settings.NO_OP:
            return None

        ensure_in_custer()

        info = os.getenv('POLYAXON_TASK_INFO', None)
        try:
            return json.loads(info) if info else None
        except (ValueError, TypeError):
            print('Could get task info, '
                  'please make sure this is running inside a polyaxon job.')
            return None

    @classmethod
    def get_tf_config(cls, envvar='TF_CONFIG'):
        """
        Returns the TF_CONFIG defining the cluster and the current task.
        if `envvar` is not null, it will set and env variable with `envvar`.
        """
        if settings.NO_OP:
            return None

        ensure_in_custer()

        cluster_def = cls.get_cluster_def()
        task_info = cls.get_task_info()
        tf_config = {
            'cluster': cluster_def,
            'task': task_info,
            'model_dir': get_outputs_path(),
            'environment': 'cloud'
        }

        if envvar:
            os.environ[envvar] = json.dumps(tf_config)

        return tf_config

    @staticmethod
    def get_experiment_info():
        """
        Returns information about the experiment:
            * project_name
            * experiment_group_name
            * experiment_name
            * project_uuid
            * experiment_group_uuid
            * experiment_uuid
        """
        if settings.NO_OP:
            return None

        ensure_in_custer()

        info = os.getenv('POLYAXON_EXPERIMENT_INFO', None)
        try:
            return json.loads(info) if info else None
        except (ValueError, TypeError):
            print('Could get experiment info, '
                  'please make sure this is running inside a polyaxon job.')
            return None

    @staticmethod
    def get_declarations():
        """
        Returns all the experiment declarations based on both:
            * declarations section
            * matrix section
        """
        if settings.NO_OP:
            return None

        ensure_in_custer()

        declarations = os.getenv('POLYAXON_DECLARATIONS', None)
        try:
            return json.loads(declarations) if declarations else None
        except (ValueError, TypeError):
            print('Could get declarations, '
                  'please make sure this is running inside a polyaxon job.')
            return None
| 1.90625 | 2 |
test_day10.py | Yolgie/AdventOfCode2017 | 0 | 12759214 | <reponame>Yolgie/AdventOfCode2017
import unittest
from day10 import Hash
class Tests(unittest.TestCase):
    """Unit tests for the day 10 Hash solver."""

    def test_part_1(self):
        """A 5-element list processed with lengths "3, 4, 1, 5" yields 12."""
        hasher = Hash()
        hasher.test = 1
        hasher.list_size = 5
        self.assertEqual(12, hasher.process(["3, 4, 1, 5"]))
| 2.765625 | 3 |
src/tests_of_codebase/queueing_test.py | doitintl/intercloud-networking | 1 | 12759215 | #!/usr/bin/env python
import itertools
from cloud.clouds import get_region, Cloud, get_regions
from test_steps.do_test import do_batch
from util.utils import set_cwd, random_id, Timer, init_logger
init_logger()
def test1():
    """Batch-test four hand-picked intra-cloud region pairs, both directions."""
    batch_id = random_id()
    gcp_east = (get_region(Cloud.GCP, "us-east1"), {})
    gcp_central = (get_region(Cloud.GCP, "us-central1"), {})
    aws_east1 = (get_region(Cloud.AWS, "us-east-1"), {})
    aws_east2 = (get_region(Cloud.AWS, "us-east-2"), {})
    pairs = [
        (gcp_east, gcp_central),
        (gcp_central, gcp_east),
        (aws_east1, aws_east2),
        (aws_east2, aws_east1),
    ]
    do_batch(batch_id, pairs)
def test2():
    """Batch-test every ordered pair drawn from the first 40 known regions."""
    batch_id = random_id()
    candidates = get_regions()[:40]
    pairs = [((src, {}), (dst, {}))
             for src, dst in itertools.product(candidates, candidates)]
    do_batch(batch_id, pairs)
# Script entry point: run the all-pairs batch from the repo root, timed.
if __name__ == "__main__":
    with Timer("Full run"):
        set_cwd()
        test2()
| 2.09375 | 2 |
concourse/model/job.py | adracus/cc-utils | 0 | 12759216 | # Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import toposort
from concourse.model.base import (
ModelBase,
select_attr,
)
from util import not_none
from concourse.model.resources import RepositoryConfig, ResourceIdentifier
class JobVariant(ModelBase):
    """One concrete variant of a pipeline job definition.

    Holds the variant's steps, traits and repositories (parsed from
    ``raw_dict``) and resolves concourse resources through the supplied
    resource registry.
    """

    def __init__(self, name: str, raw_dict: dict, resource_registry, *args, **kwargs):
        self._main_repository_name = None
        self._resource_registry = not_none(resource_registry)
        self.variant_name = name
        super().__init__(raw_dict=raw_dict, *args, **kwargs)

    def _known_attributes(self):
        # Attribute names accepted in the raw pipeline definition.
        return {
            'steps',
            'traits',
            'repo',
            'repos',
        }

    def _children(self):
        # All nested model elements, used by the base-class traversal.
        yield from self.steps()
        yield from self.traits().values()
        yield from self.repositories()

    def traits(self):
        """Return the mapping of trait name to trait instance."""
        return self._traits_dict

    def trait(self, name):
        """Return the trait *name*; raises KeyError if absent."""
        return self._traits_dict[name]

    def has_trait(self, name):
        """Return True if this variant declares the trait *name*."""
        return name in self.traits()

    def job_name(self):
        """Return the concourse job name: '<branch>-<variant>-job'."""
        return '{b}-{n}-job'.format(
            b=self.main_repository().branch(),
            n=self.variant_name,
        )

    def meta_resource_name(self):
        """Return the name of this variant's 'meta' resource from the registry."""
        meta_res = self._resource_registry.resource(
            ResourceIdentifier(type_name='meta', base_name=self.variant_name)
        )
        return meta_res.resource_identifier().name()

    def steps(self):
        """Return all pipeline steps of this variant."""
        return self._steps_dict.values()

    def step_names(self):
        """Return an iterable of the step names."""
        return map(select_attr('name'), self.steps())

    def ordered_steps(self):
        """Topologically sort steps by their dependencies.

        Circular dependencies introduced by synthetic steps are patched out
        (custom steps' dependencies win); remaining cycles propagate as
        errors since they indicate a user mistake.
        """
        dependencies = {
            step.name: step.depends() for step in self.steps()
        }
        try:
            result = list(toposort.toposort(dependencies))
        except toposort.CircularDependencyError as de:
            # remove circular dependencies caused by synthetic steps
            # (custom steps' dependencies should "win")
            for step_name, step_dependencies in de.data.items():
                step = self.step(step_name)
                if not step.is_synthetic:
                    continue  # only patch away synthetic steps' dependencies
                for step_dependency_name in step_dependencies:
                    step_dependency = self.step(step_dependency_name)
                    if step_dependency.is_synthetic:
                        continue  # leave dependencies between synthetic steps
                    # patch out dependency from synthetic step to custom step
                    dependencies[step_name].remove(step_dependency_name)
            # try again - if there is still a cyclic dependency, this is probably caused
            # by a user error - so let it propagate
            result = toposort.toposort(dependencies)

        # result contains a generator yielding tuples of step name in the correct execution order.
        # each tuple can/should be parallelised
        return result

    def add_step(self, step: 'PipelineStep'):
        """Register *step*; raises ValueError on a duplicate step name."""
        if self.has_step(step.name):
            raise ValueError('conflict: pipeline definition already contained step {s}'.format(
                s=step.name
                )
            )
        self._steps_dict[step.name] = step

    def step(self, name):
        """Return the step *name*; raises KeyError if absent."""
        return self._steps_dict[name]

    def has_step(self, step_name):
        """Return True if a step named *step_name* exists."""
        return step_name in self.step_names()

    def pr_repository(self, name):
        """Return a pull-request-qualified copy of the repository *name*."""
        pr_repo = self.repository(name)
        return RepositoryConfig(
            raw_dict=dict(pr_repo.raw),
            logical_name=name,
            qualifier='pr',
            is_pull_request=True
        )

    def repositories(self):
        # TODO: introduce a common base class for "input resources"
        # (where Github and PR are two examples, and "time" will be the third)
        return self._repos_dict.values()

    def repository_names(self):
        """Return the logical names of all repositories."""
        return self._repos_dict.keys()

    def repository(self, name):
        """Return the repository *name*; raises KeyError if absent."""
        return self._repos_dict[name]

    def has_main_repository(self):
        """Return True once a main repository has been designated."""
        return self._main_repository_name is not None

    def main_repository(self):
        """Return the designated main repository (KeyError if not set)."""
        return self.repository(self._main_repository_name)

    def publish_repositories(self):
        """Return all repositories marked for publishing."""
        return self._publish_repos_dict.values()

    def publish_repository(self, name):
        """Return the publish repository *name*; raises KeyError if absent."""
        return self._publish_repos_dict[name]

    def has_publish_repository(self, name):
        """Return True if *name* is a publish repository."""
        return name in self._publish_repos_dict

    def __repr__(self):
        return f'JobVariant: {self.variant_name}'
| 1.679688 | 2 |
tests/test_simu_linear.py | ppuertocrem/pandangas | 1 | 12759217 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `simu_linear` package."""
import pytest
import numpy as np
from thermo.chemical import Chemical
from pandangas import simu_linear as sim
from pandangas import topology as top
from fixtures import simple_network
def test_solve():
    """solve() returns the exact solution of a small linear system A·x = b."""
    # 3 * x0 + x1 = 9 and x0 + 2 * x1 = 8 <=> x0 = 2, x1 = 3
    a = np.array([[3, 1], [1, 2]])
    b = np.array([9, 8])
    assert np.array_equal(sim.solve(a, b), np.array([2.0, 3.0]))
def test_weird():
    """weird() expands a 0/1 vector into one one-hot row per non-zero entry."""
    a = np.array([1, 0, 0, 1, 0, 1])
    waited = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1]])
    assert np.array_equal(sim.weird(a), waited)
def test_create_a(simple_network):
    """create_a() builds a square system matrix sized to the BP-level graph."""
    gas = Chemical("natural gas", T=10 + 273.15, P=1.022e5)
    net = simple_network
    g = top.graphs_by_level_as_dict(net)
    graph = g["BP"]
    a = sim.create_a(graph, gas)
    assert a.shape == (20, 20)
def test_create_k(simple_network):
    """create_k() yields one coefficient per edge, all equal for the fixture."""
    gas = Chemical("natural gas", T=10 + 273.15, P=1.022e5)
    net = simple_network
    g = top.graphs_by_level_as_dict(net)
    graph = g["BP"]
    k = sim.create_k(graph, gas)
    assert k.shape == (len(graph.edges),)
    for ik in k:
        assert int(ik) == 49975
def test_create_b(simple_network):
    """create_b() builds the right-hand-side vector for the BP-level system."""
    net = simple_network
    loads = sim._scaled_loads_as_dict(net)
    p_ops = sim._operating_pressures_as_dict(net)
    g = top.graphs_by_level_as_dict(net)
    graph = g["BP"]
    b = sim.create_b(graph, loads, p_ops)
    assert b.shape == (20,)
def test_run_one_level_BP_shape(simple_network):
    """run_one_level() returns arrays sized to the graph's nodes and edges."""
    net = simple_network
    g = top.graphs_by_level_as_dict(net)
    graph = g["BP"]
    p_nodes, m_dot_pipes, m_dot_nodes, gas = sim.run_one_level(net, "BP")
    assert p_nodes.shape == (len(graph.nodes),)
    assert m_dot_pipes.shape == (len(graph.edges),)
    assert m_dot_nodes.shape == (len(graph.nodes),)
def test_run_one_level_BP_values(simple_network):
    """run_one_level() reproduces the reference pressures and mass flows."""
    net = simple_network
    g = top.graphs_by_level_as_dict(net)
    graph = g["BP"]
    p_nodes, m_dot_pipes, m_dot_nodes, gas = sim.run_one_level(net, "BP")
    assert p_nodes.round().tolist() == [102200.0, 102190.0, 102188.0, 102193.0, 102190.0, 102200.0]
    assert m_dot_pipes.round(5).tolist() == [2.1e-04, 2.4e-04, 3.0e-05, 7.0e-05, -1.4e-04, 7.0e-05, -2.0e-04, 1.0e-05]
    assert m_dot_nodes.round(5).tolist() == [-0.00045, 0.00026, 0.00026, 0.0, 0.00026, -0.00034]
| 2.53125 | 3 |
pbiblisite/pbibli/urls.py | Shikoruma/PBibli | 0 | 12759218 | from django.urls import path, include
from .views import BookViewSet, LoanViewSet, UserViewSet, SelfView, AllBooksView
from rest_framework import routers
# REST routes for the model viewsets; the router derives the URL patterns.
router = routers.DefaultRouter()
router.register(r'books', BookViewSet)
router.register(r'loans', LoanViewSet)
router.register(r'users', UserViewSet)

# Explicit endpoints first, then the router-generated CRUD routes at the root.
urlpatterns = [
    path('self/', SelfView.as_view()),
    path('allbooks/', AllBooksView.as_view()),
    path('', include(router.urls)),
]
| 1.90625 | 2 |
bk/SensorReading.py | racheliurui/vscode-hello-python | 0 | 12759219 | #!/usr/bin/env python3
# https://sites.google.com/site/ev3python/learn_ev3_python/using-sensors/sensor-modes
# Latest motor speed reading (units not specified here — TODO confirm).
speedReading=0
# Color Sensor Readings
# COL-REFLECT COL-AMBIENT COL-COLOR RGB-RAW
colorSensor_mode_default = "COL-COLOR"
# Current mode of the left / right color sensors (one of the modes above).
colorSensor_mode_lt = colorSensor_mode_default
colorSensor_mode_rt = colorSensor_mode_default
# COL-REFLECT mode: reflected-light readings, left / right.
colorSensor_reflect_lt=0
colorSensor_reflect_rt=0
# COL-COLOR mode: detected color codes, left / right.
colorSensor_color_lt=0
colorSensor_color_rt=0
# RGB-RAW mode: raw red/green/blue channel readings, left sensor.
colorSensor_rawred_lt=0
colorSensor_rawgreen_lt=0
colorSensor_rawblue_lt=0
# RGB-RAW mode: raw red/green/blue channel readings, right sensor.
colorSensor_rawred_rt=0
colorSensor_rawgreen_rt=0
colorSensor_rawblue_rt=0
# Ultrasonic distance reading in centimeters.
ultrasonicSensor_ReadingInCm=0
| 1.640625 | 2 |
graspy/embed/mase.py | asaadeldin11/graspy | 0 | 12759220 | # Copyright 2019 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.utils.validation import check_is_fitted
from ..utils import import_graph, is_almost_symmetric
from .base import BaseEmbedMulti
from .svd import select_dimension, selectSVD
class MultipleASE(BaseEmbedMulti):
    r"""
    Multiple Adjacency Spectral Embedding (MASE) embeds arbitrary number of input
    graphs with matched vertex sets.
    For a population of undirected graphs, MASE assumes that the population of graphs
    is sampled from :math:`VR^{(i)}V^T` where :math:`V \in \mathbb{R}^{n\times d}` and
    :math:`R^{(i)} \in \mathbb{R}^{d\times d}`. Score matrices, :math:`R^{(i)}`, are
    allowed to vary for each graph, but are symmetric. All graphs share a common a
    latent position matrix :math:`V`.
    For a population of directed graphs, MASE assumes that the population is sampled
    from :math:`UR^{(i)}V^T` where :math:`U \in \mathbb{R}^{n\times d_1}`,
    :math:`V \in \mathbb{R}^{n\times d_2}`, and
    :math:`R^{(i)} \in \mathbb{R}^{d_1\times d_2}`. In this case, score matrices
    :math:`R^{(i)}` can be assymetric and non-square, but all graphs still share a
    common latent position matrices :math:`U` and :math:`V`.
    Parameters
    ----------
    n_components : int or None, default = None
        Desired dimensionality of output data. If "full",
        n_components must be <= min(X.shape). Otherwise, n_components must be
        < min(X.shape). If None, then optimal dimensions will be chosen by
        :func:`~graspy.embed.select_dimension` using ``n_elbows`` argument.
    n_elbows : int, optional, default: 2
        If ``n_components=None``, then compute the optimal embedding dimension using
        :func:`~graspy.embed.select_dimension`. Otherwise, ignored.
    algorithm : {'randomized' (default), 'full', 'truncated'}, optional
        SVD solver to use:
        - 'randomized'
            Computes randomized svd using
            :func:`sklearn.utils.extmath.randomized_svd`
        - 'full'
            Computes full svd using :func:`scipy.linalg.svd`
        - 'truncated'
            Computes truncated svd using :func:`scipy.sparse.linalg.svds`
    n_iter : int, optional (default = 5)
        Number of iterations for randomized SVD solver. Not used by 'full' or
        'truncated'. The default is larger than the default in randomized_svd
        to handle sparse matrices that may have large slowly decaying spectrum.
    scaled : bool, optional (default=False)
        Whether to scale individual eigenvectors with eigenvalues in first embedding
        stage.
    Attributes
    ----------
    n_graphs_ : int
        Number of graphs
    n_vertices_ : int
        Number of vertices in each graph
    latent_left_ : array, shape (n_samples, n_components)
        Estimated left latent positions of the graph.
    latent_right_ : array, shape (n_samples, n_components), or None
        Estimated right latent positions of the graph. Only computed when the an input
        graph is directed, or adjacency matrix is assymetric. Otherwise, None.
    scores_ : array, shape (n_samples, n_components, n_components)
        Estimated :math:`\hat{R}` matrices for each input graph.
    Notes
    -----
    When an input graph is directed, `n_components` of `latent_left_` may not be equal
    to `n_components` of `latent_right_`.
    """

    def __init__(
        self,
        n_components=None,
        n_elbows=2,
        algorithm="randomized",
        n_iter=5,
        scaled=False,
    ):
        # Validate `scaled` eagerly so a bad value fails at construction time,
        # not deep inside `fit`.
        if not isinstance(scaled, bool):
            msg = "scaled must be a boolean, not {}".format(scaled)
            raise TypeError(msg)
        super().__init__(
            n_components=n_components,
            n_elbows=n_elbows,
            algorithm=algorithm,
            n_iter=n_iter,
        )
        self.scaled = scaled

    def _reduce_dim(self, graphs):
        # Two-stage embedding: (1) embed each graph individually into a
        # log2-sized space, (2) jointly embed the horizontally concatenated
        # per-graph embeddings to recover the shared latent positions.
        # first embed into log2(n_vertices) for each graph
        n_components = int(np.ceil(np.log2(np.min(self.n_vertices_))))
        # embed individual graphs
        embeddings = [
            selectSVD(
                graph,
                n_components=n_components,
                algorithm=self.algorithm,
                n_iter=self.n_iter,
            )
            for graph in graphs
        ]
        Us, Ds, Vs = zip(*embeddings)
        # Choose the best embedding dimension for each graphs
        if self.n_components is None:
            embedding_dimensions = []
            for D in Ds:
                elbows, _ = select_dimension(D, n_elbows=self.n_elbows)
                embedding_dimensions.append(elbows[-1])
            # Choose the max of all of best embedding dimension of all graphs
            best_dimension = int(np.ceil(np.max(embedding_dimensions)))
        else:
            best_dimension = self.n_components
        if not self.scaled:
            # Concatenate the unscaled singular vectors of every graph.
            Us = np.hstack([U[:, :best_dimension] for U in Us])
            Vs = np.hstack([V.T[:, :best_dimension] for V in Vs])
        else:
            # Equivalent to ASE
            # Weight each singular vector by the square root of its singular
            # value before concatenating.
            Us = np.hstack(
                [
                    U[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))
                    for U, D in zip(Us, Ds)
                ]
            )
            Vs = np.hstack(
                [
                    V.T[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))
                    for V, D in zip(Vs, Ds)
                ]
            )
        # Second SVD for vertices
        # The notation is slightly different than the paper
        Uhat, _, _ = selectSVD(
            Us,
            n_components=self.n_components,
            n_elbows=self.n_elbows,
            algorithm=self.algorithm,
            n_iter=self.n_iter,
        )
        Vhat, _, _ = selectSVD(
            Vs,
            n_components=self.n_components,
            n_elbows=self.n_elbows,
            algorithm=self.algorithm,
            n_iter=self.n_iter,
        )
        return Uhat, Vhat

    def fit(self, graphs, y=None):
        """
        Fit the model with graphs.
        Parameters
        ----------
        graphs : list of nx.Graph or ndarray, or ndarray
            If list of nx.Graph, each Graph must contain same number of nodes.
            If list of ndarray, each array must have shape (n_vertices, n_vertices).
            If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        graphs = self._check_input_graphs(graphs)
        # Check if undirected
        undirected = all(is_almost_symmetric(g) for g in graphs)
        # embed
        Uhat, Vhat = self._reduce_dim(graphs)
        self.latent_left_ = Uhat
        if not undirected:
            self.latent_right_ = Vhat
            # Batched matmul over the graph stack yields one score matrix
            # R^(i) per graph.
            self.scores_ = Uhat.T @ graphs @ Vhat
        else:
            self.latent_right_ = None
            self.scores_ = Uhat.T @ graphs @ Uhat
        return self

    def fit_transform(self, graphs, y=None):
        """
        Fit the model with graphs and apply the embedding on graphs.
        n_components is either automatically determined or based on user input.
        Parameters
        ----------
        graphs : list of nx.Graph or ndarray, or ndarray
            If list of nx.Graph, each Graph must contain same number of nodes.
            If list of ndarray, each array must have shape (n_vertices, n_vertices).
            If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).
        Returns
        -------
        out : array-like, shape (n_vertices, n_components) if input
            graphs were symmetric. If graphs were directed, returns tuple of
            two arrays (same shape as above) where the first corresponds to the
            left latent positions, and the right to the right latent positions
        """
        return self._fit_transform(graphs)
| 1.960938 | 2 |
DublinBikeWebApp/web/app.py | BreakfastBagels/DublinBikeApp | 1 | 12759221 | <filename>DublinBikeWebApp/web/app.py<gh_stars>1-10
#!/usr/bin/env python
from flask import Flask, jsonify, redirect, url_for, render_template, send_from_directory, send_file
from flaskext.mysql import MySQL
import json
import pickle
from sklearn.preprocessing import PolynomialFeatures
# Create the Flask application and configure the MySQL extension.
app = Flask(__name__)
mysql = MySQL()
# NOTE(review): database host/credentials are hard-coded here; move them into
# environment variables or an untracked config file before publishing.
app.config['MYSQL_DATABASE_HOST'] = 'main-db.cd8z7cqv2c8a.us-east-1.rds.amazonaws.com'
app.config['MYSQL_DATABASE_PORT'] = 3306
app.config['MYSQL_DATABASE_USER'] = 'admin'
app.config['MYSQL_DATABASE_PASSWORD'] = '<PASSWORD>'
mysql.init_app(app)
# Landing page route
@app.route("/")
@app.route("/index")
def landing_page():
return render_template("index.html", content="trying stuff out")
# Map page route
@app.route("/map")
def map_page():
    """Render the interactive station-map page."""
    return render_template("map.html")
# Query that returns the correct bagel image based on bike availability passed in
@app.route("/Bagel_Icon/<type>")
def bagel_icon(type):
    """Serve the bagel marker PNG that matches the availability level.

    Recognised levels are "Full", "Empty" and "Semi_Empty"; any other value
    falls back to the semi-full icon.
    """
    icon_by_level = {
        "Full": "static/icons/Bagel_Full_Small.png",
        "Empty": "static/icons/Bagel_Empty_Small.png",
        "Semi_Empty": "static/icons/Bagel_Semi_Empty_Small.png",
    }
    icon_path = icon_by_level.get(type, "static/icons/Bagel_Semi_Full_Small.png")
    return send_file(icon_path, mimetype='image/png')
# Route for statistics page
@app.route("/stats")
def stats_page():
    """Render the statistics page."""
    return render_template("stats.html")
# API that generates bike availability prediction for user based on trained models
@app.route("/model/<num>/<weekday>")
def predict3(num, weekday):
    """Return 24 hourly bike-availability predictions for one station.

    Loads the pickled regression model matching the station number and
    day-type ("weekday"/"weekend"), then evaluates it for each hour 0-23
    through a degree-2 polynomial feature expansion.
    """
    # Build the path of the serialised model for this station/day-type.
    pickle_rick = "ModellingNotebooks/mean-bikes-pickle" + str(num) + "-"
    if str(weekday) in ("weekday", "weekend"):
        pickle_rick += str(weekday)
    with open(pickle_rick, 'rb') as file:
        model = pickle.load(file)
    # Expand the scalar hour into degree-2 polynomial features for the model.
    poly = PolynomialFeatures(degree=2)
    result = {}
    for hour in range(24):
        result[hour] = (model.predict(poly.fit_transform([[hour]])))[0]
    return jsonify(result)
# API query that queries database for weather information
@app.route("/get-weather")
def get():
    """Return the single most recent current-weather row as JSON.

    The payload is ``{"weather": [row]}`` where the row maps column names
    to values.
    """
    cursor = mysql.connect().cursor()
    cursor.execute('''select * from maindb.current_weather order by Time desc limit 1''')
    column_names = [col[0] for col in cursor.description]
    rows = [dict(zip(column_names, row)) for row in cursor.fetchall()]
    return jsonify({'weather': rows})
# API query that queries database for hourly weather information
@app.route("/hourly-weather")
def get_hourly():
    """Return the 10 most recent hourly-forecast rows as JSON.

    The payload is ``{"hourly": [rows]}`` with each row mapping column
    names to values.
    """
    cursor = mysql.connect().cursor()
    cursor.execute('''select * from maindb.hourly_weather order by Hour_Recorded desc, Hourly_Time asc limit 10''')
    column_names = [col[0] for col in cursor.description]
    rows = [dict(zip(column_names, row)) for row in cursor.fetchall()]
    return jsonify({'hourly': rows})
# API query that generates daily weather information for user
@app.route("/daily-weather")
def get_daily():
    """Return the 10 most recent daily-forecast rows as JSON.

    The payload is ``{"daily": [rows]}`` with each row mapping column
    names to values.
    """
    cursor = mysql.connect().cursor()
    cursor.execute('''select * from maindb.daily_weather order by Hour_Recorded desc, Daily_Time asc limit 10''')
    column_names = [col[0] for col in cursor.description]
    rows = [dict(zip(column_names, row)) for row in cursor.fetchall()]
    return jsonify({'daily': rows})
# API query that returns json object of bike stations and their locations
@app.route('/static_stations')
def static_stations():
    """Return the constant per-station data (location, address, ...) as JSON.

    The payload is ``{"stations": [rows]}`` ordered by address.
    """
    cursor = mysql.connect().cursor()
    cursor.execute('''select * from maindb.static_table order by address''')
    column_names = [col[0] for col in cursor.description]
    rows = [dict(zip(column_names, row)) for row in cursor.fetchall()]
    return jsonify({'stations': rows})
# API query that obtains up-to-date information on bike availability for stations
@app.route('/station_info')
def get_station_info():
    """Return the latest availability snapshot for every station as JSON.

    Joins the static table (addresses) against the dynamic table
    (availability) and keeps the 110 most recently entered rows — one per
    station. The payload is ``{"station_info": [rows]}``.
    """
    cursor = mysql.connect().cursor()
    cursor.execute(
        "SELECT dt.Station_Number, st.address, dt.Available_Stands, dt.Available_Bikes, dt.Time_Entered "
        "FROM maindb.static_table as st, maindb.dynamic_table as dt "
        "WHERE dt.Station_Number=st.number ORDER BY Time_Entered DESC LIMIT 110;"
    )
    # Convert each row tuple into a column-name -> value mapping.
    column_names = [col[0] for col in cursor.description]
    station_data_list = [dict(zip(column_names, row)) for row in cursor.fetchall()]
    return jsonify({'station_info': station_data_list})
# Run the development server on all interfaces, port 5000.
# NOTE(review): debug=True must not be used in production.
if __name__ == "__main__":
    app.run(debug=True, host='0.0.0.0', port=5000)
| 2.640625 | 3 |
edau/dataset.py | Hiromi-nee/edau | 0 | 12759222 | <filename>edau/dataset.py
import torch
import torch.utils.data
from PIL import Image
import os
import pandas as pd
class CSVImageDataset(torch.utils.data.Dataset):
    """Image-classification dataset backed by a CSV file of (path, label) rows.

    Column 0 of the CSV holds image file names (joined onto ``root`` when one
    is given) and column 1 holds the class labels.
    """

    def __init__(self, csv_path, names=None, header=None, sep=",", root=None, transforms=None):
        """Load the CSV index.

        csv_path   -- path of the CSV file listing images and labels
        names      -- optional column names passed through to pandas
        header     -- optional header-row index passed through to pandas
        sep        -- CSV field separator
        root       -- optional directory prepended to every image path
        transforms -- callable applied to each loaded PIL image; defaults to
                      a plain tensor conversion
        """
        if transforms is not None:
            self.transforms = transforms
        else:
            # Import lazily so torchvision is only required for the default.
            import torchvision.transforms as T
            self.transforms = T.Compose([T.ToTensor()])
        # BUG FIX: the original was missing the comma before ``names=``
        # (a SyntaxError).
        self.data = pd.read_csv(csv_path, sep=sep, header=header, names=names)
        self.root = root
        # BUG FIX: the original referenced the bare names ``data`` and
        # ``classes`` instead of the instance attributes.
        self.classes = list(self.data[1].unique())
        self.num_classes = len(self.classes)

    def __getitem__(self, idx):
        """Return the (transformed image, label tensor) pair at ``idx``."""
        images = self.data[0]
        labels = self.data[1]
        # BUG FIX: ``root`` was referenced as a bare (undefined) name; use the
        # stored attribute, and fall back to the raw path when no root is set.
        img_path = os.path.join(self.root, images[idx]) if self.root else images[idx]
        # NOTE(review): assumes labels are numeric class indices —
        # torch.as_tensor raises for string labels. TODO confirm with callers.
        label = torch.as_tensor(labels[idx], dtype=torch.int64)
        img = Image.open(img_path).convert("RGB")
        img = self.transforms(img)
        return img, label

    def __len__(self):
        """Number of rows in the CSV index."""
        return len(self.data)
| 2.984375 | 3 |
aries_cloudagent/protocols/issue_credential/v1_1/message_types.py | krzosa/aries-cloudagent-python | 0 | 12759223 | <reponame>krzosa/aries-cloudagent-python
"""Message types to register."""
PROTOCOL_URI = "https://didcomm.org/issue-credential/1.1"
PROTOCOL_PACKAGE = "aries_cloudagent.protocols.issue_credential.v1_1"
CREDENTIAL_ISSUE = f"{PROTOCOL_URI}/issue-credential"
CREDENTIAL_REQUEST = f"{PROTOCOL_URI}/request-credential"
MESSAGE_TYPES = {
CREDENTIAL_ISSUE: (f"{PROTOCOL_PACKAGE}.messages.credential_issue.CredentialIssue"),
CREDENTIAL_REQUEST: (
f"{PROTOCOL_PACKAGE}.messages.credential_request.CredentialRequest"
),
}
| 1.625 | 2 |
larvik/extenders.py | jhnnsrs/arbeider | 0 | 12759224 | <reponame>jhnnsrs/arbeider
import pandas as pd
import xarray as xr
import dask.dataframe as dd
from larvik.logging import get_module_logger
from django.conf import settings
# Mirror Django's DEBUG flag; nicer xarray display is only enabled in debug.
arnheim_debug = settings.DEBUG
logger = get_module_logger(__name__)
if arnheim_debug:
    logger.info("Making DataArrays Look Beautiful")
    logger.info("Extending DataArray")
    # Use xarray's HTML repr for DataArrays (e.g. in notebooks).
    xr.set_options(display_style="html")
class ArnheimError(Exception):
    """Raised by the Arnheim DataArray accessors when expected coordinates are missing."""
@xr.register_dataarray_accessor("biometa")
class MetaAccessor:
def __init__(self, xarray_obj):
self._obj = xarray_obj
self._channelvalue = None
self._channeldict = None
self._planesdict = None
self._planes = None
def selchannel(self, **kwargs):
"""Return the geographic center point of this dataset."""
# we can use a cache on our accessor objects, because accessors
# themselves are cached on instances that access them.
lala = pd.DataFrame(self._obj.channel.data.compute())
for key, value in kwargs.items():
lala = lala[lala[key] == value]
return lala
@property
def name(self):
return self._obj.attrs["seriesname"]
@property
def scan(self):
return pd.DataFrame(self._obj.attrs["scan"])
@property
def channels(self):
if not "channels" in self._obj.coords:
raise ArnheimError("No channels. Did you transform the Array in (c) with coordinate changes?")
"""Return the geographic center point of this dataset."""
# we can use a cache on our accessor objects, because accessors
# themselves are cached on instances that access them.
lala = dd.from_dask_array(self._obj.channels.data)
return lala
@property
def planes(self):
if not "planes" in self._obj.coords:
raise ArnheimError("No planes. Did you transform the Array in (c,z,t) with coordinate changes?")
"""Return the geographic center point of this dataset."""
# we can use a cache on our accessor objects, because accessors
# themselves are cached on instances that access them.
lala = dd.from_dask_array(self._obj.channels.data.flatten())
return lala
@property
def savecoords(self):
''' All the save coordinates for accessing'''
return [key for key, value in self._obj.coords.items()]
@xr.register_dataarray_accessor("viewer")
class MetaAccessor:
def __init__(self, xarray_obj):
self._obj = xarray_obj
self.log = logger.info
def show(self, maxisp=True, t=0, rgb=(0, 1, 2), figsize=None, scale=12):
import matplotlib.pyplot as plt
figsize = (scale + scale/4, scale * (self._obj.shape[0]/self._obj.shape[1]))
if figsize:
figsize = figsize
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
image = self._obj
if "t" in image.dims:
self.log(f"Stack has {len(image.t)} Timepoints: selecting t={t}")
image = image.sel(t=t)
if "z" in image.dims and maxisp:
self.log(f"Stack has {len(image.z)} Z Planes: Projecting maximum intensity")
image = image.max(dim="z")
if "c" in image.dims:
nchannels = len(image.c)
if nchannels == 1:
channelname = str(image.channels.data["Name"].compute())
image = image.sel(c=image.c[0])
plot = image.plot.imshow(ax= ax)
elif nchannels == 2:
self.log(f"Stack has 2 Channels: Merging intensity")
channelname = "Merged " + " & ".join(image.channels.data["Name"].compute())
image = image.max(dim="c")
plot = image.plot.imshow(ax= ax)
elif nchannels == 3:
channelname = " & ".join(image.channels.data["Name"].compute())
plot = image.plot.imshow(ax= ax, rgb="c")
elif nchannels > 3:
image = image.sel(c=rgb)
channelname = " & ".join(image.channels.data["Name"].compute())
plot = image.plot.imshow(rgb="c",ax= ax)
else:
channelname = "Not Set"
plot = image.plot.imshow(ax= ax)
ax.set_title(channelname)
return fig
| 2.328125 | 2 |
harness/determined/core/_searcher.py | gt2345/gtang-determined | 0 | 12759225 | <reponame>gt2345/gtang-determined
import enum
import logging
import math
from typing import Iterator, Optional
import determined as det
from determined import core
from determined.common.experimental.session import Session
logger = logging.getLogger("determined.core")
class Unit(enum.Enum):
    """Units in which searcher training lengths can be expressed."""
    EPOCHS = "EPOCHS"
    RECORDS = "RECORDS"
    BATCHES = "BATCHES"
def _parse_searcher_units(experiment_config: dict) -> Optional[Unit]:
searcher = experiment_config.get("searcher", {})
length_example = searcher.get("max_length")
if isinstance(length_example, dict) and len(length_example) == 1:
key = next(iter(length_example.keys()))
return {"records": Unit.RECORDS, "epochs": Unit.EPOCHS, "batches": Unit.BATCHES}.get(key)
# Either a `max_length: 50` situation or a broken config.
return None
class SearcherOperation:
    """
    A ``SearcherOperation`` is a request from the hyperparameter-search logic for the training
    script to execute one train-validate-report cycle.
    Some searchers, such as single, random, or grid, pass only a single ``SearcherOperation`` to
    each trial, while others may pass many ``SearcherOperations``.
    Each ``SearcherOperation`` has a length attribute representing the cumulative training that
    should be completed before the validate-report steps of the cycle. The length attribute is
    absolute, not incremental, meaning that if the searcher wants you to train for 10 units and
    validate, then train for 10 more units and validate, it emits one ``SearcherOperation`` with
    ``.length=10`` followed by a second ``SearcherOperation`` with ``.length=20``. Using absolute
    lengths instead of incremental lengths makes restarting after crashes simple and robust.
    """

    def __init__(
        self,
        session: Session,
        trial_id: int,
        length: int,
        is_chief: bool,
    ) -> None:
        self._session = session
        self._trial_id = trial_id
        self._length = length
        self._is_chief = is_chief  # only the chief may report progress/completion
        self._completed = False  # flips to True after report_completed()

    @property
    def length(self) -> int:
        """
        ``length`` represents the total amount of training which should be reached by the train step
        before the validate-report steps.
        """
        return self._length

    def report_progress(self, length: float) -> None:
        """
        ``report_progress()`` reports the training progress to the Determined master so the WebUI
        can show accurate progress to users.
        The unit of the length value passed to ``report_progress()`` must match the unit of the
        ``.length`` attribute. The unit of the ``.length`` attribute is user-defined. When
        treating ``.length`` as batches, ``report_progress()`` should report batches. When treating
        .length as epochs, ``report_progress()`` must also be in epochs.
        """
        if not self._is_chief:
            raise RuntimeError("you must only call op.report_progress() from the chief worker")
        # A final progress report at exactly op.length is allowed after completion.
        if self._completed and length != self._length:
            raise RuntimeError("you must not call op.report_progress() after op.report_completed()")
        logger.debug(f"op.report_progress({length})")
        self._session.post(
            f"/api/v1/trials/{self._trial_id}/progress",
            data=det.util.json_encode(length),
        )

    def report_completed(self, searcher_metric: float) -> None:
        """
        ``report_completed()`` is the final step of a train-validate-report cycle.
        ``report_completed()`` requires the value of the metric you are searching over. This value
        is typically the output of the "validate" step of the train-validate-report cycle.
        """
        if not self._is_chief:
            raise RuntimeError("you must only call op.report_completed() from the chief worker")
        if self._completed:
            raise RuntimeError("you may only call op.report_completed() once")
        if math.isnan(searcher_metric):
            raise RuntimeError("searcher_metric may not be NaN")
        self._completed = True
        body = {"op": {"length": self._length}, "searcherMetric": searcher_metric}
        logger.debug(f"op.report_completed({searcher_metric})")
        self._session.post(
            f"/api/v1/trials/{self._trial_id}/searcher/completed_operation",
            data=det.util.json_encode(body),
        )
class SearcherMode(enum.Enum):
    """
    ``SearcherMode`` defines the calling behavior of the ``SearcherContext.operations()`` call.
    When mode is ``WorkersAskChief`` (the default), all workers must call
    ``SearcherContext.operations()`` in step with each other. The chief iterates through
    searcher operations from the master and then propagates the operations to each worker,
    introducing a synchronization point between workers.
    When mode is ``ChiefOnly``, only the chief may call ``SearcherContext.operations()``. Usually
    this implies you must manually inform the workers of what work to do next.
    """
    WorkersAskChief = "WORKERS_ASK_CHIEF"
    # NOTE(review): "CHEIF_ONLY" is a typo for "CHIEF_ONLY", but the value may
    # already be serialized/compared elsewhere — confirm before changing it.
    ChiefOnly = "CHEIF_ONLY"
class SearcherContext:
    """
    ``SearcherContext`` gives direct access to operations emitted by the search algorithm in the
    master. Each ``SearcherOperation`` emitted has a (unitless) length that you should train for,
    then you complete the op by reporting the validation metric you are searching over.
    It is the user's responsibility to execute the required training. Because the user configured
    the length of the searcher in the experiment configuration, the user should know if the unitless
    length represents epochs, batches, records, etc.
    It is also the user's responsibility to evaluate the model after training and report the correct
    metric; if you intend to search over a metric called val_accuracy, you should report
    val_accuracy.
    Lastly, it is recommended (not required) to report progress periodically, so that the webui can
    accurately reflect current progress. Progress is another unitless length.
    Example:
    .. code:: python
       # Assuming you configured the searcher in terms of batches,
       # the op.length is also interpeted as a batch count.
       # Note that you'll have to load your starting point from a
       # checkpoint if you want to support pausing/continuing training.
       batches_trained = 0
       for op in generic_context.searcher.operations():
           # Train for however long the op requires you to.
           # Note that op.length is an absolute length, not an
           # incremental length:
           while batches_trained < op.length:
               my_train_batch()
               batches_trained += 1
               # Reporting progress every batch would be expensive:
               if batches_trained % 1000:
                   op.report_progress(batches_trained)
           # After training the required amount, pass your searcher
           # metric to op.report_completed():
           val_metrics = my_validate()
           op.report_completed(val_metrics["my_searcher_metric"])
    Note that reporting metrics is completely independent of the SearcherContext API, using
    ``core_context.train.report_training_metrics()`` or
    ``core_context.train.report_validation_metrics()``.
    """

    def __init__(
        self,
        session: Session,
        dist: core.DistributedContext,
        trial_id: int,
        run_id: int,
        allocation_id: str,
        units: Optional[Unit] = None,
    ) -> None:
        self._session = session
        self._dist = dist
        self._trial_id = trial_id
        self._run_id = run_id
        self._allocation_id = allocation_id
        self._units = units  # parsed from the experiment config; may be None

    def _get_searcher_op(self) -> Optional[SearcherOperation]:
        # Ask the master for the current operation; None means the searcher is
        # finished with this trial.
        logger.debug("_get_searcher_op()")
        r = self._session.get(f"/api/v1/trials/{self._trial_id}/searcher/operation")
        body = r.json()
        if body["completed"]:
            return None
        # grpc-gateway encodes uint64 as a string, since it is bigger than a JavaScript `number`.
        length = int(body["op"]["validateAfter"]["length"])
        is_chief = self._dist.rank == 0
        return SearcherOperation(self._session, self._trial_id, length=length, is_chief=is_chief)

    def operations(
        self,
        searcher_mode: SearcherMode = SearcherMode.WorkersAskChief,
        auto_ack: bool = True,
    ) -> Iterator[SearcherOperation]:
        """
        Iterate through all the operations this searcher has to offer.
        See :class:`~determined.core.SearcherMode` for details about calling requirements in
        distributed training scenarios.
        After training to the point specified by each ``SearcherOperation``, the chief, and only the
        chief, must call ``op.report_completed(``) on each operation. This is true regardless of
        the ``searcher_mode`` setting because the Determined master needs a clear, unambiguous
        report of when an operation is completed.
        """
        searcher_mode = SearcherMode(searcher_mode)
        if self._dist.rank == 0:
            # Chief gets operations from master.
            while True:
                op = self._get_searcher_op()
                if searcher_mode == SearcherMode.WorkersAskChief:
                    # Broadcast op.length (or None) to workers. We broadcast just the length
                    # because SearcherOperation is not serializable, and the is_chief attribute
                    # obviously must be set on a per-worker basis.
                    _ = self._dist.broadcast(op and op.length)
                if op is None:
                    if auto_ack:
                        self.acknowledge_out_of_ops()
                    break
                yield op
                if not op._completed:
                    raise RuntimeError("you must call op.report_completed() on each operation")
        else:
            if searcher_mode != SearcherMode.WorkersAskChief:
                raise RuntimeError(
                    "you cannot call searcher.operations(searcher_mode=ChiefOnly) from a non-chief "
                    "worker."
                )
            # Worker gets operations from chief.
            while True:
                op_length = self._dist.broadcast(None)
                if op_length is None:
                    # Chief broadcast None: no more operations.
                    break
                yield SearcherOperation(
                    self._session, self._trial_id, length=op_length, is_chief=False
                )

    def acknowledge_out_of_ops(self) -> None:
        """
        acknowledge_out_of_ops() tells the Determined master that you are shutting down because
        you have recognized the searcher has no more operations for you to complete at this time.
        This is important for the Determined master to know that it is safe to restart this process
        should new operations be assigned to this trial.
        acknowledge_out_of_ops() is normally called automatically just before operations() raises a
        StopIteration, unless operations() is called with auto_ack=False.
        """
        logger.debug(f"acknowledge_out_of_ops(allocation_id:{self._allocation_id})")
        self._session.post(f"/api/v1/allocations/{self._allocation_id}/signals/ack_preemption")

    def get_configured_units(self) -> Optional[Unit]:
        """
        get_configured_units() reports what units were used in the searcher field of the experiment
        config. If no units were configured, None is returned.
        An experiment configured like this causes ``get_configured_units()`` to return EPOCHS:
        .. code:: yaml
           searcher:
             name: single
             max_length:
               epochs: 50
        An experiment configured like this causes ``get_configured_units()`` to return None:
        .. code:: yaml
           searcher:
             name: single
             max_length: 50
        """
        return self._units
class DummySearcherOperation(SearcherOperation):
    """In-memory stand-in for ``SearcherOperation`` that logs locally instead
    of talking to the master (used when running without a cluster)."""

    def __init__(self, length: int, is_chief: bool) -> None:
        # Deliberately does not call super().__init__(): there is no session
        # or trial id in dummy mode.
        self._length = length
        self._is_chief = is_chief
        self._completed = False

    def report_progress(self, length: float) -> None:
        """Log progress locally, enforcing the same contract as the parent."""
        if not self._is_chief:
            raise RuntimeError("you must only call op.report_progress() from the chief worker")
        if self._completed and length != self._length:
            raise RuntimeError("you must not call op.report_progress() after op.report_completed()")
        # BUG FIX: the original passed a plain (non-f) string, so the literal
        # text "{length}/{self._length}" was logged; use lazy %-style args.
        logger.info("progress report: %s/%s", length, self._length)

    def report_completed(self, searcher_metric: float) -> None:
        """Record completion locally, enforcing the same contract as the parent."""
        if not self._is_chief:
            raise RuntimeError("you must only call op.report_completed() from the chief worker")
        if self._completed:
            raise RuntimeError("you may only call op.report_completed() once")
        if math.isnan(searcher_metric):
            raise RuntimeError("searcher_metric may not be NaN")
        self._completed = True
        logger.info(
            f"SearcherOperation Complete: searcher_metric={det.util.json_encode(searcher_metric)}"
        )
class DummySearcherContext(SearcherContext):
    """Yield a single search op. We need a way for this to be configurable."""

    def __init__(self, dist: core.DistributedContext, length: int = 1) -> None:
        self._dist = dist
        self._length = length  # length of the single dummy operation

    def operations(
        self,
        searcher_mode: SearcherMode = SearcherMode.WorkersAskChief,
        auto_ack: bool = True,
    ) -> Iterator[SearcherOperation]:
        """Yield one dummy operation, mirroring the broadcast pattern of the
        real SearcherContext so chief and workers stay in lockstep."""
        searcher_mode = SearcherMode(searcher_mode)
        # Force the same synchronization behavior in the DummySearcherContext as the real one.
        if self._dist.rank == 0:
            # Chief makes a dummy op.
            op = DummySearcherOperation(self._length, self._dist.rank == 0)
            if searcher_mode == SearcherMode.WorkersAskChief:
                # Broadcast op to workers.
                _ = self._dist.broadcast(op and op.length)
            yield op
            if not op._completed:
                raise RuntimeError("you must call op.report_completed() on each operation")
            if searcher_mode == SearcherMode.WorkersAskChief:
                # Terminating broadcast so workers exit their receive loop.
                _ = self._dist.broadcast(None)
        else:
            if searcher_mode != SearcherMode.WorkersAskChief:
                raise RuntimeError(
                    "you cannot call searcher.operations(searcher_mode=ChiefOnly) "
                    "from a non-chief worker."
                )
            # Worker gets operations from chief.
            while True:
                op_length = self._dist.broadcast(None)
                if op_length is None:
                    break
                yield DummySearcherOperation(op_length, False)

    def acknowledge_out_of_ops(self) -> None:
        # Nothing to acknowledge without a master.
        pass

    def get_configured_units(self) -> Optional[Unit]:
        return Unit.EPOCHS
| 2.40625 | 2 |
entry_point_inspector/ep.py | hroncok/entry_point_inspector | 17 | 12759226 | import logging
import sys
import traceback
from cliff import show
import pkg_resources
LOG = logging.getLogger(__name__)
class EntryPointShow(show.ShowOne):
    """Shows the details for a single entry point.
    """

    def get_parser(self, prog_name):
        """Add the group/name positionals and --distribution option to the
        cliff-provided argument parser."""
        p = super(EntryPointShow, self).get_parser(prog_name)
        p.add_argument(
            'group',
            help='the name of the group to show',
        )
        p.add_argument(
            'name',
            help='the name of the entry point to show',
        )
        p.add_argument(
            '--distribution',
            default=None,
            help='the name of the distribution if name is not unique',
        )
        return p

    def take_action(self, parsed_args):
        """Resolve the requested entry point and return its details.

        When --distribution is given the entry point is looked up in that
        distribution only; otherwise the first match across all installed
        distributions is used. The entry point is then loaded so import
        errors can be surfaced in the 'Error' column instead of crashing.
        """
        if parsed_args.distribution:
            LOG.debug(
                'Loading %s from %s using distribution %s',
                parsed_args.name,
                parsed_args.group,
                parsed_args.distribution,
            )
            dist = pkg_resources.get_distribution(parsed_args.distribution)
            ep = pkg_resources.get_entry_info(
                dist,
                parsed_args.group,
                parsed_args.name,
            )
        else:
            LOG.debug(
                'Looking for %s in group %s',
                parsed_args.name,
                parsed_args.group,
            )
            try:
                # Take the first matching entry point from any distribution.
                ep = next(pkg_resources.iter_entry_points(
                    parsed_args.group,
                    parsed_args.name,
                ))
            except StopIteration:
                raise ValueError('Could not find %r in %r' % (
                    parsed_args.name,
                    parsed_args.group,
                ))
        try:
            # Import the target; a failure here is reported, not fatal.
            ep.load()
        except Exception:
            tb = traceback.format_exception(*sys.exc_info())
        else:
            tb = ''
        # ShowOne expects (column-names, values) for a single record.
        return (
            ('Module', 'Member', 'Distribution', 'Path', 'Error'),
            (ep.module_name,
             '.'.join(ep.attrs),
             str(ep.dist),
             ep.dist.location,
             tb),
        )
| 2.375 | 2 |
src/music creator/converter.py | azizyano/blockchain-music-share-master | 0 | 12759227 | from flask import Flask, request, redirect, render_template, session
from flask.json import jsonify
import os
# Flask application used to receive transaction hashes from the web page.
app = Flask(__name__)
# Convert a transaction hash to base 6
def reVal(num):
    """Return the digit character for value *num* in positional notation:
    0-9 map to '0'-'9'; 10 and above map to 'A', 'B', ...
    """
    if 0 <= num <= 9:
        return chr(ord('0') + num)
    return chr(ord('A') + num - 10)
# Utility function to reverse a string
def strev(str):
    """Return *str* reversed.

    The previous implementation rebound the builtin ``len``, tried to assign
    into an immutable string, and returned nothing; a negative-step slice is
    the correct way to reverse a Python string.  (The parameter name ``str``
    shadows the builtin but is kept for interface compatibility.)
    """
    return str[::-1]
def fromDeci(res, base, inputNum):
    """Append the base-*base* representation of *inputNum* to *res* and return it.

    Remainders are produced least-significant digit first, so the accumulated
    string is reversed before returning.

    Bug fix: the old code used ``int(inputNum / base)``, which routes the
    value through a float; for very large integers (here a 256-bit
    transaction hash) that silently loses precision and yields wrong digits.
    Integer floor division ``//`` is exact for any size of int.
    """
    while inputNum > 0:
        res += reVal(inputNum % base)
        inputNum //= base
    # Reverse the result so the most significant digit comes first.
    res = res[::-1]
    return res
# Driver Code
# Convert the hash of a transaction to base 6 and pair its digits with the
# decimal digits of the sender address.
#
# Bug fixes vs. the original:
#  * Hash/address were int literals that were then sliced (``Hash[1:]``),
#    which raises TypeError on an int -- they are kept as hex strings now.
#  * ``[int(i) for i in str(list)]`` iterated the *builtin* ``list`` instead
#    of the computed digit string.
#  * ``address1[j]`` indexed an int; the address is now a decimal digit string.
Hash = "0x755e1278c22c92c4ea0b5a44b3dd52a8a84ca59531849d0e279c84eb289da8f2"
Hash_in_dec = int(Hash[2:], 16)  # strip the "0x" prefix before parsing
base = 6
res = ""
Hash_in_dec_list = str(fromDeci(res, base, Hash_in_dec))

# address of the transaction sender, converted to a decimal digit string
address_bradg = "0xfff923f5a1016e422ddb5d5b7d3ef8152957d2a5"
address1 = str(int(address_bradg[2:], 16))

# Write notes: each base-6 hash digit selects which line of note.txt gets a
# note, and the note number comes from the corresponding address digit.
# The loop is bounded by both digit strings to avoid an IndexError (the
# base-6 hash has more digits than the decimal address).
Hash_in_dec_list = [int(i) for i in Hash_in_dec_list]
j = 0
while j < len(Hash_in_dec_list) and j < len(address1):
    with open("note.txt", 'r+') as f:
        lines = f.readlines()
        note = ""
        f.seek(0)
        for index, line in enumerate(lines):
            if index == Hash_in_dec_list[j]:
                note += line.strip() + address1[j] + '\n'
            else:
                note += line.strip() + '-\n'
        f.write(note)
    j += 1
# import Hash from javascrypt web page
@app.route('/', methods=['POST'])
def postmethod():
    """Accept a transaction hash POSTed as JSON and acknowledge receipt."""
    posted = request.get_json()
    print(posted)
    return jsonify()
| 3.359375 | 3 |
indy_node/test/upgrade/test_forced_upgrade_if_request_received_after_propagate.py | Rob-S/indy-node | 627 | 12759228 | from indy_node.server.upgrade_log import UpgradeLog
from indy_node.test import waits
from indy_node.test.upgrade.helper import checkUpgradeScheduled, sdk_ensure_upgrade_sent
from plenum.common.constants import VERSION
from plenum.common.messages.node_messages import Propagate
from plenum.common.request import Request
from plenum.test.delayers import req_delay, ppgDelay
from plenum.test.test_node import getNonPrimaryReplicas
def test_forced_upgrade_handled_once_if_request_received_after_propagate(
        looper, nodeSet, sdk_pool_handle, sdk_wallet_trustee,
        validUpgradeExpForceTrue):
    """
    Verifies that POOL_UPGRADE force=true request is handled one time in case
    the node commits the transaction to the ledger but during the 3PC-process
    receives the request directly from the client after a PROPAGATE from some
    other node
    """
    slow_node = getNonPrimaryReplicas(nodeSet, instId=0)[-1].node

    # Delay the direct client request and the PROPAGATEs from two peers so
    # the slow node first learns of the request via the remaining PROPAGATE.
    slow_node.clientIbStasher.delay(req_delay())
    slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Beta'))
    slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Gamma'))

    original_process_propagate = slow_node.nodeMsgRouter.routes[Propagate]
    original_process_request = slow_node.clientMsgRouter.routes[Request]

    def patched_process_propagate(msg: Propagate, frm: str):
        # One-shot patch: after the first PROPAGATE is processed, release the
        # delayed client request and restore the original handler.
        original_process_propagate(msg, frm)
        slow_node.clientIbStasher.reset_delays_and_process_delayeds()
        slow_node.nodeMsgRouter.routes[Propagate] = original_process_propagate

    def patched_process_request(request: Request, frm: str):
        # One-shot patch: once the direct client request arrives, release the
        # delayed PROPAGATEs and restore the original handler.
        original_process_request(request, frm)
        slow_node.nodeIbStasher.reset_delays_and_process_delayeds()
        slow_node.clientMsgRouter.routes[Request] = original_process_request

    slow_node.nodeMsgRouter.routes[Propagate] = patched_process_propagate
    slow_node.clientMsgRouter.routes[Request] = patched_process_request

    init_len = len(list(slow_node.upgrader._actionLog))

    sdk_ensure_upgrade_sent(looper, sdk_pool_handle, sdk_wallet_trustee,
                            validUpgradeExpForceTrue)
    looper.runFor(waits.expectedUpgradeScheduled())

    checkUpgradeScheduled([slow_node], validUpgradeExpForceTrue[VERSION])
    if init_len == 0:
        # first upgrade - should be only one scheduled
        assert len(list(slow_node.upgrader._actionLog)) == 1
    else:
        # an upgrade was already scheduled; it is cancelled and a new one
        # scheduled, so the action log grows by 2 events
        assert len(list(slow_node.upgrader._actionLog)) == init_len + 2
    assert slow_node.upgrader._actionLog.last_event.ev_type == UpgradeLog.Events.scheduled
| 1.804688 | 2 |
src/app/api/routes/accesses.py | pyronear/pyro-api | 8 | 12759229 | # Copyright (C) 2021, Pyronear contributors.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from typing import List
from fastapi import APIRouter, Path, Security
from app.api import crud
from app.db import accesses
from app.api.schemas import AccessRead, AccessType
from app.api.deps import get_current_access
router = APIRouter()
@router.get("/{access_id}/", response_model=AccessRead, summary="Get information about a specific access")
async def get_access(access_id: int = Path(..., gt=0), _=Security(get_current_access, scopes=[AccessType.admin])):
    """
    Based on a access_id, retrieves information about the specified access
    """
    # Admin scope is enforced by the Security dependency above.
    entry = await crud.get_entry(accesses, access_id)
    return entry
@router.get("/", response_model=List[AccessRead], summary="Get the list of all accesses")
async def fetch_accesses(_=Security(get_current_access, scopes=[AccessType.admin])):
    """
    Retrieves the list of all accesses and their information
    """
    # Admin scope is enforced by the Security dependency above.
    all_entries = await crud.fetch_all(accesses)
    return all_entries
| 2.390625 | 2 |
NBS_TCP.py | RETELLIGENCE-IWEN/NBS_2020_TCP | 1 | 12759230 | <filename>NBS_TCP.py
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import sys
import random
import concurrent.futures
import socket
import json
import threading
import time
from util import recvall
HEADER_SIZE = 4
ADDR = ('1172.16.17.32', 50637)
run = True
def __recv(sock, as_str=False):
    """Read one length-prefixed message from *sock*.

    The peer sends a HEADER_SIZE-byte length header (native byte order)
    followed by the payload.  Returns the payload as bytes, or decoded as
    UTF-8 text when *as_str* is true.
    """
    # Fix: the header length was a magic ``4`` that silently duplicated the
    # module-level HEADER_SIZE constant; keep them in sync by using it.
    header = recvall(sock, HEADER_SIZE)
    bytes_to_read = int.from_bytes(header, byteorder=sys.byteorder)
    print('%d bytes to read!' % bytes_to_read)
    data = recvall(sock, bytes_to_read)
    if as_str:
        data = data.decode("utf-8")
    return data
def handle_connection(sock, addr):
    """Per-client loop: read one JSON message, answer with an ActionCommand.

    Runs until the module-level ``run`` flag goes false or the connection
    raises.  *tag* identifies the worker thread + peer address in log output.
    """
    tag = '%s:%s' % (threading.get_ident(), addr)

    while run:
        # [1] client -> server: one length-prefixed JSON message.
        data = __recv(sock, as_str=True)
        print(data)
        data = json.loads(data)
        print('[%s] recv: %s' % (tag, data))

        # [2] server -> client: a hard-coded command for two units.
        ActionCommand = {
            "ActionNumbers": 2,
            "Actions": [
                {
                    "Id": 0,  # Unit Id
                    "Engine": 5,  # Engine Speed (0=R, 1=N, 2, 3, 4, 5)
                    "Direction": 3,  # Steering Direction (1, 2, 3, 4, 5)
                    "Radar": 1,  # Radar On/Off
                    "NavalGun": 1,  # Fire ?
                    "N_FireTargetId": 4,  # Target ID
                    "Missile": 0,  # Fire ?
                    "M_FireTargetId": 3,  # Target ID
                    "Torpedo": 0,  # Fire ?
                    "T_FireTargetId": 3  # Target ID
                },
                {
                    "Id": 4,
                    "Engine": 5,
                    "Direction": 2,
                    "Radar": 1,
                    "NavalGun": 1,
                    "N_FireTargetId": 0,
                    "Missile": 1,
                    "M_FireTargetId": 0,
                    "Torpedo": 0,
                    "T_FireTargetId": 0
                },
            ]
        }
        data = json.dumps(ActionCommand)
        data = data.encode("utf-8")
        print('[%s] send: %d bytes' % (tag, len(data)), data)
        # Length header first, then the JSON payload.
        sock.sendall(int.to_bytes(len(data), HEADER_SIZE, byteorder=sys.byteorder))
        # NOTE(review): the pause between header and payload looks like a
        # pacing workaround for the peer -- confirm it is really needed.
        time.sleep(1)
        sock.sendall(data)
        print("One Time")

    print("Terminating connection")
def main():
    """Listen on ADDR and dispatch each accepted connection to a worker thread."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(ADDR)
    sock.listen(8)
    print('Server is running at %s' % (ADDR,))

    pool = concurrent.futures.ThreadPoolExecutor(max_workers=8)
    try:
        while True:
            print('Waiting for connection..')
            client, addr = sock.accept()
            pool.submit(handle_connection, client, addr)  # future.result()
    finally:
        # [4] Close -- in the old code this cleanup sat *after* the infinite
        # accept loop and was unreachable; run it on the way out instead
        # (e.g. on KeyboardInterrupt or an accept() failure).
        print("Socket Closing")
        sock.close()
        pool.shutdown(wait=False)
if __name__ == "__main__":
main()
| 2.796875 | 3 |
lredit_mainwindow.py | crftwr/lredit | 1 | 12759231 | <filename>lredit_mainwindow.py<gh_stars>1-10
import os
import sys
import time
import gc
import re
import cProfile
import threading
import fnmatch
import difflib
import configparser
import json
import traceback
import ctypes
import msvcrt
import locale
import pyauto
import ckit
from ckit.ckit_const import *
import lredit_mode
import lredit_minormode
import lredit_project
import lredit_bookmark
import lredit_isearch
import lredit_grep
import lredit_tags
import lredit_zenhan
import lredit_statusbar
import lredit_tabbar
import lredit_msgbox
import lredit_listwindow
import lredit_configmenu
import lredit_commandline
import lredit_history
import lredit_misc
import lredit_native
import lredit_resource
import lredit_debug
CommandSequence = ckit.CommandSequence
## @addtogroup mainwindow
## @{
#--------------------------------------------------------------------
class MouseInfo:
    """Bag of attributes describing a mouse event: *mode* plus arbitrary extras."""

    def __init__(self, mode, **args):
        self.mode = mode
        # Every extra keyword becomes an instance attribute.
        for key, value in args.items():
            setattr(self, key, value)
#--------------------------------------------------------------------
## Base class for a pane.
class Pane:
    """Abstract pane; concrete panes return their main widget from widget()."""

    def __init__(self):
        pass

    def widget(self):
        # The base pane owns no widget.
        return None
## Edit pane class.
class EditPane(Pane):
    """Pane holding a tab bar, the visible edit widget and the list of edits."""

    def __init__(self):
        Pane.__init__(self)
        self.tab = None                 # tab bar widget
        self.edit = None                # currently displayed edit widget
        self.footer_paint_hook = None   # optional footer painter
        self.edit_list = []             # all edit widgets owned by this pane

    def widget(self):
        return self.edit
## Log pane class.
class LogPane(Pane):
    """Pane that hosts the log/output edit widget."""

    def __init__(self):
        Pane.__init__(self)
        self.edit = None  # log edit widget

    def widget(self):
        return self.edit
#--------------------------------------------------------------------
class JumpItem:
    """A jump-list destination.

    Subclasses override __call__ to perform the jump and shiftLineNo to keep
    stored line numbers in sync when a file is edited.
    """

    def __init__(self, text):
        self.text = text

    def shiftLineNo(self, filename, left, old_right, new_right):
        pass

    def __call__(self):
        pass
class GrepJumpItem(JumpItem):
    """Jump-list entry produced by a grep hit: a (file, line) plus the search."""

    def __init__(self, main_window, filename, lineno, search_object, text):
        JumpItem.__init__(self, text)
        self.main_window = main_window
        self.filename = filename
        self.lineno = lineno
        self.search_object = search_object

    def shiftLineNo(self, filename, left, old_right, new_right):
        # An edit replaced lines (left, old_right) of *filename* with
        # (left, new_right); keep self.lineno pointing at the same logical
        # line.  Returns True when the stored line number was adjusted.
        if self.filename == filename and self.lineno > left:
            if self.lineno >= old_right:
                # Past the edited range: shift by the size delta.
                self.lineno += new_right - old_right
                return True
            elif self.lineno > new_right:
                # Inside a shrunken range: clamp to its new end.
                self.lineno = new_right
                return True
        return False

    def __call__(self):
        # Open the file at the remembered line, re-run the search there and
        # report the position within the jump list on the status bar.
        self.main_window.activeOpen(filename=self.filename, lineno=self.lineno)
        self.main_window.activeEditPane().edit.search(search_object=self.search_object, direction=1, oneline=True, message=False)
        self.main_window.command.FocusEdit()

        for i, item in enumerate(self.main_window.jump_list):
            if item == self:
                self.main_window.jump_selection = i
                break

        msg = "[%d/%d] %s:%d" % (self.main_window.jump_selection + 1, len(self.main_window.jump_list), self.filename, self.lineno + 1)
        self.main_window.setStatusMessage(msg, 3000)
class CompareJumpItem(JumpItem):
    """Jump-list entry from a file comparison: paired (file, line) locations."""

    def __init__(self, main_window, filename1, lineno1, filename2, lineno2, text):
        JumpItem.__init__(self, text)
        self.main_window = main_window
        self.filename1 = filename1
        self.lineno1 = lineno1
        self.filename2 = filename2
        self.lineno2 = lineno2

    def shiftLineNo(self, filename, left, old_right, new_right):
        # Same shifting rule as GrepJumpItem, applied to whichever of the two
        # stored locations matches *filename*.
        if self.filename1 == filename and self.lineno1 > left:
            if self.lineno1 >= old_right:
                self.lineno1 += new_right - old_right
                return True
            elif self.lineno1 > new_right:
                self.lineno1 = new_right
                return True
        if self.filename2 == filename and self.lineno2 > left:
            if self.lineno2 >= old_right:
                self.lineno2 += new_right - old_right
                return True
            elif self.lineno2 > new_right:
                self.lineno2 = new_right
                return True
        return False

    def __call__(self):
        # Open the two compared files side by side at their remembered lines.
        self.main_window.leftOpen(filename=self.filename1, lineno=self.lineno1)
        self.main_window.rightOpen(filename=self.filename2, lineno=self.lineno2)
        self.main_window.command.FocusEdit()

        for i, item in enumerate(self.main_window.jump_list):
            if item == self:
                self.main_window.jump_selection = i
                break
#--------------------------------------------------------------------
# Bit flags naming the repaintable regions of the main window; paint() takes
# an OR-ed mask of these to limit redraw work.  (Bit 8 is unused here.)
REGION_LEFT_TAB = 1 << 1
REGION_LEFT_EDIT = 1 << 2
REGION_LEFT_FOOTER = 1 << 3
REGION_RIGHT_TAB = 1 << 4
REGION_RIGHT_EDIT = 1 << 5
REGION_RIGHT_FOOTER = 1 << 6
REGION_EDIT_SEPARATOR = 1 << 7
REGION_LOG = 1 << 9
REGION_STATUS_BAR = 1 << 10

# Composite masks: the whole edit area, and everything.
REGION_EDIT = REGION_LEFT_TAB | REGION_LEFT_EDIT | REGION_LEFT_FOOTER | REGION_RIGHT_TAB | REGION_RIGHT_EDIT | REGION_RIGHT_FOOTER | REGION_EDIT_SEPARATOR
REGION_ALL = 0xffffffff
## メインウインドウ
#
# テキストエディタの主な機能を実現しているクラスです。\n\n
# 設定ファイル config.py の configure に渡される window 引数は、MainWindow クラスのオブジェクトです。
#
class MainWindow( ckit.TextWindow ):
FOCUS_EDIT = 0
FOCUS_LOG = 1
FOCUS_EDIT_LEFT = 10
FOCUS_EDIT_RIGHT = 11
    def __init__(self, config_filename, ini_filename, debug=False, profile=False):
        """Build the main editor window.

        Loads persisted state/theme, creates the underlying ckit text window,
        the two edit panes, the log pane, histories and timers, then shows
        the window.
        """
        self.initialized = False

        self.config_filename = config_filename
        self.debug = debug
        self.profile = profile

        self.ini = configparser.RawConfigParser()
        self.ini_filename = ini_filename
        self.loadState()

        self.loadTheme()

        self.title = ""

        x = self.ini.getint("GEOMETRY", "x")
        y = self.ini.getint("GEOMETRY", "y")

        # Decide the font size from the DPI at the window's top-left position.
        dpi_scale = ckit.TextWindow.getDisplayScalingFromPosition(x, y)
        font_size = self.ini.getint("FONT", "size")
        font_size = round(font_size * dpi_scale)

        ckit.TextWindow.__init__(
            self,
            x=x,
            y=y,
            width=self.ini.getint("GEOMETRY", "width"),
            height=self.ini.getint("GEOMETRY", "height"),
            font_name=self.ini.get("FONT", "name"),
            font_size=font_size,
            bg_color=ckit.getColor("bg"),
            cursor0_color=ckit.getColor("cursor0"),
            cursor1_color=ckit.getColor("cursor1"),
            border_size=2,
            title_bar=True,
            title="",
            cursor=True,
            sysmenu=True,
            show=False,
            activate_handler=self._onActivate,
            close_handler=self._onClose,
            move_handler=self._onMove,
            size_handler=self._onSize,
            dpi_handler=self._onDpi,
            keydown_handler=self._onKeyDown,
            keyup_handler=self._onKeyUp,
            char_handler=self._onChar,
            lbuttondown_handler=self._onLeftButtonDownOutside,
            lbuttonup_handler=self._onLeftButtonUpOutside,
            mbuttondown_handler=self._onMiddleButtonDownOutside,
            mbuttonup_handler=self._onMiddleButtonUpOutside,
            rbuttondown_handler=self._onRightButtonDownOutside,
            rbuttonup_handler=self._onRightButtonUpOutside,
            lbuttondoubleclick_handler=self._onLeftButtonDoubleClickOutside,
            mousemove_handler=self._onMouseMoveOutside,
            mousewheel_handler=self._onMouseWheelOutside,
            dropfiles_handler=self._onDropFiles,
            ipc_handler=self._onIpc,
        )

        # Re-check the DPI in case the window was created near a monitor
        # boundary and landed on a display with different scaling.
        dpi_scale2 = self.getDisplayScaling()
        if dpi_scale2 != dpi_scale:
            self.updateFont()

        self.messageloop_list = []
        self.quit_request_list = []

        self.updateColor()

        if self.ini.getint("DEBUG", "detect_block"):
            lredit_debug.enableBlockDetector()
        if self.ini.getint("DEBUG", "print_errorinfo"):
            lredit_debug.enablePrintErrorInfo()

        self.setCursorPos(-1, -1)

        self.focus_top = MainWindow.FOCUS_EDIT
        self.focus_edit = MainWindow.FOCUS_EDIT_LEFT

        self.left_edit_pane_width = self.ini.getint("GEOMETRY", "left_edit_pane_width")
        self.log_pane_height = self.ini.getint("GEOMETRY", "log_pane_height")

        self.command = ckit.CommandMap(self)

        # Status bar / command line plumbing.
        self.status_bar = lredit_statusbar.StatusBar()
        self.status_bar_layer = lredit_statusbar.SimpleStatusBarLayer()
        self.status_bar_resistered = False
        self.status_bar_paint_hook = None
        self.commandline_edit = None
        self.progress_bar = None

        self.project = None

        self.bookmarks = lredit_bookmark.BookmarkTable()
        self.bookmarks.load(self.ini, "BOOKMARK")

        self.edit_list = []

        self.jump_list = []
        self.jump_selection = None

        self.document_next_pivot = None

        self.keymap = ckit.Keymap()
        self.mode_list = []
        self.minor_mode_list = []
        self.fileext_list = []
        self.commandline_list = []

        # Persistent histories (filenames, command lines, search/replace, grep).
        self.filename_history = lredit_history.History(1000)
        self.filename_history.load(self.ini, "FILENAME")

        self.commandline_history = lredit_history.History(1000)
        self.commandline_history.load(self.ini, "COMMANDLINE")

        self.search_history = lredit_history.History(100)
        self.search_history.load(self.ini, "SEARCH")

        self.replace_history = lredit_history.History(100)
        self.replace_history.load(self.ini, "REPLACE")

        self.grep_location_history = lredit_history.History(100)
        self.grep_location_history.load(self.ini, "GREP", "location")

        self.grep_filename_pattern_history = lredit_history.History(100)
        self.grep_filename_pattern_history.load(self.ini, "GREP", "filename_pattern")
        if not self.grep_filename_pattern_history.items:
            self.grep_filename_pattern_history.append("*")

        self.grep_dirname_exclude_pattern_history = lredit_history.History(100)
        self.grep_dirname_exclude_pattern_history.load(self.ini, "GREP", "dirname_exclude_pattern")
        if not self.grep_dirname_exclude_pattern_history.items:
            self.grep_dirname_exclude_pattern_history.append(".svn CVS RCS")

        self.launcher = lredit_commandline.commandline_Launcher(self)

        # Input hooks used by modal operations (command line, sub-thread waits).
        self.keydown_hook = None
        self.char_hook = None
        self.mouse_event_mask = False
        self.mouse_click_info = None

        self.mod_hooks = []
        self.mod = 0

        self.search_object = None

        self.migemo = None

        self.tags_list = []
        self.tags_jump_history = []

        self.task_queue_stack = []
        self.synccall = ckit.SyncCall()

        self.idle_count = 0

        self.user_input_ownership = threading.Lock()

        # Left/right edit panes with their tab bars, plus the log pane.
        self.left_edit_pane = EditPane()
        self.left_edit_pane.tab = lredit_tabbar.TabBarWidget(self, 0, 0, self.leftEditPaneWidth(), 1, self._onTabSelectionChanged)

        self.right_edit_pane = EditPane()
        self.right_edit_pane.tab = lredit_tabbar.TabBarWidget(self, self.leftEditPaneWidth() + self.editSeparatorWidth(), 0, self.rightEditPaneWidth(), 1, self._onTabSelectionChanged)

        self.log_pane = LogPane()
        self.log_pane.edit = ckit.TextWidget(self, 0, 0, 0, 0, message_handler=self.setStatusMessage)
        doc = ckit.Document(filename=None, mode=self.createModeFromName("text"))
        doc.setReadOnly(True)
        doc.setBGColor(None)
        self.log_pane.edit.setDocument(doc)
        self.log_pane.edit.scroll_margin_v = 0
        self.log_pane.edit.scroll_bottom_adjust = True
        self.log_pane.edit.show_lineno = False
        self.log_pane.edit.doc.mode.show_tab = False
        self.log_pane.edit.doc.mode.show_space = False
        self.log_pane.edit.doc.mode.show_wspace = False
        self.log_pane.edit.doc.mode.show_lineend = False
        self.log_pane.edit.doc.mode.show_fileend = False

        self.setTimer(self.onTimerJob, 10)
        self.setTimer(self.onTimerSyncCall, 10)
        self.setTimer(self.onTimerIdle, 10)

        lredit_misc.registerNetConnectionHandler(self._onCheckNetConnection)

        # Theme/wallpaper setup is best-effort: failures are logged/ignored.
        try:
            self.createThemePlane()
        except:
            traceback.print_exc()

        try:
            self.wallpaper = None
            self.updateWallpaper()
        except:
            self.wallpaper = None

        self.updateCursor()

        self.initialized = True

        self.paint()

        self.show(True)
## 破棄する
    def destroy(self):
        """Tear down debug hooks and destroy the underlying text window."""
        lredit_debug.disableBlockDetector()
        ckit.TextWindow.destroy(self)
## メッセージループを処理する
    def messageLoop(self, continue_cond_func=None, name=None):
        """Pump window messages for the loop named *name*.

        Without an explicit continuation predicate, the loop runs until
        *name* shows up in quit_request_list (posted by quit()).
        """
        self.messageloop_list.append(name)
        if not continue_cond_func:
            def defaultLoopCond():
                # Consume a pending quit request for this loop, if any.
                if name in self.quit_request_list:
                    self.quit_request_list.remove(name)
                    return False
                return True
            continue_cond_func = defaultLoopCond
        ckit.TextWindow.messageLoop(self, continue_cond_func)
        self.messageloop_list.remove(name)
## メッセージループを抜ける
    def quit(self, name=None):
        """Request that the running message loop named *name* exit.

        Raises ValueError if no such loop is active.
        """
        if name not in self.messageloop_list:
            raise ValueError

        # When leaving the top-level message loop, cancel every queued task.
        if name == "top":
            self.enable(False)
            for task_queue in self.task_queue_stack:
                task_queue.cancel()

        self.quit_request_list.append(name)
## トップレベルのメッセージループが終了中であるかをチェックする
def isQuitting(self):
return ( "top" in self.quit_request_list )
## ユーザ入力権を獲得する
#
# @param self -
# @param blocking ユーザ入力権を獲得するまでブロックするか
#
# LREdit をマウスやキーボードで操作させる権利を獲得するための関数です。\n\n
#
# バックグラウンド処理の途中や最後でユーザの操作を受け付ける場合には、
# releaseUserInputOwnership と releaseUserInputOwnership を使って、
# 入力権を所有する必要があります。
# さもないと、フォアグラウンドのユーザ操作と衝突してしまい、ユーザが混乱したり、
# LREdit が正しく動作しなくなります。\n\n
#
# @sa releaseUserInputOwnership
#
    def acquireUserInputOwnership(self, blocking=1):
        """Acquire the user-input ownership lock; see releaseUserInputOwnership."""
        return self.user_input_ownership.acquire(blocking)
## ユーザ入力権を解放する
#
# @sa acquireUserInputOwnership
#
    def releaseUserInputOwnership(self):
        """Release the lock taken by acquireUserInputOwnership."""
        self.user_input_ownership.release()
    def onTimerJob(self):
        """Timer callback: recycle drained task queues and pump job queues."""
        # Discard the active task queue once it is empty.
        if len(self.task_queue_stack) > 0:
            if self.task_queue_stack[-1].numItems() == 0:
                self.task_queue_stack[-1].cancel()
                self.task_queue_stack[-1].join()
                self.task_queue_stack[-1].destroy()
                del self.task_queue_stack[-1]

        # Restart the task queue that has just become active again.
        if len(self.task_queue_stack) > 0:
            self.task_queue_stack[-1].restart()

        # Only service job queues while no foreground operation owns input.
        if not self.acquireUserInputOwnership(False):
            return
        try:
            ckit.JobQueue.checkAll()
        finally:
            self.releaseUserInputOwnership()
    def onTimerSyncCall(self):
        """Timer callback: service queued cross-thread synccall requests."""
        self.synccall.check()
    def resetIdleCount(self):
        # Postpone idle-time work (see onTimerIdle) for another 30 timer ticks.
        self.idle_count = 30
    def onTimerIdle(self):
        """Timer callback: once idle long enough, refresh one dirty document's syntax."""
        if self.idle_count > 0:
            self.idle_count -= 1
        else:
            # Update at most one document per tick to keep the UI responsive.
            for edit in self.edit_list:
                if edit.doc.isSyntaxDirty():
                    edit.doc.updateSyntaxTimer()
                    break
## サブスレッドで処理を実行する
#
# @param self -
# @param func サブスレッドで実行する呼び出し可能オブジェクト
# @param arg 引数 func に渡す引数
# @param cancel_func ESCキーが押されたときのキャンセル処理
# @param cancel_func_arg 引数 cancel_func に渡す引数
# @param raise_error 引数 func のなかで例外が発生したときに、それを raise するか
#
# メインスレッドのユーザインタフェイスの更新を止めずに、サブスレッドの中で任意の処理を行うための関数です。\n\n
#
# この関数のなかでは、引数 func をサブスレッドで呼び出しながら、メインスレッドでメッセージループを回します。
# 返値には、引数 func の返値がそのまま返ります。\n\n
#
# ファイルのコピーや画像のデコードなどの、比較的時間のかかる処理は、メインスレッドではなくサブスレッドの中で処理するように心がけるべきです。
# さもないと、メインスレッドがブロックし、ウインドウの再描画などが長時間されないままになるといった弊害が発生します。
#
    def subThreadCall(self, func, arg, cancel_func=None, cancel_func_arg=(), raise_error=False):
        """Run func(*arg) on a sub thread while pumping the UI message loop.

        While the thread runs, keyboard input is masked except ESC, which
        invokes cancel_func(*cancel_func_arg) when given.  Returns the
        callable's result; an exception raised by it is re-raised when
        *raise_error* is true, otherwise printed.
        """
        class SubThread(threading.Thread):

            def __init__(subthread_self):
                threading.Thread.__init__(subthread_self)
                subthread_self.result = None
                subthread_self.error = None

            def run(subthread_self):
                lredit_native.setBlockDetector()
                try:
                    subthread_self.result = func(*arg)
                except Exception as e:
                    lredit_debug.printErrorInfo()
                    subthread_self.error = e

        def onKeyDown(vk, mod):
            # Only ESC is honored while the sub thread runs.
            if vk == VK_ESCAPE:
                if cancel_func:
                    cancel_func(*cancel_func_arg)
                return True

        def onChar(ch, mod):
            return True

        # Save the current hooks so they can be restored afterwards.
        keydown_hook_old = self.keydown_hook
        char_hook_old = self.char_hook
        mouse_event_mask_old = self.mouse_event_mask

        sub_thread = SubThread()
        sub_thread.start()

        self.keydown_hook = onKeyDown
        self.char_hook = onChar
        self.mouse_event_mask = True
        self.mouse_click_info = None

        self.removeKeyMessage()
        self.messageLoop(sub_thread.isAlive)

        sub_thread.join()
        result = sub_thread.result
        error = sub_thread.error
        del sub_thread

        self.keydown_hook = keydown_hook_old
        self.char_hook = char_hook_old
        self.mouse_event_mask = mouse_event_mask_old

        if error:
            if raise_error:
                raise error
            else:
                print(error)

        return result
## コンソールプログラムをサブプロセスとして実行する
#
# @param self -
# @param cmd コマンドと引数のシーケンス
# @param cwd サブプロセスのカレントディレクトリ
# @param env サブプロセスの環境変数
# @param enable_cancel True:ESCキーでキャンセルする False:ESCキーでキャンセルしない
#
# 任意のコンソールプログラムを、ファイラのサブプロセスとして実行し、そのプログラムの出力を、ログペインにリダイレクトします。\n\n
#
# 引数 cmd には、サブプロセスとして実行するプログラムと引数をリスト形式で渡します。\n
# 例: [ "subst", "R:", "//remote-machine/public/" ]
#
def subProcessCall( self, cmd, cwd=None, env=None, enable_cancel=False ):
p = ckit.SubProcess(cmd,cwd,env)
if enable_cancel:
cancel_handler = p.cancel
else:
cancel_handler = None
return self.subThreadCall( p, (), cancel_handler )
## バックグラウンドタスクのキューに、タスクを投入する
#
# @param self -
# @param job_item バックグラウンドタスクとして実行する JobItem オブジェクト
# @param comment ユーザに説明する際のタスクの名前
# @param create_new_queue 新しいタスクキューを作成し、優先的に処理するか。( True:作成する False:作成しない None:問い合わせる )
#
# LREdit はバックグランド処理をサポートしており、ファイルのコピーや検索などの時間のかかる処理をバックグラウンドで実行しながら、
# ほかのディレクトリを閲覧したり、次に実行するバックグランド処理を予約したりすることが出来ます。\n\n
#
# バックグランド処理は、複数予約することが出来ますが、同時に実行することが出来るのは1つだけで、キュー(待ち行列)に投入されて、
# 順番に処理されます。
#
    def taskEnqueue(self, job_item, comment="", create_new_queue=None):
        """Queue *job_item* as a background task.

        create_new_queue: True pushes a new, higher-priority task queue;
        False appends to the current one; None asks the user whenever a
        queue already exists.  *comment* names the task in status messages.
        """
        if len(self.task_queue_stack) > 0:
            if create_new_queue == None:
                result = lredit_msgbox.popMessageBox(
                    self,
                    lredit_msgbox.MSGBOX_TYPE_YESNO,
                    ckit.strings["msgbox_title_insert_task"],
                    ckit.strings["msgbox_ask_insert_task"])
                if result == lredit_msgbox.MSGBOX_RESULT_YES:
                    create_new_queue = True
                elif result == lredit_msgbox.MSGBOX_RESULT_NO:
                    create_new_queue = False
                else:
                    # Dialog dismissed: drop the task entirely.
                    return
        else:
            # No queue exists yet, so one must be created.
            create_new_queue = True

        if create_new_queue:
            new_task_queue = ckit.JobQueue()

            # First enqueue a job that pauses the previous task queue.
            if len(self.task_queue_stack) > 0:
                prev_task_queue = self.task_queue_stack[-1]
                def jobPause(job_item):
                    prev_task_queue.pause()
                pause_job_item = ckit.JobItem(jobPause, None)
                new_task_queue.enqueue(pause_job_item)

            self.task_queue_stack.append(new_task_queue)
        else:
            if comment and self.task_queue_stack[-1].numItems() > 0:
                self.setStatusMessage(ckit.strings["statusbar_task_reserved"] % comment, 3000)

        self.task_queue_stack[-1].enqueue(job_item)
## コマンドラインで文字列を入力する
#
# @param self -
# @param title コマンド入力欄の左側に表示されるタイトル文字列
# @param text コマンド入力欄の初期文字列
# @param selection コマンド入力欄の初期選択範囲
# @param auto_complete 自動補完を有効にするか
# @param autofix_list 入力確定をする文字のリスト
# @param return_modkey 入力欄が閉じたときに押されていたモディファイアキーを取得するか
# @param update_handler コマンド入力欄の変更があったときに通知を受けるためのハンドラ
# @param candidate_handler 補完候補を列挙するためのハンドラ
# @param candidate_remove_handler 補完候補を削除するためのハンドラ
# @param status_handler コマンド入力欄の右側に表示されるステータス文字列を返すためのハンドラ
# @param enter_handler コマンド入力欄でEnterキーが押されたときのハンドラ
# @param keydown_handler コマンド入力欄でのキー入力を処理するためのハンドラ
# @return 入力された文字列
#
# LREdit のメインウインドウの下端のステータスバーの領域をつかって、任意の文字列の入力を受け付けるための関数です。\n\n
#
    def commandLine(self, title, text="", selection=None, auto_complete=False, autofix_list=None, return_modkey=False, update_handler=None, candidate_handler=None, candidate_remove_handler=None, status_handler=None, enter_handler=None, keydown_handler=None):
        """Prompt for a line of text in the status-bar area.

        Runs a nested "commandline" message loop with input hooks installed;
        returns the entered text (or None on cancel), plus the last modifier
        state when *return_modkey* is true.
        """
        title = " " + title + " "
        title_width = self.getStringWidth(title)
        # One-element lists so the nested CommandLine methods can mutate them.
        status_string = [""]
        result = [None]
        result_mod = [0]

        class CommandLine:

            def __init__(commandline_self):
                commandline_self.planned_command_list = []

            def _onKeyDown(commandline_self, vk, mod):
                result_mod[0] = mod

                # Give the caller's key handler the first look.
                if keydown_handler:
                    if keydown_handler(vk, mod):
                        if status_handler:
                            text = self.commandline_edit.getText()
                            selection = self.commandline_edit.getSelection()
                            update_info = ckit.EditWidget.UpdateInfo(text, selection)
                            status_string[0] = status_handler(update_info)
                            self.paint(REGION_STATUS_BAR)
                        return True

                if self.commandline_edit.onKeyDown(vk, mod):
                    return True

                if vk == VK_RETURN:
                    text = self.commandline_edit.getText()
                    if enter_handler:
                        commandline_self.closeList()
                        # A truthy enter_handler result consumes the Enter key.
                        if enter_handler(commandline_self, text, mod):
                            return True
                    result[0] = text
                    commandline_self.quit()
                elif vk == VK_ESCAPE:
                    # First ESC clears the text, second one cancels.
                    if self.commandline_edit.getText():
                        self.commandline_edit.clear()
                    else:
                        commandline_self.quit()
                return True

            def _onChar(commandline_self, ch, mod):
                result_mod[0] = mod
                self.commandline_edit.onChar(ch, mod)
                return True

            def _onUpdate(commandline_self, update_info):
                if update_handler:
                    if not update_handler(update_info):
                        return False
                if status_handler:
                    status_string[0] = status_handler(update_info)
                    self.paint(REGION_STATUS_BAR)

            def _onPaint(commandline_self, x, y, width, height):
                status_string_for_paint = " " + status_string[0] + " "
                status_width = self.getStringWidth(status_string_for_paint)

                attr = ckit.Attribute(fg=ckit.getColor("bar_fg"))
                self.putString(x, y, title_width, height, attr, title)
                self.putString(x + width - status_width, y, status_width, height, attr, status_string_for_paint)

                # Position the themed planes under the command line, if any.
                if self.theme_enabled:
                    client_rect = self.getClientRect()
                    offset_x, offset_y = self.charToClient(0, 0)
                    char_w, char_h = self.getCharSize()
                    frame_width = 2
                    self.plane_statusbar.setPosSize(0, (self.height() - 1) * char_h + offset_y - frame_width, client_rect[2], client_rect[3] - ((self.height() - 1) * char_h + offset_y - frame_width))
                    self.plane_commandline.setPosSize(title_width * char_w + offset_x, (self.height() - 1) * char_h + offset_y - frame_width, client_rect[2] - ((title_width + status_width) * char_w + offset_x), char_h + frame_width * 2)

                self.commandline_edit.setPosSize(x + title_width, y, width - title_width - status_width, height)
                self.commandline_edit.paint()

            def getText(commandline_self):
                return self.commandline_edit.getText()

            def setText(commandline_self, text):
                self.commandline_edit.setText(text)

            def getSelection(commandline_self):
                return self.commandline_edit.getSelection()

            def setSelection(commandline_self, selection):
                self.commandline_edit.setSelection(selection)

            def selectAll(commandline_self):
                self.commandline_edit.selectAll()

            def closeList(commandline_self):
                self.commandline_edit.closeList()

            def planCommand(commandline_self, command, info, history):
                # Commands planned here run after the command line has closed.
                commandline_self.planned_command_list.append((command, info, history))

            def appendHistory(commandline_self, newentry):
                self.commandline_history.append(newentry)

            def quit(commandline_self):
                self.quit(name="commandline")

        # Save the hooks/widgets that will be temporarily replaced.
        commandline_edit_old = self.commandline_edit
        keydown_hook_old = self.keydown_hook
        char_hook_old = self.char_hook
        mouse_event_mask_old = self.mouse_event_mask
        status_bar_paint_hook_old = self.status_bar_paint_hook

        commandline = CommandLine()

        self.commandline_edit = ckit.EditWidget(self, title_width, self.height() - 1, self.width() - title_width, 1, text, selection, auto_complete=auto_complete, no_bg=True, autofix_list=autofix_list, update_handler=commandline._onUpdate, candidate_handler=candidate_handler, candidate_remove_handler=candidate_remove_handler)
        self.commandline_edit.vk_complete = []
        self.keydown_hook = commandline._onKeyDown
        self.char_hook = commandline._onChar
        self.mouse_event_mask = True
        self.mouse_click_info = None
        self.status_bar_paint_hook = commandline._onPaint

        if status_handler:
            status_string[0] = status_handler(ckit.EditWidget.UpdateInfo(text, selection))

        if self.theme_enabled:
            self.plane_commandline.show(True)

        self.updateCursor()
        self.paint()

        self.removeKeyMessage()
        self.messageLoop(name="commandline")

        # Restore everything that was replaced above.
        self.commandline_edit.destroy()
        self.commandline_edit = commandline_edit_old
        self.keydown_hook = keydown_hook_old
        self.char_hook = char_hook_old
        self.mouse_event_mask = mouse_event_mask_old
        self.status_bar_paint_hook = status_bar_paint_hook_old

        if self.theme_enabled:
            self.plane_commandline.show(False)

        self.setCursorPos(-1, -1)
        self.updateCursor()
        self.updatePaneRect()
        self.paint()

        # Run commands planned while the command line was open.
        for command, info, history in commandline.planned_command_list:
            try:
                command(info)
                if history:
                    self.commandline_history.append(history)
            except Exception as e:
                print(e)
                lredit_debug.printErrorInfo()

        if return_modkey:
            return result[0], result_mod[0]
        else:
            return result[0]
## コマンドラインでファイル名を入力する
#
# @param self -
# @param title コマンド入力欄の左側に表示されるタイトル文字列
# @param filename コマンド入力欄の初期文字列
# @param ensure_exists ファイルが実在することを保証するか
# @param return_modkey 入力欄が閉じたときに押されていたモディファイアキーを取得するか
# @return 入力された文字列
#
    def inputFilename(self, title, filename, ensure_exists=False, return_modkey=False):
        """Prompt for a filename on the command line.

        Returns the (history-appended, base-joined) filename, or None on
        cancel; additionally the modifier state when *return_modkey* is true.
        """
        def check(filename):
            # A name is acceptable when it is an existing file, or -- unless
            # ensure_exists -- a non-existing path with a basename.
            if ensure_exists:
                return (os.path.exists(filename) and os.path.isfile(filename))
            else:
                if os.path.exists(filename):
                    return os.path.isfile(filename)
                else:
                    return bool(os.path.basename(filename))

        def statusString(update_info):
            if check(update_info.text):
                return "OK"
            else:
                return "  "

        def onEnter(commandline, text, mod):
            # Swallow Enter while the name is not acceptable.
            if not check(text):
                return True
            return False

        # Default to the directory of the most recently used file, then cwd.
        if not filename:
            if len(self.filename_history.items) > 0:
                filename = self.filename_history.items[0]
                filename = os.path.dirname(filename)
                filename = ckit.joinPath(filename, "")
        if not filename:
            filename = os.getcwd()
            filename = ckit.joinPath(filename, "")

        # Pre-select only the basename part of the initial path.
        basename = os.path.basename(filename)
        selection = [len(filename) - len(basename), len(filename)]

        base = "."

        filename, mod = self.commandLine(title, filename, selection, auto_complete=False, autofix_list=["\\/", "."], return_modkey=True, candidate_handler=lredit_commandline.candidate_Filename(base, self.filename_history.items), status_handler=statusString, enter_handler=onEnter)
        if filename == None:
            return None

        self.filename_history.append(filename)

        filename = ckit.joinPath(base, filename)

        if return_modkey:
            return filename, mod
        else:
            return filename
## コマンドラインでディレクトリ名を入力する
#
# @param self -
# @param title コマンド入力欄の左側に表示されるタイトル文字列
# @param dirname コマンド入力欄の初期文字列
# @param recursive 再帰の有無を入力するかどうか ( None:無効, True/False:有効 )
# @param history 補完入力のためのHistoryオブジェクト
# @param return_modkey 入力欄が閉じたときに押されていたモディファイアキーを取得するか
# @return 入力された文字列
#
    def inputDirname(self, title, dirname, recursive=None, history=None, return_modkey=False):
        """Prompt for a directory name on the command line.

        When *recursive* is True/False, Ctrl-R toggles a recursion flag shown
        in the status area; None disables the toggle.  Returns
        (dirname, recursive[, mod]); dirname is None on cancel.
        """
        # Boxed in a list so the nested handlers can mutate it.
        recursive = [recursive]

        if recursive[0] == None:
            onKeyDown = None

            def statusString(update_info):
                if os.path.isdir(update_info.text):
                    return "OK"
                else:
                    return "  "
        else:
            def onKeyDown(vk, mod):
                # Ctrl-R flips the recursion flag.
                if mod == MODKEY_CTRL:
                    if vk == VK_R:
                        recursive[0] = not recursive[0]
                        return True

            def statusString(update_info):
                s = ""
                if recursive[0]:
                    s += "[Recursive] "
                else:
                    s += "[---------] "
                if os.path.isdir(update_info.text):
                    s += "OK"
                else:
                    s += "  "
                return s

        # Default to the directory of the most recently used file, then cwd.
        if not dirname:
            if len(self.filename_history.items) > 0:
                dirname = os.path.dirname(self.filename_history.items[0])
                dirname = ckit.joinPath(dirname, "")
        if not dirname:
            dirname = os.getcwd()
            dirname = ckit.joinPath(dirname, "")

        # Place the caret at the end with nothing selected.
        selection = [len(dirname), len(dirname)]

        base = "."

        history_items = None
        history_remove_func = None
        if history:
            history_items = history.items
            history_remove_func = history.candidateRemoveHandler

        dirname, mod = self.commandLine(title, dirname, selection, auto_complete=False, autofix_list=["\\/", "."], return_modkey=True, candidate_handler=lredit_commandline.candidate_Filename(base, history_items), candidate_remove_handler=history_remove_func, status_handler=statusString, keydown_handler=onKeyDown)
        if dirname == None:
            if return_modkey:
                return None, recursive[0], 0
            else:
                return None, recursive[0]

        dirname = ckit.joinPath(base, dirname)

        if history:
            history.append(dirname)

        if return_modkey:
            return dirname, recursive[0], mod
        else:
            return dirname, recursive[0]
## コマンドラインでドキュメント名を入力する
#
# @param self -
# @param title コマンド入力欄の左側に表示されるタイトル文字列
# @param default_docname コマンド入力欄の初期文字列
# @param return_modkey 入力欄が閉じたときに押されていたモディファイアキーを取得するか
# @return 入力された文字列
#
def inputDocument( self, title, default_docname, return_modkey=False ):
def statusString( update_info ):
for edit in self.edit_list:
doc = edit.doc
if update_info.text==doc.getName():
return "OK"
else:
return " "
def candidate_DocumentName( update_info ):
candidates = []
for edit in self.edit_list:
doc = edit.doc
if doc.getName().lower().startswith( update_info.text.lower() ):
candidates.append( doc.getName() )
return candidates, 0
if default_docname:
text = default_docname
selection = [ 0, len(text) ]
else:
text = ""
selection = [0,0]
docname, mod = self.commandLine( title, text, selection, auto_complete=False, autofix_list=[], return_modkey=True, candidate_handler=candidate_DocumentName, status_handler=statusString )
if return_modkey:
return docname, mod
else:
return docname
## コマンドラインで検索条件を入力する
#
# @param self -
# @param title コマンド入力欄の左側に表示されるタイトル文字列
# @param return_modkey 入力欄が閉じたときに押されていたモディファイアキーを取得するか
# @param keydown_handler キー入力時ハンドラ
# @param update_handler 文字列変更時ハンドラ
# @return 入力された文字列
#
    def inputSearch( self, title, return_modkey=False, keydown_handler=None, update_handler=None ):
        """Prompt for a search string on the command line.

        title: label shown to the left of the input field.
        return_modkey: if True, also return the modifier keys held on close.
        keydown_handler: optional hook tried before the built-in key handling.
        update_handler: optional callback (text, word, case, regex) invoked on
                        every text change and before each flag toggle.
        Ctrl-W / Ctrl-E / Ctrl-R toggle the word / case / regex flags, which
        are persisted in the [SEARCH] section of the ini file.
        Returns the entered string (None when cancelled), optionally with mod.
        """
        # Wrapped in a list so the nested handlers can track the current text.
        text = [""]
        def callUpdateHandler( word=None, case=None, regex=None ):
            if update_handler:
                # Unspecified flags are filled in from the persisted settings.
                if word==None : word = bool(self.ini.getint( "SEARCH", "word" ))
                if case==None : case = bool(self.ini.getint( "SEARCH", "case" ))
                if regex==None : regex = bool(self.ini.getint( "SEARCH", "regex" ))
                return update_handler( text[0], word, case, regex )
            return True
        def onKeyDown( vk, mod ):
            if keydown_handler:
                if keydown_handler(vk,mod):
                    return True
            if mod==MODKEY_CTRL:
                # A toggle is committed only when the update handler accepts it.
                if vk==VK_W:
                    word = self.ini.getint( "SEARCH", "word" )
                    if callUpdateHandler( word = not word ):
                        word = not word
                        self.ini.set( "SEARCH", "word", str(int(word)) )
                    return True
                if vk==VK_E:
                    case = self.ini.getint( "SEARCH", "case" )
                    if callUpdateHandler( case = not case ):
                        case = not case
                        self.ini.set( "SEARCH", "case", str(int(case)) )
                    return True
                if vk==VK_R:
                    regex = self.ini.getint( "SEARCH", "regex" )
                    if callUpdateHandler( regex = not regex ):
                        regex = not regex
                        self.ini.set( "SEARCH", "regex", str(int(regex)) )
                    return True
        def onUpdate( update_info ):
            text[0] = update_info.text
            return callUpdateHandler()
        def statusString( update_info ):
            # Status line mirrors the three persisted search flags.
            s = ""
            if self.ini.getint( "SEARCH", "word" ):
                s += "[Word] "
            else:
                s += "[----] "
            if self.ini.getint( "SEARCH", "case" ):
                s += "[Case] "
            else:
                s += "[----] "
            if self.ini.getint( "SEARCH", "regex" ):
                s += "[Regex]"
            else:
                s += "[-----]"
            s = s.rstrip()
            return s
        # Start with the most recent search string, fully preselected.
        if self.search_history.items:
            text[0] = self.search_history.items[0]
        selection = [ 0, len(text[0]) ]
        s, mod = self.commandLine( title, text[0], selection, auto_complete=False, autofix_list=[], return_modkey=True, candidate_handler=self.search_history.candidateHandler, candidate_remove_handler=self.search_history.candidateRemoveHandler, status_handler=statusString, keydown_handler=onKeyDown, update_handler=onUpdate )
        if s==None:
            if return_modkey:
                return None, mod
            else:
                return None
        self.search_history.append(s)
        if return_modkey:
            return s, mod
        else:
            return s
## コマンドラインで文字列を入力する
#
# @param self -
# @param title コマンド入力欄の左側に表示されるタイトル文字列
# @param default_string コマンド入力欄の初期文字列
# @param string_list 補完候補文字列のリスト
# @param return_modkey 入力欄が閉じたときに押されていたモディファイアキーを取得するか
# @return 入力された文字列
#
def inputString( self, title, default_string, string_list=[], return_modkey=False ):
def statusString( update_info ):
for s in string_list:
if update_info.text==s:
return "OK"
else:
return " "
def candidate_String( update_info ):
candidates = []
for s in string_list:
if s.lower().startswith( update_info.text.lower() ):
candidates.append( s )
return candidates, 0
text = default_string
selection = [ 0, len(text) ]
s, mod = self.commandLine( title, text, selection, auto_complete=False, autofix_list=[], return_modkey=True, candidate_handler=candidate_String, status_handler=statusString )
if return_modkey:
return s, mod
else:
return s
## コマンドラインで数値を入力する
#
# @param self -
# @param title コマンド入力欄の左側に表示されるタイトル文字列
# @param default_string コマンド入力欄の初期文字列
# @param return_modkey 入力欄が閉じたときに押されていたモディファイアキーを取得するか
# @return 入力された文字列
#
def inputNumber( self, title, default_string="", return_modkey=False ):
def check(text):
try:
n = int(text)
except ValueError:
return False
return True
def statusString( update_info ):
if check(update_info.text):
return "OK"
else:
return " "
def onEnter( commandline, text, mod ):
if not check(text):
return True
return False
selection = [ 0, len(default_string) ]
number, mod = self.commandLine( title, default_string, selection, auto_complete=False, return_modkey=True, status_handler=statusString, enter_handler=onEnter )
if number==None : return None
if return_modkey:
return number, mod
else:
return number
## コマンドラインでオプション設定を入力する
#
# @param self -
# @param title コマンド入力欄の左側に表示されるタイトル文字列
# @param default_options コマンド入力欄の初期文字列
# @param option_list 入力可能なオプションのリスト
# @param return_modkey 入力欄が閉じたときに押されていたモディファイアキーを取得するか
# @return 入力された文字列
#
    def inputOptions( self, title, default_options, option_list=[], return_modkey=False ):
        """Prompt for a comma-separated list of options on the command line.

        title: label shown to the left of the input field.
        default_options: initial text of the input field (preselected).
        option_list: accepted option names (matched case-insensitively).
        return_modkey: if True, also return the modifier keys held on close.
        Returns the entered string (None when cancelled or when it contains an
        unknown option), optionally with mod.
        """
        # Lower-cased set for case-insensitive validation.
        option_lower_set = set()
        for option in option_list:
            option_lower_set.add(option.lower())
        def splitOptions(s):
            options = []
            for option in s.split(','):
                options.append(option.strip())
            return options
        def checkOptions(s):
            for option in splitOptions(s):
                if option.lower() not in option_lower_set:
                    return False
            return True
        def statusOptions( update_info ):
            if checkOptions(update_info.text):
                return "OK"
            else:
                return "  "
        def candidate_Options( update_info ):
            # Complete the token under the cursor: everything after the last
            # comma left of the selection start, with leading spaces skipped.
            left = update_info.text[ : update_info.selection[0] ]
            pos_hint = left.rfind(",")+1
            hint = left[pos_hint:]
            pos_hint += len(hint) - len(hint.lstrip())
            hint = left[pos_hint:].lower()
            # Options already present in the text are excluded from candidates.
            used_options_list = splitOptions(left.lower())
            used_options_set = set( used_options_list )
            candidate_list = []
            for option in option_list:
                option_lower = option.lower()
                if option_lower.startswith(hint):
                    if option_lower not in used_options_set:
                        candidate_list.append(option)
            return candidate_list, pos_hint
        text = default_options
        selection = [ 0, len(text) ]
        s, mod = self.commandLine( title, text, selection, auto_complete=True, autofix_list=[","], return_modkey=True, candidate_handler=candidate_Options, status_handler=statusOptions )
        if s and not checkOptions(s):
            print( ckit.strings["error_unknown_parameter"] % s )
            s = None
        if return_modkey:
            return s, mod
        else:
            return s
## リストウインドウでドキュメントを選択する
#
# @param self -
# @param title リストウインドウのタイトル文字列
# @param filter_func 表示するドキュメントをフィルタする関数
# @return 入力された文字列
#
    def listDocument( self, title, filter_func=None ):
        """Let the user pick a document from a list window.

        title: title string of the list window.
        filter_func: optional predicate on an edit; edits failing it are hidden.
        SPACE toggles between document names and full paths (the list reopens
        with the selection preserved).
        Returns the selected edit, or None when cancelled or the list is empty.
        """
        loop = [False]
        fullpath_mode = [False]
        select = 0
        def onKeyDown( vk, mod ):
            # SPACE: flip name/fullpath display and rebuild the list window.
            if vk==VK_SPACE and mod==0:
                fullpath_mode[0] = not fullpath_mode[0]
                loop[0] = True
                list_window.quit()
                return True
        def onStatusMessage( width, select ):
            return ""
        while True:
            loop[0] = False
            items = []
            for edit in self.edit_list:
                if filter_func and not filter_func(edit):
                    continue
                if fullpath_mode[0]:
                    s = edit.doc.getFullpath()
                    if not s:
                        s = edit.doc.getName()
                else:
                    s = edit.doc.getName()
                # Mark modified documents with a trailing asterisk.
                if edit.doc.isModified():
                    s += " *"
                items.append( ( s, edit ) )
            pos = self.centerOfWindowInPixel()
            list_window = lredit_listwindow.ListWindow( pos[0], pos[1], 20, 2, self.width()-5, self.height()-3, self, self.ini, True, title, items, initial_select=select, onekey_search=False, keydown_hook=onKeyDown, statusbar_handler=onStatusMessage )
            self.enable(False)
            list_window.messageLoop()
            # Delayed destroy to avoid flicker.
            class DelayedCall:
                def __call__(self):
                    self.list_window.destroy()
            delay = DelayedCall()
            delay.list_window = list_window
            self.delayedCall( delay, 10 )
            if not loop[0]:
                break
            # Keep the cursor position when reopening in the other display mode.
            select = list_window.getResult()
        result = list_window.getResult()
        self.enable(True)
        self.activate()
        if not items or result<0:
            return None
        edit = items[result][1]
        return edit
    def _onTimerActivate(self):
        """One-shot timer set on window activation: check the project file and
        all open files for external modification, then remove the timer.

        When input ownership cannot be acquired the early return leaves the
        timer installed, so the check is retried on the next tick.
        """
        if not self.acquireUserInputOwnership(False) : return
        try:
            self.checkProjectFileModified()
            self.checkFileModifiedAll()
        finally:
            self.releaseUserInputOwnership()
        self.killTimer( self._onTimerActivate )
    def _onActivate( self, active ):
        """Window activation handler.

        On activation, repaint and schedule the modified-file check via a short
        timer (so it runs outside this callback); on deactivation, cancel any
        mouse drag in progress.
        """
        if self.initialized:
            self.paint()
        if active:
            self.killTimer( self._onTimerActivate )
            self.setTimer( self._onTimerActivate, 10 )
        else:
            self._cancelMouse()
    def _onClose( self ):
        """Window close request: delegate to the Quit command."""
        self.command.Quit()
    def _onMove( self, x, y ):
        """Window move handler: let the command line and all open edits update
        their window-relative state."""
        if not self.initialized : return
        if self.commandline_edit:
            self.commandline_edit.onWindowMove()
        for edit in self.edit_list:
            edit.onWindowMove()
def _onSize( self, width, height ):
if not self.initialized : return
w = width
if self.left_edit_pane_width>w-1 : self.left_edit_pane_width=w-1
w -= self.left_edit_pane_width
if self.log_pane_height>height-4 : self.log_pane_height=height-4
if self.log_pane_height<0 : self.log_pane_height=0
self.updatePaneRect()
for edit in self.edit_list:
edit.onWindowMove()
if self.wallpaper:
self.wallpaper.adjust()
self.paint()
    def updateFont(self):
        """Apply the configured font, scaled by the current display scaling,
        and reposition the window keeping its size in character cells."""
        scale = self.getDisplayScaling()
        font_name = self.ini.get("FONT","name")
        font_size = self.ini.getint( "FONT", "size" )
        font_size = round( font_size * scale )
        self.setFont( font_name, font_size )
        # Re-apply the same width/height in cells, anchored at the window's
        # horizontal center and top edge so it does not drift on font change.
        original_width = self.width()
        original_height = self.height()
        window_rect = self.getWindowRect()
        self.setPosSize( (window_rect[0] + window_rect[2]) // 2, window_rect[1], original_width, original_height, ORIGIN_X_CENTER | ORIGIN_Y_TOP )
    def _onDpi( self, scale ):
        """DPI change handler: rescale the font (updateFont re-reads the
        current display scaling itself, so `scale` is unused here)."""
        self.updateFont()
    def _onKeyDown( self, vk, mod ):
        """Key-press handler: update modifier state and hooks, then dispatch
        first to the active widget, then to the main-window keymap."""
        #print( "_onKeyDown", vk, mod )
        self.resetIdleCount()
        pane = self.activePane()
        # Notify modifier hooks whenever the modifier state changes.
        if self.mod!=mod:
            for hook in self.mod_hooks:
                hook( mod, self.mod )
            self.mod=mod
        if self.keydown_hook:
            if self.keydown_hook( vk, mod ):
                return True
        if not self.acquireUserInputOwnership(False) : return
        try:
            # Key handling by the active TextEditWidget.
            if pane.widget():
                result = [None]
                if self.profile:
                    cProfile.runctx( "result[0] = pane.widget().onKeyDown( vk, mod )", globals(), locals() )
                else:
                    result[0] = pane.widget().onKeyDown( vk, mod )
                if result[0]:
                    return result[0]
            # Main-window key handling via the keymap table.
            try:
                func = self.keymap.table[ ckit.KeyEvent(vk,mod) ]
                if self.profile:
                    cProfile.runctx( "func( ckit.CommandInfo() )", globals(), locals() )
                else:
                    func( ckit.CommandInfo() )
                return True
            except KeyError:
                # No binding for this key event.
                pass
        finally:
            self.releaseUserInputOwnership()
def _onKeyUp( self, vk, mod ):
#print( "_onKeyUp", vk, mod )
if self.mod!=mod:
for hook in self.mod_hooks:
hook( mod, self.mod )
self.mod=mod
    def _onChar( self, ch, mod ):
        """Character input handler: offer the character to the char hook first,
        then to the active widget, under user-input ownership."""
        #print( "_onChar", ch, mod )
        self.resetIdleCount()
        pane = self.activePane()
        if self.char_hook:
            if self.char_hook( ch, mod ):
                return
        if not self.acquireUserInputOwnership(False) : return
        try:
            # Character input handling by the active TextEditWidget.
            if pane.widget():
                result = [None]
                if self.profile:
                    cProfile.runctx( "result[0] = pane.widget().onChar( ch, mod )", globals(), locals() )
                else:
                    result[0] = pane.widget().onChar( ch, mod )
                if result[0]:
                    return result[0]
        finally:
            self.releaseUserInputOwnership()
    def _onLeftButtonDownOutside( self, x, y, mod ):
        """Left-button-down arriving outside the client area: forward to the
        normal handler under user-input ownership."""
        if not self.acquireUserInputOwnership(False) : return
        try:
            self._onLeftButtonDown(x, y, mod)
        finally:
            self.releaseUserInputOwnership()
    def _onLeftButtonUpOutside( self, x, y, mod ):
        """Left-button-up arriving outside the client area: forward to the
        normal handler under user-input ownership."""
        if not self.acquireUserInputOwnership(False) : return
        try:
            self._onLeftButtonUp(x, y, mod)
        finally:
            self.releaseUserInputOwnership()
    def _onMiddleButtonDownOutside( self, x, y, mod ):
        """Middle-button-down arriving outside the client area: forward to the
        normal handler under user-input ownership."""
        if not self.acquireUserInputOwnership(False) : return
        try:
            self._onMiddleButtonDown(x, y, mod)
        finally:
            self.releaseUserInputOwnership()
    def _onMiddleButtonUpOutside( self, x, y, mod ):
        """Middle-button-up arriving outside the client area: forward to the
        normal handler under user-input ownership."""
        if not self.acquireUserInputOwnership(False) : return
        try:
            self._onMiddleButtonUp(x, y, mod)
        finally:
            self.releaseUserInputOwnership()
    def _onRightButtonDownOutside( self, x, y, mod ):
        """Right-button-down arriving outside the client area: forward to the
        normal handler under user-input ownership."""
        if not self.acquireUserInputOwnership(False) : return
        try:
            self._onRightButtonDown(x, y, mod)
        finally:
            self.releaseUserInputOwnership()
    def _onRightButtonUpOutside( self, x, y, mod ):
        """Right-button-up arriving outside the client area: forward to the
        normal handler under user-input ownership."""
        if not self.acquireUserInputOwnership(False) : return
        try:
            self._onRightButtonUp(x, y, mod)
        finally:
            self.releaseUserInputOwnership()
    def _onLeftButtonDoubleClickOutside( self, x, y, mod ):
        """Double-click arriving outside the client area: forward to the
        normal handler under user-input ownership."""
        if not self.acquireUserInputOwnership(False) : return
        try:
            self._onLeftButtonDoubleClick(x, y, mod)
        finally:
            self.releaseUserInputOwnership()
    def _onMouseMoveOutside( self, x, y, mod ):
        """Mouse-move arriving outside the client area: forward to the normal
        handler under user-input ownership."""
        if not self.acquireUserInputOwnership(False) : return
        try:
            self._onMouseMove(x, y, mod)
        finally:
            self.releaseUserInputOwnership()
    def _onMouseWheelOutside( self, x, y, wheel, mod ):
        """Mouse-wheel arriving outside the client area: forward to the normal
        handler under user-input ownership."""
        if not self.acquireUserInputOwnership(False) : return
        try:
            self._onMouseWheel(x, y, wheel, mod)
        finally:
            self.releaseUserInputOwnership()
def _mouseCommon( self, x, y, focus=True ):
client_rect = self.getClientRect()
offset_x, offset_y = self.charToClient( 0, 0 )
char_w, char_h = self.getCharSize()
char_x = (x-offset_x) // char_w
char_y = (y-offset_y) // char_h
sub_x = float( (x-offset_x) - char_x * char_w ) // char_w
sub_y = float( (y-offset_y) - char_y * char_h ) // char_h
left_edit_pane_rect = list( self.leftEditPaneRect() )
right_edit_pane_rect = list( self.rightEditPaneRect() )
edit_separator_rect = list( self.editSeparatorRect() )
log_pane_rect = list( self.logPaneRect() )
region = None
pane = None
pane_rect = None
if self.left_edit_pane.edit and left_edit_pane_rect[0]<=char_x<left_edit_pane_rect[2] and right_edit_pane_rect[1]<=char_y<right_edit_pane_rect[3]:
if focus : self.command.FocusLeftEdit()
if char_y==left_edit_pane_rect[1]:
region = REGION_LEFT_TAB
pane = self.left_edit_pane
elif left_edit_pane_rect[1]+1<=char_y<left_edit_pane_rect[3]-1:
region = REGION_LEFT_EDIT
pane = self.left_edit_pane
elif char_y==left_edit_pane_rect[3]-1:
region = REGION_LEFT_FOOTER
pane = self.left_edit_pane
elif self.right_edit_pane.edit and right_edit_pane_rect[0]<=char_x<right_edit_pane_rect[2] and right_edit_pane_rect[1]<=char_y<right_edit_pane_rect[3]:
if focus : self.command.FocusRightEdit()
if char_y==right_edit_pane_rect[1]:
region = REGION_RIGHT_TAB
pane = self.right_edit_pane
elif right_edit_pane_rect[1]+1<=char_y<right_edit_pane_rect[3]-1:
region = REGION_RIGHT_EDIT
pane = self.right_edit_pane
elif char_y==right_edit_pane_rect[3]-1:
region = REGION_RIGHT_FOOTER
pane = self.right_edit_pane
elif edit_separator_rect[0]<=char_x<edit_separator_rect[2] and edit_separator_rect[1]<=char_y<edit_separator_rect[3]:
region = REGION_EDIT_SEPARATOR
pane = None
elif log_pane_rect[0]<=char_x<log_pane_rect[2] and log_pane_rect[1]<=char_y<log_pane_rect[3]:
if focus : self.command.FocusLog()
region = REGION_LOG
pane = self.log_pane
return [ char_x, char_y, sub_x, sub_y, region, pane ]
    def _onDropFiles( self, x, y, filename_list ):
        """Handle files dropped onto the window.

        *.lre files are opened as projects; everything else is opened as a
        text file in the edit pane under the drop point (or the active pane).
        """
        #print( "_onDropFiles", x, y )
        char_x, char_y, sub_x, sub_y, region, pane = self._mouseCommon( x, y, True )
        # If a project file was dropped, open it as a project.
        textfile_list = []
        for filename in filename_list:
            if fnmatch.fnmatch( filename, "*.lre" ):
                info = ckit.CommandInfo()
                info.args = [ filename ]
                self.command.OpenProject(info)
            else:
                textfile_list.append(filename)
        if region==REGION_LEFT_EDIT:
            for filename in textfile_list:
                self.leftOpen( filename=filename )
        elif region==REGION_RIGHT_EDIT:
            for filename in textfile_list:
                self.rightOpen( filename=filename )
        else:
            for filename in textfile_list:
                self.activeOpen( filename=filename )
    def _onIpc( self, data ):
        """Handle an argument list sent from another process (JSON-encoded),
        then bring this window to the foreground."""
        args = json.loads(data)
        self.processArgument(args)
        # Activate the window: restore if minimized and raise the last popup.
        wnd = pyauto.Window.fromHWND(self.getHWND())
        if wnd.isMinimized():
            wnd.restore()
        last_active_wnd = wnd.getLastActivePopup()
        last_active_wnd.setForeground(True)
        if last_active_wnd.isEnabled():
            last_active_wnd.setActive()
    def _setClipboard_LogSelected(self):
        """Copy the selected range of the log pane to the clipboard.

        The selection endpoints ((line, column) pairs) are normalized so the
        copy always runs top-to-bottom; lines are joined with CRLF. Nothing is
        copied when the selection is empty.
        """
        joint_text = ""
        selection_left, selection_right = self.log_pane.selection
        if selection_left > selection_right:
            selection_left, selection_right = selection_right, selection_left
        i = selection_left[0]
        while i<=selection_right[0] and i<self.log_pane.log.numLines():
            s = self.log_pane.log.getLine(i)
            # Clip the first and last lines to the selection columns.
            if i==selection_left[0]:
                left = selection_left[1]
            else:
                left = 0
            if i==selection_right[0]:
                right = selection_right[1]
            else:
                right = len(s)
            joint_text += s[left:right]
            if i!=selection_right[0]:
                joint_text += "\r\n"
            i += 1
        if joint_text:
            ckit.setClipboardText(joint_text)
    def _onLeftButtonDown( self, x, y, mod ):
        """Left-button-down: route the press to the hit region and start the
        matching drag mode (edit selection, footer resize, separator resize,
        tab), capturing the mouse for the drag."""
        #print( "_onLeftButtonDown", x, y )
        if self.mouse_event_mask : return
        self.mouse_click_info=None
        char_x, char_y, sub_x, sub_y, region, pane = self._mouseCommon( x, y, True )
        if region==REGION_LEFT_EDIT or region==REGION_RIGHT_EDIT or region==REGION_LOG:
            pane.edit.onLeftButtonDown( char_x, char_y, sub_x, sub_y, mod )
            self.setCapture()
            self.mouse_click_info = MouseInfo( "edit", x=x, y=y, mod=mod, pane=pane )
        elif region==REGION_LEFT_FOOTER or region==REGION_RIGHT_FOOTER:
            self.setCapture()
            self.mouse_click_info = MouseInfo( "footer", x=x, y=y, mod=mod, pane=pane )
        elif region==REGION_EDIT_SEPARATOR:
            self.setCapture()
            self.mouse_click_info = MouseInfo( "edit_separator", x=x, y=y, mod=mod, pane=pane )
        elif region==REGION_LEFT_TAB or region==REGION_RIGHT_TAB:
            pane.tab.onLeftButtonDown( char_x, char_y, mod )
            self.setCapture()
            self.mouse_click_info = MouseInfo( "tab", x=x, y=y, mod=mod, pane=pane )
def _onLeftButtonUp( self, x, y, mod ):
#print( "_onLeftButtonUp", x, y )
if self.mouse_event_mask : return
if self.mouse_click_info==None : return
if self.mouse_click_info.mode=="edit":
char_x, char_y, sub_x, sub_y, region, pane = self._mouseCommon( x, y, False )
self.mouse_click_info.pane.edit.onLeftButtonUp( char_x, char_y, sub_x, sub_y, mod )
self.releaseCapture()
self.mouse_click_info = None
elif self.mouse_click_info.mode=="footer":
self.releaseCapture()
self.mouse_click_info = None
elif self.mouse_click_info.mode=="edit_separator":
self.releaseCapture()
self.mouse_click_info = None
elif self.mouse_click_info.mode=="tab":
self.releaseCapture()
self.mouse_click_info = None
    def _onLeftButtonDoubleClick( self, x, y, mod ):
        """Double-click: forward to the edit widget under the cursor and start
        an "edit" drag with the mouse captured."""
        #print( "_onLeftButtonDoubleClick", x, y )
        if self.mouse_event_mask : return
        self.mouse_click_info=None
        char_x, char_y, sub_x, sub_y, region, pane = self._mouseCommon( x, y, True )
        if region==REGION_LEFT_EDIT or region==REGION_RIGHT_EDIT or region==REGION_LOG:
            pane.edit.onLeftButtonDoubleClick( char_x, char_y, sub_x, sub_y, mod )
            self.setCapture()
            self.mouse_click_info = MouseInfo( "edit", x=x, y=y, mod=mod, pane=pane )
    def _onMiddleButtonDown( self, x, y, mod ):
        """Middle-button-down: clear any pending drag state; the hit-test call
        focuses the pane under the cursor."""
        #print( "_onMiddleButtonDown", x, y )
        if self.mouse_event_mask : return
        self.mouse_click_info=None
        char_x, char_y, sub_x, sub_y, region, pane = self._mouseCommon( x, y, True )
    def _onMiddleButtonUp( self, x, y, mod ):
        """Middle-button-up: clear any pending drag state."""
        #print( "_onMiddleButtonUp", x, y )
        if self.mouse_event_mask : return
        self.mouse_click_info = None
    def _onRightButtonDown( self, x, y, mod ):
        """Right-button-down: forward to the edit widget under the cursor and
        record an "edit" click (no capture; the release is matched via
        mouse_click_info)."""
        #print( "_onRightButtonDown", x, y )
        if self.mouse_event_mask : return
        self.mouse_click_info=None
        char_x, char_y, sub_x, sub_y, region, pane = self._mouseCommon( x, y, True )
        if region==REGION_LEFT_EDIT or region==REGION_RIGHT_EDIT:
            pane.edit.onRightButtonDown( char_x, char_y, sub_x, sub_y, mod )
            self.mouse_click_info = MouseInfo( "edit", x=x, y=y, mod=mod, pane=pane )
def _onRightButtonUp( self, x, y, mod ):
#print( "_onRightButtonUp", x, y )
if self.mouse_event_mask : return
if self.mouse_click_info==None : return
if self.mouse_click_info.mode!="edit":
char_x, char_y, sub_x, sub_y, region, pane = self._mouseCommon( x, y, False )
self.mouse_click_info.pane.edit.onRightButtonUp( char_x, char_y, sub_x, sub_y, mod )
self.mouse_click_info=None
    def _onMouseMove( self, x, y, mod ):
        """Mouse-move: show resize cursors when hovering the separators,
        forward drags to the edit widget, or live-resize the panes."""
        #print( "_onMouseMove", x, y )
        if self.mouse_event_mask : return
        char_x, char_y, sub_x, sub_y, region, pane = self._mouseCommon( x, y, False )
        if self.mouse_click_info==None:
            # Not dragging: only update the cursor shape over resize borders.
            if region==REGION_LEFT_FOOTER or region==REGION_RIGHT_FOOTER:
                self.setMouseCursor(MOUSE_CURSOR_SIZENS)
            elif region==REGION_EDIT_SEPARATOR:
                self.setMouseCursor(MOUSE_CURSOR_SIZEWE)
        elif self.mouse_click_info.mode=="edit":
            self.mouse_click_info.pane.edit.onMouseMove( char_x, char_y, sub_x, sub_y, mod )
        elif self.mouse_click_info.mode=="footer":
            # Dragging a footer resizes the log pane vertically (clamped).
            self.setMouseCursor(MOUSE_CURSOR_SIZENS)
            self.log_pane_height = self.height()-char_y-2
            if self.log_pane_height>self.height()-2-self.tabBarHeight() : self.log_pane_height=self.height()-2-self.tabBarHeight()
            if self.log_pane_height<0 : self.log_pane_height=0
            self.updatePaneRect()
            cursor = self.log_pane.edit.selection.cursor()
            self.log_pane.edit.makeVisible(cursor)
            self.paint()
        elif self.mouse_click_info.mode=="edit_separator":
            # Dragging the separator adjusts the left/right pane split (clamped).
            self.setMouseCursor(MOUSE_CURSOR_SIZEWE)
            rect = self.editPaneRect()
            self.left_edit_pane_width = char_x - rect[0]
            self.left_edit_pane_width = max( self.left_edit_pane_width, 0 )
            self.left_edit_pane_width = min( self.left_edit_pane_width, self.editPaneWidth()-self.editSeparatorWidth() )
            self.updatePaneRect()
            self.paint( REGION_EDIT )
    def _onMouseWheel( self, x, y, wheel, mod ):
        """Mouse-wheel: convert screen to client coordinates, forward to the
        edit widget under the cursor, and cancel any pending drag state."""
        #print( "_onMouseWheel", x, y, wheel )
        if self.mouse_event_mask : return
        x, y = self.screenToClient( x, y )
        char_x, char_y, sub_x, sub_y, region, pane = self._mouseCommon( x, y, True )
        if region==REGION_LEFT_EDIT or region==REGION_RIGHT_EDIT or region==REGION_LOG:
            pane.edit.onMouseWheel( char_x, char_y, sub_x, sub_y, wheel, mod )
        self.mouse_click_info=None
    def _cancelMouse(self):
        """Abort any mouse drag in progress: release capture, clear state."""
        self.releaseCapture()
        self.mouse_click_info = None
    def _onCheckNetConnection( self, remote_resource_name ):
        """Establish a connection to a remote network resource via synccall,
        reporting failure to the log instead of raising."""
        def addConnection( hwnd, remote_resource_name ):
            try:
                lredit_native.addConnection( hwnd, remote_resource_name )
            except Exception as e:
                print( ckit.strings["error_connection_failed"] % remote_resource_name )
                print( e, "\n" )
        self.synccall( addConnection, (self.getHWND(), remote_resource_name) )
    def tabBarHeight(self):
        """Height of the tab bar row in character cells (fixed at 1)."""
        return 1
    def leftEditPaneWidth(self):
        """Current width of the left edit pane in character cells."""
        return self.left_edit_pane_width
    def rightEditPaneWidth(self):
        """Width of the right edit pane: total edit width minus separator and left pane."""
        return self.editPaneWidth() - self.editSeparatorWidth() - self.left_edit_pane_width
def editSeparatorWidth(self):
if self.left_edit_pane.edit and self.right_edit_pane.edit:
return 1
else:
return 0
    def editPaneWidth(self):
        """Total width available to the edit panes (full window width)."""
        return self.width()
    def editPaneHeight(self):
        """Height of the edit area: window height minus the log pane and one separator row."""
        return self.height() - self.log_pane_height - 1
    def lowerPaneHeight(self):
        """Height of the lower (log) area including its separator row."""
        return self.log_pane_height + 1
    def logPaneHeight(self):
        """Height of the log pane body in character cells."""
        return self.log_pane_height
    def editPaneRect(self):
        """Rectangle (left, top, right, bottom) of the whole edit area in character cells."""
        return ( 0, 0, self.width(), self.height() - self.log_pane_height - 1 )
    def leftTabBarRect(self):
        """Rectangle of the left pane's tab bar row."""
        return ( 0, 0, self.leftEditPaneWidth(), self.tabBarHeight() )
    def rightTabBarRect(self):
        """Rectangle of the right pane's tab bar row."""
        return ( self.leftEditPaneWidth() + self.editSeparatorWidth(), 0, self.width(), self.tabBarHeight() )
    def leftEditPaneRect(self):
        """Rectangle of the left edit pane (tab bar and footer rows included)."""
        return ( 0, 0, self.leftEditPaneWidth(), self.height() - self.log_pane_height - 1 )
    def rightEditPaneRect(self):
        """Rectangle of the right edit pane (tab bar and footer rows included)."""
        return ( self.leftEditPaneWidth() + self.editSeparatorWidth(), 0, self.width(), self.height() - self.log_pane_height - 1 )
    def editSeparatorRect(self):
        """Rectangle of the vertical separator between the two edit panes."""
        return ( self.leftEditPaneWidth(), 0, self.leftEditPaneWidth() + self.editSeparatorWidth(), self.height() - self.log_pane_height - 1 )
    def logPaneRect(self):
        """Rectangle of the log pane at the bottom of the window."""
        return ( 0, self.height() - self.log_pane_height - 1, self.width(), self.height()-1 )
    def activePaneRect(self):
        """Rectangle of the focused top-level pane (edit area or log pane)."""
        if self.focus_top==MainWindow.FOCUS_EDIT:
            return self.activeEditPaneRect()
        elif self.focus_top==MainWindow.FOCUS_LOG:
            return self.logPaneRect()
        else:
            assert(False)
    def activeEditPaneRect(self):
        """Rectangle of the focused edit pane (left or right)."""
        if self.focus_edit==MainWindow.FOCUS_EDIT_LEFT:
            return self.leftEditPaneRect()
        elif self.focus_edit==MainWindow.FOCUS_EDIT_RIGHT:
            return self.rightEditPaneRect()
        else:
            assert(False)
    def inactiveEditPaneRect(self):
        """Rectangle of the non-focused edit pane (the opposite side)."""
        if self.focus_edit==MainWindow.FOCUS_EDIT_LEFT:
            return self.rightEditPaneRect()
        elif self.focus_edit==MainWindow.FOCUS_EDIT_RIGHT:
            return self.leftEditPaneRect()
        else:
            assert(False)
def ratioToScreen( self, ratio ):
rect = self.getWindowRect()
return ( int(rect[0] * (1-ratio[0]) + rect[2] * ratio[0]), int(rect[1] * (1-ratio[1]) + rect[3] * ratio[1]) )
## メインウインドウの中心位置を、スクリーン座標系で返す
#
# @return ( X軸座標, Y軸座標 )
#
def centerOfWindowInPixel(self):
rect = self.getWindowRect()
return ( (rect[0]+rect[2])//2, (rect[1]+rect[3])//2 )
    def centerOfFocusedPaneInPixel(self):
        """Return the center of the active edit pane in screen coordinates.

        The pane center is expressed as a ratio of the window's character grid
        and interpolated within the window's pixel rectangle; a degenerate
        window size falls back to the middle (0.5).
        """
        window_rect = self.getWindowRect()
        pane_rect = self.activeEditPaneRect()
        if self.width()>0:
            x_ratio = float(pane_rect[0]+pane_rect[2])/2/self.width()
        else:
            x_ratio = 0.5
        if self.height()>0:
            y_ratio = float(pane_rect[1]+pane_rect[3])/2/self.height()
        else:
            y_ratio = 0.5
        return ( int(window_rect[0] * (1-x_ratio) + window_rect[2] * (x_ratio)), int(window_rect[1] * (1-y_ratio) + window_rect[3] * (y_ratio)) )
## 左の編集ペインを取得する
    def leftPane(self):
        """Return the left edit pane."""
        return self.left_edit_pane
## 右の編集ペインを取得する
    def rightPane(self):
        """Return the right edit pane."""
        return self.right_edit_pane
## アクティブなペインを取得する
    def activePane(self):
        """Return the pane that currently has the focus (an edit pane or the log pane)."""
        if self.focus_top==MainWindow.FOCUS_EDIT:
            return self.activeEditPane()
        elif self.focus_top==MainWindow.FOCUS_LOG:
            return self.log_pane
        else:
            assert(False)
## アクティブな編集ペインを取得する
    def activeEditPane(self):
        """Return the focused edit pane (left or right)."""
        if self.focus_edit==MainWindow.FOCUS_EDIT_LEFT:
            return self.left_edit_pane
        elif self.focus_edit==MainWindow.FOCUS_EDIT_RIGHT:
            return self.right_edit_pane
        else:
            assert(False)
## 非アクティブな編集ペインを取得する
    def inactiveEditPane(self):
        """Return the non-focused edit pane (the opposite side)."""
        if self.focus_edit==MainWindow.FOCUS_EDIT_LEFT:
            return self.right_edit_pane
        elif self.focus_edit==MainWindow.FOCUS_EDIT_RIGHT:
            return self.left_edit_pane
        else:
            assert(False)
    def updateCursor(self):
        """Enable the caret only in the widget that owns the focus (the
        command line wins over all panes), then refresh title and tab bars."""
        if self.commandline_edit:
            # Command line open: all pane carets off, command line caret on.
            if self.left_edit_pane.edit : self.left_edit_pane.edit.enableCursor(False)
            if self.right_edit_pane.edit : self.right_edit_pane.edit.enableCursor(False)
            self.log_pane.edit.enableCursor(False)
            self.commandline_edit.enableCursor(True)
        else:
            active_pane = self.activePane()
            if self.left_edit_pane.edit : self.left_edit_pane.edit.enableCursor( active_pane==self.left_edit_pane )
            if self.right_edit_pane.edit : self.right_edit_pane.edit.enableCursor( active_pane==self.right_edit_pane )
            self.log_pane.edit.enableCursor( active_pane==self.log_pane )
        self.updateTitleBar()
        self.updateTabBar()
## 1画面モードであるか
    def isLayoutOne(self):
        """True when at most one edit pane holds an edit (single-pane layout)."""
        return not (self.left_edit_pane.edit and self.right_edit_pane.edit)
## 2画面モードであるか
def isLayoutTwo(self):
return (self.left_edit_pane.edit and self.right_edit_pane.edit)
    def executeCommand( self, name, info ):
        """Execute a named command: first offer it to the focused widget, then
        fall back to this window's command_<name> method.
        Returns True when the command was handled, False when it is unknown.
        """
        #print( "executeCommand", name )
        if self.activePane().widget():
            if self.activePane().widget().executeCommand( name, info ):
                return True
        try:
            command = getattr( self, "command_" + name )
        except AttributeError:
            return False
        command(info)
        return True
def enumCommand(self):
if self.activePane().widget():
for item in self.activePane().widget().enumCommand():
yield item
for attr in dir(self):
if attr.startswith("command_"):
yield attr[ len("command_") : ]
def prepareMenuBar(self):
def isWholeMenuEnabled():
if not self.acquireUserInputOwnership(False) : return False
try:
return True
finally:
self.releaseUserInputOwnership()
def isEncoding(encoding):
def func():
edit = self.activeEditPane().edit
if not edit : return False
if encoding in ("utf-8", "utf-8n"):
if edit.doc.encoding.encoding=="utf-8":
if edit.doc.encoding.bom and encoding=="utf-8" : return True
if not edit.doc.encoding.bom and encoding=="utf-8n" : return True
return False
return edit.doc.encoding.encoding==encoding
return func
def isLineEnd(lineend):
def func():
edit = self.activeEditPane().edit
if not edit : return False
return edit.doc.lineend==lineend
return func
def isSelected():
edit = self.activeEditPane().edit
if not edit : return False
return edit.selection.direction!=0
def isProjectOpened():
return (self.project!=None)
# [開き直す]のサブメニュー項目
def menuitems_ReopenEncoding():
def command_ReopenSpecificEncoding( encoding ):
def func(info):
info = ckit.CommandInfo()
info.args = [encoding]
self.command.ReopenEncoding(info)
return func
encoding_list = [
"utf-8",
"utf-8n",
"shift-jis",
"euc-jp",
"iso-2022-jp",
"utf-16-le",
"utf-16-be",
]
menuitems = []
for encoding in encoding_list:
menuitems.append( ckit.MenuNode( encoding, encoding, command_ReopenSpecificEncoding(encoding), checked=isEncoding(encoding) ) )
return menuitems
# [エンコーディング]のサブメニュー項目
def menuitems_Encoding():
def command_SpecificEncoding( encoding ):
def func(info):
info = ckit.CommandInfo()
info.args = [encoding]
self.command.Encoding(info)
return func
encoding_list = [
"utf-8",
"utf-8n",
"shift-jis",
"euc-jp",
"iso-2022-jp",
"utf-16-le",
"utf-16-be",
]
menuitems = []
for encoding in encoding_list:
menuitems.append( ckit.MenuNode( encoding, encoding, command_SpecificEncoding(encoding), checked=isEncoding(encoding) ) )
return menuitems
# 改行コード
def menuitems_LineEnd():
def command_SpecificLineEnd( lineend ):
def func(info):
info = ckit.CommandInfo()
info.args = [lineend]
self.command.LineEnd(info)
return func
lineend_list = [
( "crlf", "\r\n" ),
( "lf", "\n" ),
( "cr", "\r" ),
]
menuitems = []
for lineend_name, lineend in lineend_list:
menuitems.append( ckit.MenuNode( lineend, lineend_name, command_SpecificLineEnd(lineend_name), checked=isLineEnd(lineend) ) )
return menuitems
# [最近のファイル]のサブメニュー項目
def menuitems_RecentFiles():
class command_OpenSpecificFile:
def __init__( command_self, filename ):
command_self.filename = filename
def __call__( command_self, info ):
self.activeOpen( filename=command_self.filename )
menu_items = []
i = 0
for filename in self.filename_history.items:
if not fnmatch.fnmatch( filename, "*.lre" ):
menu_items.append( ckit.MenuNode( "recent_file%d"%i, "&%d %s" % ( (i+1)%10, filename ), command_OpenSpecificFile(filename) ) )
i += 1
if i>=10: break
return menu_items
# [最近のプロジェクト]のサブメニュー項目
def menuitems_RecentProjects():
class command_OpenSpecificProject:
def __init__( command_self, filename ):
command_self.filename = filename
def __call__( command_self, info ):
info = ckit.CommandInfo()
info.args = [ command_self.filename ]
self.command.OpenProject(info)
menu_items = []
i = 0
for filename in self.filename_history.items:
if fnmatch.fnmatch( filename, "*.lre" ):
menu_items.append( ckit.MenuNode( "recent_project%d"%i, "&%d %s" % ( (i+1)%10, filename ), command_OpenSpecificProject(filename) ) )
i += 1
if i>=10: break
return menu_items
# メニュー全体の定義
self.menu_bar = ckit.MenuNode(
enabled = isWholeMenuEnabled,
items=[
ckit.MenuNode(
"file", ckit.strings["menu_file"],
items=[
ckit.MenuNode( "new", ckit.strings["menu_new"], self.command.New ),
ckit.MenuNode( "open", ckit.strings["menu_open"], self.command.Open ),
ckit.MenuNode( "reopen", ckit.strings["menu_reopen"],
items=[
menuitems_ReopenEncoding
]
),
ckit.MenuNode( "close", ckit.strings["menu_close"], self.command.Close ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "project", ckit.strings["menu_project"],
items = [
ckit.MenuNode( "open_project", ckit.strings["menu_open_project"], self.command.OpenProject ),
ckit.MenuNode( "close_project", ckit.strings["menu_close_project"], self.command.CloseProject, enabled=isProjectOpened ),
ckit.MenuNode( "edit_project", ckit.strings["menu_edit_project"], self.command.EditProject, enabled=isProjectOpened ),
]
),
ckit.MenuNode( "project_file_list", ckit.strings["menu_project_file_list"], self.command.ProjectFileList, enabled=isProjectOpened ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "save", ckit.strings["menu_save"], self.command.Save ),
ckit.MenuNode( "save_as", ckit.strings["menu_save_as"], self.command.SaveAs ),
ckit.MenuNode( "save_all", ckit.strings["menu_save_all"], self.command.SaveAll ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "encoding", ckit.strings["menu_encoding"],
items=[
menuitems_Encoding,
ckit.MenuNode( separator=True ),
menuitems_LineEnd,
]
),
ckit.MenuNode( separator=True ),
ckit.MenuNode(
"recent_files", ckit.strings["menu_recent_files"],
items=[
menuitems_RecentFiles,
]
),
ckit.MenuNode(
"recent_projects", ckit.strings["menu_recent_projects"],
items=[
menuitems_RecentProjects,
]
),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "quit", ckit.strings["menu_quit"], self.command.Quit ),
]
),
ckit.MenuNode(
"edit", ckit.strings["menu_edit"],
items=[
ckit.MenuNode( "undo", ckit.strings["menu_undo"], self.command.Undo ),
ckit.MenuNode( "redo", ckit.strings["menu_redo"], self.command.Redo ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "cut", ckit.strings["menu_cut"], self.command.Cut, enabled=isSelected ),
ckit.MenuNode( "copy", ckit.strings["menu_copy"], self.command.Copy, enabled=isSelected ),
ckit.MenuNode( "paste", ckit.strings["menu_paste"], self.command.Paste ),
ckit.MenuNode( "delete", ckit.strings["menu_delete"], self.command.Delete ),
ckit.MenuNode( "select_all", ckit.strings["menu_select_all"], self.command.SelectDocument ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "convert_char", ckit.strings["menu_convert_char"],
items = [
ckit.MenuNode( "to_upper", ckit.strings["menu_to_upper"], self.command.ToUpper, enabled=isSelected ),
ckit.MenuNode( "to_lower", ckit.strings["menu_to_lower"], self.command.ToLower, enabled=isSelected ),
ckit.MenuNode( "to_zenkaku", ckit.strings["menu_to_zenkaku"], self.command.ToZenkaku, enabled=isSelected ),
ckit.MenuNode( "to_hankaku", ckit.strings["menu_to_hankaku"], self.command.ToHankaku, enabled=isSelected ),
]
),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "complete", ckit.strings["menu_complete"], self.command.CompleteAbbrev ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "jump_lineno", ckit.strings["menu_jump_lineno"], self.command.JumpLineNo ),
]
),
ckit.MenuNode(
"search", ckit.strings["menu_search"],
items=[
ckit.MenuNode( "search", ckit.strings["menu_search"], self.command.Search ),
ckit.MenuNode( "search_next", ckit.strings["menu_search_next"], self.command.SearchNext ),
ckit.MenuNode( "search_prev", ckit.strings["menu_search_prev"], self.command.SearchPrev ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "grep", ckit.strings["menu_grep"], self.command.Grep ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "tags", ckit.strings["menu_tags"],
items = [
ckit.MenuNode( "tags_jump", ckit.strings["menu_tags_jump"], self.command.TagsJump, enabled=isProjectOpened ),
ckit.MenuNode( "tags_back", ckit.strings["menu_tags_back"], self.command.TagsBack, enabled=isProjectOpened ),
ckit.MenuNode( "load_tags", ckit.strings["menu_load_tags"], self.command.LoadTags, enabled=isProjectOpened ),
ckit.MenuNode( "generate_tags", ckit.strings["menu_generate_tags"], self.command.GenerateTags, enabled=isProjectOpened ),
]
),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "replace", ckit.strings["menu_replace"], self.command.Replace ),
ckit.MenuNode( "compare", ckit.strings["menu_compare"], self.command.Compare ),
]
),
ckit.MenuNode(
"view", ckit.strings["menu_view"],
items=[
ckit.MenuNode( "another_pane", ckit.strings["menu_another_pane"], self.command.AnotherPane ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "doclist", ckit.strings["menu_doclist"], self.command.DocumentList ),
]
),
ckit.MenuNode(
"tool", ckit.strings["menu_tool"],
items=[
ckit.MenuNode( "bookmark_list", ckit.strings["menu_bookmark_list"], self.command.BookmarkList ),
ckit.MenuNode( "bookmark1", ckit.strings["menu_bookmark1"], self.command.Bookmark1 ),
ckit.MenuNode( "bookmark2", ckit.strings["menu_bookmark2"], self.command.Bookmark2 ),
ckit.MenuNode( "bookmark3", ckit.strings["menu_bookmark3"], self.command.Bookmark3 ),
ckit.MenuNode( "bookmark_next", ckit.strings["menu_bookmark_next"], self.command.CursorBookmarkNext ),
ckit.MenuNode( "bookmark_prev", ckit.strings["menu_bookmark_prev"], self.command.CursorBookmarkPrev ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "outline", ckit.strings["menu_outline"], self.command.Outline ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "expand_tab", ckit.strings["menu_expand_tab"], self.command.ExpandTab, enabled=isSelected ),
ckit.MenuNode( "remove_trailing_space", ckit.strings["menu_remove_trailing_space"], self.command.RemoveTrailingSpace, enabled=isSelected ),
ckit.MenuNode( "remove_empty_lines", ckit.strings["menu_remove_empty_lines"], self.command.RemoveEmptyLines, enabled=isSelected ),
ckit.MenuNode( "remove_marked_lines", ckit.strings["menu_remove_marked_lines"], self.command.RemoveMarkedLines, enabled=isSelected ),
ckit.MenuNode( "remove_unmarked_lines", ckit.strings["menu_remove_unmarked_lines"], self.command.RemoveUnmarkedLines, enabled=isSelected ),
ckit.MenuNode( separator=True, name="custom_tools_begin" ),
ckit.MenuNode( separator=True, name="custom_tools_end" ),
ckit.MenuNode( "config_menu", ckit.strings["menu_config_menu"], self.command.ConfigMenu ),
ckit.MenuNode( "config_edit", ckit.strings["menu_config_edit"], self.command.EditConfig ),
ckit.MenuNode( "config_reload", ckit.strings["menu_config_reload"], self.command.ReloadConfig ),
]
),
ckit.MenuNode(
"help", ckit.strings["menu_help"],
items=[
ckit.MenuNode( "help", ckit.strings["menu_help"], self.command.Help ),
ckit.MenuNode( separator=True ),
ckit.MenuNode( "about", ckit.strings["menu_about"], self.command.About ),
]
),
]
)
def applyMenuBar(self):
    """Attach or detach the menu bar according to the [MENUBAR] "visible" INI setting."""
    visible = self.ini.getint( "MENUBAR", "visible" )
    if visible:
        self.setMenu(self.menu_bar)
    else:
        self.setMenu(None)
    # Re-apply the current window position/size so the client area is
    # re-laid out after the menu bar is attached or removed.
    window_rect = self.getWindowRect()
    self.setPosSize( window_rect[0], window_rect[1], self.width(), self.height(), 0 )
# Menu lookup
def findMenu( self, node, route ):
    """Walk *node*'s items along *route* (a sequence of menu-node names).

    Returns ``(parent_node, index)`` of the final route element, or
    ``(None, -1)`` when the route cannot be resolved.
    """
    target = route[0]
    for index, child in enumerate(node.items):
        if isinstance( child, ckit.MenuNode ) and child.name == target:
            if len(route) == 1:
                return node, index
            # Descend into the matching submenu with the rest of the route.
            return self.findMenu( child, route[1:] )
    return None, -1
## Insert a menu item
#
# Inserts *item* before the menu node identified by *route*
# (a sequence of menu-node names). Prints an error when the route
# cannot be found.
def insertMenu( self, route, item ):
    menu, pos = self.findMenu( self.menu_bar, route )
    if menu and pos>=0:
        menu.items.insert( pos, item )
    else:
        print( "ERROR : insertMenu : not found : ", route )
## Append a menu item
#
# Appends *item* to the child list of the menu node identified by
# *route* (a sequence of menu-node names). Prints an error when the
# route cannot be found.
def appendMenu( self, route, item ):
    menu, pos = self.findMenu( self.menu_bar, route )
    if menu and pos>=0:
        menu.items[pos].items.append(item)
    else:
        print( "ERROR : appendMenu : not found : ", route )
def statusBar(self):
    """Return the window's status-bar object."""
    return self.status_bar
def _onStatusMessageTimedout(self):
    # Timer callback: remove a time-limited status-bar message.
    self.clearStatusMessage()
## Display a message on the status bar
#
# @param self -
# @param message  message string to display
# @param timeout  display duration in milliseconds
# @param error    display in error style (red text)
# @param log      also print the message to standard output
#
# Shows an arbitrary string on the status bar at the bottom edge of the
# LREdit main window.
#
# Passing an integer as *timeout* makes the message time-limited, so it
# disappears automatically.
# Passing None as *timeout* makes the message persist until
# clearStatusMessage() is called.
#
# @sa clearStatusMessage
#
def setStatusMessage( self, message, timeout=None, error=False, log=False ):
    if log:
        if error:
            print( ckit.strings["error_prefix"] + message )
        else:
            print( message )
    self.status_bar_layer.setMessage(message,error)
    # Register the message layer lazily, only while a message is shown.
    # (note: "resistered" is a long-standing attribute-name typo kept as-is)
    if not self.status_bar_resistered:
        self.status_bar.registerLayer(self.status_bar_layer)
        self.status_bar_resistered = True
    if timeout!=None:
        # Restart the expiry timer for time-limited messages.
        self.killTimer( self._onStatusMessageTimedout )
        self.setTimer( self._onStatusMessageTimedout, timeout )
    self.paint( REGION_STATUS_BAR )
    if error:
        ckit.messageBeep()
## Clear the status-bar message
#
# Removes the message currently displayed on the LREdit status bar.
#
# @sa setStatusMessage
#
def clearStatusMessage( self ):
    self.status_bar_layer.setMessage("")
    if self.status_bar_resistered:
        self.status_bar.unregisterLayer(self.status_bar_layer)
        self.status_bar_resistered = False
    # Cancel any pending expiry timer and repaint the status-bar region.
    self.killTimer(self._onStatusMessageTimedout)
    self.paint( REGION_STATUS_BAR )
def _onProgressTimedout(self):
    # Timer callback: remove a time-limited progress bar.
    self.clearProgress()
## Display a progress bar
#
# @param self -
# @param value   progress value ( 0.0 - 1.0, or [ 0.0 - 1.0, ... ] )
# @param timeout display duration in milliseconds
#
# Shows a progress bar in the bottom-right area of the LREdit main
# window, or updates the progress of a bar that is already shown.
#
# *value* is normally a single float from 0 to 1; a list of two or more
# values may also be passed, in which case multiple progress bars are
# stacked vertically. Passing None as *value* turns the bar into a busy
# indicator that animates left and right.
#
# Passing an integer as *timeout* makes the bar time-limited, so it
# disappears automatically; passing None makes it persist until
# clearProgress() is called.
#
# @sa clearProgress
#
def setProgressValue( self, value, timeout=None ):
    # Create the widget lazily on first use.
    if self.progress_bar==None:
        self.progress_bar = ckit.ProgressBarWidget( self, self.width(), self.height()-1, 0, 0 )
    self.progress_bar.setValue(value)
    if timeout!=None:
        # Restart the expiry timer for time-limited display.
        self.killTimer( self._onProgressTimedout )
        self.setTimer( self._onProgressTimedout, timeout )
    self.paint( REGION_STATUS_BAR )
## Remove the progress bar
#
# Removes the LREdit progress bar.
#
# @sa setProgressValue
#
def clearProgress( self ):
    if self.progress_bar:
        self.progress_bar.destroy()
        self.progress_bar = None
    self.paint( REGION_STATUS_BAR )
    self.killTimer( self._onProgressTimedout )
#--------------------------------------------------------------------------
## Refresh the title-bar text
def updateTitleBar(self):
    """Rebuild the window title (app name / project / active document /
    modified mark) and apply it only when it actually changed."""
    new_title = "%s" % lredit_resource.lredit_appname
    if self.project:
        new_title += " - %s" % self.project.name
    active_edit = self.activeEditPane().edit
    if active_edit:
        doc = active_edit.doc
        # Prefer the full path; fall back to the display name for
        # untitled documents.
        shown_name = doc.getFullpath()
        if not shown_name:
            shown_name = doc.getName()
        new_title += " - [%s]" % shown_name
        if doc.isModified():
            new_title += " *"
    # Skip the setTitle call when the text is unchanged.
    if new_title != self.title:
        self.setTitle(new_title)
        self.title = new_title
def _onTabSelectionChanged( self, selection, item ):
    # Tab-bar callback: activate the edit widget stored in the tab item.
    edit = item[1]
    self.activeOpen( edit=edit )
## Refresh the tab-bar state
def updateTabBar(self):
    # Rebuild each pane's tab items from its edit list and select the tab
    # of the pane's currently shown edit widget.
    for pane in ( self.left_edit_pane, self.right_edit_pane ):
        tab_items = []
        selection = None
        for i, edit in enumerate( pane.edit_list ):
            tab_items.append( ( edit.doc.getName(), edit ) )
            if edit==pane.edit:
                selection = i
        pane.tab.setItems( tab_items )
        pane.tab.setSelection(selection)
## Refresh the footer and title-bar display
#
# Repaints a pane's footer when the given edit widget (or document) is
# the one currently shown in that pane, then refreshes the title bar.
def updateInformation( self, doc=None, edit=None ):
    if ((edit and edit==self.left_edit_pane.edit) or
        (doc and self.left_edit_pane.edit and doc==self.left_edit_pane.edit.doc)):
        self.paint( REGION_LEFT_FOOTER )
    if ((edit and edit==self.right_edit_pane.edit) or
        (doc and self.right_edit_pane.edit and doc==self.right_edit_pane.edit.doc)):
        self.paint( REGION_RIGHT_FOOTER )
    self.updateTitleBar()
def onDocumentTextModified( self, edit, left, old_right, new_right ):
    # Keep the jump-list line numbers in sync with edits that change the
    # number of lines.
    if old_right.line != new_right.line:
        for jump in self.jump_list:
            jump.shiftLineNo( edit.doc.getFullpath(), left.line, old_right.line, new_right.line )
    self.updateInformation(doc=edit.doc)
def _onEditSelectionChanged( self, edit, anchor, cursor ):
    # Selection-change callback: refresh the footer (cursor position
    # display) and title bar.
    self.updateInformation(edit=edit)
# Apply bookmark information from the BookmarkTable to the TextEditWidget.
def loadBookmarkList( self, edit ):
    fullpath = edit.doc.getFullpath()
    if fullpath:
        # Untitled documents have no path and therefore no stored bookmarks.
        bookmark_list = self.bookmarks.getBookmarkList(fullpath)
        edit.setBookmarkList(bookmark_list)
# Store bookmark information from the TextEditWidget back into the BookmarkTable.
def storeBookmarkList( self, edit ):
    fullpath = edit.doc.getFullpath()
    if fullpath:
        bookmark_list = edit.getBookmarkList()
        self.bookmarks.setBookmarkList( fullpath, bookmark_list )
def _createEditWidget( self, doc ):
    # Create a TextWidget for *doc*: restore its bookmarks, hook
    # selection-change notifications, and apply the current configuration.
    edit = ckit.TextWidget( self, 0, 0, 0, 0, message_handler=self.setStatusMessage )
    edit.setDocument(doc)
    self.loadBookmarkList(edit)
    edit.selection_changed_handler_list.append( self._onEditSelectionChanged )
    edit.configure()
    return edit
def _findEditFromDocument( self, doc ):
    """Return the edit widget whose document equals *doc*, or None when the document is not open."""
    return next( ( candidate for candidate in self.edit_list if candidate.doc == doc ), None )
def _findEditFromFilename( self, filename ):
    """Return the edit widget whose document path matches *filename*
    (absolute, normalized, case-insensitive), or None."""
    wanted = os.path.normpath( os.path.abspath(filename) ).lower()
    for candidate in self.edit_list:
        path = candidate.doc.getFullpath()
        # Untitled documents have no path and can never match.
        if path and os.path.normpath(path).lower() == wanted:
            return candidate
    return None
def _open( self, pane, another_pane, doc=None, edit=None, filename=None, lineno=None, sort=True, duplicate=False, focus=False, pane_stable=False ):
    """Core open routine shared by leftOpen/rightOpen/activeOpen/inactiveOpen.

    Shows a document in *pane*, reusing an existing edit widget when the
    document or file is already open (moving it between panes as needed),
    or creating a new Document/edit widget from *filename*. Also switches
    between the 1-pane and 2-pane layouts and optionally jumps to *lineno*.
    """
    # Remember whether the 2-pane layout was active before this open.
    layout2_old = (self.inactiveEditPane().edit!=None)
    # Record the filename in the history
    if filename!=None:
        filename = os.path.abspath(filename)
        filename = os.path.normpath(filename)
        # Temporary files are deliberately kept out of the history.
        if not filename.lower().startswith(ckit.getTempPath().lower()):
            self.filename_history.append( ckit.normPath(filename) )
    # Derive the edit widget from the document or the filename
    if not edit:
        if doc:
            edit = self._findEditFromDocument(doc)
        else:
            edit = self._findEditFromFilename(filename)
    if edit:
        # For an existing edit widget, open it in the pane it already belongs to
        if pane_stable:
            if edit in another_pane.edit_list:
                tmp = another_pane
                another_pane = pane
                pane = tmp
        # Duplicate the edit widget
        if duplicate:
            edit = self._createEditWidget(edit.doc)
            pane.edit_list.append(edit)
        # Remove the edit widget from the opposite pane's list
        if edit in another_pane.edit_list:
            another_pane.edit_list.remove(edit)
            if another_pane.edit == edit:
                # Show the most recently used remaining edit in the other pane.
                for edit2 in self.edit_list:
                    if edit2 in another_pane.edit_list:
                        another_pane.edit = edit2
                        another_pane.edit.show(True)
                        break
                else:
                    # The other pane is now empty: focus moves to the target pane.
                    another_pane.edit = None
                    if pane==self.left_edit_pane:
                        self.focus_edit = MainWindow.FOCUS_EDIT_LEFT
                    else:
                        self.focus_edit = MainWindow.FOCUS_EDIT_RIGHT
            pane.edit_list.append(edit)
        # Move the edit widget to the front of the MRU list
        if sort:
            try:
                self.edit_list.remove(edit)
            except ValueError:
                pass
            self.edit_list.insert(0,edit)
    else:
        # Create a Document from the filename
        try:
            if doc==None:
                # FIXME : should use subThreadCall for large files
                doc = ckit.Document( filename=filename, mode=self.createModeFromFilename(filename) )
        except IOError as e:
            print( ckit.strings["error_open_failed"] % filename )
            self.setStatusMessage( ckit.strings["statusbar_open_failed"] % filename, 3000, error=True )
            self.filename_history.remove( ckit.normPath(filename) )
            return
        except UnicodeError as e:
            print( ckit.strings["error_load_failed"] % filename )
            print( " : " + ckit.strings["not_textfile"] )
            self.setStatusMessage( ckit.strings["statusbar_open_failed"] % filename, 3000, error=True )
            self.filename_history.remove( ckit.normPath(filename) )
            return
        # Create an edit widget from the Document
        edit = self._createEditWidget(doc)
        pane.edit_list.append(edit)
        self.edit_list.insert(0,edit)
    # Swap the pane's visible edit widget for the new one.
    if pane.edit:
        pane.edit.show(False)
    pane.edit = edit
    pane.edit.show(True)
    if focus:
        if pane==self.left_edit_pane:
            self.focus_edit = MainWindow.FOCUS_EDIT_LEFT
        else:
            self.focus_edit = MainWindow.FOCUS_EDIT_RIGHT
        self.updateCursor()
    # Switch between the 1-pane and 2-pane layouts
    layout2_new = (self.inactiveEditPane().edit!=None)
    if layout2_new and not layout2_old:
        self.command.MoveSeparatorCenter()
    elif not layout2_new and layout2_old:
        self.command.MoveSeparatorMaximizeH()
    self.updatePaneRect()
    if lineno!=None:
        pane.edit.jumpLineNo(lineno)
## Open in the left edit pane
def leftOpen( self, doc=None, edit=None, filename=None, lineno=None, sort=True, duplicate=False ):
    self._open( self.left_edit_pane, self.right_edit_pane, doc, edit, filename, lineno, sort, duplicate, focus=False, pane_stable=False )
    self.paint( REGION_EDIT )
## Open in the right edit pane
def rightOpen( self, doc=None, edit=None, filename=None, lineno=None, sort=True, duplicate=False ):
    self._open( self.right_edit_pane, self.left_edit_pane, doc, edit, filename, lineno, sort, duplicate, focus=False, pane_stable=False )
    self.paint( REGION_EDIT )
## Open in the active edit pane
#
# pane_stable=True keeps an already-open document in its current pane
# instead of moving it; focus=True moves keyboard focus to that pane.
def activeOpen( self, doc=None, edit=None, filename=None, lineno=None, sort=True, duplicate=False ):
    self._open( self.activeEditPane(), self.inactiveEditPane(), doc, edit, filename, lineno, sort, duplicate, focus=True, pane_stable=True )
    self.paint( REGION_EDIT )
## Open in the inactive edit pane
def inactiveOpen( self, doc=None, edit=None, filename=None, lineno=None, sort=True, duplicate=False ):
    self._open( self.inactiveEditPane(), self.activeEditPane(), doc, edit, filename, lineno, sort, duplicate, focus=False, pane_stable=False )
    self.paint( REGION_EDIT )
def _close( self, edit ):
    """Close *edit*: confirm saving when modified, store its bookmarks,
    remove it from its pane, and afterwards show the next document (or
    leave 2-pane mode / create a new untitled document).

    Returns True on success, None when the user cancelled the save
    confirmation.
    """
    if edit in self.left_edit_pane.edit_list:
        pane = self.left_edit_pane
        another_pane = self.right_edit_pane
    else:
        pane = self.right_edit_pane
        another_pane = self.left_edit_pane
    # When closing one side of a Compare, clear the diff colors on both sides
    if edit.isDiffColorMode():
        for edit2 in self.edit_list:
            edit2.clearDiffColor()
        self.jump_list = []
    # Ask about saving modified edit widgets
    if edit.doc.isModified():
        self.activeOpen( edit=edit )
        result = self.saveDocument( edit.doc, confirm=True )
        if result==None:
            return None
    self.storeBookmarkList(edit)
    # NOTE(review): this hides pane.edit unconditionally — it appears to
    # assume *edit* is the pane's currently shown edit; confirm callers
    # activate it first.
    pane.edit.show(False)
    pane.edit = None
    self.edit_list.remove(edit)
    pane.edit_list.remove(edit)
    edit.destroy()
    # After closing, show the next (most recently used) edit widget
    for edit in self.edit_list:
        if edit in pane.edit_list:
            self.activeOpen( edit=edit )
            break
    else:
        # When no edit widget belongs to this pane any more, leave 2-pane mode
        if another_pane.edit!=None:
            if another_pane==self.left_edit_pane:
                self.focus_edit = MainWindow.FOCUS_EDIT_LEFT
            else:
                self.focus_edit = MainWindow.FOCUS_EDIT_RIGHT
            self.updateCursor()
            self.command.MoveSeparatorMaximizeH()
        else:
            # When no edit widgets remain at all, create a new untitled document
            self.command.New()
    return True
## Save a document
#
# @param doc        document to save
# @param filename   target path; defaults to the document's own filename
# @param confirm    ask the user before saving
# @param input_name prompt for a filename even when the document has one
#
# Returns True when saved, False when the user declined to save, and
# None when the user cancelled.
def saveDocument( self, doc, filename=None, confirm=False, input_name=False ):
    if confirm:
        result = lredit_msgbox.popMessageBox(
            self,
            lredit_msgbox.MSGBOX_TYPE_YESNO,
            ckit.strings["msgbox_title_save"],
            ckit.strings["msgbox_ask_save_document"] % doc.getName() )
        if result==lredit_msgbox.MSGBOX_RESULT_YES:
            pass
        elif result==lredit_msgbox.MSGBOX_RESULT_NO:
            return False
        else:
            # Dialog dismissed: treat as cancel.
            return None
    if not filename:
        filename = doc.filename
    if not filename or input_name:
        # Untitled document or explicit "Save As": ask for a filename.
        filename = self.inputFilename( "Save", filename )
        if not filename : return None
    filename = os.path.abspath(filename)
    filename = ckit.normPath(filename)
    with open( filename, "wb" ) as fd:
        doc.writeFile(fd)
    doc.filename = filename
    doc.clearModified()
    doc.clearFileModified()
    # Saving may have touched the project file; re-check it, then refresh UI.
    self.checkProjectFileModified()
    self.paint( REGION_EDIT )
    self.updateTitleBar()
    self.updateTabBar()
    return True
## Save all documents
#
# @param confirm  ask before each save
# @param untitled also process documents without a filename
#
# Returns the result of the last save attempt: True on success, False
# when the user declined, None when the user cancelled (stops the loop).
def saveDocumentAll( self, confirm=False, untitled=False ):
    result = True
    for edit in self.edit_list:
        if untitled or edit.doc.getFullpath():
            if edit.doc.isModified():
                result = self.saveDocument( edit.doc, confirm=confirm )
                if result==None:
                    break
            self.storeBookmarkList(edit)
    return result
def checkFileModifiedAll(self):
    # For every open document whose file changed on disk, ask the user
    # whether to reload it. Once the user cancels a prompt, the remaining
    # documents are answered "no" automatically.
    cancel = False
    for edit in self.edit_list:
        doc = edit.doc
        if doc.getFullpath():
            if doc.isFileModified():
                if not cancel:
                    result = lredit_msgbox.popMessageBox(
                        self,
                        lredit_msgbox.MSGBOX_TYPE_YESNO,
                        ckit.strings["msgbox_title_modified_reload"],
                        ckit.strings["msgbox_ask_modified_reload"] % doc.getName() )
                else:
                    result = lredit_msgbox.MSGBOX_RESULT_NO
                if result==None:
                    cancel = True
                if result==lredit_msgbox.MSGBOX_RESULT_YES:
                    filename = doc.getFullpath()
                    # FIXME : should use subThreadCall for large files
                    doc = ckit.Document( filename=filename, mode=self.createModeFromFilename(filename) )
                    edit.setDocument(doc)
                else:
                    # Keep the in-memory version; stop reporting the file as changed.
                    doc.clearFileModified()
def checkProjectFileModified(self):
    # Reload the project when its file was modified on disk.
    if self.project and self.project.isFileModified():
        self.project = lredit_project.Project(self.project.filename)
        print( ckit.strings["project_reloaded"] + "\n" )
#--------------------------------------------------------------------------
def createModeFromName( self, name ):
    """Instantiate the registered major mode called *name*; fall back to a plain TextMode."""
    matched = next( ( candidate for candidate in self.mode_list if candidate.name == name ), None )
    if matched is not None:
        return matched()
    return ckit.TextMode()
def createModeFromFilename( self, filename ):
    """Choose a major mode for *filename* by matching its basename against
    the configured extension patterns; fall back to a plain TextMode."""
    basename = os.path.basename(filename)
    for entry in self.fileext_list:
        # entry[0] is a whitespace-separated list of glob patterns,
        # entry[1] is the mode name they map to.
        if any( fnmatch.fnmatch( basename, pattern ) for pattern in entry[0].split() ):
            return self.createModeFromName( entry[1] )
    return ckit.TextMode()
## Get the mode of the left edit pane
def leftPaneMode(self):
    """Return the left pane document's mode, or None when nothing is shown there."""
    pane = self.left_edit_pane
    try:
        return pane.edit.doc.mode
    except AttributeError:
        # No edit widget (or no document) in this pane.
        return None
## Get the mode of the right edit pane
def rightPaneMode(self):
    """Return the right pane document's mode, or None when nothing is shown there."""
    pane = self.right_edit_pane
    try:
        return pane.edit.doc.mode
    except AttributeError:
        # No edit widget (or no document) in this pane.
        return None
## Get the mode of the active edit pane
def activeEditPaneMode(self):
    """Return the document mode of the pane that currently has focus."""
    if self.focus_edit==MainWindow.FOCUS_EDIT_LEFT:
        return self.leftPaneMode()
    elif self.focus_edit==MainWindow.FOCUS_EDIT_RIGHT:
        return self.rightPaneMode()
    else:
        # focus_edit is only ever LEFT or RIGHT.
        assert(False)
## Get the mode of the inactive edit pane
def inactiveEditPaneMode(self):
    """Return the document mode of the pane that does NOT have focus.

    Mirror image of activeEditPaneMode(): when focus is on the left
    pane the inactive pane is the right one, and vice versa.
    """
    if self.focus_edit==MainWindow.FOCUS_EDIT_LEFT:
        return self.rightPaneMode()
    elif self.focus_edit==MainWindow.FOCUS_EDIT_RIGHT:
        # BUGFIX: this branch previously returned rightPaneMode() as well
        # (copy-paste error), so the "inactive" mode was wrong whenever
        # the right pane had focus.
        return self.leftPaneMode()
    else:
        # focus_edit is only ever LEFT or RIGHT.
        assert(False)
#--------------------------------------------------------------------------
def loadTheme(self):
    # Load the theme named in the [THEME] INI section, supplying fallback
    # colors for keys a theme may not define.
    name = self.ini.get( "THEME", "name" )
    default_color = {
        "line_cursor" : (255,128,128),
        "diff_bg1" : (100,50,50),
        "diff_bg2" : (50,100,50),
        "diff_bg3" : (50,50,100),
    }
    ckit.setTheme( name, default_color )
    # Theme planes are not created yet; createThemePlane() flips this on.
    self.theme_enabled = False
def reloadTheme(self):
    # Reload the theme and rebuild every theme-dependent image plane,
    # then refresh colors and the wallpaper.
    self.loadTheme()
    self.destroyThemePlane()
    self.createThemePlane()
    self.updateColor()
    self.updateWallpaper()
def createThemePlane(self):
    # Create the themed image planes of the main window (pane separator,
    # footer, incremental search, status bar, command line) plus those of
    # the tab bars and all edit widgets, then lay everything out.
    self.plane_edit_separator = ckit.ThemePlane3x3( self, 'vseparator.png' )
    self.plane_footer = ckit.ThemePlane3x3( self, 'footer.png' )
    self.plane_isearch = ckit.ThemePlane3x3( self, 'isearch.png', 1 )
    self.plane_statusbar = ckit.ThemePlane3x3( self, 'statusbar.png', 1.5 )
    self.plane_commandline = ckit.ThemePlane3x3( self, 'commandline.png', 1 )
    # Incremental-search and command-line planes stay hidden until used.
    self.plane_isearch.show(False)
    self.plane_commandline.show(False)
    self.left_edit_pane.tab.createThemePlane()
    self.right_edit_pane.tab.createThemePlane()
    for edit in self.edit_list:
        edit.createThemePlane()
    self.log_pane.edit.createThemePlane()
    self.theme_enabled = True
    self.updatePaneRect()
def destroyThemePlane(self):
    # Tear down every themed image plane created by createThemePlane(),
    # including those of the tab bars and all edit widgets.
    self.plane_edit_separator.destroy()
    self.plane_footer.destroy()
    self.plane_isearch.destroy()
    self.plane_statusbar.destroy()
    self.plane_commandline.destroy()
    self.left_edit_pane.tab.destroyThemePlane()
    self.right_edit_pane.tab.destroyThemePlane()
    for edit in self.edit_list:
        edit.destroyThemePlane()
    self.log_pane.edit.destroyThemePlane()
    self.theme_enabled = False
def updatePaneRect(self):
    """Recompute positions and sizes of the tab bars, edit widgets, log
    pane and themed planes from the current layout state."""
    # Collapse the pane split to one side when only one pane has an edit.
    if self.left_edit_pane.edit==None and self.right_edit_pane.edit:
        self.left_edit_pane_width = 0
    elif self.left_edit_pane.edit and self.right_edit_pane.edit==None:
        self.left_edit_pane_width = self.editPaneWidth()
    if self.left_edit_pane.tab:
        rect = self.leftTabBarRect()
        x = rect[0]
        y = rect[1]
        width = rect[2]-rect[0]
        height = rect[3]-rect[1]
        self.left_edit_pane.tab.setPosSize( x, y, width, height )
    if self.right_edit_pane.tab:
        rect = self.rightTabBarRect()
        x = rect[0]
        y = rect[1]
        width = rect[2]-rect[0]
        height = rect[3]-rect[1]
        self.right_edit_pane.tab.setPosSize( x, y, width, height )
    if self.left_edit_pane.edit:
        rect = self.leftEditPaneRect()
        x = rect[0]
        y = rect[1]
        width = rect[2]-rect[0]
        height = rect[3]-rect[1]
        # One row is reserved above and below (tab bar / footer lines).
        if self.left_edit_pane.edit : self.left_edit_pane.edit.setPosSize( x, y+1, width, height-2 )
    if self.right_edit_pane.edit:
        rect = self.rightEditPaneRect()
        x = rect[0]
        y = rect[1]
        width = rect[2]-rect[0]
        height = rect[3]-rect[1]
        if self.right_edit_pane.edit : self.right_edit_pane.edit.setPosSize( x, y+1, width, height-2 )
    if self.log_pane.edit:
        rect = self.logPaneRect()
        x = rect[0]
        y = rect[1]
        width = rect[2]-rect[0]
        height = rect[3]-rect[1]
        self.log_pane.edit.setPosSize( x, y, width, height )
    if self.theme_enabled:
        client_rect = self.getClientRect()
        offset_x, offset_y = self.charToClient( 0, 0 )
        char_w, char_h = self.getCharSize()
        # The vertical separator plane is only shown in 2-pane mode.
        if self.left_edit_pane.edit and self.right_edit_pane.edit:
            rect = self.editSeparatorRect()
            x = rect[0]
            y = rect[1]
            width = rect[2]-rect[0]
            height = rect[3]-rect[1]
            # NOTE(review): *width* is computed but the separator plane is
            # placed 1 character cell wide — presumably intentional; confirm.
            self.plane_edit_separator.setPosSizeByChar( self, x, y, 1, height )
            self.plane_edit_separator.show(True)
        else:
            self.plane_edit_separator.show(False)
        # Footer plane: the bottom row of the edit-pane area.
        rect = self.editPaneRect()
        x = rect[0]
        y = rect[3]-1
        width = rect[2]-rect[0]
        height = 1
        self.plane_footer.setPosSizeByChar( self, x, y, width, height )
        # Status-bar plane: the last character row, stretched in pixels to
        # the bottom edge of the client area.
        self.plane_statusbar.setPosSize( 0, (self.height()-1)*char_h+offset_y, client_rect[2], client_rect[3]-((self.height()-1)*char_h+offset_y) )
#--------------------------------------------------------------------------
def updateColor(self):
    # Re-read the theme colors for all text widgets, then apply the
    # window background and cursor colors and repaint everything.
    ckit.TextWidget.updateColor()
    self.setBGColor( ckit.getColor("bg") )
    self.setCursorColor( ckit.getColor("cursor0"), ckit.getColor("cursor1") )
    self.paint()
#--------------------------------------------------------------------------
def updateWallpaper(self):
    """Create, reload or remove the wallpaper according to the [WALLPAPER]
    INI section.

    On a missing filename or a load failure, the wallpaper is disabled
    and the INI settings are reset so the error does not recur.
    """
    visible = self.ini.getint( "WALLPAPER", "visible" )
    strength = self.ini.getint( "WALLPAPER", "strength" )
    filename = self.ini.get( "WALLPAPER", "filename" )
    def destroyWallpaper():
        # Drop the current wallpaper object, if any.
        if self.wallpaper:
            self.wallpaper.destroy()
            self.wallpaper = None
    if visible:
        if filename=="":
            lredit_msgbox.popMessageBox(
                self,
                lredit_msgbox.MSGBOX_TYPE_OK,
                ckit.strings["msgbox_title_wallpaper_error"],
                ckit.strings["msgbox_wallpaper_filename_empty"] )
            destroyWallpaper()
            self.ini.set( "WALLPAPER", "visible", "0" )
            return
        destroyWallpaper()
        self.wallpaper = ckit.Wallpaper(self)
        try:
            self.wallpaper.load(filename,strength)
        except Exception:
            # BUGFIX: was a bare "except:" which also swallowed
            # KeyboardInterrupt/SystemExit; catch only real errors.
            print( ckit.strings["error_invalid_wallpaper"] % filename )
            destroyWallpaper()
            self.ini.set( "WALLPAPER", "visible", "0" )
            self.ini.set( "WALLPAPER", "filename", "" )
            return
        self.wallpaper.adjust()
    else:
        destroyWallpaper()
#--------------------------------------------------------------------------
## Draw the window contents
#
# @param option bitmask of REGION_* flags selecting which regions
#               (tab bars, edit areas, footers, separator, log,
#               status bar) to redraw.
def paint( self, option=REGION_ALL ):
    if not self.initialized : return
    """
    if option & REGION_FOCUSED:
        if option & REGION_FOCUSED_EDIT:
            option |= [ REGION_LEFT_EDIT, REGION_RIGHT_EDIT ][self.focus_edit]
        if option & REGION_FOCUSED_FOOTER:
            option |= [ REGION_LEFT_FOOTER, REGION_RIGHT_FOOTER ][self.focus_edit]
    """
    if option & (REGION_LEFT_TAB|REGION_LEFT_EDIT|REGION_LEFT_FOOTER) and self.left_edit_pane.widget():
        rect = self.leftEditPaneRect()
        x = rect[0]
        y = rect[1]
        width = rect[2]-rect[0]
        height = rect[3]-rect[1]
        if option & REGION_LEFT_TAB and height>=1 :
            self.left_edit_pane.tab.paint()
        if option & REGION_LEFT_EDIT and height>=1 :
            self.left_edit_pane.widget().paint()
        if option & REGION_LEFT_FOOTER and height>=1 :
            # A hook may take over footer drawing; otherwise use the default.
            if self.left_edit_pane.footer_paint_hook:
                self.left_edit_pane.footer_paint_hook( x, y+height-1, width, 1, self.left_edit_pane )
            else:
                self._paintFooterInfo( x, y+height-1, width, 1, self.left_edit_pane )
    if option & (REGION_RIGHT_TAB|REGION_RIGHT_EDIT|REGION_RIGHT_FOOTER) and self.right_edit_pane.widget():
        rect = self.rightEditPaneRect()
        x = rect[0]
        y = rect[1]
        width = rect[2]-rect[0]
        height = rect[3]-rect[1]
        if option & REGION_RIGHT_TAB and height>=1 :
            self.right_edit_pane.tab.paint()
        if option & REGION_RIGHT_EDIT and height>=1 :
            self.right_edit_pane.widget().paint()
        if option & REGION_RIGHT_FOOTER and height>=1 :
            if self.right_edit_pane.footer_paint_hook:
                self.right_edit_pane.footer_paint_hook( x, y+height-1, width, 1, self.right_edit_pane )
            else:
                self._paintFooterInfo( x, y+height-1, width, 1, self.right_edit_pane )
    if option & REGION_EDIT_SEPARATOR:
        # Fill the separator column with blanks in the bar foreground color.
        rect = self.editSeparatorRect()
        x = rect[0]
        y = rect[1]
        width = rect[2]-rect[0]
        height = rect[3]-rect[1]
        attr = ckit.Attribute( fg=ckit.getColor("bar_fg") )
        str_whitespace = " " * width
        for i in range( y, y+height ):
            self.putString( x, i, width, 1, attr, str_whitespace )
    if option & REGION_LOG:
        self.log_pane.widget().paint()
    if option & REGION_STATUS_BAR:
        if self.status_bar_paint_hook:
            # A custom hook replaces the whole status bar (progress hidden).
            if self.progress_bar:
                self.progress_bar.show(False)
            self.status_bar_paint_hook( 0, self.height()-1, self.width(), 1 )
        else:
            if self.progress_bar:
                # Progress bar occupies up to 20 cells (at most half the width)
                # at the right end of the status row.
                progress_width = min( self.width() // 2, 20 )
                self.progress_bar.setPosSize( self.width()-progress_width, self.height()-1, progress_width, 1 )
                self.progress_bar.show(True)
                self.progress_bar.paint()
            else:
                progress_width = 0
            self.status_bar.paint( self, 0, self.height()-1, self.width()-progress_width, 1 )
    # Hide the text cursor while the window is inactive.
    if not self.isActive():
        self.setCursorPos( -1, -1 )
def _paintFooterInfo( self, x, y, width, height, pane ):
    # Default footer renderer: document name, modified mark, mode names,
    # encoding / line-end, and the cursor position, centered in the footer.
    attr = ckit.Attribute( fg=ckit.getColor("bar_fg") )
    self.putString( x, y, width, height, attr, " " * width )
    str_info = pane.edit.doc.getName()
    if pane.edit.doc.isModified() : str_info += " *"
    if pane.edit.doc.minor_mode_list:
        str_info += " (%s:%s)" % ( pane.edit.doc.mode.name, " ".join( map(lambda mode:mode.name, pane.edit.doc.minor_mode_list) ) )
    else:
        str_info += " (%s)" % pane.edit.doc.mode.name
    def lineendName(lineend):
        # Map the raw line terminator to its display name.
        if lineend=="\r\n":
            return "crlf"
        elif lineend=="\r":
            return "cr"
        elif lineend=="\n":
            return "lf"
        else:
            return ""
    str_info += " [%s:%s]" % ( pane.edit.doc.encoding, lineendName(pane.edit.doc.lineend) )
    cursor = pane.edit.selection.cursor()
    column = pane.edit.getColumnFromIndex( cursor.line, cursor.index )
    # Display as 1-based line:column.
    str_cursor = "%d:%d" % (cursor.line+1, column+1)
    str_info += " %8s" % str_cursor
    margin = max((width-len(str_info))//2,0)
    self.putString( x+margin, y, width-margin, height, attr, str_info )
    # Commented out because it caused flicker
    #self.flushPaint()
#--------------------------------------------------------------------------
def registerStdio( self ):
    # Redirect stdout/stderr into the log pane by appending written text
    # to the end of the log document. Skipped in debug mode so prints go
    # to the real console.
    class Stdout:
        def write( writer_self, s ):
            edit = self.log_pane.edit
            end = edit.pointDocumentEnd()
            edit.modifyText( end, end, s, append_undo=False, ignore_readonly=True )
    class Stderr:
        def write( writer_self, s ):
            edit = self.log_pane.edit
            end = edit.pointDocumentEnd()
            edit.modifyText( end, end, s, append_undo=False, ignore_readonly=True )
    if not self.debug:
        sys.stdout = Stdout()
        sys.stderr = Stderr()
def unregisterStdio( self ):
    # Restore the interpreter's original stdout/stderr streams.
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    ## Load the configuration
    #
    #  Resets the keymap, the mode lists and so on, and then reloads config.py.
    #
    def configure( self ):
        # Assign the key-definition strings according to the keyboard type
        ckit.Keymap.init()
        ## Key bindings of the main window
        self.keymap = ckit.Keymap()
        self.keymap[ "C-A-Left" ] = self.command.MoveSeparatorLeft
        self.keymap[ "C-A-Right" ] = self.command.MoveSeparatorRight
        self.keymap[ "C-A-Up" ] = self.command.MoveSeparatorUp
        self.keymap[ "C-A-Down" ] = self.command.MoveSeparatorDown
        self.keymap[ "S-Escape" ] = self.command.CancelTask
        self.keymap[ "C-Minus" ] = self.command.MoveSeparatorCenter
        self.keymap[ "C-S-Minus" ] = self.command.MoveSeparatorMaximizeH
        self.keymap[ "C-F4" ] = self.command.Close
        self.keymap[ "C-Q" ] = self.command.Close
        self.keymap[ "A-F4" ] = self.command.Quit
        self.keymap[ "C-Tab" ] = self.command.DocumentNext
        self.keymap[ "A-X" ] = self.command.CommandLine
        self.keymap[ "C-O" ] = self.command.Open
        self.keymap[ "C-S" ] = self.command.Save
        self.keymap[ "C-J" ] = self.command.Jump
        self.keymap[ "C-F" ] = self.command.Search
        self.keymap[ "C-R" ] = self.command.Replace
        self.keymap[ "C-S-F" ] = self.command.Grep
        self.keymap[ "F11" ] = self.command.SearchResultNext
        self.keymap[ "S-F11" ] = self.command.SearchResultPrev
        self.keymap[ "F4" ] = self.command.TagsJump
        self.keymap[ "S-F4" ] = self.command.TagsBack
        self.keymap[ "Left" ] = self.command.CursorLeft
        self.keymap[ "Right" ] = self.command.CursorRight
        self.keymap[ "C-Left" ] = self.command.CursorWordLeft
        self.keymap[ "C-Right" ] = self.command.CursorWordRight
        # Home toggles between the first non-blank character and column 0
        self.keymap[ "Home" ] = CommandSequence( self.command.CursorLineFirstGraph, self.command.CursorLineBegin )
        self.keymap[ "End" ] = self.command.CursorLineEnd
        self.keymap[ "Up" ] = self.command.CursorUp
        self.keymap[ "Down" ] = self.command.CursorDown
        self.keymap[ "PageUp" ] = self.command.CursorPageUp
        self.keymap[ "PageDown" ] = self.command.CursorPageDown
        self.keymap[ "A-Up" ] = self.command.CursorModifiedOrBookmarkPrev
        self.keymap[ "A-Down" ] = self.command.CursorModifiedOrBookmarkNext
        self.keymap[ "A-S-Up" ] = self.command.SelectModifiedOrBookmarkPrev
        self.keymap[ "A-S-Down" ] = self.command.SelectModifiedOrBookmarkNext
        self.keymap[ "C-Home" ] = self.command.CursorDocumentBegin
        self.keymap[ "C-End" ] = self.command.CursorDocumentEnd
        self.keymap[ "C-B" ] = self.command.CursorCorrespondingBracket
        self.keymap[ "C-Up" ] = self.command.ScrollUp
        self.keymap[ "C-Down" ] = self.command.ScrollDown
        self.keymap[ "C-L" ] = self.command.ScrollCursorCenter
        self.keymap[ "S-Left" ] = self.command.SelectLeft
        self.keymap[ "S-Right" ] = self.command.SelectRight
        self.keymap[ "C-S-Left" ] = self.command.SelectWordLeft
        self.keymap[ "C-S-Right" ] = self.command.SelectWordRight
        self.keymap[ "S-Home" ] = self.command.SelectLineBegin
        self.keymap[ "S-End" ] = self.command.SelectLineEnd
        self.keymap[ "S-Up" ] = self.command.SelectUp
        self.keymap[ "S-Down" ] = self.command.SelectDown
        self.keymap[ "S-PageUp" ] = self.command.SelectPageUp
        self.keymap[ "S-PageDown" ] = self.command.SelectPageDown
        self.keymap[ "C-S-B" ] = self.command.SelectCorrespondingBracket
        self.keymap[ "C-S-Home" ] = self.command.SelectDocumentBegin
        self.keymap[ "C-S-End" ] = self.command.SelectDocumentEnd
        self.keymap[ "C-A" ] = self.command.SelectDocument
        self.keymap[ "C-S-Up" ] = self.command.SelectScrollUp
        self.keymap[ "C-S-Down" ] = self.command.SelectScrollDown
        self.keymap[ "Return" ] = CommandSequence( self.command.Enter, self.command.InsertReturnAutoIndent )
        # Tab indents a selection; otherwise inserts a tab character
        self.keymap[ "Tab" ] = CommandSequence( self.command.IndentSelection, self.command.InsertTab )
        self.keymap[ "S-Tab" ] = CommandSequence( self.command.UnindentSelection, self.command.CursorTabLeft )
        self.keymap[ "Delete" ] = self.command.Delete
        self.keymap[ "Back" ] = self.command.DeleteCharLeft
        self.keymap[ "C-Delete" ] = self.command.DeleteWordRight
        self.keymap[ "C-Back" ] = self.command.DeleteWordLeft
        self.keymap[ "C-D" ] = self.command.DeleteCharRight
        self.keymap[ "C-H" ] = self.command.DeleteCharLeft
        self.keymap[ "C-K" ] = self.command.DeleteLineRight
        self.keymap[ "C-C" ] = self.command.Copy
        self.keymap[ "C-X" ] = self.command.Cut
        self.keymap[ "C-V" ] = self.command.Paste
        self.keymap[ "C-Z" ] = self.command.Undo
        self.keymap[ "C-Y" ] = self.command.Redo
        self.keymap[ "C-N" ] = self.command.SearchNext
        self.keymap[ "C-S-N" ] = self.command.SearchPrev
        self.keymap[ "C-Space" ] = self.command.CompleteAbbrev
        self.keymap[ "C-E" ] = self.command.ExtensionMenu
        self.keymap[ "C-M" ] = self.command.Bookmark1
        # Escape: close any popup list, focus the editor, cancel the selection
        self.keymap[ "Escape" ] = CommandSequence( self.command.CloseList, self.command.FocusEdit, self.command.SelectCancel )
        ## Items of the extension menu (label, key, command)
        self.ext_menu_items = [
            ( "Another Pane",       "C-W", self.command.AnotherPane ),
            ( "Project Files",      "C-P", self.command.ProjectFileList ),
            ( "Recent Files",       "C-H", self.command.RecentFileList ),
            ( "Bookmark List",      "C-M", self.command.BookmarkList ),
            ( "Document List",      "C-D", self.command.DocumentList ),
            ( "Outline Analysis",   "C-O", self.command.Outline ),
            ( "Search Result",      "C-S", self.command.SearchResultList ),
        ]
        ## List of major modes
        self.mode_list = [
            lredit_mode.PythonMode,
            lredit_mode.PerlMode,
            lredit_mode.JavaScriptMode,
            lredit_mode.CMode,
            lredit_mode.CppMode,
            lredit_mode.ObjectiveCMode,
            lredit_mode.ObjectiveCppMode,
            lredit_mode.CsharpMode,
            lredit_mode.JavaMode,
            lredit_mode.GlslMode,
            lredit_mode.XmlMode,
            lredit_mode.HtmlMode,
            lredit_mode.MakefileMode,
            lredit_mode.BatchMode,
            lredit_mode.SqlMode,
            ckit.TextMode,
        ]
        ## List of minor modes
        self.minor_mode_list = [
            lredit_minormode.TestMode,
        ]
        ## Association of filename patterns with modes (first match wins)
        self.fileext_list = [
            ( "*.py *.pyw *.pys", "python" ),
            ( "*.pl", "perl" ),
            ( "*.js", "javascript" ),
            ( "*.cpp *.cc *.cxx *.hpp *.hh *.hxx *.h", "c++" ),
            ( "*.c *.h", "c" ),
            ( "*.mm *.h", "objective-c++" ),
            ( "*.m *.h", "objective-c" ),
            ( "*.cs", "c#" ),
            ( "*.java", "java" ),
            ( "*.vert *.frag *.geo", "glsl" ),
            ( "*.xml", "xml" ),
            ( "*.html *.htm", "html" ),
            ( "makefile *.mk", "makefile" ),
            ( "*.bat", "batch" ),
            ( "*.sql", "sql" ),
            ( "*", "text" ),
        ]
        ## List of command line handlers
        self.commandline_list = [
            self.launcher,
            lredit_commandline.commandline_Open(self),
            lredit_commandline.commandline_Document(self),
            lredit_commandline.commandline_Mode(self),
            lredit_commandline.commandline_MinorMode(self),
            lredit_commandline.commandline_Int32Hex(),
            lredit_commandline.commandline_Calculator(),
        ]
        ## Additional commands executable from the command line
        self.launcher.command_list = [
        ]
        self.prepareMenuBar()
        # run the user's config.py against this window
        ckit.reloadConfigScript( self.config_filename )
        ckit.callConfigFunc("configure",self)
        # per-mode static configuration; errors are logged and otherwise ignored
        for mode in self.mode_list:
            try:
                mode.staticconfigure(self)
            except:
                traceback.print_exc()
        for mode in self.minor_mode_list:
            try:
                mode.staticconfigure(self)
            except:
                traceback.print_exc()
        # re-apply the configuration to every open editor and to the log pane
        for edit in self.edit_list:
            try:
                edit.configure()
            except:
                traceback.print_exc()
        try:
            self.log_pane.edit.configure()
        except:
            traceback.print_exc()
        self.applyMenuBar()
def loadState(self):
if os.path.exists(self.ini_filename):
try:
fd = open( self.ini_filename, "r", encoding="utf-8" )
msvcrt.locking( fd.fileno(), msvcrt.LK_LOCK, 1 )
self.ini.readfp(fd)
fd.close()
except Exception as e:
MB_OK = 0
ctypes.windll.user32.MessageBoxW(
0,
ckit.strings["error_ini_file_load_failed"] + "\n\n" + traceback.format_exc(),
ckit.strings["msgbox_title_generic_error"],
MB_OK )
# ini ファイルの読み込みに失敗したので保存しない
self.ini_filename = None
ini_version = "0.00"
try:
ini_version = self.ini.get("GLOBAL","version")
except:
pass
try:
self.ini.add_section("GLOBAL")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("GEOMETRY")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("FONT")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("THEME")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("MENUBAR")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("WALLPAPER")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("FILENAME")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("SEARCH")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("REPLACE")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("GREP")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("COMPARE")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("ZENHAN")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("COMMANDLINE")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("BOOKMARK")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("MISC")
except configparser.DuplicateSectionError:
pass
try:
self.ini.add_section("DEBUG")
except configparser.DuplicateSectionError:
pass
self.ini.set( "GLOBAL", "version", lredit_resource.lredit_version )
if not self.ini.has_option( "GEOMETRY", "x" ):
self.ini.set( "GEOMETRY", "x", str(0) )
if not self.ini.has_option( "GEOMETRY", "y" ):
self.ini.set( "GEOMETRY", "y", str(0) )
if not self.ini.has_option( "GEOMETRY", "width" ):
self.ini.set( "GEOMETRY", "width", str(80) )
if not self.ini.has_option( "GEOMETRY", "height" ):
self.ini.set( "GEOMETRY", "height", str(32) )
if not self.ini.has_option( "GEOMETRY", "log_pane_height" ):
self.ini.set( "GEOMETRY", "log_pane_height", str(10) )
if not self.ini.has_option( "GEOMETRY", "left_edit_pane_width" ):
self.ini.set( "GEOMETRY", "left_edit_pane_width", str( (self.ini.getint( "GEOMETRY", "width" )-1)//2 ) )
if not self.ini.has_option( "FONT", "name" ):
self.ini.set( "FONT", "name", "" )
if not self.ini.has_option( "FONT", "size" ):
self.ini.set( "FONT", "size", "12" )
if not self.ini.has_option( "THEME", "name" ):
self.ini.set( "THEME", "name", "black" )
if not self.ini.has_option( "MENUBAR", "visible" ):
self.ini.set( "MENUBAR", "visible", "1" )
if not self.ini.has_option( "WALLPAPER", "visible" ):
self.ini.set( "WALLPAPER", "visible", "0" )
if not self.ini.has_option( "WALLPAPER", "strength" ):
self.ini.set( "WALLPAPER", "strength", "30" )
if not self.ini.has_option( "WALLPAPER", "filename" ):
self.ini.set( "WALLPAPER", "filename", "" )
if not self.ini.has_option( "SEARCH", "word" ):
self.ini.set( "SEARCH", "word", "0" )
if not self.ini.has_option( "SEARCH", "case" ):
self.ini.set( "SEARCH", "case", "0" )
if not self.ini.has_option( "SEARCH", "regex" ):
self.ini.set( "SEARCH", "regex", "0" )
if not self.ini.has_option( "GREP", "target" ):
self.ini.set( "GREP", "target", "" )
if not self.ini.has_option( "GREP", "recursive" ):
self.ini.set( "GREP", "recursive", str(1) )
if not self.ini.has_option( "COMPARE", "options" ):
self.ini.set( "COMPARE", "options", "Strict" )
if not self.ini.has_option( "ZENHAN", "options" ):
self.ini.set( "ZENHAN", "options", "Ascii,Digit" )
if not self.ini.has_option( "MISC", "locale" ):
self.ini.set( "MISC", "locale", locale.getdefaultlocale()[0] )
if not self.ini.has_option( "MISC", "isearch_type" ):
self.ini.set( "MISC", "isearch_type", "strict" )
if not self.ini.has_option( "MISC", "beep_type" ):
self.ini.set( "MISC", "beep_type", "enabled" )
if not self.ini.has_option( "MISC", "directory_separator" ):
self.ini.set( "MISC", "directory_separator", "backslash" )
if not self.ini.has_option( "MISC", "drive_case" ):
self.ini.set( "MISC", "drive_case", "nocare" )
if not self.ini.has_option( "MISC", "app_name" ):
self.ini.set( "MISC", "app_name", "LREdit" )
if not self.ini.has_option( "DEBUG", "detect_block" ):
self.ini.set( "DEBUG", "detect_block", "0" )
if not self.ini.has_option( "DEBUG", "print_errorinfo" ):
self.ini.set( "DEBUG", "print_errorinfo", "0" )
if self.ini.get( "MISC", "beep_type" )=="enabled":
ckit.enableBeep(True)
else:
ckit.enableBeep(False)
if self.ini.get( "MISC", "directory_separator" )=="slash":
ckit.setPathSlash(True)
else:
ckit.setPathSlash(False)
if self.ini.get( "MISC", "drive_case" )=="upper":
ckit.setPathDriveUpper(True)
elif self.ini.get( "MISC", "drive_case" )=="lower":
ckit.setPathDriveUpper(False)
else:
ckit.setPathDriveUpper(None)
lredit_resource.lredit_appname = self.ini.get( "MISC", "app_name" )
lredit_resource.setLocale( self.ini.get( "MISC", "locale" ) )
    def saveState(self):
        # The ini file is deliberately not saved when loading it failed earlier
        # (loadState() clears self.ini_filename in that case).
        if self.ini_filename==None:
            return
        print( ckit.strings["saving"] )
        try:
            # persist the window geometry of the non-maximized state
            normal_rect = self.getNormalWindowRect()
            normal_size = self.getNormalSize()
            self.ini.set( "GEOMETRY", "x", str(normal_rect[0]) )
            self.ini.set( "GEOMETRY", "y", str(normal_rect[1]) )
            self.ini.set( "GEOMETRY", "width", str(normal_size[0]) )
            self.ini.set( "GEOMETRY", "height", str(normal_size[1]) )
            self.ini.set( "GEOMETRY", "log_pane_height", str(self.log_pane_height) )
            self.ini.set( "GEOMETRY", "left_edit_pane_width", str(self.left_edit_pane_width) )
            # histories and bookmarks serialize themselves into their sections
            self.filename_history.save( self.ini, "FILENAME" )
            self.commandline_history.save( self.ini, "COMMANDLINE" )
            self.search_history.save( self.ini, "SEARCH" )
            self.replace_history.save( self.ini, "REPLACE" )
            self.grep_location_history.save( self.ini, "GREP", "location" )
            self.grep_filename_pattern_history.save( self.ini, "GREP", "filename_pattern" )
            self.grep_dirname_exclude_pattern_history.save( self.ini, "GREP", "dirname_exclude_pattern" )
            self.bookmarks.save( self.ini, "BOOKMARK" )
            # write to a temporary file first and swap it in afterwards, so a
            # failed write never destroys the existing ini file
            tmp_ini_filename = self.ini_filename + ".tmp"
            fd = open( tmp_ini_filename, "w", encoding="utf-8" )
            msvcrt.locking( fd.fileno(), msvcrt.LK_LOCK, 1 )
            self.ini.write(fd)
            fd.close()
            try:
                os.unlink( self.ini_filename )
            except OSError:
                pass
            os.rename( tmp_ini_filename, self.ini_filename )
        except Exception as e:
            print( ckit.strings["common_failed"] )
            print( " %s" % str(e) )
        else:
            print( ckit.strings["common_done"] )
#--------------------------------------------------------------------------
    # Process the command line arguments
    def processArgument( self, args ):
        arg_readonly = args["readonly"]
        arg_text = args["text"]
        arg_project = args["project"]
        arg_compare = args["compare"]
        arg_file = args["file"]
        text_list = []
        project = None
        # *.lre files are project files; everything else is opened as text
        for filename in arg_file:
            if fnmatch.fnmatch( filename, "*.lre" ):
                project = filename
            else:
                text_list.append( ( filename, 1, 1 ) )
        for item in arg_text:
            text_list.append(item)
        # an explicit --project option overrides a *.lre positional argument
        if arg_project:
            project = arg_project[0]
        # Open the project file
        if project:
            info = ckit.CommandInfo()
            info.args = [ project ]
            self.command.OpenProject(info)
        # Open the text files
        for filename, line, index in text_list:
            self.activeOpen(filename=filename)
            edit = self.activeEditPane().edit
            # if the open failed, skip cursor placement for this file
            if edit==None or edit.doc.getFullpath() != ckit.normPath(filename):
                continue
            if arg_readonly:
                edit.doc.setReadOnly(True)
            # clamp the requested 1-based (line, index) position into the document
            point = edit.point(line-1,index-1)
            point = max( point, edit.pointDocumentBegin() )
            point = min( point, edit.pointDocumentEnd() )
            point = max( point, point.lineBegin() )
            point = min( point, point.lineEnd() )
            edit.setCursor(point)
            self.command.ScrollCursorCenter()
        # If no file was given, or none could be opened, open an untitled document
        if not self.edit_list:
            doc = ckit.Document( filename=None, mode=self.createModeFromName("text") )
            self.activeOpen(doc=doc)
        # If a comparison was requested on the command line, invoke Compare
        if arg_compare:
            self.leftOpen( filename=arg_compare[0] )
            self.rightOpen( filename=arg_compare[1] )
            self.command.Compare()
def startup( self, args ):
print( lredit_resource.startupString() )
if self.profile:
cProfile.runctx( "self.processArgument(args)", globals(), locals() )
else:
self.processArgument(args)
    #--------------------------------------------------------
    # The methods from here on can be assigned to keys
    #--------------------------------------------------------
## 編集ペインにフォーカスする
def command_FocusEdit( self, info ):
if self.focus_top==MainWindow.FOCUS_EDIT:
info.result = False
return
self.focus_top = MainWindow.FOCUS_EDIT
self.updateCursor()
self.paint()
## ログペインにフォーカスする
def command_FocusLog( self, info ):
if self.focus_top==MainWindow.FOCUS_LOG:
info.result = False
return
self.focus_top = MainWindow.FOCUS_LOG
self.updateCursor()
self.paint()
## アクティブではないほうの編集ペインにフォーカスする
def command_FocusInactiveEdit( self, info ):
if self.focus_edit==MainWindow.FOCUS_EDIT_LEFT:
self.command_FocusRightEdit(info)
else:
self.command_FocusLeftEdit(info)
## 左編集ペインにフォーカスする
def command_FocusLeftEdit( self, info ):
if self.focus_top==MainWindow.FOCUS_EDIT and self.focus_edit==MainWindow.FOCUS_EDIT_LEFT:
info.result = False
return
self.focus_top = MainWindow.FOCUS_EDIT
if self.left_edit_pane.edit:
self.focus_edit = MainWindow.FOCUS_EDIT_LEFT
else:
info.result=False
self.updateCursor()
self.paint()
## 右編集ペインにフォーカスする
def command_FocusRightEdit( self, info ):
if self.focus_top==MainWindow.FOCUS_EDIT and self.focus_edit==MainWindow.FOCUS_EDIT_RIGHT:
info.result = False
return
self.focus_top = MainWindow.FOCUS_EDIT
if self.right_edit_pane.edit:
self.focus_edit = MainWindow.FOCUS_EDIT_RIGHT
else:
info.result=False
self.updateCursor()
self.paint()
## 左右のペインを分離するセパレータを左方向に動かす
def command_MoveSeparatorLeft( self, info ):
if self.focus_top==MainWindow.FOCUS_EDIT:
self.left_edit_pane_width = max( self.left_edit_pane_width-3, 0 )
self.updatePaneRect()
self.paint( REGION_EDIT )
else:
info.result = False
return
## 左右のペインを分離するセパレータを右方向に動かす
def command_MoveSeparatorRight( self, info ):
if self.focus_top==MainWindow.FOCUS_EDIT:
self.left_edit_pane_width = min( self.left_edit_pane_width+3, self.editPaneWidth()-self.editSeparatorWidth() )
self.updatePaneRect()
self.paint( REGION_EDIT )
else:
info.result = False
return
## 上下のペインを分離するセパレータを上方向に動かす
def command_MoveSeparatorUp( self, info ):
self.log_pane_height += 3
if self.log_pane_height>self.height()-2-self.tabBarHeight() : self.log_pane_height=self.height()-2-self.tabBarHeight()
self.updatePaneRect()
cursor = self.log_pane.edit.selection.cursor()
self.log_pane.edit.makeVisible(cursor)
self.paint()
## 上下のペインを分離するセパレータを下方向に動かす
def command_MoveSeparatorDown( self, info ):
self.log_pane_height -= 3
if self.log_pane_height<0 : self.log_pane_height=0
self.updatePaneRect()
cursor = self.log_pane.edit.selection.cursor()
self.log_pane.edit.makeVisible(cursor)
self.paint()
## 左右のペインを分離するセパレータを左方向に高速に動かす
#
# 中央か端に達するまで、セパレータを左方向に動かします。
#
def command_MoveSeparatorLeftQuick( self, info ):
center = (self.width()-self.editSeparatorWidth()) // 2
if self.left_edit_pane_width > center :
self.left_edit_pane_width = center
else:
self.left_edit_pane_width = 0
self.updatePaneRect()
self.paint( REGION_EDIT )
## 左右のペインを分離するセパレータを右方向に高速に動かす
#
# 中央か端に達するまで、セパレータを右方向に動かします。
#
def command_MoveSeparatorRightQuick( self, info ):
center = (self.width()-self.editSeparatorWidth()) // 2
if self.left_edit_pane_width < center :
self.left_edit_pane_width = center
else:
self.left_edit_pane_width = self.width()
self.updatePaneRect()
self.paint( REGION_EDIT )
## 上下のペインを分離するセパレータを上方向に高速に動かす
#
# 縦3分割した位置に達するまで、セパレータを上方向に動かします。
#
def command_MoveSeparatorUpQuick( self, info ):
pos_list = [
(self.height()-2) * 1 // 3,
(self.height()-2) * 2 // 3,
(self.height()-2) * 3 // 3,
]
for pos in pos_list:
if pos > self.log_pane_height : break
self.log_pane_height = pos
self.updatePaneRect()
cursor = self.log_pane.edit.selection.cursor()
self.log_pane.edit.makeVisible(cursor)
self.paint()
## 上下のペインを分離するセパレータを下方向に高速に動かす
#
# 縦3分割した位置に達するまで、セパレータを下方向に動かします。
#
def command_MoveSeparatorDownQuick( self, info ):
pos_list = [
(self.height()-2) * 3 // 3,
(self.height()-2) * 2 // 3,
(self.height()-2) * 1 // 3,
0,
]
for pos in pos_list:
if pos < self.log_pane_height : break
self.log_pane_height = pos
self.updatePaneRect()
cursor = self.log_pane.edit.selection.cursor()
self.log_pane.edit.makeVisible(cursor)
self.paint()
## 左右のペインを分離するセパレータを中央にリセットする
def command_MoveSeparatorCenter( self, info ):
self.left_edit_pane_width = (self.editPaneWidth()-self.editSeparatorWidth()) // 2
self.updatePaneRect()
self.paint()
## 左右のペインを分離するセパレータを、アクティブなペインが最大化するように、片方に寄せる
def command_MoveSeparatorMaximizeH( self, info ):
if self.focus_edit==MainWindow.FOCUS_EDIT_LEFT:
self.left_edit_pane_width=self.width()-self.editSeparatorWidth()
elif self.focus_edit==MainWindow.FOCUS_EDIT_RIGHT:
self.left_edit_pane_width=0
self.updatePaneRect()
self.paint( REGION_EDIT )
## バックグラウンドタスクを全てキャンセルする
def command_CancelTask( self, info ):
for task_queue in self.task_queue_stack:
task_queue.cancel()
    ## Scroll the log pane up by one line
    def command_LogUp( self, info ):
        self.log_pane.edit.scrollV(-1)
    ## Scroll the log pane down by one line
    def command_LogDown( self, info ):
        self.log_pane.edit.scrollV(+1)
    ## Scroll the log pane up by one page
    def command_LogPageUp( self, info ):
        self.log_pane.edit.scrollV(-self.logPaneHeight())
    ## Scroll the log pane down by one page
    def command_LogPageDown( self, info ):
        self.log_pane.edit.scrollV(+self.logPaneHeight())
## 終了する
def command_Quit( self, info ):
result = self.saveDocumentAll(confirm=True)
if result==None:
return
try:
self.quit( name="commandline" )
except ValueError:
pass
self.quit( name="top" )
    ## Switch to the next LREdit window
    def command_ActivateNext( self, info ):
        # walk the top-level windows and remember the last ckit window found
        desktop = pyauto.Window.getDesktop()
        wnd = desktop.getFirstChild()
        last_found = None
        while wnd:
            if wnd.getClassName()=="CkitWindowClass":
                last_found = wnd
            wnd = wnd.getNext()
        if last_found:
            # bring that window (or its active popup) to the foreground
            wnd = last_found.getLastActivePopup()
            wnd.setForeground()
## ログペインの内容をクリップボードにコピーする
def command_SetClipboard_Log( self, info ):
lines = []
for i in range(self.log_pane.log.numLines()):
lines.append( self.log_pane.log.getLine(i) )
lredit_misc.setClipboardText( '\r\n'.join(lines) )
## Pythonインタプリタのメモリの統計情報を出力する(デバッグ目的)
def command_MemoryStat( self, info ):
print( ckit.strings["memory_statistics"] + ' :' )
gc.collect()
objs = gc.get_objects()
stat = {}
for obj in objs:
str_type = str(type(obj))
if str_type.find("'instance'")>=0:
str_type += " " + str(obj.__class__)
try:
stat[str_type] += 1
except KeyError:
stat[str_type] = 1
keys = stat.keys()
keys.sort()
# 最長の名前を調べる
max_len = 10
for k in keys:
k_len = self.getStringWidth(k)
if max_len < k_len:
max_len = k_len
for k in keys:
print( " %s%s : %d" % ( k, ' '*(max_len-self.getStringWidth(k)), stat[k] ) )
print( '' )
print( ckit.strings["common_done"] + '\n' )
## ファイルがオープンされっぱなしになっているバグを調査するためのコマンド(デバッグ目的)
#
# 引数には、( クラス名, 探索の最大の深さ ) を渡します。
#
# ex) RefererTree;ZipInfo;5
#
def command_RefererTree( self, info ):
kwd = info.args[0]
max_depth = 5
if len(info.args)>1:
max_depth = int(info.args[1])
known_id_table = {}
gc.collect()
objs = gc.get_objects()
def isRelatedObject(obj):
if type(obj).__name__ == kwd:
return True
if type(obj).__name__ == 'instance':
if obj.__class__.__name__ == kwd:
return True
return False
def dumpReferer(obj,depth):
if known_id_table.has_key(id(obj)):
return
known_id_table[id(obj)] = True
str_type = str(type(obj))
if str_type.find("'instance'")>=0:
str_type += " " + str(obj.__class__)
print( " " * depth, str_type )
if depth==max_depth: return
referers = gc.get_referrers(obj)
for referer in tuple(referers):
dumpReferer(referer,depth+1)
print( "---- referer --------" )
for obj in tuple(objs):
if isRelatedObject(obj):
dumpReferer(obj,0)
print( "-----------------------------" )
    ## Enter a command on the command line
    def command_CommandLine( self, info ):
        def _getHint( update_info ):
            # Split the text left of the cursor into: a lowercase prefix, the
            # start of the current argument (after the last ';'), and the start
            # of the current path component (after the last slash/backslash).
            left = update_info.text[ : update_info.selection[0] ]
            left_lower = left.lower()
            pos_arg = left.rfind(";")+1
            arg = left[ pos_arg : ]
            pos_dir = max( arg.rfind("/")+1, arg.rfind("\\")+1 )
            return left_lower, pos_arg, pos_dir
        def onCandidate( update_info ):
            # completion candidates: history entries first, then whatever
            # each registered command-line handler offers (deduplicated)
            left_lower, pos_arg, pos_dir = _getHint(update_info)
            candidate_list = []
            candidate_set = set()
            for item in self.commandline_history.items:
                item_lower = item.lower()
                if item_lower.startswith(left_lower) and len(item_lower)!=len(left_lower):
                    right = item[ pos_arg + pos_dir: ]
                    candidate_list.append(right)
                    candidate_set.add(right)
            for commandline_function in self.commandline_list:
                for candidate in commandline_function.onCandidate( update_info ):
                    if candidate not in candidate_set:
                        candidate_list.append(candidate)
                        candidate_set.add(candidate)
            return candidate_list, pos_arg + pos_dir
        def onCandidateRemove(text):
            # pressing Delete on a candidate removes it from the history
            try:
                self.commandline_history.remove(text)
                return True
            except KeyError:
                pass
            return False
        def statusString( update_info ):
            # the first handler that recognizes the text supplies the status string
            if update_info.text:
                for commandline_function in self.commandline_list:
                    s = commandline_function.onStatusString(update_info.text)
                    if s!=None:
                        return s
            return " "
        def onEnter( commandline, text, mod ):
            # handlers are offered the line in order until one consumes it
            for commandline_function in self.commandline_list:
                if commandline_function.onEnter( commandline, text, mod ):
                    break
            return True
        self.commandLine( "Command", auto_complete=False, autofix_list=["\\/",".",";"], candidate_handler=onCandidate, candidate_remove_handler=onCandidateRemove, status_handler=statusString, enter_handler=onEnter )
        # reset the cursor position
        self.activePane().widget().paint()
## フォントサイズを変更する
def command_FontSize( self, info ):
if len(info.args) >= 1:
try:
size = int( info.args[0] )
except ValueError as e:
print("ERROR : invalid font size")
return
else:
size = self.ini.getint( "FONT", "size" )
size = self.inputString( u"Size", str(size), [] )
try:
size = int(size)
except ValueError as e:
print("ERROR : invalid font size")
return
self.ini.set("FONT","size",size)
self.updateFont()
## 引数に渡された画像ファイルを壁紙にする
def command_Wallpaper( self, info ):
if len(info.args)>=1:
filename = info.args[0]
else:
filename = self.inputFilename( "Wallpaper", None, ensure_exists=True )
if not filename : return
self.ini.set( "WALLPAPER", "visible", "1" )
self.ini.set( "WALLPAPER", "filename", filename )
self.updateWallpaper()
    ## Pop up the configuration menu
    def command_ConfigMenu( self, info ):
        lredit_configmenu.doConfigMenu( self )
    ## Edit the configuration script
    def command_EditConfig( self, info ):
        self.activeOpen( filename = self.config_filename )
    ## Reload the configuration script
    def command_ReloadConfig( self, info ):
        self.configure()
        print( ckit.strings["config_reloaded"] + "\n" )
    ## Show the help
    def command_Help( self, info ):
        print( ckit.strings["help_opening"] + " :" )
        # the help files ship next to the executable
        help_path = os.path.join( ckit.getAppExePath(), 'doc\\index.html' )
        pyauto.shellExecute( None, help_path, "", "" )
        print( ckit.strings["common_done"] + '\n' )
    ## Print the version information
    def command_About( self, info ):
        print( lredit_resource.startupString() )
        self.setStatusMessage( "%s version %s" % (lredit_resource.lredit_appname, lredit_resource.lredit_version), 3000 )
## 新規文書を開く
def command_New( self, info ):
doc = ckit.Document( filename=None, mode=self.createModeFromName("text") )
self.activeOpen(doc=doc)
## ファイルを開く
def command_Open( self, info ):
if len(info.args)>=1:
filename_list = info.args
else:
filename = self.activeEditPane().edit.doc.getFullpath()
filename = self.inputFilename( "Open", filename, ensure_exists=True )
if not filename : return
filename_list = [ filename ]
for filename in filename_list:
self.activeOpen(filename=filename)
## エンコーディングを特定してファイルを開きなおす
def command_ReopenEncoding( self, info ):
encoding_list = [
( "utf-8", ckit.TextEncoding("utf-8",b"\xEF\xBB\xBF") ),
( "utf-8n", ckit.TextEncoding("utf-8") ),
( "shift-jis", ckit.TextEncoding("cp932") ),
( "euc-jp", ckit.TextEncoding("euc-jp") ),
( "iso-2022-jp", ckit.TextEncoding("iso-2022-jp") ),
( "utf-16-le", ckit.TextEncoding("utf-16-le",b"\xFF\xFE") ),
( "utf-16-be", ckit.TextEncoding("utf-16-be",b"\xFE\xFF") ),
]
pane = self.activeEditPane()
edit = pane.edit
doc = edit.doc
filename = doc.getFullpath()
if not filename:
return
if len(info.args)>=1:
encoding_name = info.args[0]
else:
candidate_list = []
for encoding in encoding_list:
candidate_list.append(encoding[0])
encoding_name = self.inputString( "Reopen Encoding", edit.doc.encoding.encoding, candidate_list )
if not encoding_name : return
for encoding in encoding_list:
if encoding[0] == encoding_name:
break
else:
print( ckit.strings["error_unknown_encoding"] % encoding_name )
return
if edit.doc.isModified():
result = lredit_msgbox.popMessageBox(
self,
lredit_msgbox.MSGBOX_TYPE_YESNO,
ckit.strings["msgbox_title_modified_reopen"],
ckit.strings["msgbox_ask_modified_reopen"] % edit.doc.getName() )
if result==lredit_msgbox.MSGBOX_RESULT_YES:
pass
else:
return
# FIXME : subThreadCall を使うべき
fd = open( filename, "rb" )
doc.readFile( fd, encoding[1] )
fd.close()
self.paint()
    ## Change the encoding
    def command_Encoding( self, info ):
        # display name -> TextEncoding (with BOM where applicable)
        encoding_list = [
            ( "utf-8", ckit.TextEncoding("utf-8",b"\xEF\xBB\xBF") ),
            ( "utf-8n", ckit.TextEncoding("utf-8") ),
            ( "shift-jis", ckit.TextEncoding("cp932") ),
            ( "euc-jp", ckit.TextEncoding("euc-jp") ),
            ( "iso-2022-jp", ckit.TextEncoding("iso-2022-jp") ),
            ( "utf-16-le", ckit.TextEncoding("utf-16-le",b"\xFF\xFE") ),
            ( "utf-16-be", ckit.TextEncoding("utf-16-be",b"\xFE\xFF") ),
        ]
        if len(info.args)>=1:
            encoding_name = info.args[0]
        else:
            encoding = self.activeEditPane().edit.doc.encoding
            # BOM-less UTF-8 is presented to the user as "utf-8n"
            if encoding.encoding=="utf-8" and encoding.bom==None:
                encoding_name = "utf-8n"
            else:
                encoding_name = encoding.encoding
            candidate_list = []
            for e in encoding_list:
                candidate_list.append(e[0])
            encoding_name = self.inputString( "Encoding", encoding_name, candidate_list )
        if not encoding_name : return
        for encoding in encoding_list:
            if encoding[0] == encoding_name:
                self.activeEditPane().edit.setEncoding( encoding[1] )
                break
        else:
            # for/else: no break means the name was not recognized
            print( ckit.strings["error_unknown_encoding"] % encoding_name )
            return
        self.paint()
    ## Change the line ending
    def command_LineEnd( self, info ):
        # user-visible name -> actual line-end characters
        lineend_list = [
            ( "crlf", "\r\n" ),
            ( "lf", "\n" ),
            ( "cr", "\r" ),
        ]
        if len(info.args)>=1:
            lineend_name = info.args[0]
        else:
            # preselect the name of the document's current line ending
            lineend = self.activeEditPane().edit.doc.lineend
            lineend_name = ""
            for item in lineend_list:
                if item[1]==lineend:
                    lineend_name = item[0]
                    break
            candidate_list = []
            for item in lineend_list:
                candidate_list.append(item[0])
            lineend_name = self.inputString( "LineEnd", lineend_name, candidate_list )
        if not lineend_name : return
        for item in lineend_list:
            if item[0] == lineend_name:
                self.activeEditPane().edit.setLineEnd( item[1] )
                break
        else:
            # for/else: no break means the name was not recognized
            print( ckit.strings["error_unknown_lineend"] % lineend_name )
            return
        self.paint()
    ## Open the active document in the other edit pane as well
    def command_Duplicate( self, info ):
        edit = self.activeEditPane().edit
        # both panes share the same Document object
        self.inactiveOpen( doc=edit.doc, duplicate=True )
        self.command.MoveSeparatorCenter()
    ## Save the file
    def command_Save( self, info ):
        edit = self.activeEditPane().edit
        self.saveDocument(edit.doc)
        # persist this document's bookmarks together with the save
        self.storeBookmarkList(edit)
## ファイルを名前を付けて保存する
def command_SaveAs( self, info ):
edit = self.activeEditPane().edit
if len(info.args)>=1:
self.saveDocument( edit.doc, filename=info.args[0] )
else:
self.saveDocument( edit.doc, input_name=True )
self.storeBookmarkList(edit)
    ## Save all files
    def command_SaveAll( self, info ):
        self.saveDocumentAll()
    ## Close the file
    def command_Close( self, info ):
        self._close( self.activeEditPane().edit )
    # Hook registered in self.mod_hooks by command_DocumentNext; commits the
    # tab reordering once the modifier key state changes.
    # NOTE(review): command_DocumentNext also calls this directly with no
    # arguments, which does not match the (mod, mod_old) signature -- confirm
    # how mod_hooks invokes its entries.
    def _documentNextModkey( self, mod, mod_old ):
        pane = self.activeEditPane()
        self.activeOpen( edit=pane.edit, sort=True )
        self.mod_hooks.remove(self._documentNextModkey)
        self.document_next_pivot = None
    ## Switch to the next document
    def command_DocumentNext( self, info ):
        pane = self.activeEditPane()
        another_pane = self.inactiveEditPane()
        edit = None
        if self.document_next_pivot==None:
            # first press: jump to the opposite pane
            self.document_next_pivot = pane.edit
            if another_pane.edit:
                edit = another_pane.edit
        elif self.document_next_pivot in another_pane.edit_list:
            # come back from the opposite pane to the tab after the pivot
            i = another_pane.edit_list.index(self.document_next_pivot) + 1
            if i>=len(another_pane.edit_list) : i=0
            edit = another_pane.edit_list[i]
        if not edit:
            if len(pane.edit_list)==1 and another_pane.edit:
                # only one tab here: switch panes instead
                edit = another_pane.edit
            else:
                # advance to the next tab in this pane (wrapping around)
                i = pane.edit_list.index(pane.edit) + 1
                if i>=len(pane.edit_list) : i=0
                edit = pane.edit_list[i]
        self.activeOpen( edit=edit, sort=False )
        # commit the tab sort once the modifier key is released
        if self.mod:
            try:
                self.mod_hooks.remove(self._documentNextModkey)
            except ValueError:
                pass
            self.mod_hooks.append(self._documentNextModkey)
        else:
            # NOTE(review): _documentNextModkey is declared with (mod, mod_old)
            # parameters; this no-argument call looks like it would raise
            # TypeError -- confirm the intended hook signature.
            self._documentNextModkey()
## 文書の名前を入力し切り替える
def command_Document( self, info ):
if len(info.args)>=1:
docname = info.args[0]
else:
docname = self.inputDocument( "Document", "" )
if docname==None : return
for edit in self.edit_list:
if edit.doc.getName().lower()==docname.lower():
self.activeOpen( edit=edit )
return
else:
self.setStatusMessage( ckit.strings["statusbar_switch_doc_failed"] % docname, 3000, error=True )
## 文書を一覧表示し切り替える
def command_DocumentList( self, info ):
edit = self.listDocument( "Documents" )
if edit:
self.activeOpen( edit=edit )
## List bookmarks and jump to the chosen one.
#
# Modal list window with in-place mode switches: SPACE toggles full-path
# display, LEFT/RIGHT switch Local/Global scope, DELETE clears a bookmark.
# Mode switches set loop[0]=True and quit the window so the outer while
# loop rebuilds and reshows it (single-element lists are used as mutable
# cells shared with the nested handlers).
def command_BookmarkList( self, info ):
    if not self.bookmarks.table:
        self.setStatusMessage( ckit.strings["bookmark_not_found"], 3000, error=True )
        return
    # Flush each edit's live bookmarks into the bookmark table first.
    for edit in self.edit_list:
        self.storeBookmarkList(edit)
    edit = self.activeEditPane().edit
    active_edit_filename = edit.doc.getFullpath()
    active_edit_lineno = edit.selection.cursor().line
    loop = [False]
    fullpath_mode = [False]
    local_mode = [True]
    select = [None]
    def onKeyDown( vk, mod ):
        if vk==VK_SPACE and mod==0:
            # Toggle basename/fullpath display
            fullpath_mode[0] = not fullpath_mode[0]
            select[0] = list_window.getResult()
            loop[0] = True
            list_window.quit()
            return True
        elif vk==VK_LEFT and mod==0:
            # Switch to Local scope
            if not local_mode[0]:
                local_mode[0] = True
                select[0] = 0
                loop[0] = True
                list_window.quit()
            return True
        elif vk==VK_RIGHT and mod==0:
            # Switch to Global scope
            if local_mode[0]:
                local_mode[0] = False
                select[0] = 0
                loop[0] = True
                list_window.quit()
            return True
        elif vk==VK_DELETE and mod==0:
            # Clear the selected bookmark (bookmark[1]=0) and drop the row
            select[0] = list_window.getResult()
            s = items[select[0]][0]
            filename, bookmark = items[select[0]][1]
            bookmark = list(bookmark)
            bookmark[1] = 0
            self.bookmarks.setBookmark( filename, bookmark )
            list_window.remove(select[0])
            return True
    def onStatusMessage( width, select ):
        return ""
    while True:
        loop[0] = False
        if local_mode[0] and self.project:
            project_filenames = set( self.project.enumFullpath() )
        items = []
        for filename, bookmark_list in self.bookmarks.table:
            filename = ckit.normPath(filename)
            if local_mode[0]:
                # Local scope: only files that are open or in the project
                found = False
                for edit in self.edit_list:
                    if filename==edit.doc.getFullpath():
                        found = True
                        break
                if not found and self.project:
                    if filename in project_filenames:
                        found = True
                if not found:
                    continue
            for bookmark in bookmark_list:
                # bookmark is (lineno, flags, label) — lineno is 0-based
                if fullpath_mode[0]:
                    s = "%s:%d: %s" % ( filename, bookmark[0]+1, bookmark[2] )
                else:
                    s = "%s:%d: %s" % ( os.path.basename(filename), bookmark[0]+1, bookmark[2] )
                items.append( ( s, (filename,bookmark) ) )
                # Pre-select the bookmark at the cursor position
                if select[0]==None and active_edit_filename==filename and active_edit_lineno==bookmark[0]:
                    select[0] = len(items)-1
        if select[0]==None:
            select[0] = 0
        if local_mode[0]:
            title = "Bookmarks (Local)"
        else:
            title = "Bookmarks (Global)"
        pos = self.centerOfWindowInPixel()
        list_window = lredit_listwindow.ListWindow( pos[0], pos[1], 20, 2, self.width()-5, self.height()-3, self, self.ini, True, title, items, initial_select=select[0], onekey_search=False, keydown_hook=onKeyDown, statusbar_handler=onStatusMessage )
        self.enable(False)
        list_window.messageLoop()
        # Delayed destroy to avoid flicker
        class DelayedCall:
            def __call__(self):
                self.list_window.destroy()
        delay = DelayedCall()
        delay.list_window = list_window
        self.delayedCall( delay, 10 )
        if not loop[0]:
            break
    result = list_window.getResult()
    self.enable(True)
    self.activate()
    # Push possibly-modified bookmarks back into the open edits.
    for edit in self.edit_list:
        self.loadBookmarkList(edit)
    self.paint( REGION_EDIT )
    if result<0 : return
    if not items : return
    filename, bookmark = items[result][1]
    self.activeOpen( filename=filename, lineno=bookmark[0] )
## Search downward (incremental) for a string.
#
# Single-element lists are mutable cells shared with the nested handlers.
# Ctrl+Down / Ctrl+Up jump to the next/previous hit while typing.
def command_Search( self, info ):
    edit = self.activePane().edit
    original_cursor = edit.selection.cursor()
    search_cursor = [ edit.selection.cursor() ]    # where the incremental search restarts from
    text = [""]                                    # current query text
    search_object = [None]                         # compiled ckit.Search
    regex_error = [False]                          # query failed to compile as a regex
    if len(info.args)>=1:
        text[0] = info.args[0]
    def onKeyDown( vk, mod ):
        if vk==VK_DOWN and mod==MODKEY_CTRL:
            if regex_error[0]:
                self.setStatusMessage( ckit.strings["statusbar_regex_wrong"] % text[0], 3000, error=True )
                return True
            edit.search( search_object=search_object[0], direction=1 )
            search_cursor[0] = edit.selection.cursor()
            return True
        elif vk==VK_UP and mod==MODKEY_CTRL:
            if regex_error[0]:
                self.setStatusMessage( ckit.strings["statusbar_regex_wrong"] % text[0], 3000, error=True )
                return True
            edit.search( search_object=search_object[0], direction=-1 )
            search_cursor[0] = edit.selection.cursor()
            return True
    def onUpdate( new_text, word, case, regex ):
        # Reset the restart point when the query becomes empty / non-empty
        if len(text[0])>0 and len(new_text)==0:
            search_cursor[0] = edit.selection.cursor()
        elif len(text[0])==0 and len(new_text)>0:
            search_cursor[0] = original_cursor.copy()
        text[0] = new_text
        try:
            search_object[0] = ckit.Search( text[0], word, case, regex )
            regex_error[0] = False
        except re.error:
            regex_error[0] = True
            return True
        # Silent incremental search from the restart point
        edit.search( search_object=search_object[0], point=search_cursor[0], direction=1, paint=False, message=False )
        edit.paint()
        return True
    s = self.inputSearch( "Search", keydown_handler=onKeyDown, update_handler=onUpdate )
    if s==None : return
    word = self.ini.getint( "SEARCH", "word" )
    case = self.ini.getint( "SEARCH", "case" )
    regex = self.ini.getint( "SEARCH", "regex" )
    try:
        search_object[0] = ckit.Search( s, word, case, regex )
    except re.error:
        self.setStatusMessage( ckit.strings["statusbar_regex_wrong"] % s, 3000, error=True )
        return
    # Final confirmed search; remember it for SearchNext/SearchPrev
    edit.search( search_object=search_object[0], point=search_cursor[0], direction=1 )
    self.search_object = search_object[0]
## Search forward using the previous search condition.
def command_SearchNext( self, info ):
    """Repeat the last search, moving toward the end of the document."""
    if not self.search_object:
        return
    self.activePane().edit.search( search_object=self.search_object, direction=1 )
## Search backward using the previous search condition.
def command_SearchPrev( self, info ):
    """Repeat the last search, moving toward the start of the document."""
    if not self.search_object:
        return
    self.activePane().edit.search( search_object=self.search_object, direction=-1 )
## Search for a string and replace it.
#
# Prompts for the search text, jumps to the first hit, then prompts for the
# replacement; Enter replaces the current hit, Shift+Enter replaces all
# remaining hits inside a single undo group.
def command_Replace( self, info ):
    before = self.inputSearch( "Replace(Before)" )
    if before==None : return
    word = self.ini.getint( "SEARCH", "word" )
    case = self.ini.getint( "SEARCH", "case" )
    regex = self.ini.getint( "SEARCH", "regex" )
    try:
        search_object = ckit.Search( before, word, case, regex )
    except re.error:
        self.setStatusMessage( ckit.strings["statusbar_regex_wrong"] % before, 3000, error=True )
        return
    edit = self.activePane().edit
    edit.search( search_object=search_object, direction=1 )
    def replace( after, paint=True, message=True ):
        # Replace the current selection and advance to the next hit.
        # Returns False when the selection does not (exactly) match.
        left = edit.selection.left()
        right = edit.selection.right()
        # Replace only when the selection matches the search condition
        selected_text = edit.getText(left,right)
        search_result = search_object.search(selected_text)
        if search_result==None or search_result[0]!=0 or search_result[1]!=len(selected_text):
            return False
        # Do the replacement
        if regex:
            # Expand \\, \t, \n and \1..\9 group references in the
            # replacement text before applying it.
            after2 = ""
            pos = 0
            while pos<len(after):
                if after[pos]=='\\':
                    pos += 1
                    if pos<len(after):
                        second_char = after[pos]
                        if second_char=='\\':
                            after2 += second_char
                        elif second_char=='t':
                            after2 += '\t'
                        elif second_char=='n':
                            after2 += '\n'
                        elif '0'<=second_char<='9':
                            after2 += edit.search_re_result.group(int(second_char))
                        else:
                            # Unknown escape: keep the backslash.
                            after2 += '\\'
                else:
                    after2 += after[pos]
                pos += 1
            edit.modifyText( left, right, after2, paint=paint )
        else:
            edit.modifyText( left, right, after, paint=paint )
        edit.search( search_object=search_object, direction=1, paint=paint, message=message )
        return True
    def statusString( update_info ):
        return ""
    def onKeyDown( vk, mod ):
        # Ctrl+Down / Ctrl+Up: move to next / previous hit without replacing
        if vk==VK_DOWN and mod==MODKEY_CTRL:
            edit.search( search_object=search_object, direction=1 )
            return True
        elif vk==VK_UP and mod==MODKEY_CTRL:
            edit.search( search_object=search_object, direction=-1 )
            return True
    def onEnter( commandline, text, mod ):
        after = text
        self.replace_history.append(after)
        if mod==0:
            # Plain Enter: replace one hit, keep the command line open
            replace(after)
            return True
        elif mod==MODKEY_SHIFT:
            # Shift+Enter: replace all remaining hits as one undo unit
            edit.atomicUndoBegin( True, edit.pointDocumentBegin(), edit.pointDocumentEnd() )
            try:
                count = 0
                while True:
                    if replace( after, paint=False, message=False ):
                        count += 1
                        continue
                    break
            finally:
                edit.atomicUndoEnd( edit.pointDocumentBegin(), edit.pointDocumentEnd() )
            self.paint(REGION_EDIT)
            self.setStatusMessage( ckit.strings["statusbar_replace_finished"] % count, 3000 )
            return False
        return True
    self.commandLine( "Replace(After)", text="", auto_complete=False, autofix_list=[], candidate_handler=self.replace_history.candidateHandler, candidate_remove_handler=self.replace_history.candidateRemoveHandler, status_handler=statusString, enter_handler=onEnter, keydown_handler=onKeyDown )
## Remove trailing whitespace from the lines in the selection.
def command_RemoveTrailingSpace( self, info ):
    """Strip trailing spaces and tabs from each selected line."""
    active_edit = self.activePane().edit
    active_edit.replaceLines( lambda line: line.rstrip(" \t") )
## Expand TAB characters in the selection into spaces.
def command_ExpandTab( self, info ):
    """Replace tabs with spaces in each selected line, using the mode's tab width."""
    active_edit = self.activePane().edit
    width = active_edit.doc.mode.tab_width
    active_edit.replaceLines( lambda line: ckit.expandTab( self, line, width ) )
## Remove empty lines from the selection.
def command_RemoveEmptyLines( self, info ):
    """Keep only the selected lines whose text is non-empty."""
    active_edit = self.activePane().edit
    active_edit.filterLines( lambda text, line_info: len(text) > 0 )
## Remove bookmarked lines.
def command_RemoveMarkedLines( self, info ):
    """Keep only the lines that carry no bookmark."""
    active_edit = self.activePane().edit
    active_edit.filterLines( lambda text, line_info: line_info.bookmark == 0 )
## Remove lines that are not bookmarked.
def command_RemoveUnmarkedLines( self, info ):
    """Keep only the lines that carry a bookmark."""
    active_edit = self.activePane().edit
    active_edit.filterLines( lambda text, line_info: line_info.bookmark != 0 )
## Convert the selection to upper case.
def command_ToUpper( self, info ):
    """Replace the selected text with its upper-cased form."""
    active_edit = self.activePane().edit
    selection = active_edit.selection
    selected_text = active_edit.getText( selection.left(), selection.right() )
    active_edit.modifyText( text=selected_text.upper() )
## Convert the selection to lower case.
def command_ToLower( self, info ):
    """Replace the selected text with its lower-cased form."""
    active_edit = self.activePane().edit
    selection = active_edit.selection
    selected_text = active_edit.getText( selection.left(), selection.right() )
    active_edit.modifyText( text=selected_text.lower() )
def _zenhanCommon( self, info, func ):
    """Shared implementation for ToHankaku / ToZenkaku.

    Asks which character classes to convert (or takes them from args[0]),
    builds a lredit_zenhan option bitmask, then applies ``func`` — either
    lredit_zenhan.z2h or lredit_zenhan.h2z — to the selected text.
    """
    option_list = [
        "Ascii",
        "Digit",
        "Kana",
        "Space",
        "All",
    ]
    if len(info.args)>=1:
        options = info.args[0]
    else:
        options = self.ini.get( "ZENHAN", "options" )
        options = self.inputOptions( "Charactor Types", options, option_list )
        if options==None : return
    # Remember the chosen option string for next time.
    self.ini.set( "ZENHAN", "options", options )
    edit = self.activePane().edit
    left = edit.selection.left()
    right = edit.selection.right()
    text = edit.getText( left, right )
    # Translate the comma-separated option names into a bitmask.
    z2h_option = 0
    for option in options.split(","):
        option = option.strip()
        if option.lower() == "All".lower():
            z2h_option |= lredit_zenhan.ALL
        elif option.lower() == "Ascii".lower():
            z2h_option |= lredit_zenhan.ASCII
        elif option.lower() == "Digit".lower():
            z2h_option |= lredit_zenhan.DIGIT
        elif option.lower() == "Kana".lower():
            z2h_option |= lredit_zenhan.KANA
        elif option.lower() == "Space".lower():
            z2h_option |= lredit_zenhan.SPACE
    text = func( text, z2h_option )
    edit.modifyText( text=text )
## Convert the selection to half-width (hankaku) characters.
def command_ToHankaku( self, info ):
    """Apply zenkaku-to-hankaku conversion to the selected text."""
    self._zenhanCommon( info, lredit_zenhan.z2h )
## Convert the selection to full-width (zenkaku) characters.
def command_ToZenkaku( self, info ):
    """Apply hankaku-to-zenkaku conversion to the selected text."""
    self._zenhanCommon( info, lredit_zenhan.h2z )
## Compare the documents in the left and right panes.
#
# Runs difflib.unified_diff over the two panes' lines, paints each hunk
# with a cycling background color, and fills self.jump_list with
# CompareJumpItem entries so SearchResultNext/Prev can step through hunks.
def command_Compare( self, info ):
    active_edit = self.activeEditPane().edit
    # Unsaved documents cannot be compared
    if not active_edit.doc.getFullpath():
        self.setStatusMessage( ckit.strings["statusbar_not_saved"], 3000, error=True )
        return
    # In single-pane mode, pick the document to compare against from a list
    if self.inactiveEditPane().edit==None:
        def func(edit):
            # Offer only other documents that have been saved to disk
            return ( edit!=active_edit and edit.doc.getFullpath() )
        edit = self.listDocument( "Compare With", filter_func=func )
        if not edit: return
        self.inactiveOpen(edit=edit)
        self.command.MoveSeparatorCenter()
    for edit in self.edit_list:
        edit.clearDiffColor()
    jump_list = []
    self.jump_list = jump_list
    self.jump_selection = None
    left_lines = []
    right_lines = []
    # Comparison options from the INI file (see command_CompareOptions)
    ignore_case = False
    ignore_whitespace = False
    options = self.ini.get( "COMPARE", "options" )
    for option in options.split(","):
        option = option.strip()
        if option.lower() == "Strict".lower():
            ignore_case = False
            ignore_whitespace = False
        elif option.lower() == "Ignore.Case".lower():
            ignore_case = True
        elif option.lower() == "Ignore.WhiteSpace".lower():
            ignore_whitespace = True
    # FIXME : support huge files
    def listLines( lines ):
        # Normalize each line per the ignore options before diffing
        hashed_lines = []
        for line in lines:
            s = line.s
            if ignore_whitespace:
                s = re.sub( "[ \t]+", "", s )
            if ignore_case:
                s = s.lower()
            hashed_lines.append(s)
        return hashed_lines
    left_lines = listLines(self.left_edit_pane.edit.doc.lines)
    right_lines = listLines(self.right_edit_pane.edit.doc.lines)
    # n=0: no context lines, every hunk is pure difference
    diff_object = difflib.unified_diff( left_lines, right_lines, self.left_edit_pane.edit.doc.getName(), self.right_edit_pane.edit.doc.getName(), n=0 )
    color = 1
    self.left_edit_pane.edit.setDiffColorMode()
    self.right_edit_pane.edit.setDiffColorMode()
    # Give each differing hunk a background color
    re_pattern = re.compile( "@@ -([0-9]+)(,([0-9]+))? \+([0-9]+)(,([0-9]+))? @@" )
    for line in diff_object:
        if line.startswith("@@"):
            re_result = re_pattern.match(line)
            # Left-side hunk range (unified-diff ranges are 1-based;
            # a ",0" length marks a pure insertion after the given line)
            begin1 = int(re_result.group(1))-1
            if not re_result.group(3):
                end1 = begin1 + 1
            elif re_result.group(3)=='0':
                begin1 += 1
                end1 = begin1
            else:
                end1 = begin1 + int(re_result.group(3))
            for i in range( begin1, end1 ):
                self.left_edit_pane.edit.doc.lines[i].bg = color
            # Right-side hunk range, same decoding
            begin2 = int(re_result.group(4))-1
            if not re_result.group(6):
                end2 = begin2 + 1
            elif re_result.group(6)=='0':
                begin2 += 1
                end2 = begin2
            else:
                end2 = begin2 + int(re_result.group(6))
            for i in range( begin2, end2 ):
                self.right_edit_pane.edit.doc.lines[i].bg = color
            # Cycle through colors 1..3
            color += 1
            if color>=4:
                color=1
            left_fullpath = self.left_edit_pane.edit.doc.getFullpath()
            right_fullpath = self.right_edit_pane.edit.doc.getFullpath()
            left_lineno = ckit.adjustStringWidth( self, "%s" % (begin1+1), self.left_edit_pane.edit.lineNoWidth()-2 )
            right_lineno = ckit.adjustStringWidth( self, "%s" % (begin2+1), self.left_edit_pane.edit.lineNoWidth()-2 )
            text = "%s:%s - %s:%s" % ( os.path.basename(left_fullpath), left_lineno, os.path.basename(right_fullpath), right_lineno )
            jump_list.append(
                CompareJumpItem(
                    self,
                    left_fullpath,
                    begin1,
                    right_fullpath,
                    begin2,
                    text
                )
            )
    # Jump to the first hunk and repaint
    self.command.SearchResultNext()
    self.paint()
## Configure how case and whitespace are treated when comparing files.
def command_CompareOptions( self, info ):
    option_list = [
        "Strict",
        "Ignore.Case",
        "Ignore.WhiteSpace",
    ]
    if len(info.args)>=1:
        options = info.args[0]
    else:
        options = self.ini.get( "COMPARE", "options" )
        options = self.inputOptions( "Compare Options", options, option_list )
        if options==None : return
    self.ini.set( "COMPARE", "options", options )
    # If a comparison is currently displayed in both panes, redo it with
    # the new options immediately.
    active_edit = self.activeEditPane().edit
    inactive_edit = self.inactiveEditPane().edit
    if active_edit and inactive_edit and active_edit.isDiffColorMode() and inactive_edit.isDiffColorMode():
        self.command.Compare()
## Search for a string across multiple files (grep).
#
# Collects the keyword, search options, filename patterns and the search
# target (Project or Directory) via a dialog, then runs the grep as a
# background job, filling self.jump_list with GrepJumpItem entries.
def command_Grep( self, info ):

    # --- collect defaults for the dialog ---

    # search keyword (most recent search)
    keyword = ""
    if self.search_history.items:
        keyword = self.search_history.items[0]

    # search options
    word = bool(self.ini.getint( "SEARCH", "word" ))
    case = bool(self.ini.getint( "SEARCH", "case" ))
    regex = bool(self.ini.getint( "SEARCH", "regex" ))

    # filename pattern (space-separated fnmatch globs, "!" prefix excludes)
    filename_pattern = ""
    if self.grep_filename_pattern_history.items : filename_pattern = self.grep_filename_pattern_history.items[0]

    # search target (Project or Directory)
    target_list = ( "Project", "Directory" )
    target = self.ini.get( "GREP", "target" )
    if not self.project or target not in target_list: target = "Directory"

    # directory (defaults to the active document's directory)
    location = self.activeEditPane().edit.doc.getFullpath()
    if location:
        location = os.path.dirname(location)
        location = ckit.joinPath(location,"")
    else:
        location = ""

    # reuse the previous directory if it contains the current file's path
    if self.grep_location_history.items:
        prev_location = self.grep_location_history.items[0]
        if location.startswith(prev_location) or not location:
            location = prev_location

    # directory-name patterns to skip
    dirname_exclude_pattern = ""
    if self.grep_dirname_exclude_pattern_history.items : dirname_exclude_pattern = self.grep_dirname_exclude_pattern_history.items[0]

    # recurse into subdirectories?
    recursive = bool(self.ini.getint( "GREP", "recursive" ))

    def isTargetDirectory(dialog):
        # directory-specific widgets are visible only when target == "Directory"
        return dialog.getValueById("target")==1

    dialog = ckit.Dialog( self, "Grep", items=[
        ckit.Dialog.Edit( "keyword", 0, 70, "Keyword", keyword, candidate_handler=self.search_history.candidateHandler, candidate_remove_handler=self.search_history.candidateRemoveHandler ),
        ckit.Dialog.StaticText(0,""),
        ckit.Dialog.CheckBox( "word", 9, "Word", word ),
        ckit.Dialog.CheckBox( "case", 9, "Case", case ),
        ckit.Dialog.CheckBox( "regex", 9, "Regex", regex ),
        ckit.Dialog.StaticText(0,""),
        ckit.Dialog.Edit( "filename", 0, 70, "Filename", filename_pattern, autofix_list=[" ","."], candidate_handler=self.grep_filename_pattern_history.candidateHandler, candidate_remove_handler=self.grep_filename_pattern_history.candidateRemoveHandler ),
        ckit.Dialog.StaticText(0,""),
        ckit.Dialog.Choice( "target", 0, "Target", target_list, target_list.index(target) ),
        ckit.Dialog.StaticText( 0, "", visible=isTargetDirectory),
        ckit.Dialog.Edit( "directory", 4, 66, "Location", location, autofix_list=["\\/","."], candidate_handler=lredit_commandline.candidate_Filename( ".", self.grep_location_history.items ), candidate_remove_handler=self.grep_location_history.candidateRemoveHandler, visible=isTargetDirectory ),
        ckit.Dialog.StaticText( 0, "", visible=isTargetDirectory),
        ckit.Dialog.Edit( "exclude", 4, 66, "Dirname Exclude", dirname_exclude_pattern, autofix_list=[" ","."], candidate_handler=self.grep_dirname_exclude_pattern_history.candidateHandler, candidate_remove_handler=self.grep_dirname_exclude_pattern_history.candidateRemoveHandler, visible=isTargetDirectory ),
        ckit.Dialog.StaticText( 0, "", visible=isTargetDirectory),
        ckit.Dialog.CheckBox( "recursive", 4, "Recursive", recursive, visible=isTargetDirectory ),
        ])
    self.enable(False)
    dialog.messageLoop()
    result, values = dialog.getResult()
    self.enable(True)
    self.activate()
    dialog.destroy()

    keyword = values["keyword"]
    word = values["word"]
    case = values["case"]
    regex = values["regex"]
    filename_pattern = values["filename"]
    target = target_list[ values["target"] ]
    location = values["directory"]
    dirname_exclude_pattern = values["exclude"]
    recursive = values["recursive"]

    # Persist the chosen settings (even when the dialog is cancelled).
    self.search_history.append(keyword)
    self.ini.set( "SEARCH", "word", str(int(word)) )
    self.ini.set( "SEARCH", "case", str(int(case)) )
    self.ini.set( "SEARCH", "regex", str(int(regex)) )
    self.grep_filename_pattern_history.append(filename_pattern)
    self.ini.set( "GREP", "target", target )
    self.grep_location_history.append(location)
    self.grep_dirname_exclude_pattern_history.append(dirname_exclude_pattern)
    self.ini.set( "GREP", "recursive", str(int(recursive)) )

    if result != ckit.Dialog.RESULT_OK:
        return

    # Error when the target is "Project" but no project is open
    if target=="Project" and not self.project:
        self.setStatusMessage( ckit.strings["project_not_opened"], 3000, error=True, log=True )
        return

    # When the target is "Project", show relative paths from the project
    # file's directory.
    # FIX: was `if target_list == "Project":` which compared the (tuple)
    # option list to a string and was therefore always False, leaving
    # `location` at its directory default and producing wrong relative
    # paths in the result log for project greps.
    if target=="Project":
        location = self.project.dirname

    # Confirm saving modified documents; None means the user cancelled
    result = self.saveDocumentAll(confirm=True)
    if result==None:
        return

    try:
        search_object = ckit.Search( keyword, word, case, regex )
    except re.error:
        self.setStatusMessage( ckit.strings["statusbar_regex_wrong"] % keyword, 3000, error=True )
        return

    def enumFiles():
        # Yield the fullpath of every file matching filename_pattern.
        file_filter_list = filename_pattern.split(" ")
        def checkFilter(filename):
            # "!"-prefixed patterns exclude immediately; any other match includes.
            result = False
            for pattern in file_filter_list:
                if pattern.startswith("!"):
                    pattern = pattern[1:]
                    if fnmatch.fnmatch( filename, pattern ):
                        return False
                else:
                    if fnmatch.fnmatch( filename, pattern ):
                        result = True
            return result
        if target=="Directory":
            print( 'Grep : %s : %s : %s' % ( target, location, keyword ) )
            dir_ignore_list = dirname_exclude_pattern.split(" ")
            for root, dirs, files in os.walk( location ):
                if not recursive : del dirs[:]
                # Prune ignored directories in place (os.walk honors it).
                # FIX: iterate over a snapshot — removing from `dirs` while
                # iterating it skipped the entry following each removal.
                for item in list(dirs):
                    for pattern in dir_ignore_list:
                        if fnmatch.fnmatch( item, pattern ):
                            dirs.remove(item)
                            break
                for filename in files:
                    if checkFilter(filename):
                        fullpath = os.path.join( root, filename )
                        yield fullpath
        elif target=="Project":
            print( 'Grep : %s : %s' % (target,keyword) )
            for filename in self.project.enumFullpath():
                if checkFilter(filename):
                    yield filename

    for edit in self.edit_list:
        edit.clearDiffColor()

    jump_list = []
    self.jump_list = jump_list
    self.jump_selection = None

    def jobGrep( job_item ):
        # Runs in the background task; collects hits into jump_list.
        def onFound( filename, lineno, line ):
            path_from_here = ckit.normPath( os.path.relpath( filename, location ) )
            text = "%s:%d: %s" % ( path_from_here, lineno, line.strip() )
            print( text )
            jump_list.append(
                GrepJumpItem(
                    self,
                    filename,
                    lineno,
                    search_object,
                    text
                )
            )
            self.setProgressValue(None)
        try:
            lredit_grep.grep( job_item, enumFiles, keyword, word, case, regex, found_handler=onFound )
        finally:
            self.clearProgress()

    def jobGrepFinished( job_item ):
        if job_item.isCanceled():
            self.setStatusMessage( ckit.strings["statusbar_aborted"], 3000 )
        else:
            self.setStatusMessage( ckit.strings["statusbar_grep_finished"] % len(jump_list), 3000 )
        # Jump to the first hit unless the user already navigated
        if self.jump_selection==None:
            self.command.SearchResultNext()
        self.paint()

    job_item = ckit.JobItem( jobGrep, jobGrepFinished )
    self.taskEnqueue( job_item, "Grep" )
## Jump by line number or by TAGS symbol.
#
# A purely numeric input jumps to that line; anything else is treated as a
# tags symbol and delegated to TagsJump.
def command_Jump( self, info ):
    edit = self.activeEditPane().edit
    active_edit_lineno = edit.selection.cursor().line
    # Load the tags file next to the project file, if a project is open
    if self.project:
        tags_filename = ckit.joinPath(os.path.dirname(self.project.filename),"tags")
        try:
            self.loadTags(tags_filename)
        except IOError:
            pass
    # Collect all known symbols (sorted) as completion candidates
    symbol_list = set()
    for tags in self.tags_list:
        symbol_list = symbol_list.union( list( tags.symbols() ) )
    symbol_list = list(symbol_list)
    symbol_list.sort()
    if len(info.args)>=1:
        destination = info.args[0]
    else:
        destination = str(active_edit_lineno+1)
        destination = self.inputString( "Jump", destination, symbol_list )
        if not destination : return
    # NOTE: `info` is deliberately rebound to a fresh CommandInfo below.
    if re.match( "[0-9]+", destination ):
        info = ckit.CommandInfo()
        info.args = [ destination ]
        self.command.JumpLineNo(info)
    else:
        info = ckit.CommandInfo()
        info.args = [ destination ]
        self.command.TagsJump(info)
## Jump to the given line number.
def command_JumpLineNo( self, info ):
    """Jump to a 1-based line number taken from args[0] or prompted interactively."""
    active_edit = self.activeEditPane().edit
    current_lineno = active_edit.selection.cursor().line
    if info.args:
        try:
            target_lineno = int(info.args[0]) - 1
        except ValueError:
            self.setStatusMessage( ckit.strings["statusbar_jump_failed"], 3000, error=True )
            return
    else:
        answer = self.inputNumber( "JumpLineNo", str(current_lineno+1) )
        if answer is None:
            return
        target_lineno = int(answer) - 1
    active_edit.jumpLineNo(target_lineno)
def _searchResultCommon(self,direction):
    """Step through self.jump_list by ``direction`` (+1 next / -1 prev) and jump.

    jump_selection==None means nothing has been visited yet, so the first
    step lands on the first (or last) item depending on direction.
    """
    if not self.jump_list:
        return
    if self.jump_selection==None:
        if direction>0:
            self.jump_selection = 0
        else:
            self.jump_selection = len(self.jump_list)-1
    else:
        if 0 <= self.jump_selection + direction < len(self.jump_list):
            self.jump_selection += direction
        else:
            # Already at the first/last result
            self.setStatusMessage( ckit.strings["statusbar_jump_failed"], 3000, error=True )
            return
    # Guard against a jump_list that shrank since the last navigation
    if self.jump_selection>=len(self.jump_list):
        return
    jump = self.jump_list[self.jump_selection]
    jump()
## Jump to the next result position.
def command_SearchResultNext( self, info ):
    """Advance to the next entry in the jump list."""
    self._searchResultCommon(1)
## Jump to the previous result position.
def command_SearchResultPrev( self, info ):
    """Step back to the previous entry in the jump list."""
    self._searchResultCommon(-1)
## Show the search results in a list window.
def command_SearchResultList( self, info ):
    if not self.jump_list:
        return
    select = self.jump_selection
    def onStatusMessage( width, select ):
        return ""
    items = []
    for jump in self.jump_list:
        items.append( ( jump.text, jump ) )
    pos = self.centerOfWindowInPixel()
    list_window = lredit_listwindow.ListWindow( pos[0], pos[1], 20, 2, self.width()-5, self.height()-3, self, self.ini, True, "Jump", items, initial_select=select, onekey_search=False, statusbar_handler=onStatusMessage )
    self.enable(False)
    list_window.messageLoop()
    result = list_window.getResult()
    self.enable(True)
    self.activate()
    list_window.destroy()
    if result<0 : return
    if not items : return
    # Remember the selection and jump to it
    self.jump_selection = result
    jump = self.jump_list[self.jump_selection]
    jump()
## Move the active document to the other pane.
def command_AnotherPane( self, info ):
    """Open the active document in the inactive pane and focus it.

    If it is the only document, create a new one first so the current pane
    does not become empty.
    """
    if len(self.edit_list) <= 1:
        self.command.New()
    current_edit = self.activeEditPane().edit
    self.inactiveOpen( edit=current_edit )
    self.command.FocusInactiveEdit()
## Open a project file.
def command_OpenProject( self, info ):
    # Filename from args[0], or prompted (defaulting to the active
    # document's directory).
    if len(info.args)>=1:
        filename = info.args[0]
    else:
        filename = self.activeEditPane().edit.doc.getFullpath()
        if filename:
            filename = os.path.dirname(filename)
            filename = ckit.joinPath(filename,"")
        filename = self.inputFilename( "Project", filename, ensure_exists=True )
        if not filename : return
    try:
        self.project = lredit_project.Project(filename)
    except IOError:
        print( ckit.strings["error_open_failed"] % filename )
        self.setStatusMessage( ckit.strings["statusbar_open_failed"] % filename, 3000, error=True )
        # Drop the unopenable file from the history
        self.filename_history.remove(filename)
        return
    self.updateTitleBar()
    self.setStatusMessage( ckit.strings["statusbar_project_opened"] % filename, 3000 )
    # Remember the filename in history (skip files under the temp path)
    filename = os.path.abspath(filename)
    filename = os.path.normpath(filename)
    if not filename.lower().startswith(ckit.getTempPath().lower()):
        self.filename_history.append( ckit.normPath(filename) )
## Close the project file.
def command_CloseProject( self, info ):
    """Drop the current project, update the title bar and report status."""
    if not self.project:
        self.setStatusMessage( ckit.strings["project_not_opened"], 3000, error=True )
        return
    self.project = None
    self.updateTitleBar()
    self.setStatusMessage( ckit.strings["statusbar_project_closed"], 3000 )
## Edit the project file itself.
def command_EditProject( self, info ):
    """Open the current project file in the active pane."""
    if not self.project:
        self.setStatusMessage( ckit.strings["project_not_opened"], 3000, error=True )
        return
    self.activeOpen( filename = self.project.filename )
## List the files in the project and open the chosen one.
#
# SPACE toggles basename/fullpath display; the toggle sets loop[0]=True and
# quits the window so the outer while loop rebuilds and reshows it.
def command_ProjectFileList( self, info ):
    if not self.project:
        self.setStatusMessage( ckit.strings["project_not_opened"], 3000, error=True )
        return
    edit = self.activeEditPane().edit
    active_edit_filename = edit.doc.getFullpath()
    loop = [False]
    fullpath_mode = [False]
    select = None
    def onKeyDown( vk, mod ):
        if vk==VK_SPACE and mod==0:
            fullpath_mode[0] = not fullpath_mode[0]
            loop[0] = True
            list_window.quit()
            return True
    def onStatusMessage( width, select ):
        return ""
    while True:
        loop[0] = False
        items = []
        for filename, fullpath in zip( self.project.enumName(), self.project.enumFullpath() ):
            if fullpath_mode[0]:
                items.append( ( fullpath, ) )
            else:
                items.append( ( filename, ) )
            # Pre-select the entry for the active document
            if select==None and active_edit_filename==fullpath:
                select = len(items)-1
        if select==None:
            select = 0
        pos = self.centerOfWindowInPixel()
        list_window = lredit_listwindow.ListWindow( pos[0], pos[1], 20, 2, self.width()-5, self.height()-3, self, self.ini, True, "Files", items, initial_select=select, onekey_search=False, keydown_hook=onKeyDown, statusbar_handler=onStatusMessage )
        self.enable(False)
        list_window.messageLoop()
        # Delayed destroy to avoid flicker
        class DelayedCall:
            def __call__(self):
                self.list_window.destroy()
        delay = DelayedCall()
        delay.list_window = list_window
        self.delayedCall( delay, 10 )
        if not loop[0]:
            break
        select = list_window.getResult()
    result = list_window.getResult()
    self.enable(True)
    self.activate()
    if result<0 : return
    filename = items[result][0]
    if fullpath_mode[0]:
        fullpath = filename
    else:
        # Project file names are relative to the project's directory
        fullpath = ckit.normPath( ckit.joinPath( self.project.dirname, filename ) )
    self.activeOpen( filename=fullpath )
## List recently opened files (or projects) and open the chosen one.
#
# SPACE toggles basename/fullpath display, LEFT/RIGHT switch between the
# recent-files and recent-projects (*.lre) views, DELETE removes an entry
# from the history.  Mode switches set loop[0]=True and quit the window so
# the outer while loop rebuilds and reshows it.
def command_RecentFileList( self, info ):
    loop = [False]
    fullpath_mode = [False]
    project_mode = [False]
    select = [None]
    def onKeyDown( vk, mod ):
        if vk==VK_SPACE and mod==0:
            fullpath_mode[0] = not fullpath_mode[0]
            loop[0] = True
            list_window.quit()
            return True
        elif vk==VK_LEFT and mod==0:
            if project_mode[0]:
                project_mode[0] = False
                select[0] = 0
                loop[0] = True
                list_window.quit()
            return True
        elif vk==VK_RIGHT and mod==0:
            if not project_mode[0]:
                project_mode[0] = True
                select[0] = 0
                loop[0] = True
                list_window.quit()
            return True
        elif vk==VK_DELETE and mod==0:
            # Remove the selected entry from the history
            select[0] = list_window.getResult()
            filename = items[select[0]][1]
            self.filename_history.remove(filename)
            list_window.remove(select[0])
            return True
    def onStatusMessage( width, select ):
        return ""
    while True:
        loop[0] = False
        items = []
        for filename in self.filename_history.items:
            # *.lre files are projects; show them only in project mode
            if fnmatch.fnmatch( filename, "*.lre" ):
                if not project_mode[0]:
                    continue
            else:
                if project_mode[0]:
                    continue
            if fullpath_mode[0]:
                s = "%s" % filename
            else:
                s = "%s" % ckit.splitPath(filename)[1]
            items.append( ( s, filename ) )
        if select[0]==None:
            select[0] = 0
        if project_mode[0]:
            title = "Recent Projects"
        else:
            title = "Recent Files"
        pos = self.centerOfWindowInPixel()
        list_window = lredit_listwindow.ListWindow( pos[0], pos[1], 20, 2, self.width()-5, self.height()-3, self, self.ini, True, title, items, initial_select=select[0], onekey_search=False, keydown_hook=onKeyDown, statusbar_handler=onStatusMessage )
        self.enable(False)
        list_window.messageLoop()
        # Delayed destroy to avoid flicker
        class DelayedCall:
            def __call__(self):
                self.list_window.destroy()
        delay = DelayedCall()
        delay.list_window = list_window
        self.delayedCall( delay, 10 )
        if not loop[0]:
            break
        select[0] = list_window.getResult()
    result = list_window.getResult()
    self.enable(True)
    self.activate()
    if result<0 : return
    if not items : return
    filename = items[result][1]
    if project_mode[0]:
        # Projects are opened via the OpenProject command
        info = ckit.CommandInfo()
        info.args = [ filename ]
        self.command.OpenProject(info)
    else:
        self.activeOpen( filename=filename )
## Switch the major mode of the active document.
def command_Mode( self, info ):
    if len(info.args)>=1:
        mode_name = info.args[0]
    else:
        def statusString( update_info ):
            # Show "OK" while the typed text matches a known mode name
            for mode in self.mode_list:
                if mode.__name__==update_info.text:
                    return "OK"
            return " "
        mode_name = self.commandLine( "Mode", auto_complete=False, autofix_list=[], candidate_handler=None, status_handler=statusString )
        if mode_name==None : return
    # for/else: the else branch runs only when no mode class matched
    for mode in self.mode_list:
        if mode.__name__==mode_name:
            break
    else:
        self.setStatusMessage( ckit.strings["mode_not_found"] % mode_name, 3000, error=True, log=True )
        return
    edit = self.activeEditPane().edit
    edit.doc.setMode( mode() )
    edit.configure()
    self.setStatusMessage( ckit.strings["mode_enabled"] % mode_name, 3000 )
## Toggle a minor mode on/off for the active document.
def command_MinorMode( self, info ):
    if len(info.args)>=1:
        mode_name = info.args[0]
    else:
        def statusString( update_info ):
            # Show "OK" while the typed text matches a known minor mode name
            for mode in self.minor_mode_list:
                if mode.__name__==update_info.text:
                    return "OK"
            return " "
        mode_name = self.commandLine( "MinorMode", auto_complete=False, autofix_list=[], candidate_handler=None, status_handler=statusString )
        if mode_name==None : return
    # for/else: the else branch runs only when no mode class matched
    for mode in self.minor_mode_list:
        if mode.__name__==mode_name:
            break
    else:
        self.setStatusMessage( ckit.strings["mode_not_found"] % mode_name, 3000, error=True, log=True )
        return
    edit = self.activeEditPane().edit
    # Toggle: remove the mode if it is active, otherwise add it
    if edit.doc.hasMinorMode( mode.name ):
        edit.doc.removeMinorMode( mode.name )
        edit.configure()
        self.setStatusMessage( ckit.strings["mode_disabled"] % mode_name, 3000 )
    else:
        edit.doc.appendMinorMode( mode() )
        edit.configure()
        self.setStatusMessage( ckit.strings["mode_enabled"] % mode_name, 3000 )
## Show a popup menu.
#
# items: sequence of (label, key, command) tuples.  A menu entry may be
# triggered either by selecting it in the list window or by pressing its
# accelerator key (looked up through a ckit.Keymap).
def menu( self, title, items, pos=None ):
    selection = 0
    keydown_func = [None]        # cell: command chosen via accelerator key
    keymap = ckit.Keymap()
    for item in items:
        if item[1]:
            keymap[ item[1] ] = item[2]
    def onKeyDown( vk, mod ):
        try:
            keydown_func[0] = keymap.table[ ckit.KeyEvent(vk,mod) ]
            list_window.cancel()
            return True
        except KeyError:
            pass
    list_window = lredit_listwindow.ListWindow( 0, 0, 5, 1, self.width()-5, self.height()-3, self, self.ini, False, title, items, initial_select=selection, keydown_hook=onKeyDown, onekey_search=False )
    if pos:
        # Anchor the menu just below the given character cell
        x, y = pos
        screen_x, screen_y1 = self.charToScreen( x, y )
        screen_x, screen_y2 = self.charToScreen( x, y+1 )
        pos = ( screen_x, screen_y2 )
        list_window.setPosSize( pos[0], pos[1], list_window.width(), list_window.height(), ORIGIN_X_LEFT | ORIGIN_Y_TOP )
    else:
        # No anchor: center the menu in the main window
        pos = self.centerOfWindowInPixel()
        list_window.setPosSize( pos[0], pos[1], list_window.width(), list_window.height(), ORIGIN_X_CENTER | ORIGIN_Y_CENTER )
    list_window.show(True)
    self.enable(False)
    list_window.messageLoop()
    result = list_window.getResult()
    self.enable(True)
    self.activate()
    list_window.destroy()
    if result>=0:
        # Chosen from the list
        items[result][2]( ckit.CommandInfo() )
    elif keydown_func[0]:
        # Chosen via accelerator key
        keydown_func[0]( ckit.CommandInfo() )
## Show the extension menu.
def command_ExtensionMenu( self, info ):
    """Pop up the extension menu anchored at the active edit's cursor."""
    active_edit = self.activePane().edit
    if not active_edit:
        return
    self.menu( None, self.ext_menu_items, pos=active_edit.getCursorPos() )
## Generate a TAGS file for the current project.
#
# Writes the project's file list to tags.srcs, runs the bundled ctags.exe
# against it on a worker thread, and polls for cancellation from the
# background job queue.
def command_GenerateTags( self, info ):
    if not self.project:
        self.setStatusMessage( ckit.strings["project_not_opened"], 3000, error=True )
        return
    tags_filename = ckit.joinPath( self.project.dirname, "tags" )
    srcs_filename = ckit.joinPath( self.project.dirname, "tags.srcs" )
    class SubThread( threading.Thread ):
        # NOTE: methods use `thread_self` so `self` keeps referring to the
        # enclosing main-window instance inside this class.
        def __init__(thread_self):
            threading.Thread.__init__(thread_self)
            thread_self.p = None        # running ckit.SubProcess, if any
        def createSourceFilesList( thread_self, filename ):
            # Write the project's file names (CRLF-separated, mbcs) for ctags -L
            with open( filename, "w", encoding="mbcs" ) as fd:
                for name in self.project.enumName():
                    fd.write(os.path.normpath(name))
                    fd.write("\r\n")
        def run(thread_self):
            lredit_native.setBlockDetector()
            thread_self.createSourceFilesList(srcs_filename)
            cmd = [ os.path.join( ckit.getAppExePath(), "bin/ctags.exe" ) ]
            cmd += [ "-o", tags_filename ]
            cmd += [ "-n" ]  # use line numbers as tag positions
            cmd += [ "-L", srcs_filename ]
            thread_self.p = ckit.SubProcess(cmd,cwd=self.project.dirname,env=None)
            thread_self.p()
            # FIX: was `thread_self = None`, a no-op rebinding of the local
            # name; clear the attribute so cancel() won't touch a finished
            # subprocess.
            thread_self.p = None
            os.unlink(srcs_filename)
        def cancel(thread_self):
            if thread_self.p:
                thread_self.p.cancel()
                thread_self.p = None
    def jobGenerateTags( job_item ):
        self.setStatusMessage( ckit.strings["statusbar_tags_generating"] )
        self.setProgressValue(None)
        sub_thread = SubThread()
        sub_thread.start()
        # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
        # the supported spelling (available since Python 2.6).
        while sub_thread.is_alive():
            if job_item.isCanceled():
                sub_thread.cancel()
                break
            time.sleep(0.1)
        sub_thread.join()
    def jobGenerateTagsFinished( job_item ):
        self.clearProgress()
        if job_item.isCanceled():
            self.setStatusMessage( ckit.strings["statusbar_aborted"], 3000 )
        else:
            self.setStatusMessage( ckit.strings["statusbar_tags_generated"], 3000 )
    job_item = ckit.JobItem( jobGenerateTags, jobGenerateTagsFinished )
    self.taskEnqueue( job_item, "GenerateTags" )
    def loadTags( self, filename ):
        """Parse the tags file *filename* and put it at the front of
        ``self.tags_list``.

        If the same file is already loaded and unmodified on disk, this is a
        no-op; if it was modified, the stale entry is dropped and re-parsed.
        Parsing runs on a sub thread and can be aborted by the user, in which
        case the partially parsed tags are discarded.
        """
        filename = ckit.normPath(filename)
        for tags in self.tags_list:
            if tags.getFullpath() == filename:
                if tags.isFileModified():
                    # Stale cache entry: drop it and fall through to re-parse.
                    self.tags_list.remove(tags)
                    break
                else:
                    # Already loaded and up to date.
                    return
        tags = lredit_tags.Tags(filename)
        # Single-element list so the nested cancel() can mutate the flag.
        cancel_requested = [False]
        def cancel():
            cancel_requested[0] = True
            tags.cancel()
        self.setStatusMessage( ckit.strings["statusbar_tags_loading"] )
        self.setProgressValue(None)
        try:
            self.subThreadCall( tags.parse, (), cancel, raise_error=True )
        finally:
            # Always clear the progress indicator, even if parse() raised.
            self.clearProgress()
            if cancel_requested[0]:
                self.setStatusMessage( ckit.strings["statusbar_aborted"], 3000 )
            else:
                self.setStatusMessage( ckit.strings["statusbar_tags_loaded"], 3000 )
        if not cancel_requested[0]:
            # Most recently loaded tags take lookup priority.
            self.tags_list.insert( 0, tags )
## TAGSファイルをロードする
def command_LoadTags( self, info ):
if len(info.args)>=1:
filename = info.args[0]
else:
filename = self.inputFilename( "Tags", "", ensure_exists=True )
if not filename : return
self.loadTags(filename)
    ## Jump to a symbol's definition using the loaded TAGS files.
    #
    # A symbol name may be passed as the command argument.
    # Without an argument, the word under the cursor (or the current
    # selection) is used as the symbol name.
    #
    def command_TagsJump( self, info ):
        """Look *symbol* up in the loaded tags files and jump to its
        definition, showing a chooser window when several matches exist.
        The current position is pushed onto ``tags_jump_history`` so
        ``command_TagsBack`` can return here."""
        edit = self.activeEditPane().edit
        if len(info.args):
            symbol = info.args[0]
        else:
            if edit.selection.direction==0:
                # No selection: take the word under the cursor.
                cursor = edit.selection.cursor()
                right = cursor.wordRight(False)
                left = right.wordLeft(False)
                symbol = edit.getText( left, right )
            else:
                # Use the selected text verbatim.
                symbol = edit.getText( edit.selection.left(), edit.selection.right() )
        if not symbol:
            return
        # Load the TAGS file that sits next to the project file, if any.
        if self.project:
            tags_filename = ckit.joinPath(os.path.dirname(self.project.filename),"tags")
            try:
                self.loadTags(tags_filename)
            except IOError:
                # Best effort: a missing/unreadable tags file is not an error here.
                pass
        # First tags file that knows the symbol wins (list is priority-ordered).
        for tags in self.tags_list:
            tags_items = tags.find(symbol)
            if tags_items:
                break
        else:
            self.setStatusMessage( ckit.strings["statusbar_symbol_not_found"] % symbol, 3000, error=True )
            return
        if len(tags_items)>=2:
            # Multiple definitions: let the user pick one from a modal list.
            list_items = []
            for item in tags_items:
                name = "%s (%s)" % ( ckit.normPath(item[1]), item[2] )
                name = ckit.adjustStringWidth( self, name, self.width()-5, align=ckit.ALIGN_LEFT, ellipsis=ckit.ELLIPSIS_MID )
                name = name.rstrip()
                list_items.append( ( name, item ) )
            def onStatusMessage( width, select ):
                return ""
            pos = self.centerOfWindowInPixel()
            list_window = lredit_listwindow.ListWindow( pos[0], pos[1], 5, 1, self.width()-5, self.height()-3, self, self.ini, True, "Symbols : [%s]" % symbol, list_items, initial_select=0, onekey_search=False, statusbar_handler=onStatusMessage )
            self.enable(False)
            list_window.messageLoop()
            result = list_window.getResult()
            self.enable(True)
            self.activate()
            list_window.destroy()
            if result<0 : return
            tags_item = list_items[result][1]
        else:
            tags_item = tags_items[0]
        symbol2, filename, position = tags_item
        # Record the current position in the TagsJump history (for TagsBack).
        current = ( edit.doc.getFullpath(), edit.selection.cursor() )
        self.tags_jump_history.append(current)
        # Tag filenames are relative to the directory of the tags file itself.
        filename = ckit.joinPath( ckit.splitPath(tags.getFullpath())[0], filename )
        self.activeOpen(filename=filename)
        edit = self.activePane().edit
        # `position` is either a search pattern (str) or a 1-based line number (int).
        if isinstance( position, str ):
            try:
                search_object = ckit.Search( position, word=False, case=True, regex=False )
            except re.error:
                self.setStatusMessage( ckit.strings["statusbar_regex_wrong"] % position, 3000, error=True )
                return
            edit.setCursor( edit.pointDocumentBegin() )
            edit.search( search_object=search_object, direction=1, select=False, hitmark=False )
        # Deliberate fallthrough: after a str search this test is False and the
        # trailing else returns.
        if isinstance( position, int ):
            point = edit.point(position-1)
            # Clamp the target into the document's bounds.
            point = max( point, edit.pointDocumentBegin() )
            point = min( point, edit.pointDocumentEnd() )
            edit.setCursor( point, make_visible=False, paint=False )
            edit.makeVisible( point, jump_mode=True )
        else:
            return
## TAGSジャンプのジャンプ元に戻る
def command_TagsBack( self, info ):
if not self.tags_jump_history:
return
filename, point = self.tags_jump_history.pop()
self.activeOpen( filename=filename )
edit = self.activePane().edit
edit.setCursor( point, make_visible=False, paint=False )
edit.makeVisible( point, jump_mode=True )
    ## Show the outline (ctags parse of the current document) as a list.
    def command_Outline( self, info ):
        """Run ctags over the active document and present the resulting
        symbols in a modal list window; choosing one jumps to its line.
        If the document has unsaved changes, a temp copy is written so the
        outline reflects the buffer, not the file on disk."""
        edit = self.activeEditPane().edit
        src_filename = edit.doc.getFullpath()
        current_lineno = edit.selection.cursor().line
        encoding = edit.doc.encoding
        if src_filename==None:
            return
        if edit.doc.isModified():
            # Unsaved edits: snapshot the buffer into a temp file (keeping the
            # extension so ctags picks the right language parser).
            src_filename = ckit.makeTempFile( "tags.src_", os.path.splitext(src_filename)[1] )
            with open( src_filename, "wb" ) as fd:
                edit.doc.writeFile(fd)
        tags_filename = ckit.makeTempFile("tags_")
        cmd = [ os.path.join( ckit.getAppExePath(), "bin/ctags.exe" ) ]
        cmd += [ "-o", tags_filename ]
        cmd += [ "--sort=no" ] # keep document order
        cmd += [ "--excmd=pattern" ] # emit string search patterns, not line numbers
        cmd += [ "--fields=mKnsStz" ] # http://ctags.sourceforge.net/ctags.html
        cmd += [ src_filename ]
        self.subProcessCall(cmd)
        select = 0
        def onStatusMessage( width, select ):
            return ""
        items = []
        fd = open( tags_filename, "r", encoding=encoding.encoding, errors="ignore" )
        lines = fd.readlines()
        fd.close()
        # Matches: name \t file \t /^pattern$/;" ... line:N ...
        # NOTE(review): not a raw string — "\^" and "\$" rely on Python keeping
        # unknown escapes verbatim; an r'' literal would be safer — confirm.
        pattern_line = re.compile( '(.*)\t(.*)\t/\^(.*)\$/;".*line:([0-9]+).*' )
        for line in lines:
            result = pattern_line.match(line)
            if result:
                s = result.group(3)
                s = ckit.removeBom(s)
                s = ckit.expandTab( self, s, edit.doc.mode.tab_width )
                lineno = int(result.group(4))-1
                if lineno <= current_lineno:
                    # Pre-select the symbol nearest above the cursor.
                    select = len(items)
                items.append( ( s, lineno ) )
        pos = self.centerOfWindowInPixel()
        list_window = lredit_listwindow.ListWindow( pos[0], pos[1], 20, 2, self.width()-5, self.height()-3, self, self.ini, True, "Outline", items, initial_select=select, onekey_search=False, statusbar_handler=onStatusMessage )
        self.enable(False)
        list_window.messageLoop()
        result = list_window.getResult()
        self.enable(True)
        self.activate()
        list_window.destroy()
        if result<0 : return
        edit.jumpLineNo( items[result][1] )
#--------------------------------------------------------------------
## @} mainwindow
| 1.851563 | 2 |
Pyro/ext/ServiceTest.py | irmen/Pyro3 | 3 | 12759232 | #############################################################################
#
# A test for the PyroNS_NTService program
# Author: <NAME> <EMAIL>
#
# This is part of "Pyro" - Python Remote Objects
# Which is (c) <NAME> - <EMAIL>
#
#############################################################################
import unittest
import win32serviceutil
import win32service
import time
import Pyro.nsc
# Name of the Windows service under test (the Pyro name-server service).
ServiceName = 'PyroNS'
class Test(unittest.TestCase):
    """Integration tests for the PyroNS Windows service (PyroNS_NTService).

    Each test starts the real service via the Windows service manager in
    setUp and stops it again in tearDown, so these must run on Windows with
    pywin32 installed and the service registered.
    """
    def setUp(self):
        # Start the service before every test; tearDown stops it again.
        win32serviceutil.StartService(ServiceName)
    def testStartPending(self):
        """Immediately after StartService the service reports START_PENDING."""
        svcType, svcState, svcControls, err, svcErr, svcCP, svcWH = \
            win32serviceutil.QueryServiceStatus(ServiceName)
        assert svcState & win32service.SERVICE_START_PENDING
    def testFullyStarted(self):
        """Once running, the service is an own-process service accepting STOP."""
        self._waitForStarted()
        svcType, svcState, svcControls, err, svcErr, svcCP, svcWH = \
            win32serviceutil.QueryServiceStatus(ServiceName)
        assert svcType & win32service.SERVICE_WIN32_OWN_PROCESS
        assert svcState & win32service.SERVICE_RUNNING
        assert svcControls & win32service.SERVICE_ACCEPT_STOP
    def testStop(self):
        """StopService reports the service as stopped."""
        self._waitForStarted()
        svcType, svcState, svcControls, err, svcErr, svcCP, svcWH = \
            win32serviceutil.StopService(ServiceName)
        assert svcState & win32service.SERVICE_STOPPED
        assert svcType & win32service.SERVICE_WIN32_OWN_PROCESS
    def testNameserverAvailable(self):
        """The Pyro name server hosted by the service answers a ping."""
        self._waitForStarted()
        ctrl = Pyro.nsc.PyroNSControl()
        ctrl.args(None)
        ctrl.ping()
    def testNameserverShutdownFromNsc(self):
        """Shutting the name server down via nsc also stops the service."""
        self._waitForStarted()
        ctrl = Pyro.nsc.PyroNSControl()
        ctrl.args(None)
        ctrl.shutdown()
        # Poll up to ~20s (100 x 0.20s) for the service manager to notice.
        for each in range(100):
            svcType, svcState, svcControls, err, svcErr, svcCP, svcWH = \
                win32serviceutil.QueryServiceStatus(ServiceName)
            if svcState & win32service.SERVICE_STOPPED:
                return
            time.sleep(0.20)
        self.fail()
    def tearDown(self):
        # Stop the service if it is still running and wait until it is down.
        for each in range(1000):
            svcType, svcState, svcControls, err, svcErr, svcCP, svcWH = \
                win32serviceutil.QueryServiceStatus(ServiceName)
            if svcState & win32service.SERVICE_RUNNING:
                svcType, svcState, svcControls, err, svcErr, svcCP, svcWH = \
                    win32serviceutil.StopService(ServiceName)
                time.sleep(0.1)
            elif svcState & win32service.SERVICE_STOPPED:
                time.sleep(0.10)
                break
            else:
                time.sleep(0.10)
        assert svcState & win32service.SERVICE_STOPPED
        # Grace period before the next test's setUp restarts the service.
        time.sleep(3)
    def _waitForStarted(self):
        # Block (up to ~10s, 100 x 0.10s) until the service reports RUNNING.
        for each in range(100):
            svcType, svcState, svcControls, err, svcErr, svcCP, svcWH = \
                win32serviceutil.QueryServiceStatus(ServiceName)
            if svcState & win32service.SERVICE_RUNNING:
                break
            else:
                time.sleep(0.10)
if __name__ == '__main__':
    unittest.main()
| 2.25 | 2 |
main.py | suhailvs/pyinventory | 6 | 12759233 | #!/usr/bin/python3
if __name__=="__main__":
    # executable in ubuntu: https://stackoverflow.com/a/64641595/2351696
    # Entry point: make sure the database schema exists, then start the Tk UI.
    import models
    from forms import home
    models.create_tables_if_not_exist()
    root=home.Tk()
    # Black background; resizable(0,0) locks both window dimensions.
    root['bg']='black'
    root.resizable(0,0)
    frmmenu=home.FormMenu(root)
    root.mainloop()
| 2.03125 | 2 |
text_extensions_for_pandas/array/tensor.py | Monireh2/text-extensions-for-pandas | 0 | 12759234 | <gh_stars>0
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# tensor.py
#
# Part of text_extensions_for_pandas
#
# Pandas extensions to support columns of N-dimensional tensors of equal shape.
#
from distutils.version import LooseVersion
import numbers
import os
from typing import *
import numpy as np
import pandas as pd
from pandas.compat import set_function_name
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.indexers import check_array_indexer, validate_indices
""" Begin Patching of ExtensionArrayFormatter """
from pandas.io.formats.format import ExtensionArrayFormatter
def _format_strings_patched(self) -> List[str]:
    """Replacement for ``ExtensionArrayFormatter._format_strings`` (pandas
    1.1-1.2) that formats multi-dimensional TensorArray values.

    Non-TensorArray values, and 1-d tensors, fall back to the original
    implementation saved in ``_format_strings_orig``.  Otherwise the array is
    flattened, formatted elementwise, reshaped, and the nested string array is
    formatted once more with the default formatter.
    """
    from pandas.core.construction import extract_array
    from pandas.io.formats.format import format_array
    if not isinstance(self.values, TensorArray):
        return self._format_strings_orig()
    values = extract_array(self.values, extract_numpy=True)
    array = np.asarray(values)
    if array.ndim == 1:
        return self._format_strings_orig()
    def format_array_wrap(array_, formatter_):
        # Forward all of this formatter's display options to format_array.
        fmt_values = format_array(
            array_,
            formatter_,
            float_format=self.float_format,
            na_rep=self.na_rep,
            digits=self.digits,
            space=self.space,
            justify=self.justify,
            decimal=self.decimal,
            leading_space=self.leading_space,
            quoting=self.quoting,
        )
        return fmt_values
    flat_formatter = self.formatter
    if flat_formatter is None:
        flat_formatter = values._formatter(boxed=True)
    # Flatten array, call function, reshape (use ravel_compat in v1.3.0)
    flat_array = array.ravel("K")
    fmt_flat_array = np.asarray(
        format_array_wrap(flat_array, flat_formatter))
    # Preserve the original memory order when restoring the shape.
    order = "F" if array.flags.f_contiguous else "C"
    fmt_array = fmt_flat_array.reshape(array.shape, order=order)
    # Format the array of nested strings, use default formatter
    return format_array_wrap(fmt_array, None)
def _format_strings_patched_v1_0_0(self) -> List[str]:
    """Variant of ``_format_strings_patched`` for pandas < 1.1 / >= 1.3.

    Same flatten-format-reshape strategy, but ``format_array`` here lacks the
    ``quoting`` keyword, and the final pass over the nested string array uses
    a slimmed-down formatter (see pandas-dev GH#33770).
    """
    from functools import partial
    from pandas.core.construction import extract_array
    from pandas.io.formats.format import format_array
    from pandas.io.formats.printing import pprint_thing
    if not isinstance(self.values, TensorArray):
        return self._format_strings_orig()
    values = extract_array(self.values, extract_numpy=True)
    array = np.asarray(values)
    if array.ndim == 1:
        return self._format_strings_orig()
    def format_array_wrap(array_, formatter_):
        # Forward this formatter's display options (no `quoting` kwarg here).
        fmt_values = format_array(
            array_,
            formatter_,
            float_format=self.float_format,
            na_rep=self.na_rep,
            digits=self.digits,
            space=self.space,
            justify=self.justify,
            decimal=self.decimal,
            leading_space=self.leading_space,
        )
        return fmt_values
    flat_formatter = self.formatter
    if flat_formatter is None:
        flat_formatter = values._formatter(boxed=True)
    # Flatten array, call function, reshape (use ravel_compat in v1.3.0)
    flat_array = array.ravel("K")
    fmt_flat_array = np.asarray(
        format_array_wrap(flat_array, flat_formatter))
    order = "F" if array.flags.f_contiguous else "C"
    fmt_array = fmt_flat_array.reshape(array.shape, order=order)
    # Slimmed down version of GenericArrayFormatter due to pandas-dev GH#33770
    def format_strings_slim(array_, leading_space):
        formatter = partial(
            pprint_thing,
            escape_chars=("\t", "\r", "\n"),
        )
        def _format(x):
            return str(formatter(x))
        fmt_values = []
        for v in array_:
            tpl = "{v}" if leading_space is False else " {v}"
            fmt_values.append(tpl.format(v=_format(v)))
        return fmt_values
    return format_strings_slim(fmt_array, self.leading_space)
# Environment switch: set to anything other than "true" to leave pandas'
# formatter untouched.
_FORMATTER_ENABLED_KEY = "TEXT_EXTENSIONS_FOR_PANDAS_FORMATTER_ENABLED"
if os.getenv(_FORMATTER_ENABLED_KEY, "true").lower() == "true":
    # Keep the original so the patched functions can delegate back to it.
    ExtensionArrayFormatter._format_strings_orig = \
        ExtensionArrayFormatter._format_strings
    # Pick the patch matching the installed pandas version's format_array API.
    if LooseVersion("1.1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.3.0"):
        ExtensionArrayFormatter._format_strings = _format_strings_patched
    else:
        ExtensionArrayFormatter._format_strings = _format_strings_patched_v1_0_0
    # Marker so other code (and debugging) can detect the monkeypatch.
    ExtensionArrayFormatter._patched_by_text_extensions_for_pandas = True
""" End Patching of ExtensionArrayFormatter """
@pd.api.extensions.register_extension_dtype
class TensorDtype(pd.api.extensions.ExtensionDtype):
    """
    Pandas extension dtype describing a column whose rows are tensors of a
    common shape.
    """

    base = None

    @property
    def type(self):
        """Scalar type for a single row of a TensorArray column."""
        return TensorElement

    @property
    def name(self) -> str:
        """String identifier of this dtype."""
        return "TensorDtype"

    @classmethod
    def construct_from_string(cls, string: str):
        """Build an instance from its string name, per the ``ExtensionDtype``
        contract in ``pandas/core/dtypes/base.py``."""
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        # pandas probes this method with arbitrary names and relies on the
        # TypeError below to mean "not this dtype".
        if string != cls.__name__:
            raise TypeError(
                f"Cannot construct a '{cls.__name__}' from '{string}'")
        return cls()

    @classmethod
    def construct_array_type(cls):
        """Return the array class paired with this dtype."""
        return TensorArray

    def __from_arrow__(self, extension_array):
        """Convert a pyarrow extension array back into a TensorArray."""
        from text_extensions_for_pandas.array.arrow_conversion import arrow_to_tensor_array
        return arrow_to_tensor_array(extension_array)
class TensorOpsMixin(pd.api.extensions.ExtensionScalarOpsMixin):
    """
    Mixin to provide operators on underlying ndarray.
    TODO: would be better to derive from ExtensionOpsMixin, but not available
    """
    @classmethod
    def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None):
        # NOTE: this overrides, but coerce_to_dtype, result_dtype might not be needed
        def _binop(self, other):
            """Apply *op* elementwise to the backing ndarrays and wrap the
            result as a TensorArray (or as ``cls``)."""
            lvalues = self._tensor
            if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
                # Rely on pandas to unbox and dispatch to us.
                return NotImplemented
            # divmod returns a tuple
            if op_name in ["__divmod__", "__rdivmod__"]:
                # TODO: return tuple
                # div, mod = result
                raise NotImplementedError
            # Unwrap our own types; anything else is handed to numpy as-is.
            if isinstance(other, (TensorArray, TensorElement)):
                rvalues = other._tensor
            else:
                rvalues = other
            result = op(lvalues, rvalues)
            # Force a TensorArray if rvalue is not a scalar
            if isinstance(self, TensorElement) and \
                    (not isinstance(other, TensorElement) or not np.isscalar(other)):
                result_wrapped = TensorArray(result)
            else:
                result_wrapped = cls(result)
            return result_wrapped
        # Name the generated function like the dunder it implements.
        op_name = f"__{op.__name__}__"
        return set_function_name(_binop, op_name, cls)
class TensorElement(TensorOpsMixin):
    """
    A single element of a TensorArray — one row of a column with dtype
    TensorDtype.  A thin wrapper around a ``numpy.ndarray``.
    """

    def __init__(self, values: np.ndarray):
        """Wrap *values*, the tensor backing this element."""
        self._tensor = values

    def __repr__(self):
        return repr(self._tensor)

    def __str__(self):
        return str(self._tensor)

    def to_numpy(self):
        """Return this element's values as a ``numpy.ndarray``."""
        return np.asarray(self._tensor)

    def __array__(self):
        return np.asarray(self._tensor)
class TensorArray(pd.api.extensions.ExtensionArray, TensorOpsMixin):
    """
    A Pandas `ExtensionArray` that represents a column of `numpy.ndarray`s,
    or tensors, where the outer dimension is the count of tensors in the column.
    Each tensor must have the same shape.
    """
    def __init__(self, values: Union[np.ndarray, Sequence[Union[np.ndarray, TensorElement]],
                                     TensorElement, Any]):
        """
        :param values: A `numpy.ndarray` or sequence of `numpy.ndarray`s of equal shape.
        """
        if isinstance(values, np.ndarray):
            # Object arrays of TensorElements are re-stacked into one ndarray.
            if values.dtype.type is np.object_ and len(values) > 0 and \
               isinstance(values[0], TensorElement):
                self._tensor = np.array([np.asarray(v) for v in values])
            else:
                self._tensor = values
        elif isinstance(values, Sequence):
            if len(values) == 0:
                self._tensor = np.array([])
            else:
                # Stack along a new leading axis; requires equal shapes.
                self._tensor = np.stack([np.asarray(v) for v in values], axis=0)
        elif isinstance(values, TensorElement):
            self._tensor = np.array([np.asarray(values)])
        elif np.isscalar(values):
            # `values` is a single element: pd.Series(np.nan, index=[1, 2, 3], dtype=TensorDtype())
            self._tensor = np.array([values])
        elif isinstance(values, TensorArray):
            raise TypeError("Use the copy() method to create a copy of a TensorArray")
        else:
            raise TypeError(f"Expected a numpy.ndarray or sequence of numpy.ndarray, "
                            f"but received {values} "
                            f"of type '{type(values)}' instead.")
    @classmethod
    def _from_sequence(cls, scalars, dtype=None, copy=False):
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        if copy and isinstance(scalars, np.ndarray):
            scalars = scalars.copy()
        elif isinstance(scalars, TensorArray):
            scalars = scalars._tensor.copy() if copy else scalars._tensor
        return TensorArray(scalars)
    @classmethod
    def _from_factorized(cls, values, original):
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        raise NotImplementedError
    @classmethod
    def _concat_same_type(
        cls, to_concat: Sequence["TensorArray"]
    ) -> "TensorArray":
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        return TensorArray(np.concatenate([a._tensor for a in to_concat]))
    def isna(self) -> np.array:
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        # NOTE: "== None" is a deliberate *elementwise* comparison on object
        # arrays (not an identity test); a row is NA when all of its entries
        # are None / "" / NaN depending on the backing dtype.
        if self._tensor.dtype.type is np.object_:
            return self._tensor == None
        elif self._tensor.dtype.type is np.str_:
            return np.all(self._tensor == "", axis=-1)
        else:
            return np.all(np.isnan(self._tensor), axis=-1)
    def copy(self) -> "TensorArray":
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        ret = TensorArray(
            self._tensor.copy(),
        )
        # TODO: Copy cached properties too
        return ret
    def take(
        self, indices: Sequence[int], allow_fill: bool = False,
        fill_value: Any = None
    ) -> "TensorArray":
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        if allow_fill:
            # From API docs: "[If allow_fill == True, then] negative values in
            # `indices` indicate missing values and are set to `fill_value`
            indices = np.asarray(indices, dtype=np.intp)
            validate_indices(indices, len(self._tensor))
            # Check if there are missing indices to fill, if not can use numpy take below
            has_missing = np.any(indices < 0)
            if has_missing:
                if fill_value is None:
                    fill_value = np.nan
                # Create an array populated with fill value
                values = np.full((len(indices),) + self._tensor.shape[1:], fill_value)
                # Iterate over each index and set non-missing elements
                for i, idx in enumerate(indices):
                    if idx >= 0:
                        values[i] = self._tensor[idx]
                return TensorArray(values)
        # Delegate take to numpy array
        values = self._tensor.take(indices, axis=0)
        return TensorArray(values)
    @property
    def dtype(self) -> pd.api.extensions.ExtensionDtype:
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        return TensorDtype()
    @property
    def inferred_type(self) -> str:
        """
        Return string describing type of TensorArray. Delegates to
        `pandas.api.types.infer_dtype`. See docstring for more information.
        :return: string describing numpy type of this TensorArray
        """
        return pd.api.types.infer_dtype(self._tensor)
    @property
    def nbytes(self) -> int:
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        return self._tensor.nbytes
    def to_numpy(self, dtype=None, copy=False, na_value=pd.api.extensions.no_default):
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        # NOTE: na_value is accepted for API compatibility but not applied here.
        if dtype is not None:
            dtype = pd.api.types.pandas_dtype(dtype)
            if copy:
                values = np.array(self._tensor, dtype=dtype, copy=True)
            else:
                values = self._tensor.astype(dtype)
        elif copy:
            values = self._tensor.copy()
        else:
            values = self._tensor
        return values
    @property
    def numpy_dtype(self):
        """
        Get the dtype of the tensor.
        :return: The numpy dtype of the backing ndarray
        """
        return self._tensor.dtype
    @property
    def numpy_ndim(self):
        """
        Get the number of tensor dimensions.
        :return: integer for the number of dimensions
        """
        return self._tensor.ndim
    @property
    def numpy_shape(self):
        """
        Get the shape of the tensor.
        :return: A tuple of integers for the numpy shape of the backing ndarray
        """
        return self._tensor.shape
    def astype(self, dtype, copy=True):
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        dtype = pd.api.types.pandas_dtype(dtype)
        if isinstance(dtype, TensorDtype):
            values = TensorArray(self._tensor.copy()) if copy else self
        elif not pd.api.types.is_object_dtype(dtype) and \
                pd.api.types.is_string_dtype(dtype):
            # String dtype: render each tensor via str(); wrap in a pandas
            # string array when a StringDtype was requested.
            values = np.array([str(t) for t in self._tensor])
            if isinstance(dtype, pd.StringDtype):
                return dtype.construct_array_type()._from_sequence(values, copy=False)
            else:
                return values
        else:
            values = self._tensor.astype(dtype, copy=copy)
        return values
    def any(self, axis=None, out=None, keepdims=False):
        """
        Test whether any array element along a given axis evaluates to True.
        See numpy.any() documentation for more information
        https://numpy.org/doc/stable/reference/generated/numpy.any.html#numpy.any
        :param axis: Axis or axes along which a logical OR reduction is performed.
        :param out: Alternate output array in which to place the result.
        :param keepdims: If this is set to True, the axes which are reduced are left in the
                         result as dimensions with size one.
        :return: single boolean unless axis is not None else TensorArray
        """
        result = self._tensor.any(axis=axis, out=out, keepdims=keepdims)
        return result if axis is None else TensorArray(result)
    def all(self, axis=None, out=None, keepdims=False):
        """
        Test whether all array elements along a given axis evaluate to True.
        :param axis: Axis or axes along which a logical AND reduction is performed.
        :param out: Alternate output array in which to place the result.
        :param keepdims: If this is set to True, the axes which are reduced are left in the
                         result as dimensions with size one.
        :return: single boolean unless axis is not None else TensorArray
        """
        result = self._tensor.all(axis=axis, out=out, keepdims=keepdims)
        return result if axis is None else TensorArray(result)
    def __len__(self) -> int:
        return len(self._tensor)
    def __getitem__(self, item) -> Union["TensorArray", "TensorElement"]:
        """
        See docstring in `Extension Array` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        # Return scalar if single value is selected, a TensorElement for single array element,
        # or TensorArray for slice
        if isinstance(item, int):
            value = self._tensor[item]
            if np.isscalar(value):
                return value
            else:
                return TensorElement(value)
        else:
            if isinstance(item, TensorArray):
                item = np.asarray(item)
            item = check_array_indexer(self, item)
            return TensorArray(self._tensor[item])
    def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        key = check_array_indexer(self, key)
        # Normalize TensorElement / Series values down to plain ndarrays.
        if isinstance(value, TensorElement) or np.isscalar(value):
            value = np.asarray(value)
        if isinstance(value, list):
            value = [np.asarray(v) if isinstance(v, TensorElement) else v for v in value]
        if isinstance(value, ABCSeries) and isinstance(value.dtype, TensorDtype):
            value = value.values
        if value is None or isinstance(value, Sequence) and len(value) == 0:
            # Empty / None assignment means "set to missing": fill with NaN.
            nan_fill = np.full_like(self._tensor[key], np.nan)
            self._tensor[key] = nan_fill
        elif isinstance(key, (int, slice, np.ndarray)):
            self._tensor[key] = value
        else:
            raise NotImplementedError(f"__setitem__ with key type '{type(key)}' "
                                      f"not implemented")
    def __contains__(self, item) -> bool:
        # A single all-NaN element counts as "NA in self"; everything else
        # uses the default ExtensionArray membership test.
        if isinstance(item, TensorElement):
            npitem = np.asarray(item)
            if npitem.size == 1 and np.isnan(npitem).all():
                return self.isna().any()
        return super().__contains__(item)
    def __repr__(self):
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        return self._tensor.__repr__()
    def __str__(self):
        return self._tensor.__str__()
    def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        # TODO return self._tensor, np.nan
        raise NotImplementedError
    def _reduce(self, name, skipna=True, **kwargs):
        """
        See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
        for information about this method.
        """
        # NOTE: skipna is accepted for API compatibility but not honored here.
        if name == "sum":
            return TensorElement(np.sum(self._tensor, axis=0))
        elif name == "all":
            return TensorElement(np.all(self._tensor, axis=0))
        elif name == "any":
            return TensorElement(np.any(self._tensor, axis=0))
        else:
            raise NotImplementedError(f"'{name}' aggregate not implemented.")
    def __array__(self, dtype=None):
        """
        Interface to return the backing tensor as a numpy array with optional dtype.
        If dtype is not None, then the tensor will be casted to that type, otherwise this is a no-op.
        """
        return np.asarray(self._tensor, dtype=dtype)
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """
        Interface to handle numpy ufuncs that will accept TensorArray as input, and wrap the output
        back as another TensorArray.
        """
        out = kwargs.get('out', ())
        for x in inputs + out:
            if not isinstance(x, (TensorArray, np.ndarray, numbers.Number)):
                return NotImplemented
        # Defer to the implementation of the ufunc on unwrapped values.
        inputs = tuple(x._tensor if isinstance(x, TensorArray) else x
                       for x in inputs)
        if out:
            kwargs['out'] = tuple(
                x._tensor if isinstance(x, TensorArray) else x
                for x in out)
        result = getattr(ufunc, method)(*inputs, **kwargs)
        if type(result) is tuple:
            # multiple return values
            return tuple(type(self)(x) for x in result)
        elif method == 'at':
            # no return value
            return None
        else:
            # one return value
            return type(self)(result)
    def __arrow_array__(self, type=None):
        """Convert to a pyarrow extension array (used by pandas/Arrow interop)."""
        from text_extensions_for_pandas.array.arrow_conversion import ArrowTensorArray
        return ArrowTensorArray.from_numpy(self._tensor)
# Add operators from the mixin to the class
# (installs the __add__/__sub__/__lt__/... dunders generated by
# TensorOpsMixin._create_method on both classes)
TensorElement._add_arithmetic_ops()
TensorElement._add_comparison_ops()
TensorArray._add_arithmetic_ops()
TensorArray._add_comparison_ops()
| 1.898438 | 2 |
Pacote de Conteudo/Mundo 02/Exercicios/ex045.py | Michelle-On/ExerciciosPython_2020-21 | 0 | 12759235 | <reponame>Michelle-On/ExerciciosPython_2020-21
from random import randint
from time import sleep
# Jokenpo (rock-paper-scissors) against the computer, with ANSI-colored output.
print('-=-'*5)
print('    JOKENPO')
print('-=-'*5)
ops = ('Pedra','Papel','Tesoura')
print('\033[1;36mSuas opções:\033[m')
print('[ 0 ] PEDRA')
print('[ 1 ] PAPEL')
print('[ 2 ] TESOURA')
j1=int(input('Qual é a sua \033[1;32mjogada\033[m? '))
# BUGFIX: validate the player's move up front. The original only checked it in
# the `else` of the computer-choice chain — unreachable, since jc is always
# 0..2 — and crashed with IndexError at ops[j1] for out-of-range input.
if j1 not in (0, 1, 2):
    print('\033[1;31mJOGADA INVALIDA\033[m')
else:
    jc= randint(0,2)
    print('\033[1;35mJO')
    sleep(1)
    print('KEN')
    sleep(1)
    print('PO!!\033[m')
    sleep(1)
    print('-='*15)
    print('O computador escolheu \033[1;31m{}\033[m'.format(ops[jc]))
    print('O jogador escolheu \033[1;31m{}\033[m'.format(ops[j1]))
    print('-='*15)
    # Outcome table: Pedra(0) beats Tesoura(2), Tesoura beats Papel(1),
    # Papel beats Pedra.
    if jc == 0:
        if j1 == 0:
            print('\033[1;33mEMPATE\033[m')
        elif j1 == 1:
            print('\033[1;32mO JOGADOR VENCEU!\033[m')
        elif j1 == 2:
            # BUGFIX: original used '\03[m', a broken ANSI reset sequence.
            print('\033[1;31mO COMPUTADOR VENCEU\033[m')
    elif jc == 1:
        if j1 == 0:
            print('\033[1;31mO COMPUTADOR VENCEU\033[m')
        if j1 == 1:
            print('\033[1;33mEMPATE\033[m')
        if j1 == 2:
            print('\033[1;32mO JOGADOR VENCEU\033[m')
    elif jc == 2:
        if j1 == 0:
            print('\033[1;32mO JOGADOR VENCEU\033[m')
        if j1 == 1:
            print('\033[1;31mO COMPUTADOR VENCEU\033[m')
        if j1 == 2:
            print('\033[1;33mEMPATE\033[m')
| 3.5625 | 4 |
lib/addr/AddressStandardizationSolution.py | apilloud/supportertracker | 0 | 12759236 | <filename>lib/addr/AddressStandardizationSolution.py<gh_stars>0
import re
#**
#* A python port of the
#* Address Standardization Solution, PHP Edition.
#*
#* Address Standardization Solution is a trademark of The Analysis
#* and Solutions Company.
#*
#* @package AddressStandardizationSolution
#* @author <NAME> <<EMAIL>>
#* @copyright The Analysis and Solutions Company, 2001-2010
#* @license http:#www.analysisandsolutions.com/software/license.htm Simple Public License
#* @link http:#www.analysisandsolutions.com/software/addr/addr.htm
#*
#**
#* Formats a Delivery Address Line according to the United States Postal
#* Service's Addressing Standards
#*
#*
#* @package AddressStandardizationSolution
#* @author <NAME> <<EMAIL>>
#* @copyright The Analysis and Solutions Company, 2001-2010
#* @license http:#www.analysisandsolutions.com/software/license.htm Simple Public License
#* @link http:#www.analysisandsolutions.com/software/addr/addr.htm
#*
class AddressStandardizationSolution:
def __init__(self):
# An array with compass directions as keys and abbreviations as values
self.directionals = {
'E': 'E',
'EAST': 'E',
'E-R': 'EAST',
'N': 'N',
'NO': 'N',
'NORTH': 'N',
'N-R': 'NORTH',
'NE': 'NE',
'NORTHEAST': 'NE',
'NE-R': 'NORTHEAST',
'NORTHWEST': 'NW',
'NW-R': 'NORTHWEST',
'NW': 'NW',
'S': 'S',
'SO': 'S',
'SOUTH': 'S',
'S-R': 'SOUTH',
'SE': 'SE',
'SOUTHEAST': 'SE',
'SE-R': 'SOUTHEAST',
'SOUTHWEST': 'SW',
'SW-R': 'SOUTHWEST',
'SW': 'SW',
'W': 'W',
'WEST': 'W',
'W-R': 'WEST',
}
# An array with room types as keys and abbreviations as values
self.identifiers = {
'APARTMENT': 'APT',
'APT-R': 'APARTMENT',
'APT': 'APT',
'BLDG': 'BLDG',
'BUILDING': 'BLDG',
'BLDG-R': 'BUILDING',
'BOX': 'BOX',
'BOX-R': 'BOX',
'BASEMENT': 'BSMT',
'BSMT-R': 'BASEMENT',
'BSMT': 'BSMT',
'DEPARTMENT': 'DEPT',
'DEPT-R': 'DEPARTMENT',
'DEPT': 'DEPT',
'FL': 'FL',
'FLOOR': 'FL',
'FL-R': 'FLOOR',
'FRNT': 'FRNT',
'FRONT': 'FRNT',
'FRNT-R': 'FRONT',
'HANGER': 'HNGR',
'HNGR-R': 'HANGER',
'HNGR': 'HNGR',
'KEY': 'KEY',
'KEY-R': 'KEY',
'LBBY': 'LBBY',
'LOBBY': 'LBBY',
'LBBY-R': 'LOBBY',
'LOT': 'LOT',
'LOT-R': 'LOT',
'LOWER': 'LOWR',
'LOWR-R': 'LOWER',
'LOWR': 'LOWR',
'OFC': 'OFC',
'OFFICE': 'OFC',
'OFC-R': 'OFFICE',
'PENTHOUSE': 'PH',
'PH-R': 'PENTHOUSE',
'PH': 'PH',
'PIER': 'PIER',
'PIER-R': 'PIER',
'PMB': 'PMB',
'PMB-R': 'PMB',
'REAR': 'REAR',
'REAR-R': 'REAR',
'RM': 'RM',
'ROOM': 'RM',
'RM-R': 'ROOM',
'SIDE': 'SIDE',
'SIDE-R': 'SIDE',
'SLIP': 'SLIP',
'SLIP-R': 'SLIP',
'SPACE': 'SPC',
'SPC-R': 'SPACE',
'SPC': 'SPC',
'STE': 'STE',
'SUITE': 'STE',
'STE-R': 'SUITE',
'MAILSTOP': 'STOP',
'STOP': 'STOP',
'STOP-R': 'STOP',
'TRAILER': 'TRLR',
'TRLR-R': 'TRAILER',
'TRLR': 'TRLR',
'UNIT': 'UNIT',
'UNIT-R': 'UNIT',
'UPPER': 'UPPR',
'UPPR-R': 'UPPER',
'UPPR': 'UPPR',
'UPR': 'UPPR',
'#-R': '#',
'#': '#',
}
# An array with numeric words as keys and numbers as values
self.numbers = {
'FIRST': '1',
'ONE': '1',
'TEN': '10',
'TENTH': '10',
'ELEVEN': '11',
'ELEVENTH': '11',
'TWELFTH': '12',
'TWELVE': '12',
'THIRTEEN': '13',
'THIRTEENTH': '13',
'FOURTEEN': '14',
'FOURTEENTH': '14',
'FIFTEEN': '15',
'FIFTEENTH': '15',
'SIXTEEN': '16',
'SIXTEENTH': '16',
'SEVENTEEN': '17',
'SEVENTEENTH': '17',
'EIGHTEEN': '18',
'EIGHTEENTH': '18',
'NINETEEN': '19',
'NINETEENTH': '19',
'SECOND': '2',
'TWO': '2',
'TWENTIETH': '20',
'TWENTY': '20',
'THIRD': '3',
'THREE': '3',
'FOUR': '4',
'FOURTH': '4',
'FIFTH': '5',
'FIVE': '5',
'SIX': '6',
'SIXTH': '6',
'SEVEN': '7',
'SEVENTH': '7',
'EIGHT': '8',
'EIGHTH': '8',
'NINE': '9',
'NINTH': '9',
}
# An array with state names as keys and abbreviations as values
self.states = {
'ARMED FORCES AMERICA': 'AA',
'ARMED FORCES EUROPE': 'AE',
'ALASKA': 'AK',
'ALABAMA': 'AL',
'ARMED FORCES PACIFIC': 'AP',
'ARKANSAS': 'AR',
'ARIZONA': 'AZ',
'CALIFORNIA': 'CA',
'COLORADO': 'CO',
'CONNECTICUT': 'CT',
'DISTRICT OF COLUMBIA': 'DC',
'DELAWARE': 'DE',
'FLORIDA': 'FL',
'GEORGIA': 'GA',
'HAWAII': 'HI',
'IOWA': 'IA',
'IDAHO': 'ID',
'ILLINOIS': 'IL',
'INDIANA': 'IN',
'KANSAS': 'KS',
'KENTUCKY': 'KY',
'LOUISIANA': 'LA',
'MASSACHUSETTS': 'MA',
'MARYLAND': 'MD',
'MAINE': 'ME',
'MICHIGAN': 'MI',
'MINNESOTA': 'MN',
'MISSOURI': 'MO',
'MISSISSIPPI': 'MS',
'MONTANA': 'MT',
'NORTH CAROLINA': 'NC',
'NORTH DAKOTA': 'ND',
'NEBRASKA': 'NE',
'NEW HAMPSHIRE': 'NH',
'NEW JERSEY': 'NJ',
'NEW MEXICO': 'NM',
'NEVADA': 'NV',
'NEW YORK': 'NY',
'OHIO': 'OH',
'OKLAHOMA': 'OK',
'OREGON': 'OR',
'PENNSYLVANIA': 'PA',
'RHODE ISLAND': 'RI',
'SOUTH CAROLINA': 'SC',
'SOUTH DAKOTA': 'SD',
'TENNESSEE': 'TN',
'TEXAS': 'TX',
'UTAH': 'UT',
'VIRGINIA': 'VA',
'VERMONT': 'VT',
'WASHINGTON': 'WA',
'WISCONSIN': 'WI',
'WEST VIRGINIA': 'WV',
'WYOMING': 'WY',
}
# An array with street types as keys and abbreviations as values
self.suffixes = {
'ALLEE': 'ALY',
'ALLEY': 'ALY',
'ALY-R': 'ALLEY',
'ALLY': 'ALY',
'ALY': 'ALY',
'ANEX': 'ANX',
'ANNEX': 'ANX',
'ANX-R': 'ANNEX',
'ANNX': 'ANX',
'ANX': 'ANX',
'ARC': 'ARC',
'ARCADE': 'ARC',
'ARC-R': 'ARCADE',
'AV': 'AVE',
'AVE': 'AVE',
'AVEN': 'AVE',
'AVENU': 'AVE',
'AVENUE': 'AVE',
'AVE-R': 'AVENUE',
'AVN': 'AVE',
'AVNUE': 'AVE',
'BCH': 'BCH',
'BEACH': 'BCH',
'BCH-R': 'BEACH',
'BG': 'BG',
'BURG': 'BG',
'BG-R': 'BURG',
'BGS': 'BGS',
'BURGS': 'BGS',
'BGS-R': 'BURGS',
'BLF': 'BLF',
'BLUF': 'BLF',
'BLUFF': 'BLF',
'BLF-R': 'BLUFF',
'BLFS': 'BLFS',
'BLUFFS': 'BLFS',
'BLFS-R': 'BLUFFS',
'BLVD': 'BLVD',
'BLVRD': 'BLVD',
'BOUL': 'BLVD',
'BOULEVARD': 'BLVD',
'BLVD-R': 'BOULEVARD',
'BOULOVARD': 'BLVD',
'BOULV': 'BLVD',
'BOULVRD': 'BLVD',
'BULAVARD': 'BLVD',
'BULEVARD': 'BLVD',
'BULLEVARD': 'BLVD',
'BULOVARD': 'BLVD',
'BULVD': 'BLVD',
'BEND': 'BND',
'BND-R': 'BEND',
'BND': 'BND',
'BR': 'BR',
'BRANCH': 'BR',
'BR-R': 'BRANCH',
'BRNCH': 'BR',
'BRDGE': 'BRG',
'BRG': 'BRG',
'BRGE': 'BRG',
'BRIDGE': 'BRG',
'BRG-R': 'BRIDGE',
'BRK': 'BRK',
'BROOK': 'BRK',
'BRK-R': 'BROOK',
'BRKS': 'BRKS',
'BROOKS': 'BRKS',
'BRKS-R': 'BROOKS',
'BOT': 'BTM',
'BOTTM': 'BTM',
'BOTTOM': 'BTM',
'BTM-R': 'BOTTOM',
'BTM': 'BTM',
'BYP': 'BYP',
'BYPA': 'BYP',
'BYPAS': 'BYP',
'BYPASS': 'BYP',
'BYP-R': 'BYPASS',
'BYPS': 'BYP',
'BAYOO': 'BYU',
'BAYOU': 'BYU',
'BYU-R': 'BAYOU',
'BYO': 'BYU',
'BYOU': 'BYU',
'BYU': 'BYU',
'CIR': 'CIR',
'CIRC': 'CIR',
'CIRCEL': 'CIR',
'CIRCL': 'CIR',
'CIRCLE': 'CIR',
'CIR-R': 'CIRCLE',
'CRCL': 'CIR',
'CRCLE': 'CIR',
'CIRCELS': 'CIRS',
'CIRCLES': 'CIRS',
'CIRS-R': 'CIRCLES',
'CIRCLS': 'CIRS',
'CIRCS': 'CIRS',
'CIRS': 'CIRS',
'CRCLES': 'CIRS',
'CRCLS': 'CIRS',
'CLB': 'CLB',
'CLUB': 'CLB',
'CLB-R': 'CLUB',
'CLF': 'CLF',
'CLIF': 'CLF',
'CLIFF': 'CLF',
'CLF-R': 'CLIFF',
'CLFS': 'CLFS',
'CLIFFS': 'CLFS',
'CLFS-R': 'CLIFFS',
'CLIFS': 'CLFS',
'CMN': 'CMN',
'COMMON': 'CMN',
'CMN-R': 'COMMON',
'COMN': 'CMN',
'COR': 'COR',
'CORN': 'COR',
'CORNER': 'COR',
'COR-R': 'CORNER',
'CRNR': 'COR',
'CORNERS': 'CORS',
'CORS-R': 'CORNERS',
'CORNRS': 'CORS',
'CORS': 'CORS',
'CRNRS': 'CORS',
'CAMP': 'CP',
'CP-R': 'CAMP',
'CMP': 'CP',
'CP': 'CP',
'CAPE': 'CPE',
'CPE-R': 'CAPE',
'CPE': 'CPE',
'CRECENT': 'CRES',
'CRES': 'CRES',
'CRESCENT': 'CRES',
'CRES-R': 'CRESCENT',
'CRESENT': 'CRES',
'CRSCNT': 'CRES',
'CRSENT': 'CRES',
'CRSNT': 'CRES',
'CK': 'CRK',
'CR': 'CRK',
'CREEK': 'CRK',
'CRK-R': 'CREEK',
'CREK': 'CRK',
'CRK': 'CRK',
'COARSE': 'CRSE',
'COURSE': 'CRSE',
'CRSE-R': 'COURSE',
'CRSE': 'CRSE',
'CREST': 'CRST',
'CRST-R': 'CREST',
'CRST': 'CRST',
'CAUSEWAY': 'CSWY',
'CSWY-R': 'CAUSEWAY',
'CAUSEWY': 'CSWY',
'CAUSWAY': 'CSWY',
'CAUSWY': 'CSWY',
'CSWY': 'CSWY',
'CORT': 'CT',
'COURT': 'CT',
'CT-R': 'COURT',
'CRT': 'CT',
'CT': 'CT',
'CEN': 'CTR',
'CENT': 'CTR',
'CENTER': 'CTR',
'CTR-R': 'CENTER',
'CENTR': 'CTR',
'CENTRE': 'CTR',
'CNTER': 'CTR',
'CNTR': 'CTR',
'CTR': 'CTR',
'CENS': 'CTRS',
'CENTERS': 'CTRS',
'CTRS-R': 'CENTERS',
'CENTRES': 'CTRS',
'CENTRS': 'CTRS',
'CENTS': 'CTRS',
'CNTERS': 'CTRS',
'CNTRS': 'CTRS',
'CTRS': 'CTRS',
'COURTS': 'CTS',
'CTS-R': 'COURTS',
'CTS': 'CTS',
'CRV': 'CURV',
'CURV': 'CURV',
'CURVE': 'CURV',
'CURV-R': 'CURVE',
'COV': 'CV',
'COVE': 'CV',
'CV-R': 'COVE',
'CV': 'CV',
'COVES': 'CVS',
'CVS-R': 'COVES',
'COVS': 'CVS',
'CVS': 'CVS',
'CAN': 'CYN',
'CANYN': 'CYN',
'CANYON': 'CYN',
'CYN-R': 'CANYON',
'CNYN': 'CYN',
'CYN': 'CYN',
'DAL': 'DL',
'DALE': 'DL',
'DL-R': 'DALE',
'DL': 'DL',
'DAM': 'DM',
'DM-R': 'DAM',
'DM': 'DM',
'DR': 'DR',
'DRIV': 'DR',
'DRIVE': 'DR',
'DR-R': 'DRIVE',
'DRV': 'DR',
'DRIVES': 'DRS',
'DRS-R': 'DRIVES',
'DRIVS': 'DRS',
'DRS': 'DRS',
'DRVS': 'DRS',
'DIV': 'DV',
'DIVD': 'DV',
'DIVID': 'DV',
'DIVIDE': 'DV',
'DV-R': 'DIVIDE',
'DV': 'DV',
'DVD': 'DV',
'EST': 'EST',
'ESTA': 'EST',
'ESTATE': 'EST',
'EST-R': 'ESTATE',
'ESTAS': 'ESTS',
'ESTATES': 'ESTS',
'ESTS-R': 'ESTATES',
'ESTS': 'ESTS',
'EXP': 'EXPY',
'EXPR': 'EXPY',
'EXPRESS': 'EXPY',
'EXPRESSWAY': 'EXPY',
'EXPY-R': 'EXPRESSWAY',
'EXPRESWAY': 'EXPY',
'EXPRSWY': 'EXPY',
'EXPRWY': 'EXPY',
'EXPW': 'EXPY',
'EXPWY': 'EXPY',
'EXPY': 'EXPY',
'EXWAY': 'EXPY',
'EXWY': 'EXPY',
'EXT': 'EXT',
'EXTEN': 'EXT',
'EXTENSION': 'EXT',
'EXT-R': 'EXTENSION',
'EXTENSN': 'EXT',
'EXTN': 'EXT',
'EXTNSN': 'EXT',
'EXTENS': 'EXTS',
'EXTENSIONS': 'EXTS',
'EXTS-R': 'EXTENSIONS',
'EXTENSNS': 'EXTS',
'EXTNS': 'EXTS',
'EXTNSNS': 'EXTS',
'EXTS': 'EXTS',
'FAL': 'FALL',
'FALL': 'FALL',
'FALL-R': 'FALL',
'FIELD': 'FLD',
'FLD-R': 'FIELD',
'FLD': 'FLD',
'FIELDS': 'FLDS',
'FLDS-R': 'FIELDS',
'FLDS': 'FLDS',
'FALLS': 'FLS',
'FLS-R': 'FALLS',
'FALS': 'FLS',
'FLS': 'FLS',
'FLAT': 'FLT',
'FLT-R': 'FLAT',
'FLT': 'FLT',
'FLATS': 'FLTS',
'FLTS-R': 'FLATS',
'FLTS': 'FLTS',
'FORD': 'FRD',
'FRD-R': 'FORD',
'FRD': 'FRD',
'FORDS': 'FRDS',
'FRDS-R': 'FORDS',
'FRDS': 'FRDS',
'FORG': 'FRG',
'FORGE': 'FRG',
'FRG-R': 'FORGE',
'FRG': 'FRG',
'FORGES': 'FRGS',
'FRGS-R': 'FORGES',
'FRGS': 'FRGS',
'FORK': 'FRK',
'FRK-R': 'FORK',
'FRK': 'FRK',
'FORKS': 'FRKS',
'FRKS-R': 'FORKS',
'FRKS': 'FRKS',
'FOREST': 'FRST',
'FRST-R': 'FOREST',
'FORESTS': 'FRST',
'FORREST': 'FRST',
'FORRESTS': 'FRST',
'FORRST': 'FRST',
'FORRSTS': 'FRST',
'FORST': 'FRST',
'FORSTS': 'FRST',
'FRRESTS': 'FRST',
'FRRST': 'FRST',
'FRRSTS': 'FRST',
'FRST': 'FRST',
'FERRY': 'FRY',
'FRY-R': 'FERRY',
'FERY': 'FRY',
'FRRY': 'FRY',
'FRY': 'FRY',
'FORT': 'FT',
'FT-R': 'FORT',
'FRT': 'FT',
'FT': 'FT',
'FREEWAY': 'FWY',
'FWY-R': 'FREEWAY',
'FREEWY': 'FWY',
'FREWAY': 'FWY',
'FREWY': 'FWY',
'FRWAY': 'FWY',
'FRWY': 'FWY',
'FWY': 'FWY',
'GARDEN': 'GDN',
'GDN-R': 'GARDEN',
'GARDN': 'GDN',
'GDN': 'GDN',
'GRDEN': 'GDN',
'GRDN': 'GDN',
'GARDENS': 'GDNS',
'GDNS-R': 'GARDENS',
'GARDNS': 'GDNS',
'GDNS': 'GDNS',
'GRDENS': 'GDNS',
'GRDNS': 'GDNS',
'GLEN': 'GLN',
'GLN-R': 'GLEN',
'GLENN': 'GLN',
'GLN': 'GLN',
'GLENNS': 'GLNS',
'GLENS': 'GLNS',
'GLNS-R': 'GLENS',
'GLNS': 'GLNS',
'GREEN': 'GRN',
'GRN-R': 'GREEN',
'GREN': 'GRN',
'GRN': 'GRN',
'GREENS': 'GRNS',
'GRNS-R': 'GREENS',
'GRENS': 'GRNS',
'GRNS': 'GRNS',
'GROV': 'GRV',
'GROVE': 'GRV',
'GRV-R': 'GROVE',
'GRV': 'GRV',
'GROVES': 'GRVS',
'GRVS-R': 'GROVES',
'GROVS': 'GRVS',
'GRVS': 'GRVS',
'GATEWAY': 'GTWY',
'GTWY-R': 'GATEWAY',
'GATEWY': 'GTWY',
'GATWAY': 'GTWY',
'GTWAY': 'GTWY',
'GTWY': 'GTWY',
'HARB': 'HBR',
'HARBOR': 'HBR',
'HBR-R': 'HARBOR',
'HARBR': 'HBR',
'HBR': 'HBR',
'HRBOR': 'HBR',
'HARBORS': 'HBRS',
'HBRS-R': 'HARBORS',
'HBRS': 'HBRS',
'HILL': 'HL',
'HL-R': 'HILL',
'HL': 'HL',
'HILLS': 'HLS',
'HLS-R': 'HILLS',
'HLS': 'HLS',
'HLLW': 'HOLW',
'HLLWS': 'HOLW',
'HOLLOW': 'HOLW',
'HOLW-R': 'HOLLOW',
'HOLLOWS': 'HOLW',
'HOLOW': 'HOLW',
'HOLOWS': 'HOLW',
'HOLW': 'HOLW',
'HOLWS': 'HOLW',
'HEIGHT': 'HTS',
'HEIGHTS': 'HTS',
'HTS-R': 'HEIGHTS',
'HGTS': 'HTS',
'HT': 'HTS',
'HTS': 'HTS',
'HAVEN': 'HVN',
'HVN-R': 'HAVEN',
'HAVN': 'HVN',
'HVN': 'HVN',
'HIGHWAY': 'HWY',
'HWY-R': 'HIGHWAY',
'HIGHWY': 'HWY',
'HIWAY': 'HWY',
'HIWY': 'HWY',
'HWAY': 'HWY',
'HWY': 'HWY',
'HYGHWAY': 'HWY',
'HYWAY': 'HWY',
'HYWY': 'HWY',
'INLET': 'INLT',
'INLT-R': 'INLET',
'INLT': 'INLT',
'ILAND': 'IS',
'ILND': 'IS',
'IS': 'IS',
'ISLAND': 'IS',
'IS-R': 'ISLAND',
'ISLND': 'IS',
'ILE': 'ISLE',
'ISLE': 'ISLE',
'ISLE-R': 'ISLE',
'ISLES': 'ISLE',
'ILANDS': 'ISS',
'ILNDS': 'ISS',
'ISLANDS': 'ISS',
'ISS-R': 'ISLANDS',
'ISLDS': 'ISS',
'ISLNDS': 'ISS',
'ISS': 'ISS',
'JCT': 'JCT',
'JCTION': 'JCT',
'JCTN': 'JCT',
'JUNCTION': 'JCT',
'JCT-R': 'JUNCTION',
'JUNCTN': 'JCT',
'JUNCTON': 'JCT',
'JCTIONS': 'JCTS',
'JCTNS': 'JCTS',
'JCTS': 'JCTS',
'JUNCTIONS': 'JCTS',
'JCTS-R': 'JUNCTIONS',
'JUNCTONS': 'JCTS',
'JUNGTNS': 'JCTS',
'KNL': 'KNL',
'KNOL': 'KNL',
'KNOLL': 'KNL',
'KNL-R': 'KNOLL',
'KNLS': 'KNLS',
'KNOLLS': 'KNLS',
'KNLS-R': 'KNOLLS',
'KNOLS': 'KNLS',
'KEY': 'KY',
'KY-R': 'KEY',
'KY': 'KY',
'KEYS': 'KYS',
'KYS-R': 'KEYS',
'KYS': 'KYS',
'LAND': 'LAND',
'LAND-R': 'LAND',
'LCK': 'LCK',
'LOCK': 'LCK',
'LCK-R': 'LOCK',
'LCKS': 'LCKS',
'LOCKS': 'LCKS',
'LCKS-R': 'LOCKS',
'LDG': 'LDG',
'LDGE': 'LDG',
'LODG': 'LDG',
'LODGE': 'LDG',
'LDG-R': 'LODGE',
'LF': 'LF',
'LOAF': 'LF',
'LF-R': 'LOAF',
'LGT': 'LGT',
'LIGHT': 'LGT',
'LGT-R': 'LIGHT',
'LT': 'LGT',
'LGTS': 'LGTS',
'LIGHTS': 'LGTS',
'LGTS-R': 'LIGHTS',
'LTS': 'LGTS',
'LAKE': 'LK',
'LK-R': 'LAKE',
'LK': 'LK',
'LAKES': 'LKS',
'LKS-R': 'LAKES',
'LKS': 'LKS',
'LA': 'LN',
'LANE': 'LN',
'LN-R': 'LANE',
'LANES': 'LN',
'LN': 'LN',
'LNS': 'LN',
'LANDG': 'LNDG',
'LANDING': 'LNDG',
'LNDG-R': 'LANDING',
'LANDNG': 'LNDG',
'LNDG': 'LNDG',
'LNDNG': 'LNDG',
'LOOP': 'LOOP',
'LOOP-R': 'LOOP',
'LOOPS': 'LOOP',
'MALL': 'MALL',
'MALL-R': 'MALL',
'MDW': 'MDW',
'MEADOW': 'MDW',
'MDW-R': 'MEADOW',
'MDWS': 'MDWS',
'MEADOWS': 'MDWS',
'MDWS-R': 'MEADOWS',
'MEDOWS': 'MDWS',
'MEDWS': 'MDWS',
'MEWS': 'MEWS',
'MEWS-R': 'MEWS',
'MIL': 'ML',
'MILL': 'ML',
'ML-R': 'MILL',
'ML': 'ML',
'MILLS': 'MLS',
'MLS-R': 'MILLS',
'MILS': 'MLS',
'MLS': 'MLS',
'MANOR': 'MNR',
'MNR-R': 'MANOR',
'MANR': 'MNR',
'MNR': 'MNR',
'MANORS': 'MNRS',
'MNRS-R': 'MANORS',
'MANRS': 'MNRS',
'MNRS': 'MNRS',
'MISN': 'MSN',
'MISSION': 'MSN',
'MSN-R': 'MISSION',
'MISSN': 'MSN',
'MSN': 'MSN',
'MSSN': 'MSN',
'MNT': 'MT',
'MOUNT': 'MT',
'MT-R': 'MOUNT',
'MT': 'MT',
'MNTAIN': 'MTN',
'MNTN': 'MTN',
'MOUNTAIN': 'MTN',
'MTN-R': 'MOUNTAIN',
'MOUNTIN': 'MTN',
'MTIN': 'MTN',
'MTN': 'MTN',
'MNTNS': 'MTNS',
'MOUNTAINS': 'MTNS',
'MTNS-R': 'MOUNTAINS',
'MTNS': 'MTNS',
'MOTORWAY': 'MTWY',
'MTWY-R': 'MOTORWAY',
'MOTORWY': 'MTWY',
'MOTRWY': 'MTWY',
'MOTWY': 'MTWY',
'MTRWY': 'MTWY',
'MTWY': 'MTWY',
'NCK': 'NCK',
'NECK': 'NCK',
'NCK-R': 'NECK',
'NEK': 'NCK',
'OPAS': 'OPAS',
'OVERPAS': 'OPAS',
'OVERPASS': 'OPAS',
'OPAS-R': 'OVERPASS',
'OVERPS': 'OPAS',
'OVRPS': 'OPAS',
'ORCH': 'ORCH',
'ORCHARD': 'ORCH',
'ORCH-R': 'ORCHARD',
'ORCHRD': 'ORCH',
'OVAL': 'OVAL',
'OVAL-R': 'OVAL',
'OVL': 'OVAL',
'PARK': 'PARK',
'PARK-R': 'PARK',
'PARKS': 'PARK',
'PK': 'PARK',
'PRK': 'PARK',
'PAS': 'PASS',
'PASS': '<PASSWORD>',
'PASS-R': 'PASS',
'PATH': 'PATH',
'PATH-R': 'PATH',
'PATHS': 'PATH',
'PIKE': 'PIKE',
'PIKE-R': 'PIKE',
'PIKES': 'PIKE',
'PARKWAY': 'PKWY',
'PKWY-R': 'PARKWAY',
'PARKWAYS': 'PKWY',
'PARKWY': 'PKWY',
'PKWAY': 'PKWY',
'PKWY': 'PKWY',
'PKWYS': 'PKWY',
'PKY': 'PKWY',
'PL': 'PL',
'PLAC': 'PL',
'PLACE': 'PL',
'PL-R': 'PLACE',
'PLASE': 'PL',
'PLAIN': 'PLN',
'PLN-R': 'PLAIN',
'PLN': 'PLN',
'PLAINES': 'PLNS',
'PLAINS': 'PLNS',
'PLNS-R': 'PLAINS',
'PLNS': 'PLNS',
'PLAZ': 'PLZ',
'PLAZA': 'PLZ',
'PLZ-R': 'PLAZA',
'PLZ': 'PLZ',
'PLZA': 'PLZ',
'PZ': 'PLZ',
'PINE': 'PNE',
'PNE-R': 'PINE',
'PNE': 'PNE',
'PINES': 'PNES',
'PNES-R': 'PINES',
'PNES': 'PNES',
'PR': 'PR',
'PRAIR': 'PR',
'PRAIRIE': 'PR',
'PR-R': 'PRAIRIE',
'PRARE': 'PR',
'PRARIE': 'PR',
'PRR': 'PR',
'PRRE': 'PR',
'PORT': 'PRT',
'PRT-R': 'PORT',
'PRT': 'PRT',
'PORTS': 'PRTS',
'PRTS-R': 'PORTS',
'PRTS': 'PRTS',
'PASG': 'PSGE',
'PASSAGE': 'PSGE',
'PSGE-R': 'PASSAGE',
'PASSG': 'PSGE',
'PSGE': 'PSGE',
'PNT': 'PT',
'POINT': 'PT',
'PT-R': 'POINT',
'PT': 'PT',
'PNTS': 'PTS',
'POINTS': 'PTS',
'PTS-R': 'POINTS',
'PTS': 'PTS',
'RAD': 'RADL',
'RADIAL': 'RADL',
'RADL-R': 'RADIAL',
'RADIEL': 'RADL',
'RADL': 'RADL',
'RAMP': 'RAMP',
'RAMP-R': 'RAMP',
'RD': 'RD',
'ROAD': 'RD',
'RD-R': 'ROAD',
'RDG': 'RDG',
'RDGE': 'RDG',
'RIDGE': 'RDG',
'RDG-R': 'RIDGE',
'RDGS': 'RDGS',
'RIDGES': 'RDGS',
'RDGS-R': 'RIDGES',
'RDS': 'RDS',
'ROADS': 'RDS',
'RDS-R': 'ROADS',
'RIV': 'RIV',
'RIVER': 'RIV',
'RIV-R': 'RIVER',
'RIVR': 'RIV',
'RVR': 'RIV',
'RANCH': 'RNCH',
'RNCH-R': 'RANCH',
'RANCHES': 'RNCH',
'RNCH': 'RNCH',
'RNCHS': 'RNCH',
'RAOD': 'RD',
'ROW': 'ROW',
'ROW-R': 'ROW',
'RAPID': 'RPD',
'RPD-R': 'RAPID',
'RPD': 'RPD',
'RAPIDS': 'RPDS',
'RPDS-R': 'RAPIDS',
'RPDS': 'RPDS',
'REST': 'RST',
'RST-R': 'REST',
'RST': 'RST',
'ROUTE': 'RTE',
'RTE-R': 'ROUTE',
'RT': 'RTE',
'RTE': 'RTE',
'RUE': 'RUE',
'RUE-R': 'RUE',
'RUN': 'RUN',
'RUN-R': 'RUN',
'SHL': 'SHL',
'SHOAL': 'SHL',
'SHL-R': 'SHOAL',
'SHOL': 'SHL',
'SHLS': 'SHLS',
'SHOALS': 'SHLS',
'SHLS-R': 'SHOALS',
'SHOLS': 'SHLS',
'SHOAR': 'SHR',
'SHORE': 'SHR',
'SHR-R': 'SHORE',
'SHR': 'SHR',
'SHOARS': 'SHRS',
'SHORES': 'SHRS',
'SHRS-R': 'SHORES',
'SHRS': 'SHRS',
'SKWY': 'SKWY',
'SKYWAY': 'SKWY',
'SKWY-R': 'SKYWAY',
'SKYWY': 'SKWY',
'SMT': 'SMT',
'SUMIT': 'SMT',
'SUMITT': 'SMT',
'SUMMIT': 'SMT',
'SMT-R': 'SUMMIT',
'SUMT': 'SMT',
'SPG': 'SPG',
'SPNG': 'SPG',
'SPRING': 'SPG',
'SPG-R': 'SPRING',
'SPRNG': 'SPG',
'SPGS': 'SPGS',
'SPNGS': 'SPGS',
'SPRINGS': 'SPGS',
'SPGS-R': 'SPRINGS',
'SPRNGS': 'SPGS',
'SPR': 'SPUR',
'SPRS': 'SPUR',
'SPUR': 'SPUR',
'SPUR-R': 'SPUR',
'SPURS': 'SPUR',
'SQ': 'SQ',
'SQAR': 'SQ',
'SQR': 'SQ',
'SQRE': 'SQ',
'SQU': 'SQ',
'SQUARE': 'SQ',
'SQ-R': 'SQUARE',
'SQARS': 'SQS',
'SQRS': 'SQS',
'SQS': 'SQS',
'SQUARES': 'SQS',
'SQS-R': 'SQUARES',
'ST': 'ST',
'STR': 'ST',
'STREET': 'ST',
'ST-R': 'STREET',
'STRT': 'ST',
'STA': 'STA',
'STATION': 'STA',
'STA-R': 'STATION',
'STATN': 'STA',
'STN': 'STA',
'STRA': 'STRA',
'STRAV': 'STRA',
'STRAVE': 'STRA',
'STRAVEN': 'STRA',
'STRAVENUE': 'STRA',
'STRA-R': 'STRAVENUE',
'STRAVN': 'STRA',
'STRVN': 'STRA',
'STRVNUE': 'STRA',
'STREAM': 'STRM',
'STRM-R': 'STREAM',
'STREME': 'STRM',
'STRM': 'STRM',
'STREETS': 'STS',
'STS-R': 'STREETS',
'STS': 'STS',
'TER': 'TER',
'TERACE': 'TER',
'TERASE': 'TER',
'TERR': 'TER',
'TERRACE': 'TER',
'TER-R': 'TERRACE',
'TERRASE': 'TER',
'TERRC': 'TER',
'TERRICE': 'TER',
'TPK': 'TPKE',
'TPKE': 'TPKE',
'TRNPK': 'TPKE',
'TRPK': 'TPKE',
'TURNPIKE': 'TPKE',
'TPKE-R': 'TURNPIKE',
'TURNPK': 'TPKE',
'TRACK': 'TRAK',
'TRAK-R': 'TRACK',
'TRACKS': 'TRAK',
'TRAK': 'TRAK',
'TRK': 'TRAK',
'TRKS': 'TRAK',
'TRACE': 'TRCE',
'TRCE-R': 'TRACE',
'TRACES': 'TRCE',
'TRCE': 'TRCE',
'TRAFFICWAY': 'TRFY',
'TRFY-R': 'TRAFFICWAY',
'TRAFFICWY': 'TRFY',
'TRAFWAY': 'TRFY',
'TRFCWY': 'TRFY',
'TRFFCWY': 'TRFY',
'TRFFWY': 'TRFY',
'TRFWY': 'TRFY',
'TRFY': 'TRFY',
'TR': 'TRL',
'TRAIL': 'TRL',
'TRL-R': 'TRAIL',
'TRAILS': 'TRL',
'TRL': 'TRL',
'TRLS': 'TRL',
'THROUGHWAY': 'TRWY',
'TRWY-R': 'THROUGHWAY',
'THROUGHWY': 'TRWY',
'THRUWAY': 'TRWY',
'THRUWY': 'TRWY',
'THRWAY': 'TRWY',
'THRWY': 'TRWY',
'THWY': 'TRWY',
'TRWY': 'TRWY',
'TUNEL': 'TUNL',
'TUNL': 'TUNL',
'TUNLS': 'TUNL',
'TUNNEL': 'TUNL',
'TUNL-R': 'TUNNEL',
'TUNNELS': 'TUNL',
'TUNNL': 'TUNL',
'UN': 'UN',
'UNION': 'UN',
'UN-R': 'UNION',
'UNIONS': 'UNS',
'UNS-R': 'UNIONS',
'UNS': 'UNS',
'UDRPS': 'UPAS',
'UNDERPAS': 'UPAS',
'UNDERPASS': 'UPAS',
'UPAS-R': 'UNDERPASS',
'UNDERPS': 'UPAS',
'UNDRPAS': 'UPAS',
'UNDRPS': 'UPAS',
'UPAS': 'UPAS',
'VDCT': 'VIA',
'VIA': 'VIA',
'VIADCT': 'VIA',
'VIADUCT': 'VIA',
'VIA-R': 'VIADUCT',
'VIS': 'VIS',
'VIST': 'VIS',
'VISTA': 'VIS',
'VIS-R': 'VISTA',
'VST': 'VIS',
'VSTA': 'VIS',
'VILLE': 'VL',
'VL-R': 'VILLE',
'VL': 'VL',
'VILG': 'VLG',
'VILL': 'VLG',
'VILLAG': 'VLG',
'VILLAGE': 'VLG',
'VLG-R': 'VILLAGE',
'VILLG': 'VLG',
'VILLIAGE': 'VLG',
'VLG': 'VLG',
'VILGS': 'VLGS',
'VILLAGES': 'VLGS',
'VLGS-R': 'VILLAGES',
'VLGS': 'VLGS',
'VALLEY': 'VLY',
'VLY-R': 'VALLEY',
'VALLY': 'VLY',
'VALY': 'VLY',
'VLLY': 'VLY',
'VLY': 'VLY',
'VALLEYS': 'VLYS',
'VLYS-R': 'VALLEYS',
'VLYS': 'VLYS',
'VIEW': 'VW',
'VW-R': 'VIEW',
'VW': 'VW',
'VIEWS': 'VWS',
'VWS-R': 'VIEWS',
'VWS': 'VWS',
'WALK': 'WALK',
'WALK-R': 'WALK',
'WALKS': 'WALK',
'WLK': 'WALK',
'WALL': 'WALL',
'WALL-R': 'WALL',
'WAY': 'WAY',
'WAY-R': 'WAY',
'WY': 'WAY',
'WAYS': 'WAYS',
'WAYS-R': 'WAYS',
'WEL': 'WL',
'WELL': 'WL',
'WL-R': 'WELL',
'WL': 'WL',
'WELLS': 'WLS',
'WLS-R': 'WELLS',
'WELS': 'WLS',
'WLS': 'WLS',
'CROSING': 'XING',
'CROSNG': 'XING',
'CROSSING': 'XING',
'XING-R': 'CROSSING',
'CRSING': 'XING',
'CRSNG': 'XING',
'CRSSING': 'XING',
'CRSSNG': 'XING',
'XING': 'XING',
'CROSRD': 'XRD',
'CROSSRD': 'XRD',
'CROSSROAD': 'XRD',
'XRD-R': 'CROSSROAD',
'CRSRD': 'XRD',
'XRD': 'XRD',
'XROAD': 'XRD',
}
# An array with things that look like street types but are actually
# names
self.suffixSimiles = {
'LA': 'LA',
'ST': 'SAINT',
'VIA': 'VIA',
}
#**
#* Formats a Delivery Address Line according to the United States Postal
#* Service's Addressing Standards
#*
#* This comes in VERY handy when searching for records by address.
#* Let's say a data entry person put an address in as
#* "Two N Boulevard." Later, someone else searches for them using
#* "2 North Blvd." Unfortunately, that query won't find them. Such
#* problems are averted by using this method before storing and
#* searching for data.
#*
#* Standardization can also help obtain lower bulk mailing rates.
#*
#* Based upon USPS Publication 28, November 1997.
#*
#* @param string address the address to be converted
#*
#* @return string the cleaned up address
#*
#* @link http:#pe.usps.gov/cpim/ftp/pubs/Pub28/pub28.pdf
#*
def AddressLineStandardization(self, address):
if not address:
return ''
# General input sanitization.
address = address.upper()
# Replace bogus characters with spaces.
address = re.sub(r'[^A-Z0-9 /#.-]', ' ', address)
# Remove starting and ending spaces.
address = address.strip()
# Remove periods from ends.
address = re.sub(r'\.$', '', address)
# Add spaces around hash marks to simplify later processing.
address = address.replace('#', ' # ')
# Remove duplicate separators and spacing around separators,
# simplifying the next few steps.
address = re.sub(r' *([/.-])+ *', '\\1', address)
# Remove dashes between numberic/non-numerics combinations
# at ends of lines (for apartment numbers "32-D" -> "32D").
address = re.sub(r'(?<=[0-9])-(?=[^0-9]+$)', '', address)
address = re.sub(r'(?<=[^0-9])-(?=[0-9]+$)', '', address)
# Replace remaining separators with spaces.
address = re.sub(r'(?<=[^0-9])[/.-](?=[^0-9])', ' ', address)
address = re.sub(r'(?<=[0-9])[/.-](?=[^0-9])', ' ', address)
address = re.sub(r'(?<=[^0-9])[/.-](?=[0-9])', ' ', address)
# Remove duplilcate spaces.
address = re.sub(r'\s+', ' ', address)
# Remove hash marks where possible.
atom = re.match(r'(.+ )([A-Z]+)( #)( .+)', address)
if atom:
if atom.group(2) in self.identifiers:
address = atom.group(1) + atom.group(2) + atom.group(4)
address = address.strip()
if not address:
return ''
# Convert numeric words to integers.
parts = address.split(' ')
for key, val in enumerate(parts):
if val in self.numbers:
parts[key] = self.numbers[val]
address = ' '.join(parts)
# XXX: Expression Disabled
#address = re.sub(r' ([0-9]+)(ST|ND|RD|TH)? ?(?>FLOOR|FLR|FL)(?! [0-9])', r' FL \\1', address)
address = re.sub(r'(NORTH|SOUTH) (EAST|WEST)', '\\1\\2', address)
# Check for special addresses.
rural_alternatives = r'RR|RFD ROUTE|RURAL ROUTE|RURAL RTE|RURAL RT|RURAL DELIVERY|RD ROUTE|RD RTE|RD RT'
atom = re.match(r'^(' + rural_alternatives +
r') ?([0-9]+)([A-Z #]+)([0-9A-Z]+)(.*)$', address)
if atom:
return "RR " + atom.group(2) + " BOX " + atom.group(4)
atom = re.match(r'^(BOX|BX)([ #]*)([0-9A-Z]+) (' +
rural_alternatives + r') ?([0-9]+)(.*)$', address)
if atom:
return "RR " + atom.group(5) + " BOX " + atom.group(3)
atom = re.match(
r'^((((POST|P) ?(OFFICE|O) ?)?(BOX|BX|B) |(POST|P) ?(OFFICE|O) ?)|FIRM CALLER|CALLER|BIN|LOCKBOX|DRAWER)( ?(# )*)([0-9A-Z-]+)(.*)$', address)
if atom:
return "PO BOX " + atom.group(11)
highway_alternatives = r'HIGHWAY|HIGHWY|HIWAY|HIWY|HWAY|HWY|HYGHWAY|HYWAY|HYWY'
atom = re.match(r'^([0-9A-Z.-]+ ?[0-9/]* ?)(.*)( CNTY| COUNTY) (' +
highway_alternatives + r')( NO | # | )?([0-9A-Z]+)(.*)$', address)
if atom:
state = atom.group(2)
if state in self.states:
state = self.states[state]
identifier = atom.group(6)
if identifier in identifier:
identifier = self.identifiers[identifier]
number = atom.group(7).replace(' #', '')
return atom.group(1) + state + " COUNTY HWY " + identifier + number
return atom.group(1) + state + " COUNTY HIGHWAY " + identifier + self.getEolAbbr(atom.group(7))
atom = re.match(
r'^([0-9A-Z.-]+ ?[0-9/]* ?)(.*)( CR |( CNTY| COUNTY) (ROAD|RD))( NO | # | )?([0-9A-Z]+)(.*)$', address)
if atom:
state = atom.group(2)
if state in self.states:
state = self.states[state]
identifier = atom.group(7)
if identifier in self.identifiers:
identifier = self.identifiers[identifier]
number = atom.group(8).replace(' #', '')
return atom.group(1) + state + " COUNTY RD " + identifier + number
return atom.group(1) + state + " COUNTY ROAD " + identifier + self.getEolAbbr(atom.group(8))
atom = re.match(
r'^([0-9A-Z.-]+ ?[0-9/]* ?)(.*)( SR|( STATE| ST) (ROAD|RD))( NO | # | )?([0-9A-Z]+)(.*)$', address)
if atom:
state = atom.group(2)
if state in self.states:
state = self.states[state]
identifier = atom.group(7)
if identifier in self.identifiers:
identifier = self.identifiers[identifier]
number = atom.group(8).replace(' #', '')
return atom.group(1) + state + " STATE RD " + identifier + number
return atom.group(1) + state + " STATE ROAD " + identifier + self.getEolAbbr(atom.group(8))
atom = re.match(
r'^([0-9A-Z.-]+ ?[0-9/]* ?)(.*)( STATE| ST) (ROUTE|RTE|RT)( NO | # | )?([0-9A-Z]+)(.*)$', address)
if atom:
state = atom.group(2)
if state in self.states:
state = self.states[state]
identifier = atom.group(6)
if identifier in self.identifiers:
identifier = self.identifiers[identifier]
number = atom.group(7).replace(' #', '')
return atom.group(1) + state + " STATE RTE " + identifier + number
return atom.group(1) + state + " STATE ROUTE " + identifier + self.getEolAbbr(atom.group(7))
atom = re.match(r'^([0-9A-Z.-]+ [0-9/]* ?)(INTERSTATE|INTRST|INT|I) ?(' +
highway_alternatives + r'|H)? ?([0-9]+)(.*)$', address)
if atom:
number = atom.group(5).replace(' BYP ', ' BYPASS ')
return atom.group(1) + "INTERSTATE " + atom.group(4) + self.getEolAbbr(number)
atom = re.match(r'^([0-9A-Z.-]+ ?[0-9/]* ?)(.*)( STATE| ST) (' +
highway_alternatives + r')( NO | # | )?([0-9A-Z]+)(.*)$', address)
if atom:
state = atom.group(2)
if state in self.states:
state = self.states[state]
identifier = atom.group(6)
if identifier in self.identifiers:
identifier = self.identifiers[identifier]
number = atom.group(7).replace(' #', '')
return atom.group(1) + state + " STATE HWY " + identifier + number
return atom.group(1) + state + " STATE HIGHWAY " + identifier + self.getEolAbbr(atom.group(7))
atom = re.match(r'^([0-9A-Z.-]+ ?[0-9/]* ?)(.*)( US| U S|UNITED STATES) (' +
highway_alternatives + r')( NO | # | )?([0-9A-Z]+)(.*)$', address)
if atom:
state = atom.group(2)
if state in self.states:
state = self.states[state]
identifier = atom.group(6)
if identifier in self.identifiers:
identifier = self.identifiers[identifier]
number = atom.group(7).replace(' #', '')
return atom.group(1) + state + " US HWY " + identifier + number
return atom.group(1) + state + " US HIGHWAY " + identifier + self.getEolAbbr(atom.group(7))
atom = re.match(r'^([0-9A-Z.-]+ ?[0-9/]* ?)(.*) (' +
highway_alternatives + r')( NO | # | )?([0-9A-Z]+)(.*)$', address)
if atom:
state = atom.group(2)
if state in self.states:
state = self.states[state]
identifier = atom.group(5)
if identifier in self.identifiers:
identifier = self.identifiers[identifier]
number = atom.group(6).replace(' #', '')
return atom.group(1) + state + " HWY " + identifier + number
return atom.group(1) + state + " HIGHWAY " + identifier + self.getEolAbbr(atom.group(6))
atom = re.match(r'^((' + highway_alternatives +
r'|H) ?(CONTRACT|C)|STAR) ?(ROUTE|RTE|RT|R)?( NO | # | )?([0-9]+) ?([A-Z]+)(.*)$', address)
if atom:
return "HC " + atom.group(6) + " BOX" + self.getEolAbbr(atom.group(8))
atom = re.match(
r'^([0-9A-Z.-]+ [0-9/]* ?)(RANCH )(ROAD|RD)( NO | # | )?([0-9A-Z]+)(.*)$', address)
if atom:
identifier = atom.group(5)
if identifier in self.identifiers:
identifier = self.identifiers[identifier]
number = atom.group(6).replace(' #', '')
return atom.group(1) + "RANCH RD " + identifier + number
return atom.group(1) + "RANCH ROAD " + identifier + self.getEolAbbr(atom.group(6))
address = re.sub(r'^([0-9A-Z.-]+) ([0-9][/][0-9])', '\\1%\\2', address)
atom = re.match(
r'^([0-9A-Z/%.-]+ )(ROAD|RD)([A-Z #]+)([0-9A-Z]+)(.*)$', address)
if atom:
prefix = atom.group(1).replace('%', ' ')
return prefix + "ROAD " + atom.group(4) + self.getEolAbbr(atom.group(5))
atom = re.match(
r'^([0-9A-Z/%.-]+ )(ROUTE|RTE|RT)([A-Z #]+)([0-9A-Z]+)(.*)$', address)
if atom:
prefix = atom.group(1).replace('%', ' ')
return prefix + "ROUTE " + atom.group(4) + self.getEolAbbr(atom.group(5))
atom = re.match(
r'^([0-9A-Z/%.-]+ )(AVENUE|AVENU|AVNUE|AVEN|AVN|AVE|AV) ([A-Z]+)(.*)$', address)
if atom:
prefix = atom.group(1).replace('%', ' ')
return prefix + "AVENUE " + atom.group(3) + self.getEolAbbr(atom.group(4))
atom = re.match(
r'^([0-9A-Z/%.-]+ )(BOULEVARD|BOULV|BOUL|BLVD) ([A-Z]+)(.*)$', address)
if atom:
prefix = atom.group(1).replace('%', ' ')
return prefix + "BOULEVARD " + self.getEolAbbr(atom.group(3) + atom.group(4))
# Handle normal addresses.
parts = address.split(' ')
count = len(parts) - 1
firstsuff = None
suff = 0
id_count = 0
out = [None] * len(parts)
for counter in range(count, -1, -1):
out[counter] = parts[counter]
if parts[counter] in self.suffixes:
if not suff:
# The first suffix (from the right).
firstsuff = counter
if counter + 2 < count and (not out[counter + 1]) and (not out[counter + 2]):
switch = out[counter + 1] + ' ' + out[counter + 2]
if 'EAST W' != switch and \
'WEST E' != switch and \
'NORTH S' != switch and \
'SOUTH N' != switch:
out[counter] = self.suffixes[parts[counter]]
else:
out[counter] = self.suffixes[parts[counter]]
if counter == count:
id_count += 1
else:
# A subsequent suffix, display as full word,
# but could be a name (ie: LA, SAINT or VIA).
if parts[counter] in self.suffixSimiles \
and not out[counter + 1] in self.suffixes:
out[counter] = self.suffixSimiles[parts[counter]]
else:
out[counter] = self.suffixes[parts[counter]]
out[counter] = self.suffixes[out[counter] + "-R"]
suff += 1
elif parts[counter] in self.identifiers:
prior = counter - 1
if prior >= 0 and parts[prior] in self.identifiers and id_count == 0:
# Don't touch unit numbers
pass
else:
out[counter] = self.identifiers[parts[counter]]
if suff > 0:
out[counter] = self.identifiers[out[counter] + "-R"]
id_count += 1
elif parts[counter] in self.directionals:
prior = counter - 1
nextv = counter + 1
if prior >= 0 and parts[prior] in self.identifiers and id_count == 0:
# Don't touch unit numbers
pass
elif count >= nextv \
and nextv == firstsuff:
out[counter] = self.directionals[parts[counter]]
if suff <= 1:
out[counter] = self.directionals[out[counter] + "-R"]
elif counter > 2 \
and nextv <= count \
and parts[nextv] \
and parts[nextv] in self.directionals:
# Already set.
pass
elif counter == 2 \
and parts[prior] in self.directionals:
# Already set.
pass
else:
out[counter] = self.directionals[parts[counter]]
if count < 2:
out[counter] = self.directionals[out[counter] + "-R"]
if counter == count:
id_count = 1
elif re.match(r'^[0-9]+$', parts[counter]) \
and counter > 0 \
and counter < count:
if suff:
switch = int(parts[counter][-2:])
if 11 == parts or \
12 == parts or \
13 == parts:
out[counter] = parts[counter] + 'TH'
else:
switch = int(parts[counter][-1:])
if 1 == switch:
out[counter] = parts[counter] + 'ST'
elif 2 == switch:
out[counter] = parts[counter] + 'ND'
elif 3 == switch:
out[counter] = parts[counter] + 'RD'
else:
out[counter] = parts[counter] + 'TH'
out[0] = out[0].replace('%', ' ')
# ksort($out);
return ' '.join(out)
#**
#* Implement abbreviations for words at the ends of certain address lines
#*
#* @param string $string the address fragments to be analyzed
#*
#* @return string the cleaned up string
#*
def getEolAbbr(self, string):
suff = 0
id_count = 0
parts = string.split(' ')
count = len(parts) - 1
out = [None] * len(parts)
for counter in range(count, -1, -1):
if parts[counter] in self.suffixes:
if not suff:
out[counter] = self.suffixes[parts[counter]]
suff += 1
if counter == count:
id_count = 1
else:
out[counter] = parts[counter]
elif parts[counter] in self.identifiers:
out[counter] = self.identifiers[parts[counter]]
id_count = 1
elif parts[counter] in self.directionals:
out[counter] = self.directionals[parts[counter]]
if counter == count:
id_count = 1
else:
out[counter] = parts[counter]
# ksort($out);
return ' '.join(out)
| 2.578125 | 3 |
salt/states/boto_datapipeline.py | ipmb/salt | 0 | 12759237 | # -*- coding: utf-8 -*-
'''
Manage Data Pipelines
.. versionadded:: 2016.3.0
Be aware that this interacts with Amazon's services, and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
This module accepts explicit AWS credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
datapipeline.keyid: <KEY>
datapipeline.key: <KEY>
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile,
either passed in as a dict, or as a string to pull from pillars or minion
config:
.. code-block:: yaml
myprofile:
keyid: <KEY>
key: <KEY>
region: us-east-1
.. code-block:: yaml
Ensure daily data pipeline exists:
boto_datapipeline.present:
- name: my-datapipeline
- pipeline_objects:
DefaultSchedule:
name: Every 1 day
fields:
period: 1 Day
type: Schedule
startAt: FIRST_ACTIVATION_DATE_TIME
- parameter_values:
myDDBTableName: my-dynamo-table
'''
# Import Python libs
from __future__ import absolute_import
import copy
import datetime
import difflib
# Import Salt libs
import salt.utils.json
from salt.ext import six
from salt.ext.six.moves import zip
def __virtual__():
    '''
    Only load if boto is available.

    Salt calls this loader hook to decide whether to expose this state
    module; it keys off the matching execution module having loaded
    (which itself requires boto3).
    '''
    return 'boto_datapipeline' if 'boto_datapipeline.create_pipeline' in __salt__ else False
def present(name, pipeline_objects=None,
            pipeline_objects_from_pillars='boto_datapipeline_pipeline_objects',
            parameter_objects=None,
            parameter_objects_from_pillars='boto_datapipeline_parameter_objects',
            parameter_values=None,
            parameter_values_from_pillars='boto_datapipeline_parameter_values',
            region=None,
            key=None, keyid=None, profile=None):
    '''
    Ensure the data pipeline exists with matching definition.

    If the active definition already matches, nothing is changed.
    Otherwise the pipeline is created (or deleted and re-created when
    immutable fields differ), its definition uploaded, and the pipeline
    activated.

    name
        Name of the service to ensure a data pipeline exists for.

    pipeline_objects
        Pipeline objects to use. Will override objects read from pillars.

    pipeline_objects_from_pillars
        The pillar key to use for lookup.

    parameter_objects
        Parameter objects to use. Will override objects read from pillars.

    parameter_objects_from_pillars
        The pillar key to use for lookup.

    parameter_values
        Parameter values to use. Will override values read from pillars.

    parameter_values_from_pillars
        The pillar key to use for lookup.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    pipeline_objects = pipeline_objects or {}
    parameter_objects = parameter_objects or {}
    parameter_values = parameter_values or {}

    already_present, old_pipeline_definition = _pipeline_present_with_definition(
        name,
        _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
        _parameter_objects(parameter_objects_from_pillars, parameter_objects),
        _parameter_values(parameter_values_from_pillars, parameter_values),
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if already_present:
        ret['comment'] = 'AWS data pipeline {0} present'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Data pipeline {0} is set to be created or updated'.format(name)
        ret['result'] = None
        return ret

    # create_pipeline is idempotent for a given (name, unique_id) pair.
    result_create_pipeline = __salt__['boto_datapipeline.create_pipeline'](
        name,
        name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if 'error' in result_create_pipeline:
        ret['result'] = False
        ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
            name, result_create_pipeline['error'])
        return ret

    pipeline_id = result_create_pipeline['result']

    result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition'](
        pipeline_id,
        _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
        parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects),
        parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values),
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if 'error' in result_pipeline_definition:
        if _immutable_fields_error(result_pipeline_definition):
            # Immutable fields cannot be updated in place:
            # delete the pipeline, re-create it, and retry the upload.
            result_delete_pipeline = __salt__['boto_datapipeline.delete_pipeline'](
                pipeline_id,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if 'error' in result_delete_pipeline:
                ret['result'] = False
                ret['comment'] = 'Failed to delete data pipeline {0}: {1}'.format(
                    pipeline_id, result_delete_pipeline['error'])
                return ret

            result_create_pipeline = __salt__['boto_datapipeline.create_pipeline'](
                name,
                name,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if 'error' in result_create_pipeline:
                ret['result'] = False
                ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
                    name, result_create_pipeline['error'])
                return ret

            pipeline_id = result_create_pipeline['result']

            result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition'](
                pipeline_id,
                _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
                parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects),
                parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values),
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )

        if 'error' in result_pipeline_definition:
            # Still erroring after possible retry
            ret['result'] = False
            ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
                name, result_pipeline_definition['error'])
            return ret

    result_activate_pipeline = __salt__['boto_datapipeline.activate_pipeline'](
        pipeline_id,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if 'error' in result_activate_pipeline:
        ret['result'] = False
        # Bug fix: this branch previously reported
        # result_pipeline_definition['error'] (and said "Failed to
        # create") although it was the activation call that failed.
        ret['comment'] = 'Failed to activate data pipeline {0}: {1}'.format(
            name, result_activate_pipeline['error'])
        return ret

    pipeline_definition_result = __salt__['boto_datapipeline.get_pipeline_definition'](
        pipeline_id,
        version='active',
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if 'error' in pipeline_definition_result:
        new_pipeline_definition = {}
    else:
        new_pipeline_definition = _standardize(pipeline_definition_result['result'])

    if not old_pipeline_definition:
        ret['changes']['new'] = 'Pipeline created.'
        ret['comment'] = 'Data pipeline {0} created'.format(name)
    else:
        ret['changes']['diff'] = _diff(old_pipeline_definition, new_pipeline_definition)
        ret['comment'] = 'Data pipeline {0} updated'.format(name)
    return ret
def _immutable_fields_error(result_pipeline_definition):
'''Return true if update pipeline failed due to immutable fields
Some fields cannot be changed after a pipeline has been activated.
http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-manage-pipeline-modify-console.html#dp-edit-pipeline-limits
'''
for e in result_pipeline_definition['error']:
for e2 in e["errors"]:
if "can not be changed" in e2:
return True
return False
def _pipeline_present_with_definition(name, expected_pipeline_objects,
                                      expected_parameter_objects,
                                      expected_parameter_values, region, key,
                                      keyid, profile):
    '''
    Return (present, definition): whether the pipeline exists with a matching
    active definition, plus the standardized definition that was found
    (empty dict when the pipeline or its definition could not be fetched).

    name
        The name of the pipeline.

    expected_pipeline_objects
        Pipeline objects that must match the definition.

    expected_parameter_objects
        Parameter objects that must match the definition.

    expected_parameter_values
        Parameter values that must match the definition.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    result_pipeline_id = __salt__['boto_datapipeline.pipeline_id_from_name'](
        name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if 'error' in result_pipeline_id:
        # No pipeline with this name (or lookup failed): not present.
        return False, {}

    pipeline_id = result_pipeline_id['result']
    pipeline_definition_result = __salt__['boto_datapipeline.get_pipeline_definition'](
        pipeline_id,
        version='active',
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if 'error' in pipeline_definition_result:
        return False, {}

    pipeline_definition = _standardize(pipeline_definition_result['result'])

    pipeline_objects = pipeline_definition.get('pipelineObjects')
    parameter_objects = pipeline_definition.get('parameterObjects')
    parameter_values = pipeline_definition.get('parameterValues')

    # _cleaned drops the date part of startDateTime so runs scheduled at
    # the same time of day on different days still compare equal.
    present = (_recursive_compare(_cleaned(pipeline_objects), _cleaned(expected_pipeline_objects)) and
               _recursive_compare(parameter_objects, expected_parameter_objects) and
               _recursive_compare(parameter_values, expected_parameter_values))
    return present, pipeline_definition
def _cleaned(_pipeline_objects):
"""Return standardized pipeline objects to be used for comparing
Remove year, month, and day components of the startDateTime so that data
pipelines with the same time of day but different days are considered
equal.
"""
pipeline_objects = copy.deepcopy(_pipeline_objects)
for pipeline_object in pipeline_objects:
if pipeline_object['id'] == 'DefaultSchedule':
for field_object in pipeline_object['fields']:
if field_object['key'] == 'startDateTime':
start_date_time_string = field_object['stringValue']
start_date_time = datetime.datetime.strptime(start_date_time_string,
"%Y-%m-%dT%H:%M:%S")
field_object['stringValue'] = start_date_time.strftime("%H:%M:%S")
return pipeline_objects
def _recursive_compare(v1, v2):
'''
Return v1 == v2. Compares list, dict, recursively.
'''
if isinstance(v1, list):
if v2 is None:
v2 = []
if len(v1) != len(v2):
return False
v1.sort(key=_id_or_key)
v2.sort(key=_id_or_key)
for x, y in zip(v1, v2):
if not _recursive_compare(x, y):
return False
return True
elif isinstance(v1, dict):
if v2 is None:
v2 = {}
v1 = dict(v1)
v2 = dict(v2)
if sorted(v1) != sorted(v2):
return False
for k in v1:
if not _recursive_compare(v1[k], v2[k]):
return False
return True
else:
return v1 == v2
def _id_or_key(list_item):
'''
Return the value at key 'id' or 'key'.
'''
if isinstance(list_item, dict):
if 'id' in list_item:
return list_item['id']
if 'key' in list_item:
return list_item['key']
return list_item
def _diff(old_pipeline_definition, new_pipeline_definition):
    '''
    Return a unified-diff string of the two pipeline definitions.

    The AWS ``ResponseMetadata`` entries are dropped first since they
    vary between API calls without representing a real change.
    '''
    for definition in (old_pipeline_definition, new_pipeline_definition):
        definition.pop('ResponseMetadata', None)

    old_lines = salt.utils.json.dumps(old_pipeline_definition, indent=4).splitlines(True)
    new_lines = salt.utils.json.dumps(new_pipeline_definition, indent=4).splitlines(True)
    diff = difflib.unified_diff(old_lines, new_lines)
    return str('').join(diff)  # future lint: disable=blacklisted-function
def _standardize(structure):
    '''
    Return a standardized copy of nested lists/dictionaries.

    Lists of dictionaries are sorted by the value of the dictionary at
    its primary key ('id' or 'key'). OrderedDict's (and any other dict
    subclass) are converted to basic dictionaries.

    Bug fix: the previous implementation's ``structure = dict(structure)``
    only rebound a local variable inside its helper, so the documented
    OrderedDict-to-dict conversion never actually happened. The structure
    is now rebuilt bottom-up, which also avoids in-place mutation.
    '''
    def _standardized(value):
        if isinstance(value, list):
            # Sorting after recursion is equivalent: the 'id'/'key'
            # entries used as sort keys are not changed by recursion.
            return sorted((_standardized(each) for each in value),
                          key=_id_or_key)
        if isinstance(value, dict):
            return dict((k, _standardized(v))
                        for k, v in six.iteritems(value))
        return value

    # deepcopy first so leaf objects are never shared with the input.
    return _standardized(copy.deepcopy(structure))
def _pipeline_objects(pipeline_objects_from_pillars, pipeline_object_overrides):
    '''
    Return a list of pipeline objects that compose the pipeline,
    in boto's list-of-objects layout with each object's ``fields``
    mapping expanded into key/value records.

    pipeline_objects_from_pillars
        The pillar key to use for lookup

    pipeline_object_overrides
        Pipeline objects to use. Will override objects read from pillars.
    '''
    # deepcopy so the pillar cache is never mutated by the update below.
    from_pillars = copy.deepcopy(__salt__['pillar.get'](pipeline_objects_from_pillars))
    from_pillars.update(pipeline_object_overrides)
    pipeline_objects = _standardize(_dict_to_list_ids(from_pillars))
    for pipeline_object in pipeline_objects:
        pipeline_object['fields'] = _properties_from_dict(pipeline_object['fields'])
    return pipeline_objects
def _parameter_objects(parameter_objects_from_pillars, parameter_object_overrides):
    '''
    Return a list of parameter objects that configure the pipeline,
    in boto's layout with each object's ``attributes`` mapping expanded
    into key/value records.

    parameter_objects_from_pillars
        The pillar key to use for lookup

    parameter_object_overrides
        Parameter objects to use. Will override objects read from pillars.
    '''
    # deepcopy so the pillar cache is never mutated by the update below.
    from_pillars = copy.deepcopy(__salt__['pillar.get'](parameter_objects_from_pillars))
    from_pillars.update(parameter_object_overrides)
    parameter_objects = _standardize(_dict_to_list_ids(from_pillars))
    for parameter_object in parameter_objects:
        parameter_object['attributes'] = _properties_from_dict(parameter_object['attributes'])
    return parameter_objects
def _parameter_values(parameter_values_from_pillars, parameter_value_overrides):
    '''
    Return a list of parameter values that configure the pipeline,
    expanded into boto's id/value records.

    parameter_values_from_pillars
        The pillar key to use for lookup

    parameter_value_overrides
        Parameter values to use. Will override values read from pillars.
    '''
    # deepcopy so the pillar cache is never mutated by the update below.
    from_pillars = copy.deepcopy(__salt__['pillar.get'](parameter_values_from_pillars))
    from_pillars.update(parameter_value_overrides)
    parameter_values = _standardize(from_pillars)
    return _properties_from_dict(parameter_values, key_name='id')
def _dict_to_list_ids(objects):
    '''
    Convert a mapping to a list of dicts, each element carrying its key
    as ``{'id': key}``. This makes it easy to override pillar values
    while still satisfying the boto api.
    '''
    def _with_id(object_id, attributes):
        # Build {'id': ...} first so an explicit 'id' in attributes wins.
        entry = {'id': object_id}
        entry.update(attributes)
        return entry

    return [_with_id(object_id, attributes)
            for object_id, attributes in objects.items()]
def _properties_from_dict(d, key_name='key'):
    '''
    Transforms dictionary into pipeline object properties.

    The output format conforms to boto's specification: dict values are
    emitted as ``refValue`` records (taking their 'ref' entry), anything
    else as a ``stringValue`` record.

    Example input:
        {
            'a': '1',
            'b': {
                'ref': '2'
            },
        }

    Example output:
        [
            {
                'key': 'a',
                'stringValue': '1',
            },
            {
                'key': 'b',
                'refValue': '2',
            },
        ]
    '''
    return [
        {key_name: prop_key, 'refValue': prop_value['ref']}
        if isinstance(prop_value, dict)
        else {key_name: prop_key, 'stringValue': prop_value}
        for prop_key, prop_value in d.items()
    ]
def absent(name, region=None, key=None, keyid=None, profile=None):
    '''
    Ensure a pipeline with the service_name does not exist

    name
        Name of the service to ensure a data pipeline does not exist for.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    result_pipeline_id = __salt__['boto_datapipeline.pipeline_id_from_name'](
        name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if 'error' not in result_pipeline_id:
        pipeline_id = result_pipeline_id['result']
        if __opts__['test']:
            ret['comment'] = 'Data pipeline {0} set to be deleted.'.format(name)
            ret['result'] = None
            return ret
        else:
            # NOTE(review): the delete_pipeline return value is not
            # checked, so a failed delete is still reported as success —
            # confirm whether that is intended.
            __salt__['boto_datapipeline.delete_pipeline'](
                pipeline_id,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            ret['changes']['old'] = {'pipeline_id': pipeline_id}
            ret['changes']['new'] = None
    else:
        # Lookup failed: assume the pipeline is already gone.
        ret['comment'] = 'AWS data pipeline {0} absent.'.format(name)
    return ret
| 2.109375 | 2 |
components.py | shadowfax11/image_sys | 0 | 12759238 | import numpy as np
from numpy.core.function_base import linspace
EPS = 1e-9  # nudges linspace upper bounds so endpoints are not lost to rounding


class CameraSensor:
    """
    Defines the camera sensor properties.
    """

    def __init__(self, image_size, pitch, RGB=False, name=None, bayer_pattern=None):
        """
        Args:
            image_size: (height, width) of the sensor in pixels.
            pitch: pixel pitch in meters; a scalar, a length-1 sequence
                (same pitch both ways) or a (pitch_y, pitch_x) pair.
            RGB: True for a color (Bayer) sensor, False for monochrome.
            name: optional name of the camera sensor.
            bayer_pattern: Bayer layout for RGB sensors (default 'RGGB').
        """
        self.img_size = np.array([image_size[0], image_size[1]])  # image height, width (in pixels)
        self.img_cntr = np.array([int(image_size[0]/2), int(image_size[1]/2)])
        if RGB:
            self.type = 'RGB'
            self.C = 3  # number of channels
            if bayer_pattern is not None:
                self.bayer_pattern = bayer_pattern
            else:
                self.bayer_pattern = 'RGGB'
        else:
            self.type = 'Mono'
            self.C = 1
        # Bug fix: the previous ``np.array([pitch, pitch])`` for a
        # length-1 sequence produced a mis-shaped (2, 1) array, and a
        # scalar pitch crashed on len(); both are handled now.
        if np.isscalar(pitch):
            self.px_size = np.array([pitch, pitch])  # should be in meters
        elif len(pitch) == 1:
            self.px_size = np.array([pitch[0], pitch[0]])  # should be in meters
        else:
            self.px_size = np.array([pitch[0], pitch[1]])  # should be in meters
        self.name = name  # name of camera sensor (optional)

        # create coordinate system for image plane (sensor-centered, meters)
        dh, dw = self.px_size[0], self.px_size[1]
        h, w = dh*self.img_size[0], dw*self.img_size[1]
        self.x_sensor = np.linspace(-w/2 + dw/2, +w/2 - dw/2 + EPS, self.img_size[1])
        self.y_sensor = np.linspace(-h/2 + dh/2, +h/2 - dh/2 + EPS, self.img_size[0])
        self.X_sensor, self.Y_sensor = np.meshgrid(self.x_sensor, self.y_sensor)

    def get_physical_sensor_size(self):
        """
        Returns the physical sensor size as (height_mm, width_mm).
        """
        # Bug fix: np.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float is the drop-in replacement.
        height_mm = float(self.px_size[0]*self.img_size[0])*1000
        width_mm = float(self.px_size[1]*self.img_size[1])*1000
        return height_mm, width_mm
class Lens:
    """Thin-lens description: focal length ``f`` and aperture diameter ``D``."""

    def __init__(self, f, D=None):
        self.f = f
        # D stays None when the aperture diameter is irrelevant.
        self.D = D
class Mask:
    """
    Class for creating an amplitude/phase mask.
    """
    def __init__(self, mask_pattern, mask_size):
        """
        mask_pattern (numpy.ndarray): 2D array of values (real or complex)
        mask_size (list or numpy.array): Physical size of mask (h x w). Units of meters

        mask_pattern array values should have magnitude should be between [0, 1] for realistic mask patterns.
        """
        self.mask = mask_pattern  # may be complex-valued (numpy 2D array)
        rows, cols = mask_pattern.shape[0], mask_pattern.shape[1]
        self.mask_size = np.array([mask_size[0], mask_size[1]])
        self.mask_pitch = np.array([mask_size[0] / rows, mask_size[1] / cols])

        # Create the (mask-centered) coordinate system on the mask plane.
        height, width = self.mask_size[0], self.mask_size[1]
        dh, dw = self.mask_pitch[0], self.mask_pitch[1]
        self.x_mask = np.linspace(-width / 2 + dw / 2, +width / 2 - dw / 2 + EPS, num=cols)
        self.y_mask = np.linspace(-height / 2 + dh / 2, +height / 2 - dh / 2 + EPS, num=rows)
        self.X_mask, self.Y_mask = np.meshgrid(self.x_mask, self.y_mask)
scripts/build_meta_learning_model.py | urbanmatthias/Auto-PyTorch | 0 | 12759239 | <filename>scripts/build_meta_learning_model.py
import os, sys
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..")))
from autoPyTorch.utils.config.config_file_parser import ConfigFileParser
from autoPyTorch.utils.metalearning.meta_model_builder import MetaModelBuilder
from hpbandster.core.nameserver import nic_name_to_host
import argparse
import json
__author__ = "<NAME>, <NAME> and <NAME>"
__version__ = "0.0.1"
__license__ = "BSD"

# Script body: parse CLI flags, merge them into the benchmark config
# read from the given file, and run the meta-model builder.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run benchmarks for autonet.')
    parser.add_argument("--run_id_range", default=None, help="An id for the run. A range of run ids can be given: start-stop.")
    parser.add_argument("--result_dir", default=None, help="Override result dir in benchmark config.")
    parser.add_argument("--only_finished_runs", action='store_true', help="Only consider finished runs")
    parser.add_argument("--save_path", default="metamodels", help="Store the meta learning models in given filename")
    parser.add_argument("--learn_warmstarted_model", action='store_true', help="Learn a warmstarted model")
    parser.add_argument("--learn_initial_design", action='store_true', help="Learn an initial_design")
    parser.add_argument("--calculate_loss_matrix_entry", default=-1, type=int, help="Calculate an entry of the cost matrix used for initial design")
    parser.add_argument("--print_missing_loss_matrix_entries", action='store_true', help='Which loss matrix entries are not yet computed.')
    parser.add_argument("--memory_limit_mb", default=None, type=int)
    parser.add_argument("--time_limit_per_entry", default=None, type=int)
    parser.add_argument("--initial_design_max_total_budget", default=None, type=float)
    parser.add_argument("--initial_design_convergence_threshold", default=None, type=float)
    parser.add_argument("--loss_matrix_db_config_file", default=None, type=str, help="File specifying mongodb host / port / login credentials")
    parser.add_argument("--leave_out", default=-1, type=int, help="Leave out dataset with given index")
    parser.add_argument("--initial_design_force_num_max_budget", default=0, type=int,
                        help="Force the number of configurations evaluated on the full budget to be a specific value")
    parser.add_argument("--initial_design_force_num_sh_iter", default=0, type=int,
                        help="Force the number of SH-iterations to be a specific value")
    parser.add_argument("--initial_design_normalize_loss", default="normalized_distance_to_min", type=str,
                        help="Strategy to normalize the loss")
    parser.add_argument('benchmark', help='The benchmark to learn from')
    args = parser.parse_args()

    # "start-stop" expands to an inclusive range; a single id becomes a
    # one-element range. None means "all runs".
    run_id_range = args.run_id_range
    if args.run_id_range is not None:
        if "-" in args.run_id_range:
            run_id_range = range(int(args.run_id_range.split("-")[0]), int(args.run_id_range.split("-")[1]) + 1)
        else:
            run_id_range = range(int(args.run_id_range), int(args.run_id_range) + 1)

    config_file = args.benchmark
    config_parser = MetaModelBuilder().get_benchmark_config_file_parser()
    config = config_parser.read(config_file)

    if (args.result_dir is not None):
        config['result_dir'] = os.path.join(ConfigFileParser.get_autonet_home(), args.result_dir)

    # Command-line flags override / extend the parsed benchmark config.
    config['run_id_range'] = run_id_range
    config['save_path'] = args.save_path
    config['memory_limit_mb'] = args.memory_limit_mb
    config['time_limit_per_entry'] = args.time_limit_per_entry
    config["learn_warmstarted_model"] = args.learn_warmstarted_model
    config["learn_initial_design"] = args.learn_initial_design
    config["calculate_loss_matrix_entry"] = args.calculate_loss_matrix_entry
    config["only_finished_runs"] = args.only_finished_runs
    config["initial_design_max_total_budget"] = args.initial_design_max_total_budget
    config["initial_design_convergence_threshold"] = args.initial_design_convergence_threshold
    config["print_missing_loss_matrix_entries"] = args.print_missing_loss_matrix_entries
    config["loss_matrix_db_config"] = dict()
    config["leave_out_instance"] = args.leave_out
    config["initial_design_force_num_sh_iter"] = args.initial_design_force_num_sh_iter
    config["initial_design_force_num_max_budget"] = args.initial_design_force_num_max_budget
    config["initial_design_normalize_loss"] = args.initial_design_normalize_loss
    config['benchmark_name'] = os.path.basename(args.benchmark).split(".")[0]

    # Optional MongoDB credentials for the loss-matrix store.
    if args.loss_matrix_db_config_file is not None:
        with open(args.loss_matrix_db_config_file) as f:
            config["loss_matrix_db_config"] = json.load(f)

    builder = MetaModelBuilder()
    builder.run(**config)
| 2.484375 | 2 |
src/python/nimbusml/examples/WordTokenizer.py | montehoover/NimbusML | 134 | 12759240 | ###############################################################################
# WordTokenizer
from nimbusml import Pipeline, FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.preprocessing.text import WordTokenizer
# data input (as a FileDataStream)
path = get_dataset("wiki_detox_train").as_filepath()
data = FileDataStream.read_csv(path, sep='\t')
print(data.head())
# Sentiment SentimentText
# 0 1 ==RUDE== Dude, you are rude upload that carl p...
# 1 1 == OK! == IM GOING TO VANDALIZE WILD ONES WIK...
# 2 1 Stop trolling, zapatancas, calling me a liar m...
# 3 1 ==You're cool== You seem like a really cool g...
# 4 1 ::::: Why are you threatening me? I'm not bein...
tokenize = WordTokenizer(char_array_term_separators=[" "]) << {'wt': 'SentimentText'}
pipeline = Pipeline([tokenize])
tokenize.fit(data)
y = tokenize.transform(data)
print(y.drop(labels='SentimentText', axis=1).head())
# Sentiment wt.000 wt.001 wt.002 wt.003 wt.004 wt.005 ... wt.366 wt.367 wt.368 wt.369 wt.370 wt.371 wt.372
# 0 1 ==RUDE== Dude, you are rude upload ... None None None None None None None
# 1 1 == OK! == IM GOING TO ... None None None None None None None
# 2 1 Stop trolling, zapatancas, calling me a ... None None None None None None None
# 3 1 ==You're cool== You seem like a ... None None None None None None None
# 4 1 ::::: Why are you threatening me? ... None None None None None None None
| 2.75 | 3 |
setup.py | trevorcampbell/ubvi | 5 | 12759241 | <filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages

# Read the long description up front; the context manager ensures the
# file handle is closed (the original `open(...).read()` leaked it).
with open('README.md') as readme:
    long_description = readme.read()

# Packaging metadata for the ubvi library.
setup(
    name='ubvi',
    url='https://github.com/trevorcampbell/ubvi',
    author='<NAME>',
    author_email='<EMAIL>',
    version='0.2',
    packages=find_packages(),
    license='MIT License',
    long_description=long_description,
    install_requires=['numpy', 'scipy', 'autograd'],
)
| 1.414063 | 1 |
ztag/__main__.py | 18121861183/ztag | 1 | 12759242 | import argparse
import sys
import json
from zsearch_definitions import protocols
from ztag.stream import Stream, Incoming, Outgoing, InputFile, OutputFile
from ztag.transform import Transform, Decoder, Encoder
from ztag.decoders import JSONDecoder
from ztag.encoders import JSONEncoder
from ztag.annotation import Annotation
from ztag.annotator import Annotator, AnnotationTesting
from ztag.transformer import ZMapTransformer
from ztag.log import Logger
from ztag.classargs import subclass_of
from datetime import datetime
def non_negative(s):
    """argparse ``type=`` helper: parse *s* as a non-negative int.

    Raises argparse.ArgumentTypeError (with a message) when the value is
    negative; a non-integer literal raises ValueError, which argparse
    also reports as an invalid value.
    """
    x = int(s)
    if x < 0:
        # Bug fix: the bare `raise argparse.ArgumentTypeError` produced
        # an empty error message; include a human-readable reason.
        raise argparse.ArgumentTypeError("value must be non-negative")
    return x
def uint16(s):
    """argparse ``type=`` helper: parse *s* as an int in [0, 65535].

    Raises argparse.ArgumentTypeError (with a message) when out of range.
    """
    x = int(s)
    if x < 0 or x > 65535:
        # Bug fix: the bare `raise argparse.ArgumentTypeError` produced
        # an empty error message; include a human-readable reason.
        raise argparse.ArgumentTypeError("value must be in the range [0, 65535]")
    return x
def zsearch_protocol(s):
    '''argparse ``type=`` helper: look up a protocol by its pretty name.'''
    try:
        return protocols.Protocol.from_pretty_name(s)
    except KeyError as e:
        # Unknown name: surface the lookup error as an argparse error.
        raise argparse.ArgumentTypeError(e)
def zsearch_subprotocol(s):
    '''argparse ``type=`` helper: look up a subprotocol by its pretty name.'''
    try:
        subprotocol = protocols.Subprotocol.from_pretty_name(s)
    except KeyError as e:
        # Unknown name: surface the lookup error as an argparse error.
        raise argparse.ArgumentTypeError(e)
    else:
        return subprotocol
def main():
    """Command-line entry point: stream scan records through the
    decode -> transform -> annotate -> encode pipeline, write them to
    the chosen output, then emit run metadata as JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--port', type=uint16,
                        help="Target port")
    parser.add_argument('-P', '--protocol',
                        type=zsearch_protocol)
    parser.add_argument('-S', '--subprotocol',
                        type=zsearch_subprotocol)
    parser.add_argument('-T', '--destination', default="full_ipv4",
                        type=str, choices=["full_ipv4", "alexa_top1mil"])
    parser.add_argument('-s', '--scan-id', required=False, type=non_negative)
    # parser.add_argument('-t', '--tags', type=tag_class)
    parser.add_argument('-I', '--incoming', type=subclass_of(Incoming),
                        default=None)
    parser.add_argument('-D', '--decoder', type=subclass_of(Decoder),
                        default=JSONDecoder)
    parser.add_argument('-X', '--transform', type=subclass_of(Transform),
                        default=None)
    parser.add_argument('-E', '--encoder', type=subclass_of(Encoder),
                        default=JSONEncoder)
    parser.add_argument('-O', '--outgoing', type=subclass_of(Outgoing),
                        default=OutputFile)
    parser.add_argument('-i', '--input-file', default=sys.stdin,
                        type=argparse.FileType('r'))
    parser.add_argument('-l', '--log-file', type=argparse.FileType('w'),
                        default=sys.stderr)
    parser.add_argument('--updates-file', default=sys.stderr,
                        type=argparse.FileType('w'))
    parser.add_argument('-v', '--log-level', type=int, default=Logger.INFO,
                        choices=range(0, Logger.TRACE + 1))
    parser.add_argument('-m', '--metadata-file', type=argparse.FileType('w'),
                        default=sys.stderr)
    parser.add_argument('--strip-domain-prefix', type=str, default=None)
    parser.add_argument('-d', '--debug', action='store_true')
    parser.add_argument('-t', '--tests', action='store_true')
    parser.add_argument('--safe-import', action='store_true')
    parser.add_argument('--no-safe-tests', action='store_true')
    args = parser.parse_args()

    # Annotations must be registered before tests or tagging can run.
    Annotation.load_annotations(args.safe_import)
    if args.tests:
        sys.exit(AnnotationTesting().run(args.no_safe_tests))

    # port/protocol/subprotocol are only required when actually tagging
    # (not for --tests), hence manual validation instead of required=True.
    if not args.port:
        sys.stderr.write("ERROR: port (-p/--port) required\n")
        sys.exit(1)
    if not args.protocol:
        proto_string = ", ".join(protocols.Protocol._by_pretty_name.keys())
        sys.stderr.write("ERROR: protocol (-P/--protocol) required\n")
        sys.stderr.write("Registered Protocols: %s\n" % proto_string)
        sys.exit(1)
    if not args.subprotocol:
        subproto_string = ", ".join(
            protocols.Subprotocol._by_pretty_name.keys())
        sys.stderr.write("ERROR: subprotocol (-S/--subprotocol) required\n")
        sys.stderr.write("Registered SubProtocols: %s\n" % subproto_string)
        sys.exit(1)
    metadata = dict()
    port = args.port
    protocol = args.protocol
    subprotocol = args.subprotocol
    scan_id = args.scan_id or 0
    transform_kwargs = dict()
    transform_args = list()
    logger = Logger(args.log_file, log_level=args.log_level)
    if args.strip_domain_prefix:
        # Normalize to a trailing dot so prefix matching is exact.
        if not args.strip_domain_prefix.endswith("."):
            args.strip_domain_prefix += "."
        logger.info("stripping prefix %s" % args.strip_domain_prefix)
        transform_kwargs['strip_domain_prefix'] = args.strip_domain_prefix
    # Use the explicitly requested transform, otherwise look up the one
    # registered for this (port, protocol, subprotocol) triple.
    if args.transform is not None:
        transform = args.transform(port, protocol, subprotocol, scan_id,
                                   *transform_args, **transform_kwargs)
    else:
        transform = ZMapTransformer.find_transform(port, protocol, subprotocol,
                                                   scan_id, *transform_args, **transform_kwargs)
    # Input source and decoder fall back to the transform's preference,
    # then to the generic file/JSON defaults.
    if args.incoming is not None:
        incoming = args.incoming(input_file=args.input_file)
    elif transform.incoming is not None:
        incoming = transform.incoming(input_file=args.input_file)
    else:
        incoming = InputFile(input_file=args.input_file)
    if args.decoder is not None:
        decoder = args.decoder(logger=logger)
    elif transform.decoder is not None:
        decoder = transform.decoder(logger=logger)
    else:
        decoder = JSONDecoder(logger=logger)
    encoder = args.encoder(port, protocol, subprotocol, scan_id)
    outgoing = args.outgoing(output_file=sys.stdout, logger=logger,
                             destination=args.destination)
    tagger = Annotator(port, protocol, subprotocol,
                       debug=args.debug, logger=logger)
    num_tags = len(tagger.eligible_tags)
    logger.info("found %d tags" % num_tags)
    metadata['eligible_tags'] = num_tags
    # Records flow through these stages in order.
    transforms = [
        decoder,
        transform,
        tagger,
        encoder,
    ]
    s = Stream(incoming, outgoing, transforms=transforms, logger=logger, updates=args.updates_file)
    start_time = datetime.utcnow()
    handled, skipped = s.run()
    end_time = datetime.utcnow()
    duration = end_time - start_time
    logger.info("handled %d records" % handled)
    logger.info("skipped %d records" % skipped)
    # Summarize the run as a single JSON object on the metadata stream.
    metadata['records_handled'] = handled
    metadata['records_skipped'] = skipped
    metadata['start_time'] = Logger.rfc_time_from_utc(start_time)
    metadata['end_time'] = Logger.rfc_time_from_utc(end_time)
    metadata['duration'] = int(duration.total_seconds())
    args.metadata_file.write(json.dumps(metadata))
    args.metadata_file.write("\n")
    args.metadata_file.flush()
args.metadata_file.flush()
# Script entry point.
if __name__ == "__main__":
    main()
| 2.375 | 2 |
src/sample/PlayerClass.py | TestowanieAutomatyczneUG/laboratorium-9-Grzeskii | 0 | 12759243 | class Player:
def getTime(self):
pass
def playWavFile(self, file):
pass
def wavWasPlayed(self):
pass
def resetWav(self):
pass | 1.75 | 2 |
projects/amygActivation/projectMain.py | Chibee/rt-cloud | 0 | 12759244 | import os
import sys
import argparse
import logging
# import project modules
# Add base project path (two directories up) so rtCommon is importable
# regardless of the working directory.
currPath = os.path.dirname(os.path.realpath(__file__))
rootPath = os.path.dirname(os.path.dirname(currPath))
sys.path.append(rootPath)
from rtCommon.utils import loadConfigFile, installLoggers
from rtCommon.structDict import StructDict
from rtCommon.projectInterface import Web

# Experiment script and default-config paths, resolved relative to this file.
defaultConfig = os.path.join(currPath, 'conf/amygActivation.toml')
expScript = os.path.join(currPath, 'amygActivation.py')
initScript = os.path.join(currPath, 'initialize.py')
finalizeScript = os.path.join(currPath, 'finalize.py')

if __name__ == "__main__":
    installLoggers(logging.INFO, logging.INFO, filename=os.path.join(currPath, 'logs/webServer.log'))
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--filesremote', '-x', default=False, action='store_true',
                           help='dicom files retrieved from remote server')
    argParser.add_argument('--config', '-c', default=defaultConfig, type=str,
                           help='experiment file (.json or .toml)')
    args = argParser.parse_args()

    # HERE: Set the path to the fMRI Python script to run here
    params = StructDict({'fmriPyScript': expScript,
                         'initScript': initScript,
                         'finalizeScript': finalizeScript,
                         'filesremote': args.filesremote,
                         'port': 8888,
                         })

    cfg = loadConfigFile(args.config)

    # Launch the project web server with the assembled parameters.
    web = Web()
    web.start(params, cfg)
| 2.046875 | 2 |
Questions/Serial Bomber Location/solution.py | leander-dsouza/Abhyudaya_2020 | 1 | 12759245 | <filename>Questions/Serial Bomber Location/solution.py
def countSetBits(num):
    """Return the number of 1-bits (popcount) in *num*'s binary form.

    Uses ``bin(num).count('1')`` instead of building an intermediate
    list of characters; the result is identical (for negative numbers
    both count the 1-bits of the magnitude, since bin() prefixes '-0b').
    """
    return bin(num).count('1')
if __name__ == "__main__":
    # Read n from stdin, then print the popcount of every integer
    # 0..n inclusive, separated by spaces.
    n = int(input())
    for i in range(n+1):
        print(countSetBits(i), end =' ')
| 3.6875 | 4 |
src/cosalib/azure.py | gustavold/coreos-assembler | 0 | 12759246 | from cosalib.cmdlib import run_verbose
def remove_azure_image(image, resource_group, auth, profile):
    """Delete the named image from Azure via the ``ore`` CLI.

    Raises Exception when the underlying command exits non-zero
    (run_verbose signals failure with SystemExit).
    """
    print(f"Azure: removing image {image}")
    cmd = [
        'ore', 'azure',
        '--azure-auth', auth,
        '--azure-profile', profile,
        'delete-image-arm',
        '--image-name', image,
        '--resource-group', resource_group,
    ]
    try:
        run_verbose(cmd)
    except SystemExit:
        raise Exception("Failed to remove image")
envoy.distribution.verify/tests/test_verify.py | Nordix/pytooling | 0 | 12759247 | <gh_stars>0
from itertools import chain
from unittest.mock import AsyncMock, MagicMock, PropertyMock
import pytest
from aio.run.checker import Checker
from envoy.distribution import distrotest, verify
class DummyDistroChecker(verify.PackagesDistroChecker):
    # Test double: bypass the real constructor so checkers can be built
    # in unit tests without command-line arguments.

    def __init__(self, *args):
        pass
def test_checker_constructor(patches):
    # NOTE(review): the ``patches`` fixture is requested but unused here
    # — presumably kept for signature consistency; confirm before removing.
    checker = DummyDistroChecker("path1", "path2", "path3")
    assert isinstance(checker, Checker)
    # Class-level defaults inherited from PackagesDistroChecker.
    assert checker._active_distrotest is None
    assert checker.checks == ("distros", )
    assert checker.test_class == distrotest.DistroTest
    # "not in __dict__" verifies these are class attributes, not values
    # cached on the instance.
    assert "test_class" not in checker.__dict__
    assert checker.test_config_class == distrotest.DistroTestConfig
    assert "test_config_class" not in checker.__dict__
def _check_arg_property(patches, prop, arg=None):
    """Assert `prop` proxies the same-named (or `arg`-named) CLI argument."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        ("PackagesDistroChecker.args", dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_args, ):
        assert (
            getattr(checker, prop)
            == getattr(m_args.return_value, arg or prop))
    assert prop not in checker.__dict__
@pytest.mark.parametrize(
    "prop",
    [("rebuild",),
     ("filter_distributions", "distribution")])
def test_checker_arg_props(patches, prop):
    """Simple properties proxy their corresponding CLI arguments."""
    _check_arg_property(patches, *prop)
def _check_arg_path_property(patches, prop, arg=None):
    """Assert `prop` wraps the matching CLI argument in a pathlib.Path."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        "pathlib",
        ("PackagesDistroChecker.args", dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_plib, m_args):
        assert getattr(checker, prop) == m_plib.Path.return_value
        assert (
            m_plib.Path.call_args
            == [(getattr(m_args.return_value, arg or prop), ), {}])
    assert prop not in checker.__dict__
@pytest.mark.parametrize(
    "prop",
    [("testfile",),
     ("packages_tarball", "packages")])
def test_checker_arg_path_props(patches, prop):
    """Path-valued properties wrap their CLI arguments in Path objects."""
    _check_arg_path_property(patches, *prop)
def test_checker_active_distrotest(patches):
    """`active_distrotest` mirrors the private `_active_distrotest`."""
    # NOTE(review): the `patches` fixture is requested but unused here.
    checker = DummyDistroChecker("path1", "path2", "path3")
    assert checker.active_distrotest is None
    checker._active_distrotest = "ATEST"
    assert checker.active_distrotest == "ATEST"
    assert "active_distrotest" not in checker.__dict__
@pytest.mark.parametrize("is_dict", [True, False])
def test_checker_config(patches, is_dict):
    """`config` parses YAML; a non-dict result raises a config error."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        "isinstance",
        "utils",
        ("PackagesDistroChecker.args", dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_inst, m_utils, m_args):
        m_inst.return_value = is_dict
        if is_dict:
            assert checker.config == m_utils.from_yaml.return_value
        else:
            with pytest.raises(verify.PackagesConfigurationError) as e:
                checker.config
        assert (
            m_utils.from_yaml.call_args
            == [(m_args.return_value.config,), {}])
    if is_dict:
        # success path caches the parsed config
        assert "config" in checker.__dict__
    else:
        assert (
            e.value.args[0]
            == f"Unable to parse configuration {m_args.return_value.config}")
def test_checker_docker(patches):
    """`docker` constructs (and caches) an aiodocker.Docker client."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        "aiodocker",
        prefix="envoy.distribution.verify.checker")
    with patched as (m_docker, ):
        assert checker.docker == m_docker.Docker.return_value
    assert (
        m_docker.Docker.call_args
        == [(), {}])
    assert "docker" in checker.__dict__
@pytest.mark.parametrize("maintainer", [None, True, "MAINT"])
def test_checker_maintainer(patches, maintainer):
    """`maintainer` falls back to ENVOY_MAINTAINER when arg is falsy."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        ("PackagesDistroChecker.args",
         dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_args, ):
        m_args.return_value.maintainer = maintainer
        assert (
            checker.maintainer
            == (maintainer
                if maintainer
                else verify.checker.ENVOY_MAINTAINER))
    assert "maintainer" not in checker.__dict__
def test_checker_path(patches):
    """`path` is the tempdir name wrapped in a pathlib.Path."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        "pathlib",
        ("PackagesDistroChecker.tempdir", dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_plib, m_temp):
        assert checker.path == m_plib.Path.return_value
        assert (
            m_plib.Path.call_args
            == [(m_temp.return_value.name, ), {}])
    assert "path" not in checker.__dict__
def test_checker_test_config(patches):
    """`test_config` builds the config class with all expected kwargs."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        ("PackagesDistroChecker.docker",
         dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.maintainer",
         dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.packages_tarball",
         dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.path",
         dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.test_config_class",
         dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.testfile",
         dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.version",
         dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as patchy:
        (m_docker, m_maintainer,
         m_tar, m_path, m_class, m_test, m_version) = patchy
        assert checker.test_config == m_class.return_value.return_value
    assert (
        m_class.return_value.call_args
        == [(),
            {'docker': m_docker.return_value,
             'path': m_path.return_value,
             'tarball': m_tar.return_value,
             'testfile': m_test.return_value,
             'maintainer': m_maintainer.return_value,
             'version': str(m_version.return_value)}])
    assert "test_config" in checker.__dict__
@pytest.mark.parametrize(
    "config",
    [{},
     {f"DISTRO{i}": dict(
         image="SOMEIMAGE",
         ext="EXT1",
         foo="FOO",
         bar="BAR")
      for i in range(1, 4)},
     {f"DISTRO{i}": dict(
         image="OTHERIMAGE",
         ext="EXT2",
         foo="FOO",
         bar="BAR")
      for i in range(1, 4)}])
@pytest.mark.parametrize(
    "distributions",
    [None,
     [],
     ["DISTRO1", "DISTRO2", "DISTRO3"],
     ["DISTRO1", "DISTRO3"]])
def test_checker_tests(patches, config, distributions):
    """`tests` builds per-distro configs, honouring the distro filter."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        "PackagesDistroChecker.get_test_config",
        "PackagesDistroChecker.get_test_packages",
        ("PackagesDistroChecker.config",
         dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.filter_distributions",
         dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_tconfig, m_pkgs, m_config, m_tests):
        m_config.return_value = config.copy()
        m_tests.return_value = distributions
        result = checker.tests
    if distributions:
        # with a filter, only the selected distros should remain
        config = {k: v for k, v in config.items() if k in distributions}
    assert (
        len(result)
        == len(config)
        == len(m_pkgs.call_args_list)
        == len(m_tconfig.call_args_list))
    for i, k in enumerate(result):
        assert k == (list(config)[i])
        assert result[k] == m_tconfig.return_value
    assert (
        m_tconfig.call_args_list
        == [[(_config["image"], ), {}] for _config in config.values()])
    assert (
        m_tconfig.return_value.update.call_args_list
        == [[(_conf,), {}] for _conf in config.values()])
    assert (
        m_tconfig.return_value.__getitem__.call_args_list
        == [[('type',), {}], [('ext',), {}]] * len(config))
    assert (
        m_pkgs.call_args_list
        == ([[(m_tconfig.return_value.__getitem__.return_value, ) * 2, {}]]
            * len(config)))
def test_checker_version(patches):
    """`version` proxies the CLI `version` argument without caching."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        ("PackagesDistroChecker.args",
         dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_args, ):
        assert checker.version == m_args.return_value.version
    assert "version" not in checker.__dict__
def test_checker_add_arguments():
    """`add_arguments` registers the full expected CLI, in order."""
    checker = DummyDistroChecker("x", "y", "z")
    parser = MagicMock()
    checker.add_arguments(parser)
    # exhaustive, ordered comparison of every add_argument call
    assert (
        parser.add_argument.call_args_list
        == [[('--verbosity', '-v'),
             {'choices': ['debug', 'info', 'warn', 'error'],
              'default': 'info',
              'help': 'Application log level'}],
            [('--log-level', '-l'),
             {'choices': ['debug', 'info', 'warn', 'error'],
              'default': 'warn',
              'help': 'Log level for non-application logs'}],
            [('--fix',),
             {'action': 'store_true',
              'default': False,
              'help': 'Attempt to fix in place'}],
            [('--diff',),
             {'action': 'store_true',
              'default': False,
              'help': 'Display a diff in the console where available'}],
            [('--warning', '-w'),
             {'choices': ['warn', 'error'],
              'default': 'warn',
              'help': 'Handle warnings as warnings or errors'}],
            [('--summary',),
             {'action': 'store_true',
              'default': False,
              'help': 'Show a summary of check runs'}],
            [('--summary-errors',),
             {'type': int,
              'default': 5,
              'help': (
                  'Number of errors to show in the summary, -1 shows all')}],
            [('--summary-warnings',),
             {'type': int,
              'default': 5,
              'help': (
                  'Number of warnings to show in the summary, -1 shows all')}],
            [('--check', '-c'),
             {'choices': ('distros',),
              'nargs': '*',
              'help': (
                  'Specify which checks to run, '
                  'can be specified for multiple checks')}],
            [('--config-distros',),
             {'default': '',
              'help': 'Custom configuration for the distros check'}],
            [('--path', '-p'),
             {'default': None,
              'help': (
                  'Path to the test root (usually Envoy source dir). '
                  'If not specified the first path of paths is used')}],
            [('paths',),
             {'nargs': '*',
              'help': (
                  'Paths to check. At least one path must be specified, '
                  'or the `path` argument should be provided')}],
            [('testfile',),
             {'help': (
                 'Path to the test file that will be run inside the '
                 'distribution containers')}],
            [('version',),
             {'help': 'Expected envoy version.'}],
            [('config',),
             {'help': (
                 'Path to a YAML configuration with distributions '
                 'for testing')}],
            [('packages',),
             {'help': 'Path to a tarball containing packages to test'}],
            [('--distribution', '-d'),
             {'nargs': '?',
              'help': (
                  'Specify distribution to test. '
                  'Can be specified multiple times.')}],
            [('--maintainer',
              '-m'),
             {'help': (
                 'Specify the expected maintainer of the packages. '
                 'Defaults to Envoy maintainers.')}],
            [('--rebuild',),
             {'action': 'store_true',
              'help': 'Rebuild test images before running the tests.'}]])
@pytest.mark.parametrize(
    "tests",
    [{},
     {f"DISTRO{i}": dict(image=f"IMAGE{i}")
      for i in range(1, 4)}])
@pytest.mark.parametrize("rebuild", [True, False])
async def test_checker_check_distros(patches, tests, rebuild):
    """`check_distros` logs and runs each package; rebuild only first."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        "PackagesDistroChecker.run_test",
        ("PackagesDistroChecker.log", dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.rebuild", dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.tests", dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    # attach three named package mocks to every distro config
    _items = {}
    for i, (k, v) in enumerate(tests.items()):
        v["packages"] = []
        for x in range(0, 3):
            _mock = MagicMock()
            _mock.name = f"P{i}{x}"
            v["packages"].append(_mock)
        _items[k] = v
    with patched as (m_dtest, m_log, m_rebuild, m_tests):
        m_tests.return_value.items.return_value = _items.items()
        m_rebuild.return_value = rebuild
        assert not await checker.check_distros()
    assert (
        m_log.return_value.info.call_args_list
        == [[((f"[{name}] Testing with: "
               f'{",".join(n.name for n in tests[name]["packages"])}'),),
             {}]
            for name in tests])
    # rebuild should only be requested for the first package of a distro
    expected = list(
        chain.from_iterable(
            [[(name, tests[name]["image"], package, (i == 0 and rebuild)), {}]
             for i, package in enumerate(tests[name]["packages"])]
            for name in tests))
    assert (
        m_dtest.call_args_list
        == expected)
def test_checker_get_test_config(patches):
    """`get_test_config` returns a copy of the per-image config."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        ("PackagesDistroChecker.test_config", dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_config, ):
        assert (
            checker.get_test_config("IMAGE")
            == m_config.return_value.get_config.return_value.copy.return_value)
    assert (
        m_config.return_value.get_config.call_args
        == [('IMAGE',), {}])
    assert (
        m_config.return_value.get_config.return_value.copy.call_args
        == [(), {}])
def test_checker_get_test_packages(patches):
    """`get_test_packages` delegates to test_config.get_packages."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        ("PackagesDistroChecker.test_config", dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_config, ):
        assert (
            checker.get_test_packages("TYPE", "EXT")
            == m_config.return_value.get_packages.return_value)
    assert (
        m_config.return_value.get_packages.call_args
        == [('TYPE', 'EXT'), {}])
async def test_checker_on_checks_complete(patches):
    """`on_checks_complete` cleans up test, then docker, then parent."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        "PackagesDistroChecker._cleanup_test",
        "PackagesDistroChecker._cleanup_docker",
        "checker.Checker.on_checks_complete",
        prefix="envoy.distribution.verify.checker")
    # order_mock records the call order of the three cleanup steps
    order_mock = MagicMock()
    with patched as (m_test, m_docker, m_complete):
        m_test.side_effect = lambda: order_mock("TEST")
        m_docker.side_effect = lambda: order_mock("DOCKER")
        m_complete.side_effect = lambda: (
            order_mock('COMPLETE') and "COMPLETE")
        assert await checker.on_checks_complete() == "COMPLETE"
    assert (
        order_mock.call_args_list
        == [[('TEST',), {}],
            [('DOCKER',), {}],
            [('COMPLETE',), {}]])
    for m in m_test, m_docker, m_complete:
        assert (
            m.call_args
            == [(), {}])
@pytest.mark.parametrize("exiting", [True, False])
@pytest.mark.parametrize("errors", [None, (), ("ERR1", "ERR")])
@pytest.mark.parametrize("rebuild", [True, False])
async def test_checker_run_test(patches, exiting, errors, rebuild):
    """`run_test` runs a distrotest unless the checker is exiting."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        ("PackagesDistroChecker.test_class", dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.test_config", dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.exiting", dict(new_callable=PropertyMock)),
        ("PackagesDistroChecker.log", dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_test, m_config, m_exit, m_log):
        m_exit.return_value = exiting
        m_test.return_value.return_value.run = AsyncMock(
            return_value=errors)
        assert not await checker.run_test("NAME", "IMAGE", "PACKAGE", rebuild)
        if exiting:
            # exiting short-circuits: no logging, no test instantiation
            assert not m_log.called
            assert not m_test.called
            assert not checker._active_distrotest
            return
    assert (
        checker._active_distrotest
        == m_test.return_value.return_value)
    assert (
        m_log.return_value.info.call_args
        == [('[NAME] Testing package: PACKAGE',), {}])
    assert (
        m_test.return_value.call_args
        == [(checker, m_config.return_value,
             'NAME',
             'IMAGE',
             'PACKAGE'),
            {"rebuild": rebuild}])
@pytest.mark.parametrize("exists", [True, False])
async def test_checker__cleanup_docker(patches, exists):
    """`_cleanup_docker` closes and uncaches the client only if cached."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        ("PackagesDistroChecker.docker", dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    if exists:
        # simulate a previously-cached docker client
        checker.__dict__["docker"] = "DOCKER"
    with patched as (m_docker, ):
        m_docker.return_value.close = AsyncMock()
        await checker._cleanup_docker()
    assert "docker" not in checker.__dict__
    if not exists:
        assert not m_docker.return_value.close.called
        return
    assert (
        m_docker.return_value.close.call_args
        == [(), {}])
@pytest.mark.parametrize("exists", [True, False])
async def test_checker__cleanup_test(patches, exists):
    """`_cleanup_test` only calls cleanup when a test is active."""
    checker = DummyDistroChecker("path1", "path2", "path3")
    patched = patches(
        ("PackagesDistroChecker.active_distrotest",
         dict(new_callable=PropertyMock)),
        prefix="envoy.distribution.verify.checker")
    with patched as (m_active, ):
        if not exists:
            m_active.return_value = None
        else:
            m_active.return_value.cleanup = AsyncMock()
        await checker._cleanup_test()
    if not exists:
        return
    assert (
        m_active.return_value.cleanup.call_args
        == [(), {}])
| 2 | 2 |
intermediate_class_notes/src/dayone/map_demo.py | kmad1729/python_notes | 0 | 12759248 | <filename>intermediate_class_notes/src/dayone/map_demo.py<gh_stars>0
'simple demo of using map to create instances of objects'
class Dog:
    """A simple dog that knows its name and can bark it out loud."""

    def __init__(self, name):
        """Store the dog's name.

        Parameters
        ----------
        name : str
            The dog's name.
        """
        self.name = name

    def bark(self):
        """Print a bark message identifying this dog."""
        # f-string replaces the dated %-formatting; same output.
        print(f"Woof! {self.name} is barking!")
# Demonstrate map(): apply the Dog constructor to each name to build
# a list of Dog instances (the "map" demo the module docstring refers to).
dogs = list(map(Dog, ['rex', 'rover', 'ranger']))
| 4.03125 | 4 |
maad/rois/rois_2d.py | scikit-maad/scikit-maad | 31 | 12759249 | #!/usr/bin/env python
"""
Segmentation methods to find regions of interest in the time and frequency domain.
"""
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: New BSD License
# =============================================================================
# Load the modules
# =============================================================================
# Import external modules
import numpy as np
from scipy.stats import iqr
from skimage import measure
import pandas as pd
import sys
# Smallest positive normalized float on this platform; presumably used as a
# floor to avoid log(0)/division-by-zero — usage not visible in this chunk.
_MIN_ = sys.float_info.min
# Import internal modules
from maad.util import (plot2d, rand_cmap)
#%%
#****************************************************************************
# private functions
#****************************************************************************
def _double_threshold_rel (im, bin_std=6, bin_per=0.5,
verbose=False, display=False, savefig=None, **kwargs):
"""
Binarize an image based on a double relative threshold.
The values used for the thresholding depends on the values found in the
image. => relative threshold
Parameters
----------
im : 2d ndarray of scalars
Spectrogram (or image)
bin_std : scalar, optional, default is 6
Set the first threshold. This threshold is not an absolute value but
depends on values that are similar to 75th percentile (pseudo_mean) and
a sort of std value of the image.
threshold1 = "pseudo_mean" + "std" * bin_std
Value higher than threshold1 are set to 1, they are the seeds for
the second step. The others are set to 0.
bin_per: scalar, optional, defautl is 0.5
Set how much the second threshold is lower than the first
threshold value. From 0 to 1. ex: 0.1 = 10 %.
threshold2 = threshold1 (1-bin_per)
Value higher than threshold2 and connected (directly or not) to the
seeds are set to 1, the other remains 0
verbose : boolean, optional, default is False
print messages
display : boolean, optional, default is False
Display the signal if True
savefig : string, optional, default is None
Root filename (with full path) is required to save the figures. Postfix
is added to the root filename.
\*\*kwargs, optional. This parameter is used by plt.plot and savefig functions
- savefilename : str, optional, default :'_spectro_after_noise_subtraction.png'
Postfix of the figure filename
- figsize : tuple of integers, optional, default: (4,10)
width, height in inches.
- title : string, optional, default : 'Spectrogram'
title of the figure
- xlabel : string, optional, default : 'Time [s]'
label of the horizontal axis
- ylabel : string, optional, default : 'Amplitude [AU]'
label of the vertical axis
- cmap : string or Colormap object, optional, default is 'gray'
See https://matplotlib.org/examples/color/colormaps_reference.html
in order to get all the existing colormaps
examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
'viridis'...
- vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with norm to normalize
luminance data. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
- extent : scalars (left, right, bottom, top), optional, default: None
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
- dpi : integer, optional, default is 96
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
- format : string, optional, default is 'png'
Format to save the figure
... and more, see matplotlib
Returns
-------
im_out: binary image
References
----------
.. [1] from MATLAB: Threshold estimation (from Oliveira et al, 2015)
Adapted by <NAME> Dec 12, 2017
"""
# test if im is full of zeros
if not im.any() :
im_out = np.zeros(im.shape)
else:
# Compute the qth percentile of the data along the specified axis
val1 = np.percentile(im[np.where(im>0)],75) # value corresponding to the limit between the 75% lowest value and 25% largest value
# The interquartile range (IQR) is the difference between the 75th and
# 25th percentile of the data. It is a measure of the dispersion similar
# to standard deviation or variance, but is much more robust against outliers
val2 = iqr(im[np.where(im>0)])*bin_std
# Threshold : qth percentile + sort of std
h_th = val1 + val2
# Low threshold limit
l_th = (h_th-h_th*bin_per)
if verbose :
print(72 * '_')
print('Double thresholding with values relative to the image...')
print ('**********************************************************')
print (' high threshold value %.2f | low threshold value %.2f' % (h_th, l_th))
print ('**********************************************************')
# binarisation
im_t1 = im > h_th # mask1
im_t2 = im > l_th # mask2
im_t3 = im * im_t1 # selected parts of the image
#find index of regions which meet the criteria
conncomp_t2 = measure.label(im_t2) #Find connected components in binary image
rprops = measure.regionprops(conncomp_t2,im_t3)
rprops_mean_intensity = [region['mean_intensity'] for region in rprops]
rprops_mean_intensity = np.asarray(rprops_mean_intensity)
rprops_label = [region['label'] for region in rprops]
rprops_label = np.asarray(rprops_label)
[ind]=np.where(rprops_mean_intensity>0)
im_out = np.isin(conncomp_t2, rprops_label[ind]) # test if the indice is in the matrix of indices
im_out =im_out*1 # boolean to 0,1 conversion
# Display
if display :
ylabel =kwargs.pop('ylabel','Frequency [Hz]')
xlabel =kwargs.pop('xlabel','Time [sec]')
title =kwargs.pop('title','binary image => MASK')
cmap =kwargs.pop('cmap','gray')
vmin=kwargs.pop('vmin',0)
vmax=kwargs.pop('vmax',1)
extent=kwargs.pop('extent',None)
if extent is None :
xlabel = 'pseudotime [points]'
ylabel = 'pseudofrequency [points]'
_, fig = plot2d (im_out,
extent = extent,
title = title,
ylabel = ylabel,
xlabel = xlabel,
vmin = vmin,
vmax = vmax,
cmap = cmap,
**kwargs)
# SAVE FIGURE
if savefig is not None :
dpi =kwargs.pop('dpi',96)
format=kwargs.pop('format','png')
filename=kwargs.pop('filename','_spectro_binary')
filename = savefig+filename+'.'+format
if verbose :
print('\n''save figure : %s' %filename)
fig.savefig(filename, bbox_inches='tight', dpi=dpi, format=format,
**kwargs)
return im_out
#%%
def _double_threshold_abs(im, bin_h=0.7, bin_l=0.2,
                          verbose=False, display=False, savefig=None,
                          **kwargs):
    """
    Binarize an image based on a double absolute threshold (hysteresis).

    The threshold values are independent of the values in the image.
    Pixels above `bin_h` seed the mask; connected pixels above `bin_l`
    are kept.

    Parameters
    ----------
    im : 2d ndarray of scalars
        Spectrogram (or image)
    bin_h : scalar, optional, default is 0.7
        First (high) threshold. Values higher than this are set to 1, the
        others to 0.  They are the seeds for the second step.
    bin_l : scalar, optional, default is 0.2
        Second (low) threshold. Values higher than this and connected
        (directly or not) to the seeds are set to 1, the others remain 0.
    verbose : boolean, optional, default is False
        Print the number of ROIs found and their coverage.
    display : boolean, optional, default is False
        Display the resulting binary mask.
    savefig : string, optional, default is None
        Root filename (with full path) used to save the figure; a postfix
        is appended to it.
    \\*\\*kwargs : optional
        Forwarded to the plot/save helpers (title, xlabel, ylabel, cmap,
        vmin, vmax, extent, dpi, format, filename, ...).

    Returns
    -------
    im_out : 2d ndarray
        Binary mask (0/1) with the selected regions set to 1.

    References
    ----------
    .. [1] <NAME>. A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence. 1986; vol. 8, pp.679-698. `DOI: 10.1109/TPAMI.1986.4767851 <https://doi.org/10.1109/TPAMI.1986.4767851>`_
    """
    # binarisation
    im_t1 = im > bin_h  # mask1: seeds
    im_t2 = im > bin_l  # mask2: candidates
    im_t3 = im * im_t1  # selected (seeded) parts of the image
    # Label connected components of the candidate mask and keep the ones
    # that contain at least one seed pixel (mean seeded intensity > 0).
    conncomp_t2 = measure.label(im_t2)
    rprops = measure.regionprops(conncomp_t2, im_t3)
    rprops_mean_intensity = np.asarray(
        [region['mean_intensity'] for region in rprops])
    rprops_label = np.asarray([region['label'] for region in rprops])
    [ind] = np.where(rprops_mean_intensity > 0)
    im_out = np.isin(conncomp_t2, rprops_label[ind]) * 1  # bool -> 0/1
    if verbose:
        print(72 * '_')
        print('Double thresholding with absolute values...')
        print('**********************************************************')
        # BUG FIX: the original format string ended with a single '%'
        # (an incomplete conversion -> ValueError at runtime) and used
        # builtin sum() on a 2d array (returns a 1d array, which cannot
        # be formatted with %.2f).  Use %d/%% and ndarray.sum() instead.
        print(' Number of rois %d | Rois cover %.2f%%'
              % (len(rprops_label),
                 im_out.sum() / (im_out.shape[1] * im_out.shape[0]) * 100))
        print('**********************************************************')
    # Display
    if display:
        ylabel = kwargs.pop('ylabel', 'Frequency [Hz]')
        xlabel = kwargs.pop('xlabel', 'Time [sec]')
        title = kwargs.pop('title', 'binary image => MASK')
        cmap = kwargs.pop('cmap', 'gray')
        vmin = kwargs.pop('vmin', 0)
        vmax = kwargs.pop('vmax', 1)
        extent = kwargs.pop('extent', None)
        if extent is None:
            # no physical axes available: fall back to pixel coordinates
            xlabel = 'pseudotime [points]'
            ylabel = 'pseudofrequency [points]'
        _, fig = plot2d(im_out,
                        extent=extent,
                        title=title,
                        ylabel=ylabel,
                        xlabel=xlabel,
                        vmin=vmin,
                        vmax=vmax,
                        cmap=cmap,
                        **kwargs)
        # SAVE FIGURE
        if savefig is not None:
            dpi = kwargs.pop('dpi', 96)
            format = kwargs.pop('format', 'png')
            filename = kwargs.pop('filename', '_spectro_binary')
            filename = savefig + filename + '.' + format
            if verbose:
                print('\n' 'save figure : %s' % filename)
            fig.savefig(filename, bbox_inches='tight', dpi=dpi,
                        format=format, **kwargs)
    return im_out
#%%
# =============================================================================
# public functions
# =============================================================================
def create_mask(im, mode_bin = 'relative',
                verbose= False, display = False, savefig = None, **kwargs):
    """
    Binarize an image based on a double threshold.

    Parameters
    ----------
    im : 2d ndarray of scalars
        Spectrogram (or image)
    mode_bin : string in {'relative', 'absolute'}, optional, default is 'relative'
        if 'absolute' [1]_, a double threshold with absolute values is
        performed (kwargs ``bin_h``/``bin_l``);
        if 'relative' [2]_, a relative double threshold is performed
        (kwargs ``bin_std``/``bin_per``).
    verbose : boolean, optional, default is False
        print messages
    display : boolean, optional, default is False
        Display the signal if True
    savefig : string, optional, default is None
        Root filename (with full path) is required to save the figures.
        Postfix is added to the root filename.
    \\*\\*kwargs : optional
        if 'absolute' [1]_ :

        - bin_h : scalar, optional, default is 0.7
          First threshold: values above it are seeds (set to 1).
        - bin_l : scalar, optional, default is 0.3
          Second threshold: values above it and connected to the seeds
          (6-connectivity) are kept.

        if 'relative' [2]_ :

        - bin_std : scalar, optional, default is 6
          Scales the dispersion added to the pseudo-mean:
          threshold1 = pseudo_mean + std * bin_std.
        - bin_per : scalar, optional, default is 0.5
          Relative drop from the first to the second threshold (0 to 1):
          threshold2 = threshold1 * (1 - bin_per).

        Remaining kwargs are forwarded to the plot/save helpers.

    Returns
    -------
    im_bin : 2d ndarray
        binary image (mask)

    Raises
    ------
    ValueError
        If `mode_bin` is neither 'relative' nor 'absolute'.

    References
    ----------
    .. [1] <NAME>. A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence. 1986; vol. 8, pp.679-698. `DOI: 10.1109/TPAMI.1986.4767851 <https://doi.org/10.1109/TPAMI.1986.4767851>`_
    .. [2] from MATLAB: Threshold estimation (Oliveira et al, 2015)

    Examples
    --------
    >>> s, fs = maad.sound.load('../data/cold_forest_daylight.wav')
    >>> Sxx,tn,fn,ext = maad.sound.spectrogram (s, fs, fcrop=(0,10000))
    >>> Sxx_dB = maad.util.power2dB(Sxx) +96
    >>> Sxx_dB_blurred = maad.rois.smooth(Sxx_dB)
    >>> im_bin = maad.rois.create_mask(Sxx_dB_blurred, bin_std=1.5, bin_per=0.25, mode='relative')
    """
    if mode_bin == 'relative':
        bin_std = kwargs.pop('bin_std', 6)
        bin_per = kwargs.pop('bin_per', 0.5)
        im_bin = _double_threshold_rel(im, bin_std, bin_per,
                                       verbose, display, savefig, **kwargs)
    elif mode_bin == 'absolute':
        bin_h = kwargs.pop('bin_h', 0.7)
        bin_l = kwargs.pop('bin_l', 0.3)
        im_bin = _double_threshold_abs(im, bin_h, bin_l,
                                       verbose, display, savefig, **kwargs)
    else:
        # BUG FIX: an unknown mode previously fell through to the return
        # below and raised UnboundLocalError; fail fast with a clear error.
        raise ValueError(
            "mode_bin must be 'relative' or 'absolute', got %r" % (mode_bin,))
    return im_bin
#%%
def select_rois(im_bin, min_roi=None ,max_roi=None,
                verbose=False, display=False, savefig = None, **kwargs):
    """
    Select regions of interest based on its dimensions.

    The input is a binary mask, and the output is an image with labelled pixels.

    Parameters
    ----------
    im : 2d ndarray of scalars
        Spectrogram (or image)
    min_roi, max_roi : scalars, optional, default : None
        Define the minimum and the maximum area possible for an ROI. If None,
        the minimum ROI area is 1 pixel and the maximum ROI area is the area of
        the image
    verbose : boolean, optional, default is False
        print messages
    display : boolean, optional, default is False
        Display the signal if True
    savefig : string, optional, default is None
        Root filename (with full path) is required to save the figures. Postfix
        is added to the root filename.
    \*\*kwargs, optional. This parameter is used by plt.plot and savefig functions

        - savefilename : str, optional, default :'_spectro_after_noise_subtraction.png'
            Postfix of the figure filename
        - figsize : tuple of integers, optional, default: (4,10)
            width, height in inches.
        - title : string, optional, default : 'Spectrogram'
            title of the figure
        - xlabel : string, optional, default : 'Time [s]'
            label of the horizontal axis
        - ylabel : string, optional, default : 'Amplitude [AU]'
            label of the vertical axis
        - cmap : string or Colormap object, optional, default is 'gray'
            See https://matplotlib.org/examples/color/colormaps_reference.html
            in order to get all the existing colormaps
            examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
            'viridis'...
        - vmin, vmax : scalar, optional, default: None
            `vmin` and `vmax` are used in conjunction with norm to normalize
            luminance data. Note if you pass a `norm` instance, your
            settings for `vmin` and `vmax` will be ignored.
        - extent : scalars (left, right, bottom, top), optional, default: None
            The location, in data-coordinates, of the lower-left and
            upper-right corners. If `None`, the image is positioned such that
            the pixel centers fall on zero-based (row, column) indices.
        - dpi : integer, optional, default is 96
            Dot per inch.
            For printed version, choose high dpi (i.e. dpi=300) => slow
            For screen version, choose low dpi (i.e. dpi=96) => fast
        - format : string, optional, default is 'png'
            Format to save the figure

        ... and more, see matplotlib

    Returns
    -------
    im_rois: 2d ndarray
        image with labels as values
    rois: pandas DataFrame
        Regions of interest with future descriptors will be computed.
        Array have column names: ``labelID``, ``label``, ``min_y``, ``min_x``,
        ``max_y``, ``max_x``,
        Use the function ``maad.util.format_features`` before using
        centroid_features to format of the ``rois`` DataFrame
        correctly.

    Examples
    --------

    Load audio recording compute the spectrogram in dB.

    >>> s, fs = maad.sound.load('../data/cold_forest_daylight.wav')
    >>> Sxx,tn,fn,ext = maad.sound.spectrogram (s, fs, fcrop=(0,20000), display=True)
    >>> Sxx_dB = maad.util.power2dB(Sxx) +96

    Smooth the spectrogram

    >>> Sxx_dB_blurred = maad.sound.smooth(Sxx_dB)

    Using image binarization, detect isolated region in the time-frequency domain with high density of energy, i.e. regions of interest (ROIs).

    >>> im_bin = maad.rois.create_mask(Sxx_dB_blurred, bin_std=1.5, bin_per=0.5, mode='relative')

    Select ROIs from the binary mask.

    >>> im_rois, df_rois = maad.rois.select_rois(im_bin, display=True)

    We detected the background noise as a ROI, and that multiple ROIs are mixed in a single region. To have better results, it is adviced to preprocess the spectrogram to remove the background noise before creating the mask.

    >>> Sxx_noNoise = maad.sound.median_equalizer(Sxx)
    >>> Sxx_noNoise_dB = maad.util.power2dB(Sxx_noNoise)
    >>> Sxx_noNoise_dB_blurred = maad.sound.smooth(Sxx_noNoise_dB)
    >>> im_bin2 = maad.rois.create_mask(Sxx_noNoise_dB_blurred, bin_std=6, bin_per=0.5, mode='relative')
    >>> im_rois2, df_rois2 = maad.rois.select_rois(im_bin2, display=True)
    """
    # test if max_roi and min_roi are defined
    if max_roi is None:
        # the maximum ROI is set to the aera of the image
        max_roi=im_bin.shape[0]*im_bin.shape[1]

    if min_roi is None:
        # the min ROI area is set to 1 pixel
        min_roi = 1

    if verbose :
        print(72 * '_')
        print('Automatic ROIs selection in progress...')
        print ('**********************************************************')
        print (' Min ROI area %d pix² | Max ROI area %d pix²' % (min_roi, max_roi))
        print ('**********************************************************')

    labels = measure.label(im_bin)    #Find connected components in binary image
    rprops = measure.regionprops(labels)

    rois_bbox = []
    rois_label = []

    for roi in rprops:

        # select the rois depending on their size
        # NOTE: regionprops areas are in pixels; bounds are inclusive here
        if (roi.area >= min_roi) & (roi.area <= max_roi):
            # get the label
            rois_label.append(roi.label)
            # get rectangle coordonates
            rois_bbox.append (roi.bbox)

    # keep only the labelled pixels whose label passed the size filter
    im_rois = np.isin(labels, rois_label)    # test if the indice is in the matrix of indices
    im_rois = im_rois* labels

    # create a list with labelID and labelName (None in this case)
    rois_label = list(zip(rois_label,['unknown']*len(rois_label)))

    # test if there is a roi
    if len(rois_label)>0 :
        # create a dataframe rois containing the coordonates and the label
        rois = np.concatenate((np.asarray(rois_label), np.asarray(rois_bbox)), axis=1)
        rois = pd.DataFrame(rois, columns = ['labelID', 'label', 'min_y','min_x','max_y', 'max_x'])
        # force type to integer
        rois = rois.astype({'label': str,'min_y':int,'min_x':int,'max_y':int, 'max_x':int})
        # compensate half-open interval of bbox from skimage
        # (skimage bbox is [min, max) ; maad uses inclusive max coordinates)
        rois.max_y -= 1
        rois.max_x -= 1

    else :
        # no ROI passed the filter: return an empty, correctly-typed DataFrame
        rois = []
        rois = pd.DataFrame(rois, columns = ['labelID', 'label', 'min_y','min_x','max_y', 'max_x'])
        rois = rois.astype({'label': str,'min_y':int,'min_x':int,'max_y':int, 'max_x':int})

    # Display
    if display :
        ylabel =kwargs.pop('ylabel','Frequency [Hz]')
        xlabel =kwargs.pop('xlabel','Time [sec]')
        title =kwargs.pop('title','Selected ROIs')
        extent=kwargs.pop('extent',None)

        # without a physical extent, axes are labelled in raw pixel units
        if extent is None :
            xlabel = 'pseudotime [points]'
            ylabel = 'pseudofrequency [points]'

        # randcmap = rand_cmap(len(rois_label))
        # cmap =kwargs.pop('cmap',randcmap)
        cmap =kwargs.pop('cmap','tab20')

        _, fig = plot2d (im_rois,
                         extent = extent,
                         title = title,
                         ylabel = ylabel,
                         xlabel = xlabel,
                         cmap = cmap,
                         **kwargs)
        # SAVE FIGURE
        if savefig is not None :
            dpi =kwargs.pop('dpi',96)
            format=kwargs.pop('format','png')
            filename=kwargs.pop('filename','_spectro_selectrois')
            filename = savefig+filename+'.'+format
            fig.savefig(filename, bbox_inches='tight', dpi=dpi, format=format,
                        **kwargs)

    return im_rois, rois
#%%
def rois_to_imblobs(im_zeros, rois):
    """
    Take a matrix full of zeros and add ones in delimited regions defined by rois.

    Parameters
    ----------
    im_zeros : ndarray
        matrix full of zeros with the size to the image where the rois come from.
    rois : DataFrame
        rois must have the columns names:((min_y, min_x, max_y, max_x) which
        correspond to the bounding box coordinates

    Returns
    -------
    im_blobs : ndarray
        matrix with 1 corresponding to the rois and 0 elsewhere

    Raises
    ------
    TypeError
        If ``rois`` is not a pandas DataFrame or lacks one of the four
        required bounding-box columns.

    Examples
    --------
    >>> from maad import rois, util
    >>> import pandas as pd
    >>> import numpy as np
    >>> im_zeros = np.zeros((100,300))
    >>> df_rois = pd.DataFrame({'min_y': [10, 40], 'min_x': [10, 200], 'max_y': [60, 80], 'max_x': [110, 250]})
    >>> im_blobs = rois.rois_to_imblobs(im_zeros, df_rois)
    >>> util.plot2d(im_blobs)
    """
    # Check format of the input data
    if not isinstance(rois, pd.DataFrame):
        raise TypeError('Rois must be of type pandas DataFrame')

    # Bug fix: the original test used `('min_y' and 'min_x' and 'max_y'
    # and 'max_x') in rois`.  The chained `and` of non-empty strings
    # evaluates to its last operand ('max_x'), so only that one column
    # was actually verified.  Check every required column explicitly.
    if not all(col in rois for col in ('min_y', 'min_x', 'max_y', 'max_x')):
        raise TypeError('Array must be a Pandas DataFrame with column names:((min_y, min_x, max_y, max_x). Check example in documentation.')

    # select the bounding-box columns in a fixed order
    rois_bbox = rois[['min_y', 'min_x', 'max_y', 'max_x']]

    # roi to image blob; +1 on the upper bounds because maad bounding
    # boxes are inclusive
    for min_y, min_x, max_y, max_x in rois_bbox.values:
        im_zeros[int(min_y):int(max_y+1), int(min_x):int(max_x+1)] = 1

    im_blobs = im_zeros.astype(int)

    return im_blobs
| 2.796875 | 3 |
StackApp/env/lib/python2.7/site-packages/blueprint/frontend/chef.py | jonathanmusila/StackOverflow-Lite | 0 | 12759250 | <gh_stars>0
"""
Chef code generator.
"""
import base64
import codecs
import errno
import logging
import os
import os.path
import re
import tarfile
from blueprint import util
from blueprint import walk
def chef(b, relaxed=False):
    """
    Generate Chef code.

    Walks the blueprint ``b`` (its sources, files, packages and services)
    and translates each item into resources on a fresh Cookbook, which is
    returned.  When `relaxed` is true, package versions are not pinned.
    """
    c = Cookbook(b.name, comment=b.DISCLAIMER)

    def source(dirname, filename, gen_content, url):
        """
        Create a cookbook_file and execute resource to fetch and extract
        a source tarball.
        """
        pathname = os.path.join('/tmp', filename)
        if url is not None:
            # Fetch with curl, falling back to wget if curl is absent.
            c.execute('curl -o "{0}" "{1}" || wget -O "{0}" "{1}"'.
                          format(pathname, url),
                      creates=pathname)
        elif gen_content is not None:
            # Content is generated at dump time; ship it as a cookbook file.
            c.file(pathname,
                   gen_content(),
                   owner='root',
                   group='root',
                   mode='0644',
                   backup=False,
                   source=pathname[1:])
        # Extract into the target directory; zip archives need unzip.
        if '.zip' == pathname[-4:]:
            c.execute('{0}'.format(pathname),
                      command='unzip "{0}"'.format(pathname),
                      cwd=dirname)
        else:
            c.execute('{0}'.format(pathname),
                      command='tar xf "{0}"'.format(pathname),
                      cwd=dirname)

    def file(pathname, f):
        """
        Create a cookbook_file resource.
        """
        # Templated files cannot be rendered here; warn and skip.
        if 'template' in f:
            logging.warning('file template {0} won\'t appear in generated '
                            'Chef cookbooks'.format(pathname))
            return
        # Ensure the parent directory exists with root ownership.
        c.directory(os.path.dirname(pathname),
                    group='root',
                    mode='0755',
                    owner='root',
                    recursive=True)
        # Symbolic links are stored with mode 120000/120777 and their
        # target in 'content'.
        if '120000' == f['mode'] or '120777' == f['mode']:
            c.link(pathname,
                   group=f['group'],
                   owner=f['owner'],
                   to=f['content'])
            return
        if 'source' in f:
            # Content lives at a remote URL; fetch at converge time.
            c.remote_file(pathname,
                          backup=False,
                          group=f['group'],
                          mode=f['mode'][-4:],
                          owner=f['owner'],
                          source=f['source'])
        else:
            content = f['content']
            if 'base64' == f['encoding']:
                content = base64.b64decode(content)
            c.file(pathname,
                   content,
                   backup=False,
                   group=f['group'],
                   mode=f['mode'][-4:],
                   owner=f['owner'],
                   source=pathname[1:])

    def before_packages(manager):
        """
        Create execute resources to configure the package managers.
        """
        packages = b.packages.get(manager, [])
        if 0 == len(packages):
            return
        # A manager that only manages itself needs no cache refresh.
        if 1 == len(packages) and manager in packages:
            return
        if 'apt' == manager:
            c.execute('apt-get -q update')
        elif 'yum' == manager:
            c.execute('yum makecache')

    def package(manager, package, version):
        """
        Create a package resource.
        """
        # A manager managing itself is a no-op.
        if manager == package:
            return

        if manager in ('apt', 'yum'):
            if relaxed or version is None:
                c.package(package)
            else:
                c.package(package, version=version)

            # See comments on this section in `puppet` above.
            match = re.match(r'^rubygems(\d+\.\d+(?:\.\d+)?)$', package)
            if match is not None and util.rubygems_update():
                c.execute('/usr/bin/gem{0} install --no-rdoc --no-ri ' # No ,
                          'rubygems-update'.format(match.group(1)))
                c.execute('/usr/bin/ruby{0} ' # No ,
                          '$(PATH=$PATH:/var/lib/gems/{0}/bin ' # No ,
                          'which update_rubygems)"'.format(match.group(1)))

            # npm is bootstrapped from the upstream install script.
            if 'nodejs' == package:
                c.execute('{ ' # No ,
                          'curl http://npmjs.org/install.sh || ' # No ,
                          'wget -O- http://npmjs.org/install.sh ' # No ,
                          '} | sh',
                          creates='/usr/bin/npm')

        # AWS cfn-init templates may specify RPMs to be installed from URLs,
        # which are specified as versions.
        elif 'rpm' == manager:
            c.rpm_package(package, source=version)

        # All types of gems get to have package resources.
        elif 'rubygems' == manager:
            if relaxed or version is None:
                c.gem_package(package)
            else:
                c.gem_package(package, version=version)
        elif re.search(r'ruby', manager) is not None:
            # Versioned Ruby: point gem_package at the matching gem binary.
            match = re.match(r'^ruby(?:gems)?(\d+\.\d+(?:\.\d+)?)',
                             manager)
            if relaxed or version is None:
                c.gem_package(package,
                              gem_binary='/usr/bin/gem{0}'.format(match.group(1)))
            else:
                c.gem_package(package,
                              gem_binary='/usr/bin/gem{0}'.format(match.group(1)),
                              version=version)

        # Everything else is an execute resource.
        else:
            c.execute(manager(package, version, relaxed))

    def service(manager, service):
        """
        Create a service resource and subscribe to its dependencies.
        """

        # Transform dependency list into a subscribes attribute.
        # TODO Breaks inlining.
        subscribe = []
        def service_file(m, s, pathname):
            f = b.files[pathname]
            if '120000' == f['mode'] or '120777' == f['mode']:
                subscribe.append('link[{0}]'.format(pathname))
            else:
                subscribe.append('cookbook_file[{0}]'.format(pathname))
        walk.walk_service_files(b, manager, service, service_file=service_file)
        def service_package(m, s, pm, package):
            subscribe.append('package[{0}]'.format(package))
        walk.walk_service_packages(b,
                                   manager,
                                   service,
                                   service_package=service_package)
        def service_source(m, s, dirname):
            subscribe.append('execute[{0}]'.format(b.sources[dirname]))
        walk.walk_service_sources(b,
                                  manager,
                                  service,
                                  service_source=service_source)
        # BareString keeps the resources(...) call unquoted in the recipe.
        subscribe = util.BareString('resources(' \
            + ', '.join([repr(s) for s in subscribe]) + ')')

        kwargs = {'action': [[':enable', ':start']],
                  'subscribes': [':restart', subscribe]}
        if 'upstart' == manager:
            kwargs['provider'] = util.BareString(
                'Chef::Provider::Service::Upstart')
        c.service(service, **kwargs)

    # Drive the callbacks above over the whole blueprint, in order.
    b.walk(source=source,
           file=file,
           before_packages=before_packages,
           package=package,
           service=service)

    return c
class Cookbook(object):
    """
    A cookbook is a collection of Chef resources plus the files and other
    supporting objects needed to run it.

    Resources are rendered in the order they were added; no dependency
    graph is built.
    """

    def __init__(self, name, comment=None):
        """
        Create an empty cookbook.

        name: cookbook name; a default name is substituted when None.
        comment: optional header text written before the resources.
        """
        if name is None:
            self.name = 'blueprint-generated-chef-cookbook'
        else:
            self.name = str(name)
        self.comment = comment
        self.resources = []  # ordered list of Resource objects
        self.files = {}

    def add(self, resource):
        """
        Resources must be added in the order they're expected to run.
        Chef does not support managing dependencies explicitly.
        """
        self.resources.append(resource)

    def directory(self, name, **kwargs):
        """
        Create a directory resource.
        """
        self.add(Resource('directory', name, **kwargs))

    def link(self, name, **kwargs):
        """
        Create a (symbolic) link resource.
        """
        self.add(Resource('link', name, **kwargs))

    def file(self, name, content, **kwargs):
        """
        Create a file or cookbook_file resource depending on whether the
        cookbook is dumped to a string or to files.
        """
        self.add(File(name, content, **kwargs))

    def remote_file(self, name, **kwargs):
        """
        Create a remote_file resource.
        """
        self.add(Resource('remote_file', name, **kwargs))

    def package(self, name, **kwargs):
        """
        Create a package resource provided by the default provider.
        """
        self.add(Resource('package', name, **kwargs))

    def rpm_package(self, name, **kwargs):
        """
        Create a package resource provided by RPM.
        """
        self.add(Resource('rpm_package', name, **kwargs))

    def gem_package(self, name, **kwargs):
        """
        Create a package resource provided by RubyGems.
        """
        self.add(Resource('gem_package', name, **kwargs))

    def execute(self, name, **kwargs):
        """
        Create an execute resource.
        """
        self.add(Resource('execute', name, **kwargs))

    def service(self, name, **kwargs):
        """
        Create a service resource.
        """
        self.add(Resource('service', name, **kwargs))

    def _dump(self, w, inline=False):
        """
        Generate Chef code.  This will call the callable `w` with each
        line of output.  `dumps` and `dumpf` use this to append to a list
        and write to a file with the same code.

        If present, a comment is written first.  Next, resources are written
        in the order they were added to the recipe.
        """
        if self.comment is not None:
            w(self.comment)
        for resource in self.resources:
            w(resource.dumps(inline))

    def dumps(self):
        """
        Generate a string containing Chef code and all file contents.
        """
        out = []
        # Bug fix: the dump step was previously missing, so this method
        # always returned an empty string.  Render inline so file content
        # is embedded rather than referenced as cookbook files.
        self._dump(out.append, inline=True)
        return ''.join(out)

    def dumpf(self, gzip=False):
        """
        Generate files containing Chef code and templates.  The directory
        structure generated is that of a cookbook with a default recipe and
        cookbook files.
        """
        os.mkdir(self.name)
        # metadata.rb is created empty.
        # NOTE(review): confirm whether cookbook metadata (name, version)
        # should be written here.
        f = codecs.open(os.path.join(self.name, 'metadata.rb'), 'w', encoding='utf-8')
        f.close()
        os.mkdir(os.path.join(self.name, 'recipes'))
        filename = os.path.join(self.name, 'recipes/default.rb')
        f = codecs.open(filename, 'w', encoding='utf-8')
        self._dump(f.write, inline=False)
        f.close()
        # Write each cookbook_file's content under files/default/.
        for resource in self.resources:
            if 'cookbook_file' != resource.type:
                continue
            pathname = os.path.join(self.name, 'files/default',
                                    resource.name[1:])
            try:
                os.makedirs(os.path.dirname(pathname))
            except OSError as e:
                if errno.EEXIST != e.errno:
                    raise e
            # Python 2: text content goes through a UTF-8 codec; raw
            # byte strings are written as-is.
            if isinstance(resource.content, unicode):
                f = codecs.open(pathname, 'w', encoding='utf-8')
            else:
                f = open(pathname, 'w')
            f.write(resource.content)
            f.close()
        if gzip:
            filename = 'chef-{0}.tar.gz'.format(self.name)
            tarball = tarfile.open(filename, 'w:gz')
            tarball.add(self.name)
            tarball.close()
            return filename
        return filename
class Resource(dict):
    """
    A Chef resource has a type, a name, and some parameters.  Nothing has
    to be unique as resources are dealt with in order rather than by building
    a dependency graph.

    Parameters are stored as dict items; `type` and `name` are instance
    attributes.
    """

    def __init__(self, type, name, **kwargs):
        """
        Don't instantiate this class directly.  Instead, use the methods made
        available in the Cookbook class.
        """
        super(Resource, self).__init__(**kwargs)
        self.type = type
        self.name = name

    @classmethod
    def _dumps(cls, value, recursive=False):
        """
        Return a value as it should be written.  If the value starts with
        a ':', it will be written as-is.  Otherwise, it will be written as
        a string.
        """
        # The order of these checks matters: booleans must precede the
        # numeric test (True is an int), and sized values (strings) must
        # precede the quoted-string fallback.
        if value is None:
            return 'nil'
        elif True == value:
            return 'true'
        elif False == value:
            return 'false'
        elif any([isinstance(value, t) for t in (int, long, float)]):
            # Numbers are emitted bare.  (Python 2: `long` included.)
            return value
        elif 1 < len(value) and ':' == value[0]:
            # Ruby symbol such as ':restart' — emitted without quoting.
            return value
        elif hasattr(value, 'bare') or isinstance(value, util.BareString):
            # Explicitly marked as raw Ruby code.
            return value
        elif isinstance(value, cls):
            return repr(value)
        elif isinstance(value, list) or isinstance(value, tuple):
            # Render elements recursively; only nested sequences get
            # surrounding brackets so the top level stays a bare list.
            s = ', '.join([cls._dumps(v, True) for v in value])
            if recursive:
                return '[' + s + ']'
            else:
                return s
        # Fallback: quote as a Ruby string.  repr(unicode(...))[1:] strips
        # the leading `u` of the Python 2 unicode repr; `#{` is escaped so
        # Ruby will not interpolate it.
        return repr(unicode(value).replace(u'#{', u'\\#{'))[1:]

    def dumps(self, inline=False):
        """
        Stringify differently depending on the number of options so the
        output always looks like Ruby code should look.  Parentheses are
        always employed here due to grammatical inconsistencies when using
        braces surrounding a block.
        """
        if 0 == len(self):
            # No parameters: single-line declaration.
            return u'{0}({1})\n'.format(self.type, self._dumps(self.name))
        elif 1 == len(self):
            # One parameter: brace-block form.  (Python 2: dict.items()
            # returns a list, hence the [0] subscript.)
            key, value = self.items()[0]
            return u'{0}({1}) {{ {2} {3} }}\n'.format(self.type,
                                                      self._dumps(self.name),
                                                      key,
                                                      self._dumps(value))
        else:
            # Several parameters: do...end block, keys sorted for
            # deterministic output.
            out = [u'{0}({1}) do\n'.format(self.type, self._dumps(self.name))]
            for key, value in sorted(self.iteritems()):
                out.append(u' {0} {1}\n'.format(key, self._dumps(value)))
            out.append('end\n')
            return ''.join(out)
class File(Resource):
    """
    Special Chef file or cookbook_file resource.
    """

    def __init__(self, name, content=None, **kwargs):
        """
        File resources handle their content explicitly because in some
        cases it is not written as a normal parameter.
        """
        super(File, self).__init__('file', name, **kwargs)
        self.content = content

    def dumps(self, inline=False):
        """
        Decide whether to write as a file with content or a cookbook_file
        that leaves its content to be dealt with later.
        """
        if inline:
            if self.content is not None:
                # Embed the content directly and drop the cookbook-file
                # source reference.
                # NOTE(review): `del self.content` makes a second inline
                # dump raise AttributeError, and `del self['source']`
                # assumes a 'source' kwarg was supplied — confirm callers
                # always provide one (Cookbook.file/chef appear to).
                self['content'] = self.content
                del self.content
                self.type = 'file'
                del self['source']
        elif self.content is not None and 'source' in self:
            self.type = 'cookbook_file'
        return super(File, self).dumps(inline)
| 2.609375 | 3 |