hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e38cacb5a536701b8b88de3d26c0ae5970c260cf | 26,511 | py | Python | summarize_from_feedback/query_response_model.py | lumaway/summarize-from-feedback | 065ea4e1607a5822a3d78cc13a8cec63a2919c1b | [
"MIT"
] | 365 | 2020-09-04T15:50:14.000Z | 2022-03-31T04:54:32.000Z | summarize_from_feedback/query_response_model.py | ArturTtoptal/summarize-from-feedback | 56b6bb613a1b58a8aa7a5e29266f65c7b980ee48 | [
"CC-BY-4.0"
] | 14 | 2020-09-06T14:52:29.000Z | 2022-02-18T08:05:06.000Z | summarize_from_feedback/query_response_model.py | ArturTtoptal/summarize-from-feedback | 56b6bb613a1b58a8aa7a5e29266f65c7b980ee48 | [
"CC-BY-4.0"
] | 60 | 2020-09-04T16:33:48.000Z | 2022-01-28T19:01:42.000Z | import json
import os
import re
import shutil
import uuid
from dataclasses import dataclass, field
from typing import Callable, Optional, Set, List, Dict
import blobfile as bf
import numpy as np
import torch
from mpi4py import MPI
import summarize_from_feedback
from summarize_from_feedback.model_layout import ModelLayout
from summarize_from_feedback.models import sample_fns
from summarize_from_feedback.models.loss_functions import softmax_xent_loss_fn
from summarize_from_feedback.models.transformer import Hyperparams
from summarize_from_feedback.models.transformer import build_with_random_weights
from summarize_from_feedback.utils import exact_div, hyperparams
from summarize_from_feedback.utils import blobs
from summarize_from_feedback.utils.dist_utils import (
setup_cuda_device_and_dist,
create_data_parallel_comm,
create_within_replica_comm,
)
from summarize_from_feedback.utils.nested import map_nested
from summarize_from_feedback.utils.torch_utils import nans, to_numpy
def save_exported_model(layout, model, model_H: Hyperparams, save_dir, save_heads: Set[str]):
    """
    Export the model piece-by-piece so it can be reloaded under a different shard layout.

    Exporting a model allows it to be run with a different layout than it was trained with.
    Currently, uploading/loading an exported model is slower than saving/restoring a checkpoint,
    but if we can get exporting to be sufficiently fast, then we could replace legacy_checkpoints.py with
    this "exporting" approach.

    :param layout: current model layout; this rank writes only its own shard files
    :param save_dir: local directory or blob URL to export into
    :param save_heads: names of scalar heads whose weights should also be exported
    """
    if blobs.is_blob_url(save_dir):
        # Stage into a unique local dir first, then upload everything at the end.
        local_dir = os.path.join("/tmp", str(uuid.uuid4()))
    else:
        local_dir = save_dir
    os.makedirs(os.path.join(local_dir, "checkpoint"), exist_ok=True)

    def export_fine_piece(fine_model_piece_dict: dict, chkpt_prefix: str):
        # One file per (piece, shard) so a different layout can re-slice on load.
        # (This nested helper had been stripped from this copy of the module,
        # leaving the calls below dangling; restored from the original source.)
        fine_piece_path = os.path.join(
            local_dir, "checkpoint", f"{chkpt_prefix}_shard_{layout.shard_idx:03d}.pkl"
        )
        torch.save(fine_model_piece_dict, fine_piece_path)
        torch.cuda.synchronize()  # Verify that the piece has finished being written

    # Export the embeddings
    if model.include_input_embeddings:
        export_fine_piece(model.embedding.state_dict(), "input_embeddings")
    if model.include_pos_embeddings:
        export_fine_piece(model.position_embedding.state_dict(), "position_embedding")
    # Export the resblocks
    for resblock_idx, resblock in enumerate(model.torso.resblocks):
        export_fine_piece(resblock.state_dict(), f"resblock_{resblock_idx:04d}")
    # Export the final_layer_norm
    if model.include_final_layer_norm:
        export_fine_piece(model.ln_f.state_dict(), "final_layer_norm")
    # Export the unembeddings
    if model.include_output_unembeddings:
        export_fine_piece({"unembedding_weights": model.unembedding_weights}, "output_unembeddings")
    for head in save_heads:
        export_fine_piece(model.scalar_heads[head].state_dict(), f"output_head_{head}")
    if blobs.is_blob_url(save_dir):
        blobs.parallel_copy_recursive(local_dir, save_dir)
        shutil.rmtree(local_dir)
def parameter_name_to_sharding_dim(name: str) -> Optional[int]:
    """
    Map a parameter name to the dimension along which it is sharded across
    model-parallel ranks.

    :returns: None if all parameters are same on all shards, otherwise the dimension to
        split upon.
    :raises RuntimeError: if ``name`` is not a recognized parameter name.
    """

    def _matches_any_prefix(x: str, prefixes) -> bool:
        # Inlined here because the module-level helper of the same name is not
        # defined in this copy of the module (calling it raised NameError).
        return any(x.startswith(prefix) for prefix in prefixes)

    if name in ["embedding.weight", "position_embedding.weight", "unembedding_weights"]:
        return -1
    if name.startswith("torso.resblocks"):
        match = re.search(r"torso\.resblocks\.\d+\.(.*)", name)
        torso_part = match.group(1)
        # Layer norms are replicated on every shard.
        if torso_part.startswith("ln_1.") or torso_part.startswith("ln_2."):
            return None
        # Column-parallel projections: split on the output dimension.
        if _matches_any_prefix(
            torso_part, ["attn.q_proj", "attn.k_proj", "attn.v_proj", "mlp.c_fc"]
        ):
            return -1
        # Row-parallel projections: split on the input dimension.
        if _matches_any_prefix(torso_part, ["attn.c_proj.weight", "mlp.c_proj.weight"]):
            return -2
        if _matches_any_prefix(torso_part, ["attn.c_proj.bias", "mlp.c_proj.bias"]):
            return None
        raise RuntimeError(f"Unexpected parameter name: {name}")
    if name in ["ln_f.weight", "ln_f.bias"]:
        return None
    raise RuntimeError(f"Unexpected parameter name: {name}")
def load_exported_model(
    layout: ModelLayout,
    model,
    model_H: Hyperparams,
    load_path: str,
    load_heads_map: Dict[str, str],
    use_cache: bool = False,
):
    """
    Load an exported checkpoint into ``model``, re-slicing shard files when the
    current layout's shard count differs from the checkpoint's.

    :param load_heads_map: maps name in model -> name to load from
    """
    if use_cache and blobs.is_blob_url(load_path):
        load_path = blobs.download_directory_cached(load_path)
    with bf.BlobFile(os.path.join(load_path, "info.json")) as f:
        info = json.load(f)
    old_model_H = Hyperparams(**info["model_hparams"])
    original_n_shards = old_model_H.n_shards
    if "n_shards" in info:
        assert info["n_shards"] == original_n_shards
    assert layout.n_shards == model_H.n_shards

    # The helpers below had been stripped from this copy of the module (the
    # code following referenced an undefined `fetch`); restored from the
    # original source, with the two small sharding helpers nested so this
    # function is self-contained.

    def dim_to_shard(name: str) -> Optional[int]:
        # Scalar heads are replicated on all shards; everything else follows
        # the standard sharding scheme.
        if name.startswith("scalar_heads."):
            return None
        return parameter_name_to_sharding_dim(name)

    def get_shard_fix_factor(name: str) -> float:
        # Hack to fix some bugs with our sharding code.
        if name.startswith("torso.resblocks"):
            torso_part = re.search(r"torso\.resblocks\.\d+\.(.*)", name).group(1)
            # bias is added before all-reduce, which means with more shards,
            # the weights are closer to 0 than expected
            if torso_part.startswith(("attn.c_proj.bias", "mlp.c_proj.bias")):
                return float(old_model_H.n_shards) / model_H.n_shards
            if (
                torso_part.startswith(
                    (
                        "attn.q_proj.weight",
                        "attn.k_proj.weight",
                        "attn.q_proj.bias",
                        "attn.k_proj.bias",
                    )
                )
                and old_model_H.use_blocksparse_attn
            ):
                return np.sqrt(np.sqrt(float(old_model_H.n_shards) / old_model_H.heads))
        return 1.0

    def fetch_single_piece(fine_piece_name):
        # Always load onto CPU; the caller's load_state_dict moves tensors.
        with bf.BlobFile(os.path.join(load_path, "checkpoint", fine_piece_name), "rb") as f:
            return torch.load(f, map_location=torch.device("cpu"))

    if original_n_shards % layout.n_shards == 0:
        # Fewer ranks now than at save time: each rank concatenates several
        # checkpoint shards along each parameter's sharding dim.
        n_chkpt_shards_per_rank = exact_div(original_n_shards, layout.n_shards)
        shard_idx_start = n_chkpt_shards_per_rank * layout.shard_idx
        load_shard_idxs = range(shard_idx_start, shard_idx_start + n_chkpt_shards_per_rank)

        def fetch(chkpt_prefix: str, module_name: str = ""):
            sharded_pieces = [
                fetch_single_piece(f"{chkpt_prefix}_shard_{shard_idx:03d}.pkl")
                for shard_idx in load_shard_idxs
            ]
            model_piece = {}
            for k in sharded_pieces[0].keys():
                parameter_name = ".".join([module_name, k]) if module_name else k
                sharding_dim = dim_to_shard(parameter_name)
                if sharding_dim is None:
                    val = sharded_pieces[0][k]
                else:
                    val = torch.cat([piece[k] for piece in sharded_pieces], dim=sharding_dim)
                fix_factor = get_shard_fix_factor(parameter_name)
                # Apply the fix in fp32, then restore the stored dtype.
                model_piece[k] = (val.float() * fix_factor).to(val.dtype)
            return model_piece

    elif layout.n_shards % original_n_shards == 0:
        # More ranks now than at save time: each rank takes one slice of a
        # single checkpoint shard.
        n_ranks_per_chkpt_shard = exact_div(layout.n_shards, original_n_shards)
        shard_idx_to_load = layout.shard_idx // n_ranks_per_chkpt_shard
        shard_slice_idx = layout.shard_idx % n_ranks_per_chkpt_shard

        def fetch(chkpt_prefix: str, module_name: str = ""):
            unsharded_piece = fetch_single_piece(
                f"{chkpt_prefix}_shard_{shard_idx_to_load:03d}.pkl"
            )
            model_piece = {}
            for k in unsharded_piece.keys():
                parameter_name = ".".join([module_name, k]) if module_name else k
                sharding_dim = dim_to_shard(parameter_name)
                if sharding_dim is None:
                    val = unsharded_piece[k]
                else:
                    split_size = exact_div(
                        unsharded_piece[k].size()[sharding_dim], n_ranks_per_chkpt_shard
                    )
                    val = torch.split(
                        unsharded_piece[k], [split_size] * n_ranks_per_chkpt_shard, dim=sharding_dim
                    )[shard_slice_idx]
                fix_factor = get_shard_fix_factor(parameter_name)
                model_piece[k] = (val.float() * fix_factor).to(val.dtype)
            return model_piece

    else:
        raise NotImplementedError(
            f"Tried running a model that was originally created with "
            f"{original_n_shards} shards with {layout.n_shards} shards. The new number "
            f"of shards must evenly divide or be divisible by the original number of shards."
        )
    if model.include_input_embeddings:
        model.embedding.load_state_dict(fetch("input_embeddings", "embedding"))
    if model.include_pos_embeddings:
        model.position_embedding.load_state_dict(
            fetch("position_embedding", "position_embedding")
        )
    # fetch the resblocks
    for resblock_idx, resblock in enumerate(model.torso.resblocks):
        d = fetch(f"resblock_{resblock_idx:04d}", f"torso.resblocks.{resblock_idx}")
        if not model_H.get("key_bias"):
            # Drop key-projection biases the current model doesn't have.
            d = {k: v for (k, v) in d.items() if "attn.k_proj.bias" not in k}
        resblock.load_state_dict(d)
    # fetch the final_layer_norm
    if model.include_final_layer_norm:
        model.ln_f.load_state_dict(fetch("final_layer_norm", "ln_f"))
    # fetch the unembeddings
    if model.include_output_unembeddings:
        # Pull in the one piece
        model.load_state_dict(fetch("output_unembeddings"), strict=False)
    for model_head, save_head in load_heads_map.items():
        model.scalar_heads[model_head].load_state_dict(
            fetch(f"output_head_{save_head}", f"scalar_heads.{model_head}")
        )
def _split_query_response_output_parts(x, query_length, response_padding_mask):
    """
    Given an output x with shape [batch, num_responses, query_length + response_length, *rest],
    returns a dictionary with it split into query/response parts with shapes
    [batch, query_length + 1, *rest] and [batch, num_responses, response_length + 1, *rest]

    NOTE: masked_fill_ below mutates a view of ``x`` in place, so padded positions
    of the caller's tensor become NaN too.
    """
    assert x.ndim >= 3
    rest_shape = x.size()[3:]
    d = dict()
    # Add this back if it's ever actually useful
    # d["query"] = torch.cat(
    #     [nans([x.size(0), 1, *rest_shape], dtype=x.dtype, device=x.device), x[:, 0, :query_length]],
    #     dim=1,
    # )
    if query_length > 0:
        # Start one position early: index query_length-1 predicts the first response token.
        d["response"] = x[:, :, query_length - 1 :]
    else:
        # No query: prepend a NaN slot so the result still has response_length + 1 entries.
        d["response"] = torch.cat(
            [
                nans([x.size(0), x.size(1), 1, *rest_shape], dtype=x.dtype, device=x.device),
                x[:, :, :query_length],
            ],
            dim=2,
        )
    # Broadcast the [batch, num_responses, response_length] mask over trailing dims.
    for _ in range(len(rest_shape)):
        response_padding_mask = response_padding_mask.unsqueeze(-1)
    # fill with NaNs in places where response had padding
    d["response"].masked_fill_(
        torch.cat(
            [
                # The leading extra slot is never padding.
                torch.zeros(
                    [x.size(0), x.size(1), 1] + [1 for _ in range(len(rest_shape))],
                    dtype=torch.bool,
                    device=x.device,
                ),
                response_padding_mask,
            ],
            dim=2,
        ),
        np.nan,
    )
    return d
# Sentinel token id marking padded (unused) trailing positions in response tensors.
PADDING_TOKEN = -1
class QueryResponseModel:
    """
    Handles sampling, eval, and training with shared queries.
    """
    # NOTE: a stray `@dataclass` decorator was removed here — this class has
    # methods only (no dataclass fields), and the decorator added a synthesized
    # no-field __init__ that serves no purpose.

    def load(self, load_path, run_params=None, init_heads=(), map_heads=None, use_cache=False):
        """
        Rebuilds everything, but keeps API semantics: model has same layout, and is on the same device, and all heads are the same (although some may be random init)

        :param load_path: directory (local path or blob URL) of an exported model
        :param run_params: optional params whose non-None fields override saved hparams
        :param init_heads: heads to randomly initialize instead of loading
        :param map_heads: maps head name in model -> head name to load from
        :param use_cache: download blob checkpoints through the local cache
        """
        if map_heads is None:
            # Default handled here instead of a shared mutable `{}` default argument.
            map_heads = {}
        if use_cache and blobs.is_blob_url(load_path):
            load_path = blobs.download_directory_cached(load_path)
        with bf.BlobFile(os.path.join(load_path, "info.json")) as f:
            info = json.load(f)
        self.model_hparams = Hyperparams(info["model_hparams"])
        if run_params is not None:
            # Only override hparams that were explicitly set on run_params.
            extra_model_H = {k: v for k, v in run_params.to_json().items() if v is not None}
            self.model_hparams.update(**extra_model_H)
        self.encoder = summarize_from_feedback.encoder
        model = build_with_random_weights(
            layout=self.layout,
            n_vocab=self.encoder.n_vocab,
            device=self.device,
            model_H=self.model_hparams,
        )
        self.model = self._update_model_with_head_info(model)
        init_heads = set(init_heads or ())
        # Load heads from where map_heads says, or the normal head name by default
        load_heads_map = {
            head: map_heads.get(head, head) for head in self.heads if head not in init_heads
        }
        load_exported_model(
            self.layout,
            self.model,
            self.model_hparams,
            load_path,
            load_heads_map=load_heads_map,
            use_cache=use_cache,
        )
        # Randomly-initialized heads must agree across replicas and shards.
        params_to_init = []
        for head in init_heads:
            params_to_init.append(self.model.scalar_heads[head].weight)
            params_to_init.append(self.model.scalar_heads[head].bias)
        self._sync_params(params_to_init, heads_to_init=init_heads)
        self.barrier("load_finished")
    def barrier(self, name=""):
        """
        When called on all ranks, waits until all ranks are done

        :param name: optional label identifying this barrier to the comms.
        """
        # Synchronize both within each replica and across data-parallel replicas.
        self.in_replica_comm.barrier(name)
        self.dp_comm.barrier(name)
    def _eval(
        self, queries, responses, eval_fn: Callable = None, eval_inputs=None, **model_call_kwargs
    ):
        """
        Run a forward pass. Return all the head values, broadcasted within each replica. If an
        eval_fn is passed, return its output across all replicas.
        :return: A dict with structure:
            eval_stats: structure from eval_fn
            [head]: {
                # disabled for now: query: [batch, query_len+1]
                response: [batch, num_responses, sample_len+1]
            }
        """
        queries = queries.to(self.device)
        responses = responses.to(self.device)
        if eval_inputs is not None:
            eval_inputs = map_nested(eval_inputs, lambda x: x.to(self.device))
        # Zero out padding tokens so the model sees valid ids; keep the mask.
        mask, responses = _zero_padding_tokens(responses)
        responses_per_query = responses.size(1)
        # NOTE: could make this more efficient by sharing context work
        tiled_queries = queries.unsqueeze(1).repeat(1, responses_per_query, 1)
        # Flatten (batch, responses_per_query) into one batch dim for the model.
        run_tokens = torch.cat([tiled_queries, responses], dim=2).flatten(0, 1)
        self.model.eval()
        with torch.no_grad():
            outputs = self.model(run_tokens, **model_call_kwargs)
        outputs_mb = dict()
        ret = dict()
        # Split each head's (and, if present, the logits') outputs back into
        # per-response parts; eval_fn sees everything, caller gets heads only.
        for k in list(self.heads) + (["logits"] if self.logit_head else []):
            reshaped = outputs[k].view(-1, responses_per_query, *outputs[k].size()[1:])
            d = _split_query_response_output_parts(reshaped, queries.size(1), mask)
            outputs_mb[k] = d
            if k in self.heads:
                ret[k] = d
        if eval_fn is not None:
            ret["eval_stats"] = eval_fn(outputs_mb, eval_inputs)
        return ret
    def _sample(
        self,
        context_tokens,
        sample_len,
        partial_responses=None,
        responses_per_query=1,
        sample_H=None,
        **model_call_kwargs,
    ):
        """
        Sample ``responses_per_query`` completions of length ``sample_len`` per query.

        :return: A dict with structure:
            samples: [batch, num_responses, sample_len]
            logprobs: [batch, num_responses, sample_len]
            [head]: {
                response: [batch, num_responses, sample_len+1]
            }
        """
        context_tokens = context_tokens.to(self.device)
        self.model.eval()
        n_batch, query_length = context_tokens.size()
        assert self.logit_head, f"Cannot sample without logit_head"
        # NOTE: could do this more efficiently by sharing context work
        repeated_context_tokens = context_tokens.unsqueeze(1).repeat(1, responses_per_query, 1)
        # Combine query and response so far into new query to be passed to _sample()
        if partial_responses is not None:
            partial_responses = partial_responses.to(self.device)
            repeated_context_tokens = torch.cat((repeated_context_tokens, partial_responses), 2)
        sample_fn = _get_sample_fn(sample_H)
        # Flatten (batch, responses_per_query) into one batch dim for sampling.
        flat_context_tokens = repeated_context_tokens.flatten(0, 1)
        flat_n_batch, context_len = flat_context_tokens.shape
        assert sample_len + context_len <= self.model_hparams["n_ctx"] + 1, (
            f"Requested completion {sample_len} is too long for"
            f"context {context_len} and model context_len {self.model_hparams.n_ctx}"
        )
        # `sample` is the module-level function; the model is its first argument.
        results = sample(
            self.model,
            flat_context_tokens,
            sample_len=sample_len,
            sample_fn=sample_fn,
            model_output_keys=self.heads,
            **model_call_kwargs,
        )
        samples = results["tokens"]
        logprobs = results["logprobs"]
        assert samples.size(-2) == n_batch * responses_per_query
        assert logprobs.size(-2) == n_batch * responses_per_query
        assert samples.size(-1) == sample_len, f"{samples.size()} vs {sample_len}"
        assert logprobs.size(-1) == sample_len, f"{logprobs.size()} vs {sample_len}"
        samples = samples.view(n_batch, responses_per_query, sample_len)
        logprobs = logprobs.view(n_batch, responses_per_query, sample_len)
        output = dict(contexts=context_tokens, samples=samples, logprobs=logprobs)
        mask, _ = _zero_padding_tokens(output["samples"])
        # NOTE: sample doesn't return eval'ed values on final token
        mask = mask[:, :, :-1]
        for k in self.heads:
            reshaped = results[k].view(n_batch, responses_per_query, *results[k].shape[1:])
            output[k] = _split_query_response_output_parts(reshaped, query_length, mask)
        return output
| 37.711238 | 165 | 0.629097 | import json
import os
import re
import shutil
import uuid
from dataclasses import dataclass, field
from typing import Callable, Optional, Set, List, Dict
import blobfile as bf
import numpy as np
import torch
from mpi4py import MPI
import summarize_from_feedback
from summarize_from_feedback.model_layout import ModelLayout
from summarize_from_feedback.models import sample_fns
from summarize_from_feedback.models.loss_functions import softmax_xent_loss_fn
from summarize_from_feedback.models.transformer import Hyperparams
from summarize_from_feedback.models.transformer import build_with_random_weights
from summarize_from_feedback.utils import exact_div, hyperparams
from summarize_from_feedback.utils import blobs
from summarize_from_feedback.utils.dist_utils import (
setup_cuda_device_and_dist,
create_data_parallel_comm,
create_within_replica_comm,
)
from summarize_from_feedback.utils.nested import map_nested
from summarize_from_feedback.utils.torch_utils import nans, to_numpy
@dataclass
class RunParams(hyperparams.HParams):
    """Runtime settings layered on top of a saved model's hyperparameters at load time."""

    # Store embedding / conv weights in fp16 to save memory.
    fp16_embedding_weights: bool = False
    fp16_conv_weights: bool = False
    # Dropout rates (0.0 disables).
    attn_dropout: float = 0.0
    resid_dropout: float = 0.0
    emb_dropout: float = 0.0
    # Number of model-parallel shards per replica.
    n_shards: int = 1

    def all_gpu_layout(self):
        """Standard layout spreading n_shards-sized replicas over all MPI ranks."""
        return ModelLayout.standard(
            n_shards=self.n_shards,
            total_gpus=MPI.COMM_WORLD.Get_size(),
            my_rank=MPI.COMM_WORLD.Get_rank(),
        )
def sample(
    self,
    contexts,
    sample_len,
    sample_fn,
    act_dtype=torch.float16,
    model_output_keys=(),
    **model_call_kwargs,
):
    """
    Autoregressively sample ``sample_len`` tokens following ``contexts``.

    NOTE(review): despite the ``self`` parameter name this is a module-level
    function; the model is passed as its first argument (see
    QueryResponseModel._sample).

    :param contexts: [n_batch, n_ctx] context token ids
    :param sample_fn: maps logits -> object with .tokens and .logits for the next position
    :param model_output_keys: extra model output heads to collect per sampled position
    :return: dict with tokens [n_batch, sample_len], logprobs [n_batch, sample_len],
        plus one concatenated entry per key in model_output_keys
    """
    assert not self.training
    n_batch, n_ctx = contexts.shape
    with torch.no_grad():
        tokens = []
        logprobs = []
        extra_outputs = []
        # Prime the hidden-state cache with the full context; keep the last
        # position's logits to pick the first sampled token.
        output = self(contexts, act_dtype=act_dtype, **model_call_kwargs)
        past_hidden_state = output["hidden_state"].detach()
        prev_logits = output["logits"][:, -1:, :]
        for sample_t in range(n_ctx, n_ctx + sample_len):
            new = sample_fn(prev_logits)
            new_tokens, new_logits = new.tokens, new.logits
            # Negative xent of the chosen token == its log-probability.
            new_logprobs = -softmax_xent_loss_fn(
                dict(logits=new_logits.float()), dict(targets=new_tokens), reduction="none"
            )
            assert new_tokens.shape == (n_batch, 1)
            assert new_logprobs.shape == (n_batch, 1)
            tokens.append(new_tokens)
            logprobs.append(new_logprobs)
            extra_outputs.append({k: output[k] for k in model_output_keys})
            # NOTE: last iteration is thrown away
            output = self(
                new_tokens, hidden_state=past_hidden_state, act_dtype=act_dtype, **model_call_kwargs
            )
            prev_logits = output["logits"]
            past_hidden_state = past_hidden_state.concat_with(output["hidden_state"].detach())
        tokens = torch.cat(tokens, dim=1)
        logprobs = torch.cat(logprobs, dim=1)
        extra_outputs = {
            k: torch.cat([extra[k] for extra in extra_outputs], dim=1) for k in model_output_keys
        }
        return dict(tokens=tokens, logprobs=logprobs, **extra_outputs)
class ModelWithHeads(torch.nn.Module):
    """
    Wraps a transformer with named scalar output heads (one Linear(d_model, 1) each)
    and mirrors commonly-used attributes of the wrapped model.
    """

    def __init__(self, model, scalar_heads, d_model, init_scales=1.0):
        """
        :param scalar_heads: names of the scalar heads to create
        :param init_scales: float, or dict head-name -> float, scaling head init std
        """
        super().__init__()
        self.model = model
        self.scalar_head_names = scalar_heads
        if not isinstance(init_scales, dict):
            init_scales = {head_name: init_scales for head_name in scalar_heads}
        self.scalar_heads = torch.nn.ModuleDict()
        for name in self.scalar_head_names:
            head = torch.nn.Linear(d_model, 1)
            # Scale weight init by 1/sqrt(fan_in + 1); bias starts at zero.
            init_std = init_scales.get(name, 1.0) / np.sqrt(d_model + 1)
            torch.nn.init.normal_(head.weight, std=init_std)
            torch.nn.init.zeros_(head.bias)
            self.scalar_heads[name] = head
        # Forward the wrapped model's attributes so the wrapper is a drop-in.
        for attr in [
            "include_input_embeddings",
            "embedding",
            "include_pos_embeddings",
            "position_embedding",
            "include_final_layer_norm",
            "include_output_unembeddings",
            "ln_f",
            "unembedding_weights",
            "torso",
            "mp_comm",
            "n_ctx",
        ]:
            if hasattr(self.model, attr):
                setattr(self, attr, getattr(self.model, attr))

    def forward(self, *args, **kwargs):
        """Run the wrapped model, then add one scalar per position for each head."""
        outputs = self.model(*args, **kwargs)
        x = outputs["acts"]
        for name, head in self.scalar_heads.items():
            # Cast activations to the head's dtype; squeeze the size-1 output dim.
            outputs[name] = torch.squeeze(head(x.type(head.weight.dtype)), dim=-1)
        return outputs

    def act_shape(self, in_shape):
        """Delegate activation-shape computation to the wrapped model."""
        return self.model.act_shape(in_shape)
@dataclass
class ModelSpec(hyperparams.HParams):
    """Describes which exported model to load and how to run it."""

    device: str = "cuda"
    # Directory (local path or blob URL) of an exported model.
    load_path: str = None
    use_cache: bool = True
    # Human-readable name; name() falls back to load_path when unset.
    short_name: Optional[str] = None
    # Heads to randomly initialize rather than load from the checkpoint.
    init_heads: Optional[List[str]] = None
    # Maps head name in the model -> head name to load from.
    map_heads: Dict[str, str] = field(default_factory=dict)
    run_params: RunParams = field(default_factory=RunParams)

    def name(self):
        """Short name if set, else the load path; raises if neither is set."""
        if self.short_name is not None:
            return self.short_name
        elif self.load_path is not None:
            return self.load_path
        else:
            raise NotImplementedError
def save_exported_model(layout, model, model_H: Hyperparams, save_dir, save_heads: Set[str]):
    """
    Exporting a model allows it to be run with a different layout than it was trained with.
    Currently, uploading/loading an exported model is slower than saving/restoring a checkpoint,
    but if we can get exporting to be sufficiently fast, then we could replace legacy_checkpoints.py with
    this "exporting" approach.

    :param layout: current model layout; this rank writes only its own shard's files
    :param save_dir: local directory or blob URL to export into
    :param save_heads: names of scalar heads whose weights should also be exported
    """
    if blobs.is_blob_url(save_dir):
        # Stage into a unique local dir first, then upload everything at the end.
        local_dir = os.path.join("/tmp", str(uuid.uuid4()))
    else:
        local_dir = save_dir
    os.makedirs(os.path.join(local_dir, "checkpoint"), exist_ok=True)

    def export_fine_piece(fine_model_piece_dict: dict, chkpt_prefix: str):
        # One file per (piece, shard) so a different layout can re-slice on load.
        fine_piece_path = os.path.join(
            local_dir, "checkpoint", f"{chkpt_prefix}_shard_{layout.shard_idx:03d}.pkl"
        )
        # print(f"Uploading fine_piece: {fine_piece_path}")
        torch.save(fine_model_piece_dict, fine_piece_path)
        torch.cuda.synchronize()  # Verify that the piece has finished being written

    # Export the embeddings
    if model.include_input_embeddings:
        export_fine_piece(model.embedding.state_dict(), "input_embeddings")
    if model.include_pos_embeddings:
        export_fine_piece(model.position_embedding.state_dict(), "position_embedding")
    # Export the resblocks
    for resblock_idx, resblock in enumerate(model.torso.resblocks):
        export_fine_piece(resblock.state_dict(), f"resblock_{resblock_idx:04d}")
    # Export the final_layer_norm
    if model.include_final_layer_norm:
        export_fine_piece(model.ln_f.state_dict(), "final_layer_norm")
    # Export the unembeddings
    if model.include_output_unembeddings:
        export_fine_piece({"unembedding_weights": model.unembedding_weights}, "output_unembeddings")
    for head in save_heads:
        export_fine_piece(model.scalar_heads[head].state_dict(), f"output_head_{head}")
    if blobs.is_blob_url(save_dir):
        blobs.parallel_copy_recursive(local_dir, save_dir)
        shutil.rmtree(local_dir)
def _matches_any_prefix(x, prefixes):
return any([x.startswith(prefix) for prefix in prefixes])
def dim_to_shard(name: str) -> Optional[int]:
    """Sharding dimension for a state-dict entry; scalar heads are replicated."""
    # heads should be the same on all shards
    if name.startswith("scalar_heads."):
        return None
    # Everything else follows the standard parameter sharding scheme.
    return parameter_name_to_sharding_dim(name)
def parameter_name_to_sharding_dim(name: str) -> Optional[int]:
    """
    :returns: None if all parameters are same on all shards, otherwise the dimension to
    split upon.
    """
    # Embedding-like tables are split along their last dimension.
    if name in ("embedding.weight", "position_embedding.weight", "unembedding_weights"):
        return -1
    if not name.startswith("torso.resblocks"):
        # Final layer norm is replicated; anything else is unknown.
        if name in ("ln_f.weight", "ln_f.bias"):
            return None
        raise RuntimeError(f"Unexpected parameter name: {name}")
    # Strip the "torso.resblocks.<idx>." prefix to inspect the per-block parameter.
    torso_part = re.search(r"torso\.resblocks\.\d+\.(.*)", name).group(1)
    if torso_part.startswith(("ln_1.", "ln_2.")):
        return None
    # Column-parallel projections: split on the output dimension.
    if torso_part.startswith(("attn.q_proj", "attn.k_proj", "attn.v_proj", "mlp.c_fc")):
        return -1
    # Row-parallel projections: split on the input dimension.
    if torso_part.startswith(("attn.c_proj.weight", "mlp.c_proj.weight")):
        return -2
    # Row-parallel biases are replicated (added after the all-reduce).
    if torso_part.startswith(("attn.c_proj.bias", "mlp.c_proj.bias")):
        return None
    raise RuntimeError(f"Unexpected parameter name: {name}")
def get_shard_fix_factor(name: str, model_H: Hyperparams, old_model_H: Hyperparams) -> float:
    """
    Multiplier applied to a loaded parameter to compensate for shard-count-dependent
    scaling quirks when re-sharding a checkpoint; 1.0 for unaffected parameters.
    """
    # Hack to fix some bugs with our sharding code
    if name.startswith("torso.resblocks"):
        match = re.search(r"torso\.resblocks\.\d+\.(.*)", name)
        torso_part = match.group(1)
        # bias is added before all-reduce, which means with more shards, the
        # weights are closer to 0 than expected
        if _matches_any_prefix(torso_part, ["attn.c_proj.bias", "mlp.c_proj.bias"]):
            return float(old_model_H.n_shards) / model_H.n_shards
        if (
            _matches_any_prefix(
                torso_part,
                [
                    "attn.q_proj.weight",
                    "attn.k_proj.weight",
                    "attn.q_proj.bias",
                    "attn.k_proj.bias",
                ],
            )
            and old_model_H.use_blocksparse_attn
        ):
            # Blocksparse-attention q/k rescale: fourth root of n_shards/heads.
            return np.sqrt(np.sqrt(float(old_model_H.n_shards) / old_model_H.heads))
    return 1.0
def load_exported_model(
    layout: ModelLayout,
    model,
    model_H: Hyperparams,
    load_path: str,
    load_heads_map: Dict[str, str],
    use_cache: bool = False,
):
    """
    Load an exported checkpoint into ``model``, re-slicing shard files when the
    current layout's shard count differs from the checkpoint's. The shard counts
    must divide one another.

    :param load_heads_map: maps name in model -> name to load from
    """
    if use_cache and blobs.is_blob_url(load_path):
        load_path = blobs.download_directory_cached(load_path)
    with bf.BlobFile(os.path.join(load_path, "info.json")) as f:
        info = json.load(f)
    old_model_H = Hyperparams(**info["model_hparams"])
    original_n_shards = old_model_H.n_shards
    if "n_shards" in info:
        assert info["n_shards"] == original_n_shards
    assert layout.n_shards == model_H.n_shards
    # print("orig n_shards", original_n_shards, "new n_shards", layout.n_shards)

    def fetch_single_piece(fine_piece_name):
        # Always load onto CPU; load_state_dict moves tensors to the model's device.
        with bf.BlobFile(os.path.join(load_path, "checkpoint", fine_piece_name), "rb") as f:
            return torch.load(f, map_location=torch.device("cpu"))

    if original_n_shards % layout.n_shards == 0:
        # Fewer ranks now than at save time: each rank concatenates several
        # checkpoint shards along each parameter's sharding dim.
        n_chkpt_shards_per_rank = exact_div(original_n_shards, layout.n_shards)
        shard_idx_start = n_chkpt_shards_per_rank * layout.shard_idx
        load_shard_idxs = range(shard_idx_start, shard_idx_start + n_chkpt_shards_per_rank)

        def fetch(chkpt_prefix: str, module_name: str = ""):
            sharded_pieces = [
                fetch_single_piece(f"{chkpt_prefix}_shard_{shard_idx:03d}.pkl")
                for shard_idx in load_shard_idxs
            ]
            model_piece = {}
            for k in sharded_pieces[0].keys():
                parameter_name = ".".join([module_name, k]) if module_name else k
                sharding_dim = dim_to_shard(parameter_name)
                if sharding_dim is None:
                    # Replicated parameter: any shard's copy will do.
                    val = sharded_pieces[0][k]
                else:
                    val = torch.cat([piece[k] for piece in sharded_pieces], dim=sharding_dim)
                # Apply the scaling fix in fp32, then restore the stored dtype.
                fix_factor = get_shard_fix_factor(parameter_name, model_H, old_model_H)
                model_piece[k] = (val.float() * fix_factor).to(val.dtype)
            return model_piece

    elif layout.n_shards % original_n_shards == 0:
        # More ranks now than at save time: each rank slices one checkpoint shard.
        n_ranks_per_chkpt_shard = exact_div(layout.n_shards, original_n_shards)
        shard_idx_to_load = layout.shard_idx // n_ranks_per_chkpt_shard
        shard_slice_idx = layout.shard_idx % n_ranks_per_chkpt_shard

        def fetch(chkpt_prefix: str, module_name: str = ""):
            unsharded_piece = fetch_single_piece(
                f"{chkpt_prefix}_shard_{shard_idx_to_load:03d}.pkl"
            )
            model_piece = {}
            for k in unsharded_piece.keys():
                parameter_name = ".".join([module_name, k]) if module_name else k
                sharding_dim = dim_to_shard(parameter_name)
                if sharding_dim is None:
                    val = unsharded_piece[k]
                else:
                    split_size = exact_div(
                        unsharded_piece[k].size()[sharding_dim], n_ranks_per_chkpt_shard
                    )
                    val = torch.split(
                        unsharded_piece[k], [split_size] * n_ranks_per_chkpt_shard, dim=sharding_dim
                    )[shard_slice_idx]
                fix_factor = get_shard_fix_factor(parameter_name, model_H, old_model_H)
                model_piece[k] = (val.float() * fix_factor).to(val.dtype)
            return model_piece

    else:
        raise NotImplementedError(
            f"Tried running a model that was originally created with "
            f"{original_n_shards} shards with {layout.n_shards} shards. The new number "
            f"of shards must evenly divide or be divisible by the original number of shards."
        )
    if model.include_input_embeddings:
        model.embedding.load_state_dict(fetch("input_embeddings", "embedding"))
    if model.include_pos_embeddings:
        model.position_embedding.load_state_dict(
            fetch("position_embedding", "position_embedding")
        )
    # fetch the resblocks
    for resblock_idx, resblock in enumerate(model.torso.resblocks):
        d = fetch(f"resblock_{resblock_idx:04d}", f"torso.resblocks.{resblock_idx}")
        if not model_H.get("key_bias"):
            # Drop key-projection biases the current model doesn't have.
            d = {k: v for (k, v) in d.items() if "attn.k_proj.bias" not in k}
        resblock.load_state_dict(d)
    # fetch the final_layer_norm
    if model.include_final_layer_norm:
        model.ln_f.load_state_dict(fetch("final_layer_norm", "ln_f"))
    # fetch the unembeddings
    if model.include_output_unembeddings:
        # Pull in the one piece
        model.load_state_dict(fetch("output_unembeddings"), strict=False)
    for model_head, save_head in load_heads_map.items():
        model.scalar_heads[model_head].load_state_dict(
            fetch(f"output_head_{save_head}", f"scalar_heads.{model_head}")
        )
def _split_query_response_output_parts(x, query_length, response_padding_mask):
"""
Given an output x with shape [batch, num_responses, query_length + response_length, *rest],
returns a dictionary with it split into query/response parts with shapes
[batch, query_length + 1, *rest] and [batch, num_responses, response_length + 1, *rest]
"""
assert x.ndim >= 3
rest_shape = x.size()[3:]
d = dict()
# Add this back if it's ever actually useful
# d["query"] = torch.cat(
# [nans([x.size(0), 1, *rest_shape], dtype=x.dtype, device=x.device), x[:, 0, :query_length]],
# dim=1,
# )
if query_length > 0:
d["response"] = x[:, :, query_length - 1 :]
else:
d["response"] = torch.cat(
[
nans([x.size(0), x.size(1), 1, *rest_shape], dtype=x.dtype, device=x.device),
x[:, :, :query_length],
],
dim=2,
)
for _ in range(len(rest_shape)):
response_padding_mask = response_padding_mask.unsqueeze(-1)
# fill with NaNs in places where response had padding
d["response"].masked_fill_(
torch.cat(
[
torch.zeros(
[x.size(0), x.size(1), 1] + [1 for _ in range(len(rest_shape))],
dtype=torch.bool,
device=x.device,
),
response_padding_mask,
],
dim=2,
),
np.nan,
)
return d
# Sentinel token id marking padded (unused) trailing positions in response tensors.
PADDING_TOKEN = -1
def _zero_padding_tokens(response_tokens):
    """Return (padding_mask, tokens with padding replaced by id 0)."""
    mask = response_tokens == PADDING_TOKEN
    # Padding must be a suffix: a True in the mask may never precede a False.
    padding_is_suffix = not (mask[:, :, 1:] < mask[:, :, :-1]).any().item()
    assert padding_is_suffix, f"Padding tokens not a suffix {to_numpy(response_tokens)}"
    return mask, torch.masked_fill(response_tokens, mask, 0)
def nested_reduce(ds, f):
    """Reduce a list of identically-structured nested dicts leaf-wise with f."""
    reduced = {}
    for key, sample_value in ds[0].items():
        column = [d[key] for d in ds]
        if isinstance(sample_value, dict):
            # Recurse into nested dicts.
            reduced[key] = nested_reduce(column, f)
        else:
            reduced[key] = f(column)
    return reduced
@dataclass
class SampleHParams(hyperparams.HParams):
    """Sampling hyperparameters: softmax temperature and nucleus top_p (mutually exclusive)."""

    temperature: float = 1.0
    top_p: float = 1.0

    def validate(self, *, prefix=""):
        """At most one of temperature / top_p may differ from 1.0."""
        assert (
            self.temperature == 1.0 or self.top_p == 1.0
        ), f"{prefix or 'SampleHParams'}: Cannot set both temperature ({self.temperature}) and top_p ({self.top_p})"

    @classmethod
    def argmax(cls):
        """Greedy decoding: top_p=0 keeps only the highest-probability token."""
        return cls.from_json(dict(top_p=0))
def _get_sample_fn(H: Optional[SampleHParams] = None):
    """Build the token-sampling function described by H (default: standard, temp 1.0)."""
    if H is None:
        H = SampleHParams()
    if H.top_p == 1.0:
        # Plain temperature sampling.
        return sample_fns.standard(temperature=H.temperature)
    # Nucleus sampling is only valid at temperature 1.0.
    assert H.temperature == 1.0
    return sample_fns.nucleus_sampler(top_p=H.top_p)
class QueryResponseModel:
    """
    Handles sampling, eval, and training with shared queries.

    Construction sets up device/comms for the given layout and loads the model
    described by a ModelSpec, optionally wrapped with scalar heads.
    """
    def __init__(
        self, spec: ModelSpec, *, layout: ModelLayout, logit_head=True, heads=(), init_scales=1.0
    ):
        """
        Set up device and comms for ``layout``, then load the model from ``spec``.

        :param logit_head: keep the output unembeddings (required for sampling)
        :param heads: names of scalar heads to attach
        :param init_scales: forwarded to ModelWithHeads for head weight init
        """
        device = setup_cuda_device_and_dist(
            backend="nccl" if spec.device == "cuda" else "gloo",
            master_addr=None,
            device=spec.device,
        )
        self.device = device
        self.layout = layout
        assert self.layout.n_shards == spec.run_params.n_shards
        # dp_comm spans replicas; in_replica_comm spans the shards of one replica.
        self.dp_comm = create_data_parallel_comm(layout)
        self.in_replica_comm = create_within_replica_comm(layout)
        self.logit_head = logit_head
        self.heads = heads
        self.init_scales = init_scales
        self.load(
            spec.load_path,
            run_params=spec.run_params,
            init_heads=spec.init_heads,
            map_heads=spec.map_heads,
            use_cache=spec.use_cache,
        )
        if self.device.type == "cuda":
            print(
                f"Loaded model to {self.device}. CUDA memory allocated: "
                f"{torch.cuda.memory_allocated(device=self.device) / 1e9:.2f} GB"
            )
    def _sync_params(self, params_to_init, heads_to_init):
        """
        Make freshly-initialized head parameters identical everywhere: broadcast
        from the zeroth replica across data-parallel ranks, then from the zeroth
        shard within each replica.
        """
        if self.layout.n_replicas > 1:
            for param in params_to_init:
                self.dp_comm.broadcast(
                    param.data,
                    src=self.layout.dp_sibling_ranks[0],
                    name="broadcast_params_from_zeroeth_replica",
                )
        # NOTE(review): `> 0` is always true; presumably `> 1` was intended,
        # though broadcasting within a single-shard comm should be harmless — confirm.
        if self.layout.n_shards > 0:
            params_to_sync_shards = []
            for head in heads_to_init:
                params_to_sync_shards.append(self.model.scalar_heads[head].weight)
                params_to_sync_shards.append(self.model.scalar_heads[head].bias)
            for param in params_to_sync_shards:
                self.model.mp_comm.broadcast(
                    param.data,
                    src=self.layout.mp_sibling_ranks[0],
                    name="broadcast_params_from_zeroeth_shard",
                )
    def _update_model_with_head_info(self, model):
        """Optionally strip the unembedding, wrap with scalar heads, move to device."""
        if not self.logit_head:
            # No sampling needed: drop the unembedding to save memory.
            model.include_output_unembeddings = False
            model.unembedding_weights = None
        model = ModelWithHeads(
            model,
            scalar_heads=list(self.heads),
            d_model=model.d_model,
            init_scales=self.init_scales,
        )
        model = model.to(self.device)
        return model
    def load(self, load_path, run_params=None, init_heads=(), map_heads={}, use_cache=False):
        """
        Rebuilds everything, but keeps API semantics: model has same layout, and is on the same device, and all heads are the same (although some may be random init)

        :param load_path: directory (local path or blob URL) of an exported model
        :param run_params: optional params whose non-None fields override saved hparams
        :param init_heads: heads to randomly initialize instead of loading
        :param map_heads: maps head name in model -> head name to load from
            (NOTE(review): mutable default is only read via .get here, so it is safe)
        :param use_cache: download blob checkpoints through the local cache
        """
        if use_cache and blobs.is_blob_url(load_path):
            load_path = blobs.download_directory_cached(load_path)
        with bf.BlobFile(os.path.join(load_path, "info.json")) as f:
            info = json.load(f)
        self.model_hparams = Hyperparams(info["model_hparams"])
        if run_params is not None:
            # Only override hparams that were explicitly set on run_params.
            extra_model_H = {k: v for k, v in run_params.to_json().items() if v is not None}
            self.model_hparams.update(**extra_model_H)
        self.encoder = summarize_from_feedback.encoder
        model = build_with_random_weights(
            layout=self.layout,
            n_vocab=self.encoder.n_vocab,
            device=self.device,
            model_H=self.model_hparams,
        )
        self.model = self._update_model_with_head_info(model)
        init_heads = set(init_heads or ())
        # Load heads from where map_heads says, or the normal head name by default
        load_heads_map = {
            head: map_heads.get(head, head) for head in self.heads if head not in init_heads
        }
        load_exported_model(
            self.layout,
            self.model,
            self.model_hparams,
            load_path,
            load_heads_map=load_heads_map,
            use_cache=use_cache,
        )
        # Randomly-initialized heads must agree across replicas and shards.
        params_to_init = []
        for head in init_heads:
            params_to_init.append(self.model.scalar_heads[head].weight)
            params_to_init.append(self.model.scalar_heads[head].bias)
        self._sync_params(params_to_init, heads_to_init=init_heads)
        self.barrier("load_finished")
def barrier(self, name=""):
"""
When called on all ranks, waits until all ranks are done
"""
self.in_replica_comm.barrier(name)
self.dp_comm.barrier(name)
    def _eval(
        self, queries, responses, eval_fn: Callable = None, eval_inputs=None, **model_call_kwargs
    ):
        """
        Run a forward pass. Return all the head values, broadcasted within each replica. If an
        eval_fn is passed, return its output across all replicas.

        :param queries: [batch, query_len] token tensor; moved to self.device.
        :param responses: [batch, num_responses, response_len] token tensor;
            padding tokens are zeroed and masked before the forward pass.
        :param eval_fn: optional callable taking (per-key outputs, eval_inputs);
            its result is returned under 'eval_stats'.
        :param eval_inputs: optional nested structure of tensors for eval_fn;
            moved to self.device.

        :return: A dict with structure:
            eval_stats: structure from eval_fn
            [head]: {
                # disabled for now: query: [batch, query_len+1]
                response: [batch, num_responses, sample_len+1]
            }
        """
        queries = queries.to(self.device)
        responses = responses.to(self.device)
        if eval_inputs is not None:
            eval_inputs = map_nested(eval_inputs, lambda x: x.to(self.device))
        # Zero out padding and remember where the real response tokens are.
        mask, responses = _zero_padding_tokens(responses)
        responses_per_query = responses.size(1)
        # NOTE: could make this more efficient by sharing context work
        tiled_queries = queries.unsqueeze(1).repeat(1, responses_per_query, 1)
        # Concatenate each (query, response) pair and fold the per-query
        # response dimension into the batch dimension for the model call.
        run_tokens = torch.cat([tiled_queries, responses], dim=2).flatten(0, 1)
        self.model.eval()
        with torch.no_grad():
            outputs = self.model(run_tokens, **model_call_kwargs)
        outputs_mb = dict()
        ret = dict()
        # Logits are included for eval_fn's benefit only; head values are
        # always part of the returned dict.
        for k in list(self.heads) + (["logits"] if self.logit_head else []):
            # Un-flatten back to [batch, num_responses, ...].
            reshaped = outputs[k].view(-1, responses_per_query, *outputs[k].size()[1:])
            d = _split_query_response_output_parts(reshaped, queries.size(1), mask)
            outputs_mb[k] = d
            if k in self.heads:
                ret[k] = d
        if eval_fn is not None:
            ret["eval_stats"] = eval_fn(outputs_mb, eval_inputs)
        return ret
    def _sample(
        self,
        context_tokens,
        sample_len,
        partial_responses=None,
        responses_per_query=1,
        sample_H=None,
        **model_call_kwargs,
    ):
        """
        Sample responses_per_query continuations for each context.

        :param context_tokens: [batch, query_len] token tensor; moved to self.device.
        :param sample_len: number of tokens to sample per response.
        :param partial_responses: optional [batch, responses_per_query, len] tokens
            appended to each repeated context so sampling resumes mid-response.
        :param sample_H: sampling hyperparameters, consumed by _get_sample_fn.

        :return: A dict with structure:
            samples: [batch, num_responses, sample_len]
            logprobs: [batch, num_responses, sample_len]
            [head]: {
                response: [batch, num_responses, sample_len+1]
            }
        """
        context_tokens = context_tokens.to(self.device)
        self.model.eval()
        n_batch, query_length = context_tokens.size()
        assert self.logit_head, f"Cannot sample without logit_head"
        # NOTE: could do this more efficiently by sharing context work
        repeated_context_tokens = context_tokens.unsqueeze(1).repeat(1, responses_per_query, 1)
        # Combine query and response so far into new query to be passed to _sample()
        if partial_responses is not None:
            partial_responses = partial_responses.to(self.device)
            repeated_context_tokens = torch.cat((repeated_context_tokens, partial_responses), 2)
        sample_fn = _get_sample_fn(sample_H)
        # Fold the per-query response dimension into the batch dimension.
        flat_context_tokens = repeated_context_tokens.flatten(0, 1)
        flat_n_batch, context_len = flat_context_tokens.shape
        assert sample_len + context_len <= self.model_hparams["n_ctx"] + 1, (
            f"Requested completion {sample_len} is too long for"
            f"context {context_len} and model context_len {self.model_hparams.n_ctx}"
        )
        results = sample(
            self.model,
            flat_context_tokens,
            sample_len=sample_len,
            sample_fn=sample_fn,
            model_output_keys=self.heads,
            **model_call_kwargs,
        )
        samples = results["tokens"]
        logprobs = results["logprobs"]
        # Sanity-check the flattened shapes before un-flattening.
        assert samples.size(-2) == n_batch * responses_per_query
        assert logprobs.size(-2) == n_batch * responses_per_query
        assert samples.size(-1) == sample_len, f"{samples.size()} vs {sample_len}"
        assert logprobs.size(-1) == sample_len, f"{logprobs.size()} vs {sample_len}"
        samples = samples.view(n_batch, responses_per_query, sample_len)
        logprobs = logprobs.view(n_batch, responses_per_query, sample_len)
        output = dict(contexts=context_tokens, samples=samples, logprobs=logprobs)
        mask, _ = _zero_padding_tokens(output["samples"])
        # NOTE: sample doesn't return eval'ed values on final token
        mask = mask[:, :, :-1]
        for k in self.heads:
            reshaped = results[k].view(n_batch, responses_per_query, *results[k].shape[1:])
            output[k] = _split_query_response_output_parts(reshaped, query_length, mask)
        return output
| 10,461 | 715 | 527 |
fe6f53a11a4ce63e0af56938460e2f1d561c2562 | 384 | py | Python | ReverseInteger.py | FreeBirdsCrew/Main_Branch | 1e544b04b414e183db4e52d194732f6b0c9ec90e | [
"MIT"
] | null | null | null | ReverseInteger.py | FreeBirdsCrew/Main_Branch | 1e544b04b414e183db4e52d194732f6b0c9ec90e | [
"MIT"
] | null | null | null | ReverseInteger.py | FreeBirdsCrew/Main_Branch | 1e544b04b414e183db4e52d194732f6b0c9ec90e | [
"MIT"
] | null | null | null |
print("123","10")
def addStrings(self, num1: str, num2: str) -> str:
    """Add two non-negative integers given as decimal digit strings.

    Performs grade-school addition from the least-significant digit,
    propagating a carry, and returns the sum as a string.
    """
    digits = []
    carry = 0
    width = max(len(num1), len(num2))
    for pos in range(-1, -(width + 1), -1):
        a = int(num1[pos]) if pos >= -len(num1) else 0
        b = int(num2[pos]) if pos >= -len(num2) else 0
        carry, digit = divmod(a + b + carry, 10)
        digits.append(str(digit))
    if carry > 0:
        digits.append(str(carry))
    return "".join(reversed(digits))
print("123","10")
| 340 | 0 | 23 |
9274a980711b4760f2ec48ff3702850203e8a7b7 | 11,035 | py | Python | .dev_scripts/benchmark_regression/2-benchmark_test.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | 1 | 2022-03-15T07:36:04.000Z | 2022-03-15T07:36:04.000Z | .dev_scripts/benchmark_regression/2-benchmark_test.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | null | null | null | .dev_scripts/benchmark_regression/2-benchmark_test.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | 1 | 2022-03-25T08:40:07.000Z | 2022-03-25T08:40:07.000Z | import argparse
import os
import os.path as osp
import pickle
import re
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
from modelindex.load_model_index import load
from rich.console import Console
from rich.syntax import Syntax
from rich.table import Table
console = Console()
MMCLS_ROOT = Path(__file__).absolute().parents[2]
METRICS_MAP = {
'Top 1 Accuracy': 'accuracy_top-1',
'Top 5 Accuracy': 'accuracy_top-5'
}
if __name__ == '__main__':
main()
| 32.647929 | 79 | 0.587676 | import argparse
import os
import os.path as osp
import pickle
import re
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
from modelindex.load_model_index import load
from rich.console import Console
from rich.syntax import Syntax
from rich.table import Table
console = Console()
MMCLS_ROOT = Path(__file__).absolute().parents[2]
METRICS_MAP = {
'Top 1 Accuracy': 'accuracy_top-1',
'Top 5 Accuracy': 'accuracy_top-5'
}
def parse_args(argv=None):
    """Parse command-line options for the benchmark test launcher.

    :param argv: optional list of argument strings; defaults to sys.argv[1:].
        Exposed as a parameter so the parser can be exercised programmatically
        (backward compatible: calling with no arguments behaves as before).
    :return: populated argparse.Namespace
    """
    parser = argparse.ArgumentParser(
        description="Test all models' accuracy in model-index.yml")
    parser.add_argument(
        'partition', type=str, help='Cluster partition to use.')
    parser.add_argument('checkpoint_root', help='Checkpoint file root path.')
    parser.add_argument(
        '--job-name',
        type=str,
        default='cls-test-benchmark',
        help='Slurm job name prefix')
    parser.add_argument('--port', type=int, default=29666, help='dist port')
    parser.add_argument(
        '--models', nargs='+', type=str, help='Specify model names to run.')
    parser.add_argument(
        '--work-dir',
        default='work_dirs/benchmark_test',
        help='the dir to save metric')
    parser.add_argument(
        '--run', action='store_true', help='run script directly')
    parser.add_argument(
        '--local',
        action='store_true',
        help='run at local instead of cluster.')
    parser.add_argument(
        '--mail', type=str, help='Mail address to watch test status.')
    # Bug fix: the help text previously duplicated --mail's "Mail address"
    # description; this option selects notification event types, not an address.
    parser.add_argument(
        '--mail-type',
        nargs='+',
        default=['BEGIN'],
        choices=['NONE', 'BEGIN', 'END', 'FAIL', 'REQUEUE', 'ALL'],
        help='Mail notification types to watch test status.')
    parser.add_argument(
        '--quotatype',
        default=None,
        choices=['reserved', 'auto', 'spot'],
        help='Quota type, only available for phoenix-slurm>=0.2')
    parser.add_argument(
        '--summary',
        action='store_true',
        help='Summarize benchmark test results.')
    parser.add_argument('--save', action='store_true', help='Save the summary')
    args = parser.parse_args(argv)
    return args
def create_test_job_batch(commands, model_info, args, port, script_name):
    """Write a per-model sbatch/local job script and queue its launch command.

    :param commands: list of shell commands, mutated in place (echo + sbatch/bash)
    :param model_info: model-index entry providing name, config path and weights URL
    :param args: parsed CLI namespace (partition, checkpoint_root, mail, ...)
    :param port: dist port embedded in this job's test command
    :param script_name: path of the test entry script (tools/test.py)
    :return: path of the generated job script, or None when the checkpoint
        file cannot be found (a warning is printed in that case).
    """
    fname = model_info.name
    config = Path(model_info.config)
    assert config.exists(), f'{fname}: {config} not found.'
    # Weight URLs share this public prefix; the checkpoint tree (local or s3)
    # mirrors the path that follows it.
    http_prefix = 'https://download.openmmlab.com/mmclassification/'
    if 's3://' in args.checkpoint_root:
        from mmcv.fileio import FileClient
        from petrel_client.common.exception import AccessDeniedError
        file_client = FileClient.infer_client(uri=args.checkpoint_root)
        checkpoint = file_client.join_path(
            args.checkpoint_root, model_info.weights[len(http_prefix):])
        try:
            exists = file_client.exists(checkpoint)
        except AccessDeniedError:
            # Treat inaccessible buckets the same as a missing checkpoint.
            exists = False
    else:
        checkpoint_root = Path(args.checkpoint_root)
        checkpoint = checkpoint_root / model_info.weights[len(http_prefix):]
        exists = checkpoint.exists()
    if not exists:
        print(f'WARNING: {fname}: {checkpoint} not found.')
        return None
    job_name = f'{args.job_name}_{fname}'
    work_dir = Path(args.work_dir) / fname
    work_dir.mkdir(parents=True, exist_ok=True)
    # Mail directives only make sense with an address and a non-NONE type.
    if args.mail is not None and 'NONE' not in args.mail_type:
        mail_cfg = (f'#SBATCH --mail {args.mail}\n'
                    f'#SBATCH --mail-type {args.mail_type}\n')
    else:
        mail_cfg = ''
    if args.quotatype is not None:
        quota_cfg = f'#SBATCH --quotatype {args.quotatype}\n'
    else:
        quota_cfg = ''
    launcher = 'none' if args.local else 'slurm'
    runner = 'python' if args.local else 'srun python'
    # 8-GPU single-node job; results are pickled to <work_dir>/result.pkl for
    # the later --summary pass.
    job_script = (f'#!/bin/bash\n'
                  f'#SBATCH --output {work_dir}/job.%j.out\n'
                  f'#SBATCH --partition={args.partition}\n'
                  f'#SBATCH --job-name {job_name}\n'
                  f'#SBATCH --gres=gpu:8\n'
                  f'{mail_cfg}{quota_cfg}'
                  f'#SBATCH --ntasks-per-node=8\n'
                  f'#SBATCH --ntasks=8\n'
                  f'#SBATCH --cpus-per-task=5\n\n'
                  f'{runner} -u {script_name} {config} {checkpoint} '
                  f'--out={work_dir / "result.pkl"} --metrics accuracy '
                  f'--out-items=none '
                  f'--cfg-option dist_params.port={port} '
                  f'--launcher={launcher}\n')
    with open(work_dir / 'job.sh', 'w') as f:
        f.write(job_script)
    commands.append(f'echo "{config}"')
    if args.local:
        commands.append(f'bash {work_dir}/job.sh')
    else:
        commands.append(f'sbatch {work_dir}/job.sh')
    return work_dir / 'job.sh'
def test(args):
    """Build (and optionally submit) one test job per model in model-index.yml.

    Filters models by the --models regex list, generates a job script per model
    via create_test_job_batch, previews the scripts with rich, and submits them
    only when --run was given.
    """
    # parse model-index.yml
    model_index_file = MMCLS_ROOT / 'model-index.yml'
    model_index = load(str(model_index_file))
    model_index.build_models_with_collections()
    models = OrderedDict({model.name: model for model in model_index.models})
    script_name = osp.join('tools', 'test.py')
    port = args.port
    commands = []
    if args.models:
        # Keep only models whose names match any user-supplied pattern.
        patterns = [re.compile(pattern) for pattern in args.models]
        filter_models = {}
        for k, v in models.items():
            if any([re.match(pattern, k) for pattern in patterns]):
                filter_models[k] = v
        if len(filter_models) == 0:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models
    preview_script = ''
    for model_info in models.values():
        if model_info.results is None:
            # Nothing published to regress against; skip this model.
            continue
        script_path = create_test_job_batch(commands, model_info, args, port,
                                            script_name)
        # Remember the last successfully generated script for the preview pane.
        preview_script = script_path or preview_script
        # Each job needs a distinct dist port to avoid collisions.
        port += 1
    command_str = '\n'.join(commands)
    preview = Table()
    preview.add_column(str(preview_script))
    preview.add_column('Shell command preview')
    preview.add_row(
        Syntax.from_path(
            preview_script,
            background_color='default',
            line_numbers=True,
            word_wrap=True),
        Syntax(
            command_str,
            'bash',
            background_color='default',
            line_numbers=True,
            word_wrap=True))
    console.print(preview)
    if args.run:
        os.system(command_str)
    else:
        console.print('Please set "--run" to start the job')
def save_summary(summary_data, models_map, work_dir):
    """Write the benchmark comparison table to <work_dir>/test_benchmark_summary.md.

    :param summary_data: {model_name: {metric: {'expect': float, 'result': float},
        'date': str}}; models mapped to an empty dict (no results) are skipped.
    :param models_map: {model_name: model_info} used to look up each config path
    :param work_dir: pathlib.Path directory receiving the markdown file
    """
    summary_path = work_dir / 'test_benchmark_summary.md'
    headers = [
        'Model', 'Top-1 Expected(%)', 'Top-1 (%)', 'Top-5 Expected (%)',
        'Top-5 (%)', 'Config'
    ]
    # Context manager guarantees the file is closed even if a row fails to
    # format (the original opened/closed the handle manually).
    with open(summary_path, 'w') as file:
        file.write('# Test Benchmark Regression Summary\n')
        file.write('| ' + ' | '.join(headers) + ' |\n')
        file.write('|:' + ':|:'.join(['---'] * len(headers)) + ':|\n')
        for model_name, summary in summary_data.items():
            if len(summary) == 0:
                # Skip models without results
                continue
            row = [model_name]
            # Both metrics render identically; one loop replaces the two
            # copy-pasted branches of the original.
            for metric_name in ('Top 1 Accuracy', 'Top 5 Accuracy'):
                if metric_name in summary:
                    metric = summary[metric_name]
                    row.append(f"{metric['expect']:.2f}")
                    row.append(f"{metric['result']:.2f}")
                else:
                    row.extend([''] * 2)
            model_info = models_map[model_name]
            row.append(model_info.config)
            file.write('| ' + ' | '.join(row) + ' |\n')
    print('Summary file saved at ' + str(summary_path))
def show_summary(summary_data):
    """Render the benchmark comparison as a rich table on the console.

    Each result is colour-coded against its expectation: green when better
    than expected, white when within tolerance, red when worse.
    """
    table = Table(title='Test Benchmark Regression Summary')
    table.add_column('Model')
    for metric in METRICS_MAP:
        table.add_column(f'{metric} (expect)')
        table.add_column(f'{metric}')
    table.add_column('Date')

    def pick_colour(value, expect):
        # 0.01 tolerance so float noise does not flip the colour.
        if value > expect + 0.01:
            return 'green'
        if value >= expect - 0.01:
            return 'white'
        return 'red'

    for model_name, summary in summary_data.items():
        cells = [model_name]
        for metric_key in METRICS_MAP:
            if metric_key not in summary:
                cells.extend(['', ''])
                continue
            expect = summary[metric_key]['expect']
            result = summary[metric_key]['result']
            colour = pick_colour(result, expect)
            cells.append(f'{expect:.2f}')
            cells.append(f'[{colour}]{result:.2f}[/{colour}]')
        cells.append(summary.get('date', ''))
        table.add_row(*cells)
    console.print(table)
def summary(args):
    """Collect each model's result.pkl, compare against model-index metrics,
    display the comparison, and optionally save it as markdown (--save)."""
    model_index_file = MMCLS_ROOT / 'model-index.yml'
    model_index = load(str(model_index_file))
    model_index.build_models_with_collections()
    models = OrderedDict({model.name: model for model in model_index.models})
    work_dir = Path(args.work_dir)
    if args.models:
        # Keep only models whose names match any user-supplied pattern.
        patterns = [re.compile(pattern) for pattern in args.models]
        filter_models = {}
        for k, v in models.items():
            if any([re.match(pattern, k) for pattern in patterns]):
                filter_models[k] = v
        if len(filter_models) == 0:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models
    summary_data = {}
    for model_name, model_info in models.items():
        if model_info.results is None:
            continue
        # Skip if not found result file.
        result_file = work_dir / model_name / 'result.pkl'
        if not result_file.exists():
            # Empty dict marks "ran but no results" for downstream renderers.
            summary_data[model_name] = {}
            continue
        with open(result_file, 'rb') as file:
            results = pickle.load(file)
        # The result file's mtime stands in for the benchmark run date.
        date = datetime.fromtimestamp(result_file.lstat().st_mtime)
        expect_metrics = model_info.results[0].metrics
        # extract metrics
        summary = {'date': date.strftime('%Y-%m-%d')}
        for key_yml, key_res in METRICS_MAP.items():
            if key_yml in expect_metrics:
                assert key_res in results, \
                    f'{model_name}: No metric "{key_res}"'
                expect_result = float(expect_metrics[key_yml])
                result = float(results[key_res])
                summary[key_yml] = dict(expect=expect_result, result=result)
        summary_data[model_name] = summary
    show_summary(summary_data)
    if args.save:
        save_summary(summary_data, models, work_dir)
def main():
    """Entry point: summarize existing results (--summary) or launch test jobs."""
    args = parse_args()
    # Pick the handler based on the --summary flag, then dispatch.
    handler = summary if args.summary else test
    handler(args)


if __name__ == '__main__':
    main()
| 10,357 | 0 | 161 |
1805f9763921ac17e4ac881bbcb1fd731b7b9306 | 27,477 | py | Python | fserve.py | Sultaneous/fbomb | f933d46ecb60b365e4bb450cb0a5a3e9fcf8c33d | [
"MIT"
] | null | null | null | fserve.py | Sultaneous/fbomb | f933d46ecb60b365e4bb450cb0a5a3e9fcf8c33d | [
"MIT"
] | null | null | null | fserve.py | Sultaneous/fbomb | f933d46ecb60b365e4bb450cb0a5a3e9fcf8c33d | [
"MIT"
] | null | null | null | #!/usr/bin/python
#"fserve" by Karim Sultan, September 2020
# fsend is a server (receiver) for the FBomb Protocol
# It communicates with a client (fsend) to transfer a file.
# This protocol makes it easy to drop f-bombs across machines.
#
# This design handles a single connection at a time but accepts
# backlogs. It is meant for individual use, not to serve
# concurrent users.
#
# FBOMB Particulars:
# -> Connect to server
# Client Server
# HI <user:pwd>
# OK <SID> | NOK <Message>
# FSEND <META>
# OK <Proceed> | NOK <Message>
# <DATA>
# OK <Received> | NOK <Message>
# BYE <CID> [CLOSE]
# [CLOSE]
#
# Messages are request / response based.
# Format is:
# [OPCODE] [TOKEN] " | " (optional message separator) <message> (optional message)
# Except for data which is sent in serial chunks.
# See FBomb documentation for details.
# NOTE: Avoid using "print" when possible. Instead, use one of the
# following. They apply formatting, trap the verbose flag, and log data:
#
# pip(msg) for an important success message (green on green)
# pip(msg, alert=True) for important warning messages (red on red)
# note(msg) for loggable verbose mode only messages
# notex(msg) for loggable all times message
import re
import types
import errno
import datetime
import ntpath
import socket
import select
import signal
import os
import hashlib
import getopt
from gamzia.timer import Timer
from gamzia.colours import Colours as C
from gamzia.filedescriptor import *
from gamzia.accountmanager import AccountManager
import sys
argv=sys.argv
argc=len(argv)
# App Info Constants
APP_NAME = "fserve"
APP_VERSION = 1.0
APP_AUTHOR = "Karim Sultan"
APP_DATE = "September 2020"
APP_EMAIL = "karimsultan@hotmail.com"
APP_BLURB = "Server program for file transfer using FBOMB protocol.\n" \
"Non-threaded version; blocking, uses connection queueing."
APP_SYNTAX = "Syntax: fserve [options] <inbound directory>"
# Settings defaults
DEF_ENC = "utf-8" # Default text encoding type
DEF_HOST = "localhost" # The server's hostname or IP address
DEF_PORT = 33333 # The port used by the server
DEF_OVERWRITE = False # Abort if file already exists in inbound dir.
DEF_ALLOWSUBS = False # Abort if inbound filename includes a subdir
DEF_MAXDEPTH = 3 # If allow sub dirs, max hierarchy depth permitted
DEF_VERBOSE = False # T/F for extra detail
DEF_LOGGING = False # Logs output to file
DEF_LOGFILE = "fbomb.log" # Default log file
DEF_HOME = "pub" # Default home directory for inbound files
DEF_ACCOUNTDB = "fb_accounts.db" # Default accounts database
DEF_AUTH = True # Require login?
# Global
FLAG_KILL = False
FLAG_LOGOPEN = False
logfilehandle = None
#Enums
#*************************************************************************
# The configuration class houses parameter and initialization data
# which configures the client.
# NOTE: Private variables must be prefixed with "_". ToDictionary() relies
# on this.
# Uses reflection to create a dictionary of public atributes
# Skips any methods or functions or internals.
#*************************************************************************
# Traps control C for a smooth exit
# Outputs a message to a log file.
# Strips the ANSI colour codes out of the string to stop log clutter.
# Caches file handle so that it is only opened once, and closed on exit.
# Applies a header to new logging session (captured to same logfile).
# Format of a log is:
# [time since program start] message
# Outputs a message for a serious error, and terminates program
# Use this for fatal errors only!
# "Pips up" to let you know something minor happened, doesn't impact
# program flow. This method is intended for non-fatal errors.
# Outputs a message to screen only if in verbose mode OR if show==true
# IE: note() -> only shown if verbose mode enabled
# Just outputs a message regardless of verboseness
# IE: notex() -> always shown
# Adds a user and password to database.
# Note that password is salted with username.
# The default action is "add", but "delete", "list" and "update" are
# also supported.
# KAS 210302 Fixed a password creation bug in add user
# Receives inbound instructions as a string
# Transmits outbound data as bytes
# Handles ASCII files an os dependent line terminators.
# Computes and compares hash values.
# Receives binary data
# OS indepedently strips path returning filename
# Extracts a user and password from string if present
# Returns boolean, string, string.
# NOTE: This method does not use a good pattern. It can
# be decomposed and refactored into a nicer approach.
# BUT it was implemented like this during agile development
# and works so was kept.
# TODO: Refactor / Decompose and clean
# Handles the protocol
# TODO: Convert from linear to state machine
# From:
# https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta/9532586
# There was a problem with ERROR_INVALID_FILE not being defined under NT (but fine in Linux) so
# this was replaced with the value 123 to ensure NT portability.
def isSafeFilename(pathname: str) -> bool:
    '''
    `True` if the passed pathname is a valid pathname for the current OS;
    `False` otherwise.

    Each path component is probed with os.lstat against a directory known to
    exist; OS-specific error codes distinguish "invalid name" from merely
    "does not exist". Adapted from the widely-cited Stack Overflow
    "is_pathname_valid" recipe.
    '''
    # Windows-specific winerror code for an invalid file name; hard-coded
    # because the named constant is not portable off Windows.
    ERROR_INVALID_NAME=123
    # If this pathname is either not a string or is but is empty, this pathname
    # is invalid.
    try:
        if not isinstance(pathname, str) or not pathname:
            return False
        # Strip this pathname's Windows-specific drive specifier (e.g., `C:\`)
        # if any. Since Windows prohibits path components from containing `:`
        # characters, failing to strip this `:`-suffixed prefix would
        # erroneously invalidate all valid absolute Windows pathnames.
        _, pathname = os.path.splitdrive(pathname)
        # Directory guaranteed to exist. If the current OS is Windows, this is
        # the drive to which Windows was installed (e.g., the "%HOMEDRIVE%"
        # environment variable); else, the typical root directory.
        root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
            if sys.platform == 'win32' else os.path.sep
        assert os.path.isdir(root_dirname)
        # Append a path separator to this directory if needed.
        root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
        # Test whether each path component split from this pathname is valid or
        # not, ignoring non-existent and non-readable path components.
        for pathname_part in pathname.split(os.path.sep):
            try:
                os.lstat(root_dirname + pathname_part)
            # If an OS-specific exception is raised, its error code
            # indicates whether this pathname is valid or not. Unless this
            # is the case, this exception implies an ignorable kernel or
            # filesystem complaint (e.g., path not found or inaccessible).
            # Only the following exceptions indicate invalid pathnames:
            # * Instances of the Windows-specific "WindowsError" class
            #   defining the "winerror" attribute whose value is
            #   "ERROR_INVALID_NAME". Under Windows, "winerror" is more
            #   fine-grained and hence useful than the generic "errno"
            #   attribute. When a too-long pathname is passed, for example,
            #   "errno" is "ENOENT" (i.e., no such file or directory) rather
            #   than "ENAMETOOLONG" (i.e., file name too long).
            # * Instances of the cross-platform "OSError" class defining the
            #   generic "errno" attribute whose value is either:
            #   * Under most POSIX-compatible OSes, "ENAMETOOLONG".
            #   * Under some edge-case OSes (e.g., SunOS, *BSD), "ERANGE".
            # NOTE: ERROR_INVALID_NAME=123 and the constant isn't portable.
            except OSError as exc:
                if hasattr(exc, 'winerror'):
                    if exc.winerror == ERROR_INVALID_NAME:
                        return False
                elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
                    return False
    # If a "TypeError" exception was raised, it almost certainly has the
    # error message "embedded NUL character" indicating an invalid pathname.
    except TypeError as exc:
        return False
    # If no exception was raised, all path components and hence this
    # pathname itself are valid.
    else:
        return True
# Shows the info, blurb, syntax and options screen.
# Parses options, switches and args. Does validation. Populates the config
# structure which contains the info need to do the file transfer.
# Main function sets up server socket, and handles connections until CTRL-C or error.
# 210302 Cross-platform portability issue detected. Socket.accept() is blocking by default.
# However, on linux it will accept a CTRL-C and abort. In Windows 10, it does not
# interrupt the blocking call, and instead buffers the CTRL-C to be processed after the
# accept() call returns! This makes it hard to exit the server on Windows. Python
# does allow you to set sockets to non-blocking, but I wanted blocking reads and writes.
# So the solution was to use select.select() (which is blocking as well, BUT can accept
# a timeout value).
# Run program
if __name__ == "__main__":
main()
| 36.201581 | 134 | 0.617571 | #!/usr/bin/python
#"fserve" by Karim Sultan, September 2020
# fsend is a server (receiver) for the FBomb Protocol
# It communicates with a client (fsend) to transfer a file.
# This protocol makes it easy to drop f-bombs across machines.
#
# This design handles a single connection at a time but accepts
# backlogs. It is meant for individual use, not to serve
# concurrent users.
#
# FBOMB Particulars:
# -> Connect to server
# Client Server
# HI <user:pwd>
# OK <SID> | NOK <Message>
# FSEND <META>
# OK <Proceed> | NOK <Message>
# <DATA>
# OK <Received> | NOK <Message>
# BYE <CID> [CLOSE]
# [CLOSE]
#
# Messages are request / response based.
# Format is:
# [OPCODE] [TOKEN] " | " (optional message separator) <message> (optional message)
# Except for data which is sent in serial chunks.
# See FBomb documentation for details.
# NOTE: Avoid using "print" when possible. Instead, use one of the
# following. They apply formatting, trap the verbose flag, and log data:
#
# pip(msg) for an important success message (green on green)
# pip(msg, alert=True) for important warning messages (red on red)
# note(msg) for loggable verbose mode only messages
# notex(msg) for loggable all times message
import re
import types
import errno
import datetime
import ntpath
import socket
import select
import signal
import os
import hashlib
import getopt
from gamzia.timer import Timer
from gamzia.colours import Colours as C
from gamzia.filedescriptor import *
from gamzia.accountmanager import AccountManager
import sys
argv=sys.argv
argc=len(argv)
# App Info Constants
APP_NAME = "fserve"
APP_VERSION = 1.0
APP_AUTHOR = "Karim Sultan"
APP_DATE = "September 2020"
APP_EMAIL = "karimsultan@hotmail.com"
APP_BLURB = "Server program for file transfer using FBOMB protocol.\n" \
"Non-threaded version; blocking, uses connection queueing."
APP_SYNTAX = "Syntax: fserve [options] <inbound directory>"
# Settings defaults
DEF_ENC = "utf-8" # Default text encoding type
DEF_HOST = "localhost" # The server's hostname or IP address
DEF_PORT = 33333 # The port used by the server
DEF_OVERWRITE = False # Abort if file already exists in inbound dir.
DEF_ALLOWSUBS = False # Abort if inbound filename includes a subdir
DEF_MAXDEPTH = 3 # If allow sub dirs, max hierarchy depth permitted
DEF_VERBOSE = False # T/F for extra detail
DEF_LOGGING = False # Logs output to file
DEF_LOGFILE = "fbomb.log" # Default log file
DEF_HOME = "pub" # Default home directory for inbound files
DEF_ACCOUNTDB = "fb_accounts.db" # Default accounts database
DEF_AUTH = True # Require login?
# Global
FLAG_KILL = False
FLAG_LOGOPEN = False
logfilehandle = None
#Enums
class ACTMGR_ACTIONS(Enum):
   """Account-management operations dispatched by manageAccount()."""
   ADD_USER = 1
   LIST_USERS = 2
   DELETE_USER = 3
   UPDATE_USER = 4
#*************************************************************************
# The configuration class houses parameter and initialization data
# which configures the client.
# NOTE: Private variables must be prefixed with "_". ToDictionary() relies
# on this.
class Config:
   """Runtime configuration for the fserve server.

   Public attributes hold user-configurable settings; names prefixed with
   "_" are internal state. toDictionary()/toString() rely on that prefix
   convention to hide private members unless showprivate=True.
   """
   def __init__(self):
      # These are the public properties
      self.host=DEF_HOST
      self.port=DEF_PORT
      self.home=DEF_HOME+os.path.sep
      self.isverbose=DEF_VERBOSE
      self.isoverwrite=DEF_OVERWRITE
      self.islogging=DEF_LOGGING
      self.logfile=DEF_LOGFILE
      self.sid=socket.gethostname()
      self.message=""
      # Private members
      self._auth=DEF_AUTH
      self._chunk=4096
      self._sock=None
      self._timer=None
      self._DEBUG=False
      self._accountmanager=AccountManager(DEF_ACCOUNTDB)
   # Uses reflection to create a dictionary of public atributes
   # Skips any methods or functions or internals.
   def toDictionary(self, showprivate=False):
      """Return {attribute: value} for this config via dir()-based reflection."""
      d={}
      s=dir(self)
      i=0
      # Prune dunders, privates (unless requested) and callables in place;
      # the index only advances when the current name is kept.
      while True:
         if s[i].startswith("__") and s[i].endswith("__"):
            # Attribute is an internal, remove
            s.pop(i)
         elif (s[i].startswith("_") and not showprivate):
            # Attribute is a private variable or method, remove
            s.pop(i)
         elif (isinstance(getattr(self, s[i]), types.MethodType) or
               "function" in str(type(getattr(self, s[i])))):
            # Attribute is a method/function, remove
            s.pop(i)
         else:
            # Attribute is a value attribute, continue
            i+=1
         if (i>=len(s)):
            break
      for key in s:
         d[key]=getattr(self, key)
      return (d)
   def toString(self, showprivate=False):
      """Return the configuration as newline-separated key=value text."""
      s=""
      d=self.toDictionary(showprivate)
      for key, value in d.items():
         s+=f"{key}={value}\n"
      return(s)
#*************************************************************************
# Traps control C for a smooth exit
def onSignal_kill(sig, frame):
   """SIGINT (CTRL-C) handler: announce shutdown, release resources, exit.

   Sets FLAG_KILL so other code paths can suppress further output, closes
   the listener socket (required to free the address/port) and the log file.
   Never returns.
   """
   global FLAG_KILL
   FLAG_KILL=True
   print ()
   print (f"{C.bdr}{C.cly}CTRL-C Detected!{C.off}")
   print (f"{C.cwh}Aborting communications and exiting...{C.off} done. ")
   # This is critical in order to kill the listener and free address/port.
   if not config._sock==None:
      config._sock.close()
   if FLAG_LOGOPEN:
      logfilehandle.close()
   print ()
   exit()
# Outputs a message to a log file.
# Strips the ANSI colour codes out of the string to stop log clutter.
# Caches file handle so that it is only opened once, and closed on exit.
# Applies a header to new logging session (captured to same logfile).
# Format of a log is:
# [time since program start] message
def log(message):
   """Append *message* to the log file with ANSI colour codes stripped.

   The file handle is opened once per run and cached in module globals; a
   session header is written on first open. Each entry is stamped with the
   elapsed seconds since logging started: [seconds] message
   """
   global FLAG_LOGOPEN, logfilehandle
   if not FLAG_LOGOPEN:
      logfilehandle=open(config.logfile, "at+")
      FLAG_LOGOPEN=True
      # Session timer is cached as a function attribute across calls.
      log.logtimer=Timer()
      log.logtimer.start()
      now=datetime.datetime.now()
      header=f"\n******************************************************************************\n" \
             f"FBOMB Log File for FSERVE {config.sid}@{config.host}:{config.port}\n" \
             f"On: {now:%Y-%m-%d %H:%M}\n" \
             f"******************************************************************************\n"
      logfilehandle.write(f"{header}\n")
   logmsg=f"[{log.logtimer.peek():.5f}] {C.cstrip(message)}"
   if (not logmsg.endswith("\n")):
      logmsg=logmsg+"\n"
   logfilehandle.write(f"{logmsg}")
   # Flush per entry so the log survives an abrupt exit.
   logfilehandle.flush()
# Outputs a message for a serious error, and terminates program
# Use this for fatal errors only!
def error(message):
   """Report a fatal error, log it, close the socket, and terminate.

   Use for fatal errors only: this function never returns (calls exit()).
   Output is suppressed when a CTRL-C shutdown is already in progress.
   """
   if (not FLAG_KILL):
      print(f"{C.clr}An error has occurred!");
      print(f"{C.clm}{message}{C.coff}{C.boff}")
      print(flush=True)
   try:
      if (config.islogging):
         log(message)
      if (config._sock):
         config._sock.close()
   except Exception as e:
      # Best effort: cleanup failures must not mask the original error.
      pass
   finally:
      exit()
# "Pips up" to let you know something minor happened, doesn't impact
# program flow. This method is intended for non-fatal errors.
def pip(message, isalert=False):
   """Print a short status 'pip': green-on-green for success, red-on-red
   (yellow text) for alerts.

   Intended for non-fatal notices; the message is also mirrored to the log
   when logging is enabled, and logging failures are deliberately ignored.
   """
   palette = f"{C.cly}{C.bdr}" if isalert else f"{C.clg}{C.bdg}"
   print(f"{palette}{message}{C.off}")
   try:
      if config.islogging:
         log(message)
   except Exception:
      pass
# Outputs a message to screen only if in verbose mode OR if show==true
# IE: note() -> only shown if verbose mode enabled
def note(message, show=False):
   """Print an informational message in verbose mode (or when show=True).

   The message is always mirrored to the log file when logging is enabled,
   regardless of verbosity; logging failures are deliberately ignored.
   """
   visible = show == True or config.isverbose
   if visible:
      print(f"{C.clc}{C.boff}{message}{C.off}")
   try:
      if config.islogging:
         log(message)
   except Exception:
      pass
# Just outputs a message regardless of verboseness
# IE: notex() -> always shown
def notex(message):
   """Unconditionally print and log *message* (note() with show forced on)."""
   note(message, show=True)
# Adds a user and password to database.
# Note that password is salted with username.
# The default action is "add", but "delete", "list" and "update" are
# also supported.
# KAS 210302 Fixed a password creation bug in add user
def manageAccount(arg="null", action=ACTMGR_ACTIONS.ADD_USER):
   """Perform one account-management action and report the outcome.

   :param arg: "user:password" for ADD_USER/UPDATE_USER, a username for
       DELETE_USER, ignored for LIST_USERS
   :param action: one of ACTMGR_ACTIONS
   Invalid input routes through error(), which terminates the program.
   """
   # Validate and sanitize
   mgr=config._accountmanager
   if (action==ACTMGR_ACTIONS.ADD_USER):
      valid, user, password=extractUP(arg)
      if (not valid):
         error(f"Wrong format '{arg}'. To add a user account with a password, use form \"user:password\"")
      passwordHash=AccountManager.saltPassword(user, password)
      # Bug fix: the failure message referenced an undefined name
      # ('paswordHash'), which raised NameError instead of reporting.
      # Note: addUser receives the plain password and salts/hashes it
      # internally (see "KAS 210302" fix note above).
      if (not mgr.addUser(user, password)):
         error(f"Failed to add {user} with password {passwordHash}.")
      pip(f"New user {user} successfully added.")
   elif (action==ACTMGR_ACTIONS.LIST_USERS):
      data=mgr.listUsers()
      notex(f"{C.cwh}{'ID':<3}{'User':<12}" \
            f"{'Salted Password Hash (SHA256)':<65}{'Created':<10}")
      for record in data:
         notex(f"{C.clgy}{record[0]:<3}{record[1]:<12}{record[2]:<65}"\
               f"{record[3][0:10]:<10}")
   elif (action==ACTMGR_ACTIONS.DELETE_USER):
      if (arg=="null" or arg==""):
         error(f"Malformed user '{arg}'. Please specify valid user. " \
               f"Use --list to see all users.")
      if (mgr.deleteUser(arg)):
         pip(f"Successfully removed user {arg}.")
      else:
         error(f"User {arg} could not be deleted. May not exist. "\
               f"Use --list to see all users.")
   elif (action==ACTMGR_ACTIONS.UPDATE_USER):
      valid, user, password=extractUP(arg)
      if (not valid):
         error(f"Wrong format '{arg}'. To update a password, use form \"user:password\"")
      # updatePassword applies the salt and creates hash for us
      if (not mgr.updatePassword(user, password)):
         error(f"Failed to update password for user {user}.")
      pip(f"Password for {user} successfully modified.")
   return
# Receives inbound instructions as a string
def getRequest(con):
    """Read one inbound protocol instruction from *con* and return it as a string."""
    msg = con.recv(1024).decode(DEF_ENC)
    note(f"{C.cly}[{config._timer.peek():.5f} RECV] {C.cwh}{msg.rstrip()}")
    return msg
# Transmits outbound data as bytes
def sendResponse(con, msg):
    """Encode *msg* with DEF_ENC and transmit it in full over *con*."""
    con.sendall(msg.encode(DEF_ENC))
    note(f"{C.cly}[{config._timer.peek():.5f} SENT] {C.clgy}{msg.rstrip()}{C.off}")
# Handles ASCII files an os dependent line terminators.
def receiveASCII(con, fd):
    """Receive an ASCII-mode file described by *fd* over socket *con*.

    Decoded text is written to config.home + fd.filename with a progress
    indicator until fd.length bytes arrive or the peer closes.
    """
    notex (f"{C.cwh}Receving ASCII file: {C.clgy}{config.home+fd.filename}")
    nbytes=0
    # 'with' guarantees the handle is closed even if recv()/decode raises
    # (previously the handle leaked on error paths).
    with open(config.home+fd.filename, "w+") as file:
        while True:
            data=con.recv(config._chunk)
            if not data:
                break
            # NOTE(review): per-chunk decoding assumes DEF_ENC sequences
            # never straddle a chunk boundary -- confirm DEF_ENC.
            file.write(data.decode(DEF_ENC))
            nbytes+=len(data)
            progress=int((nbytes/fd.length)*100)
            # flush=True (was the truthy *string* "True") pushes the
            # carriage-return progress line out immediately.
            print(f"{C.cly}Progress: {C.cwh}{progress:4}%", end='\r', flush=True)
            if (nbytes==fd.length):
                break
    # Terminate the progress line, matching receiveBinary's behaviour.
    print()
    note (f"{C.clc}Received {C.clr}ASCII{C.clc} file " \
          f"{C.cwh}{config.home+fd.filename} " \
          f"{C.cwh}({C.clgy}{nbytes:,} {C.cwh}bytes) {C.clc}in " \
          f"{C.clg}{config._timer.peek():.5f} {C.clc}seconds.")
    return
# Computes and compares hash values.
def isValidHash(fd):
    """Return True when the stored file's digest equals fd.hash."""
    hasher = hashlib.new(fd.hashtype.lower())
    with open(config.home + fd.filename, "rb") as infile:
        hasher.update(infile.read())
    return hasher.hexdigest() == fd.hash
# Receives binary data
def receiveBinary(con, fd):
    """Receive a binary-mode file described by *fd* over socket *con*.

    Raw bytes are written to config.home + fd.filename with a progress
    indicator; afterwards the file hash is checked against fd.hash.
    """
    nbytes=0
    notex (f"{C.cwh}Receving binary file: {C.clgy}{config.home+fd.filename}")
    # 'with' guarantees the handle is closed even if recv() raises
    # (previously the handle leaked on error paths).
    with open(config.home+fd.filename, "wb+") as file:
        while True:
            data=con.recv(config._chunk)
            if not data:
                break
            nbytes+=len(data)
            file.write(data)
            progress=int((nbytes/fd.length)*100)
            # flush=True (was the truthy *string* "True") pushes the
            # carriage-return progress line out immediately.
            print(f"{C.cly}Progress: {C.cwh}{progress:4}%", end='\r', flush=True)
            if (nbytes==fd.length):
                break
    print()
    note (f"{C.clc}Received {C.clr}BINARY{C.clc} file {C.cwh}{config.home+fd.filename} " \
          f"{C.cwh}({C.clgy}{nbytes:,} {C.cwh}bytes) {C.clc}in " \
          f"{C.clg}{config._timer.peek():.5f} {C.clc}seconds.")
    # Hash the received file once (the original called isValidHash twice
    # on success, re-reading the whole file for the log message).
    valid = isValidHash(fd)
    if valid:
        pip (f"Hash is valid: {valid}")
    else:
        pip (f"Warning! File hash codes do NOT match!", isalert=True)
# OS indepedently strips path returning filename
def strippath(path):
    """Return the final component of *path*, handling both / and \\ separators."""
    leaf = ntpath.basename(path)
    if leaf:
        return leaf
    # Path ended in a separator; fall back to the last directory name.
    return ntpath.basename(ntpath.dirname(path))
# Extracts a user and password from string if present
# Returns boolean, string, string.
# NOTE: This method does not use a good pattern. It can
# be decomposed and refactored into a nicer approach.
# BUT it was implemented like this during agile development
# and works so was kept.
# TODO: Refactor / Decompose and clean
def extractUP(s):
    """Parse "user:password" (optionally prefixed by the protocol keyword
    "HI") from *s*.

    Returns (valid, user, password); valid is False when no ':' is present
    or either field is empty.
    """
    # Init
    user=""
    password=""
    # Validate
    if (not ":" in s):
        return (False, user, password)
    # Strip only a *leading* protocol keyword "HI". The previous
    # re.sub("(?i)hi", "", s) removed EVERY occurrence of "hi", which
    # corrupted credentials such as "chip" or "hilary".
    s=re.sub(r"(?i)^hi\b", "", s).strip()
    if (s.startswith(":") or s.endswith(":")):
        return (False, user, password)
    # Split on the first ':' only, so passwords may contain colons.
    user, _, password = s.partition(":")
    if (not user or not password):
        return (False, user, password)
    return (True, user, password)
# Handles the protocol
# TODO: Convert from linear to state machine
def stateMachine(con, addr):
    """Run the FBOMB protocol over an accepted connection.

    Sequence: HI (with optional user:pwd when config._auth) -> OK/NOK ->
    file descriptor -> OK/NOK -> file payload -> OK -> BYE. The socket is
    always closed before returning. *addr* is the (host, port) peer tuple.
    """
    # Start FBOMB protocol communication
    result=False
    request=getRequest(con)
    if (request.upper().startswith("HI ")):
        if (config._auth):
            # Authentication mode. Retrieve user+pwd (user:pwd)
            valid,user,password=extractUP(request)
            if valid:
                pip(f"Login request from {user}:{password}")
                result=config._accountmanager.verifyPassword(user, password)
                if (result):
                    pip(f"Login approved for {user}@{addr[0]}:{addr[1]} " \
                        f"on {str(datetime.datetime.now())[0:-10]}")
                else:
                    pip(f"Login denied for {user}@{addr[0]}:{addr[1]} " \
                        f"on {str(datetime.datetime.now())[0:-10]}", isalert=True)
            else:
                pip(f"Malformed user:pwd provided: {user}:{password}",
                    isalert=True)
                result=False
            # Failed login: reject and drop the connection immediately.
            if (not result):
                response = f"NOK Bad user:pwd"
                sendResponse(con, response)
                con.close()
                return
        # No authentication or was authenticated
        response = f"OK {config.sid}"
        if (not config.message==''):
            response+=f" | {config.message}"
        sendResponse(con, response)
        request=getRequest(con)
        note ("Inbound File Descriptor info:")
        fd = FileDescriptor.deserialize (request)
        # Keep only the leaf filename so clients cannot escape config.home.
        fd.filename=strippath(fd.filename)
        note (f"{C.clgy}{fd.toString()}")
        if (os.path.exists(config.home+fd.filename) and not config.isoverwrite):
            reason = "File already exists; overwrite is turned off."
            sendResponse(con, f"NOK | {reason}")
            error(reason)
        elif (not isSafeFilename(config.home+fd.filename)):
            reason = "Filename is not a valid name on server OS."
            sendResponse(con, f"NOK | {reason}")
            error(reason)
        else:
            sendResponse(con, "OK Proceed")
            if (fd.filemode==FILEMODE.ASCII):
                receiveASCII(con, fd)
            else:
                receiveBinary(con, fd)
            sendResponse(con, "OK Received | Issue BYE to terminate")
            msg=getRequest(con)
    con.close()
# From:
# https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta/9532586
# There was a problem with ERROR_INVALID_FILE not being defined under NT (but fine in Linux) so
# this was replaced with the value 123 to ensure NT portability.
def isSafeFilename(pathname: str) -> bool:
    '''
    `True` if the passed pathname is a valid pathname for the current OS;
    `False` otherwise.

    Validity is probed by os.lstat'ing each path component against a
    directory known to exist, and classifying any resulting OSError.
    '''
    ERROR_INVALID_NAME=123
    # If this pathname is either not a string or is but is empty, this pathname
    # is invalid.
    try:
        if not isinstance(pathname, str) or not pathname:
            return False
        # Strip this pathname's Windows-specific drive specifier (e.g., `C:\`)
        # if any. Since Windows prohibits path components from containing `:`
        # characters, failing to strip this `:`-suffixed prefix would
        # erroneously invalidate all valid absolute Windows pathnames.
        _, pathname = os.path.splitdrive(pathname)
        # Directory guaranteed to exist. If the current OS is Windows, this is
        # the drive to which Windows was installed (e.g., the "%HOMEDRIVE%"
        # environment variable); else, the typical root directory.
        root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
            if sys.platform == 'win32' else os.path.sep
        assert os.path.isdir(root_dirname)
        # Append a path separator to this directory if needed.
        root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
        # Test whether each path component split from this pathname is valid or
        # not, ignoring non-existent and non-readable path components.
        for pathname_part in pathname.split(os.path.sep):
            try:
                os.lstat(root_dirname + pathname_part)
            # If an OS-specific exception is raised, its error code
            # indicates whether this pathname is valid or not. Unless this
            # is the case, this exception implies an ignorable kernel or
            # filesystem complaint (e.g., path not found or inaccessible).
            # Only the following exceptions indicate invalid pathnames:
            # * Instances of the Windows-specific "WindowsError" class
            #   defining the "winerror" attribute whose value is
            #   "ERROR_INVALID_NAME". Under Windows, "winerror" is more
            #   fine-grained and hence useful than the generic "errno"
            #   attribute. When a too-long pathname is passed, for example,
            #   "errno" is "ENOENT" (i.e., no such file or directory) rather
            #   than "ENAMETOOLONG" (i.e., file name too long).
            # * Instances of the cross-platform "OSError" class defining the
            #   generic "errno" attribute whose value is either:
            #   * Under most POSIX-compatible OSes, "ENAMETOOLONG".
            #   * Under some edge-case OSes (e.g., SunOS, *BSD), "ERANGE".
            # NOTE: ERROR_INVALID_NAME=123 and the constant isn't portable.
            except OSError as exc:
                if hasattr(exc, 'winerror'):
                    if exc.winerror == ERROR_INVALID_NAME:
                        return False
                elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
                    return False
    # If a "TypeError" exception was raised, it almost certainly has the
    # error message "embedded NUL character" indicating an invalid pathname.
    except TypeError as exc:
        return False
    # If no exception was raised, all path components and hence this
    # pathname itself are valid.
    else:
        return True
# Shows the info, blurb, syntax and options screen.
def showHelp():
    """Print the blurb, syntax, argument and switch reference, then exit()."""
    #This header line is now always printed at start of program.
    #print (f"{C.cly}{C.bdb}{APP_NAME}{C.boff} v{APP_VERSION} by {APP_AUTHOR} {APP_DATE} {APP_EMAIL}")
    print (f"{APP_BLURB}")
    print ()
    print (f"{C.clg}{APP_SYNTAX}");
    print ()
    print (f"{C.clc}Arguments")
    print (f"{C.clg}-a, --adduser: {C.clc}Adds user account [{C.clm}user:password{C.clc}]")
    print (f"{C.clg}-d, --deleteuser: {C.clc}Deletes user account [{C.clm}user{C.clc}]")
    print (f"{C.clg}-u, --updatepassword: {C.clc}Change user password [{C.clm}user:password{C.clc}]")
    print (f"{C.clg}-s, --sid: {C.clc}Set server ID ({C.clm}default{C.clc} is {C.cwh}{socket.gethostname()}{C.clc})")
    print (f"{C.clg}-m, --message: {C.clc}Sets optional message to be sent to client on connection")
    print (f"{C.clg}-h, --host: {C.clc}Address to bind to ({C.clm}default{C.clc} is {C.cwh}localhost{C.clc})")
    print (f"{C.clg}-p, --port: {C.clc}Port to listen on ({C.clm}default{C.clc} is {C.cwh}33333)")
    print (f"{C.clg}-l, --log: {C.clc}Enables logging and specifies log file; use -l \"\" for default")
    print (f"               ({C.clm}default{C.clc} log file is {C.cwh}{DEF_LOGFILE})")
    print (f"{C.clgy}Parameter values can be after '=' or a space (ie, -p 10000 or -p=10000)")
    print ()
    print (f"{C.clc}Switches")
    print (f"{C.clg}-?, --help: {C.clc}This help screen")
    print (f"{C.clg}-x, --noauth: {C.clc}Turns user authentication off. Use with caution.")
    print (f"{C.clg}-t, --list: {C.clc}List all users and exit")
    print (f"{C.clg}-v, --verbose: {C.clc}Enable verbose mode")
    print (f"{C.clg}-o, --overwrite: {C.clc}Enables overwriting existing files")
    print (f"{C.clg}--version: {C.clc}Reports program version number and exits")
    print (f"{C.coff}")
    exit()
# Parses options, switches and args. Does validation. Populates the config
# structure which contains the info need to do the file transfer.
def parseCommandLine():
    """Parse command-line options and switches into a Config instance.

    Informational and account-management options (--help, --version,
    --adduser, --list, ...) perform their action and exit() immediately.
    The first non-empty positional argument, if any, becomes the inbound
    home directory (a trailing path separator is appended when missing).

    Returns:
        Config: populated configuration object.
    """
    if argc<2:
        showHelp()
    # We store retrieved arguments in a config structure
    config=Config()
    # Single switch options are listed with a ":" suffix only if they expect a value.
    # Extended options (--) must have a "=" suffix if value is expected
    try:
        opts, args =getopt.getopt(argv[1:],
            "?SvDotxa:d:h:p:m:s:l:u:",
            ["help","version","verbose", "overwrite", "DEBUG", "list",
             "deleteuser=", "adduser=", "host=","port=", "message=",
             "sid=", "log=", "noauth", "updatepassword=", "update="])
    except getopt.GetoptError as e:
        error(f"Arguments error: {e.msg} {e.opt}")
        showHelp()
    # Process
    for opt, arg in opts:
        #This line is useful for option debugging:
        #print(f"OPT:{opt} ARG:{arg}")
        if (opt in ("-?", "--help")):
            showHelp()
        # This option check must come before version check
        # as "-v" is in version, and I'm sticking to the "in" patern
        # (it makes expansion easy)
        elif (opt in("-v", "--verbose")):
            config.isverbose=True
        # If file already exists, allows replacement
        elif (opt in("-o", "--overwrite")):
            config.isoverwrite=True
        # Handle logging as either a switch or an argument
        elif (opt in("-l", "--log")):
            config.islogging=True
            if (not arg==""):
                if (isSafeFilename(arg)):
                    config.logfile=arg
            else:
                config.logfile=DEF_LOGFILE
        # Debugging flag
        elif (opt in ("-D", "--DEBUG")):
            config._DEBUG=True
        # Turn on/off authentication (login)
        elif (opt in ("-x", "--noauth")):
            config._auth=False
        # Show version and then exit immediately
        # BUGFIX: ("--version") is a plain string, so "in" performed a
        # substring test; the one-element tuple restores membership.
        elif (opt in ("--version",)):
            print(f"{C.clc}Version: {C.clg}{APP_VERSION}{C.off}")
            exit()
        # Add a user then exit
        elif (opt in ("-a", "--adduser")):
            manageAccount(arg, ACTMGR_ACTIONS.ADD_USER)
            exit()
        elif (opt in ("-t", "--list")):
            manageAccount(action=ACTMGR_ACTIONS.LIST_USERS)
            exit()
        elif (opt in ("-d", "--deleteuser")):
            manageAccount(arg, action=ACTMGR_ACTIONS.DELETE_USER)
            exit()
        elif (opt in ("-u", "--update", "--updatepassword")):
            manageAccount(arg, action=ACTMGR_ACTIONS.UPDATE_USER)
            exit()
        # Optional message for response to "HI" request
        elif (opt in ("-m", "--message")):
            config.message=arg.strip()
        # Sets server host. Can be a resolvable name, or IP address
        elif (opt in ("-h", "--host")):
            config.host=arg
        # Sets server port to connect to.
        elif (opt in ("-p", "--port")):
            config.port=int(arg)
        # Sets the client ID; default is hostname
        elif (opt in ("-s", "--sid")):
            config.sid=arg.strip()
        # Greetings are always welcome
        # BUGFIX: same string-vs-tuple membership issue as --version.
        elif (opt in ("-S",)):
            pip(f"{C.bdr}{C.cly}Sultaneous sends salutations.{C.off}")
            exit()
    # The first argument should be the home directory;
    # decorate with OS specific path separator if necessary
    for arg in args:
        if not arg=="":
            config.home=arg
            if not config.home.endswith(os.path.sep):
                config.home+=os.path.sep
            break
    return(config)
# Main function sets up server socket, and handles connections until CTRL-C or error.
# 210302 Cross-platform portability issue detected. Socket.accept() is blocking by default.
# However, on linux it will accept a CTRL-C and abort. In Windows 10, it does not
# interrupt the blocking call, and instead buffers the CTRL-C to be processed after the
# accept() call returns! This makes it hard to exit the server on Windows. Python
# does allow you to set sockets to non-blocking, but I wanted blocking reads and writes.
# So the solution was to use select.select() (which is blocking as well, BUT can accept
# a timeout value).
def main():
    """Entry point: parse the command line, bind the listening socket, and
    serve connections until CTRL-C (handled by onSignal_kill) or a fatal
    error reported via error().

    select.select() with a 1-second timeout is used instead of a blocking
    accept() so Windows can deliver CTRL-C (see the note above).
    """
    # Register signal handler
    signal.signal(signal.SIGINT, onSignal_kill)
    global config
    config=Config()
    print (f"{C.cly}{C.bdb}{APP_NAME}{C.boff} v{APP_VERSION} by {APP_AUTHOR} {APP_DATE} {APP_EMAIL}{C.off}")
    config=parseCommandLine()
    if (config._DEBUG):
        notex (config.toString(showprivate=True))
        exit()
    # Inbound directory must exist
    if not os.path.isdir(config.home):
        error("Invalid home (inbound) directory: "+config.home)
    if (config.isverbose):
        note(f"{C.clg}{C.bdg}Verbose Mode{C.boff}{C.clc} is on. Outputting details.")
    if (config.isoverwrite):
        note(f"{C.clg}{C.bdg}Overwrite Mode{C.boff}{C.clc} is on. Existing files can be replaced.")
    if (config.islogging):
        note(f"{C.clg}{C.bdg}Logging Mode{C.boff}{C.clc} is on. Logging to {C.clg}{config.logfile}{C.clc}.")
    note(f"{C.clc}Outputting client configuration:")
    note(f"{C.clgy}{config.toString()}{C.coff}")
    try:
        sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        config._sock=sock
        note(f"Attempting to bind to {config.host}:{config.port}")
        sock.bind((config.host, config.port))
        notex (f"{C.clc}Listening on {C.cly}{config.host}:{C.clg}{config.port}")
        notex (f"{C.clr}Press {C.bdr}CTRL-C{C.boff} to quit...")
        uptime=Timer()
        uptime.start()
    except socket.error as e:
        error(f"Network error occurred: {e}")
    # Setup for select loop
    sock.listen()
    inputs=[sock]
    dummy=[]
    connections=0
    while True:
        try:
            # select guarantees a connection is available; a 1 second timeout lets Windows
            # process the CTRL-C without being blocked
            readable, writable, exceptionable = select.select(inputs, dummy, dummy, 1)
            for s in readable:
                con,addr = s.accept()
                config._timer=Timer()
                config._timer.start()
                connections+=1
                notex(f"{C.clg}{C.bdg}Connection from:{C.boff} {addr}")
                stateMachine(con,addr)
                config._timer.stop()
                pip (f"Listening... Handled {connections} connections so far.")
                # Flush any pending logs to the file
                if FLAG_LOGOPEN:
                    logfilehandle.flush()
        except socket.error as e:
            error(f"Network error occurred: {e}")
    # NOTE(review): unreachable under normal operation (while True only
    # exits via the signal handler); kept for completeness.
    sock.close()
    uptime.stop()
    print (f"{C.off}Server shutdown complete. Uptime: {uptime.elapsed():.5f} seconds")
# Run program
# Script entry point: run the server when executed directly.
if __name__ == "__main__":
    main()
| 17,326 | 74 | 516 |
775653550cc66599795db34a5459f32d60012a36 | 3,161 | py | Python | scripts/insertfile.py | hakuya/higu | 42effd9b1a5d55bfbbc20d857b070e7b703154b3 | [
"BSD-2-Clause"
] | null | null | null | scripts/insertfile.py | hakuya/higu | 42effd9b1a5d55bfbbc20d857b070e7b703154b3 | [
"BSD-2-Clause"
] | 2 | 2015-11-09T01:23:30.000Z | 2017-02-06T06:36:41.000Z | scripts/insertfile.py | hakuya/higu | 42effd9b1a5d55bfbbc20d857b070e7b703154b3 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
import sys
import os
import logging
log = logging.getLogger( __name__ )
logging.basicConfig()
import hdbfs
import higu.config
MAX_TEXT_LEN = 2**18
# Command-line driver: parse options and batch-add the given files to the
# hdbfs library. (Python 2 script -- note the use of unicode().)
# Indentation restored: the pasted copy had all leading whitespace
# stripped, which made this block a syntax error.
if( __name__ == '__main__' ):
    import optparse
    parser = optparse.OptionParser( usage = 'Usage: %prog [options] files...' )
    parser.add_option( '-c', '--config',
            dest = 'config',
            help = 'Configuration File' )
    parser.add_option( '-p', '--pretend',
            dest = 'pretend', action = 'store_true', default = False,
            help = 'Pretend, don\'t actually do anything' )
    parser.add_option( '-r', '--recovery',
            dest = 'recovery', action = 'store_true', default = False,
            help = 'Recovery mode' )
    parser.add_option( '-a', '--album',
            dest = 'album',
            help = 'Create album and add files to album' )
    parser.add_option( '-x', '--text',
            dest = 'text_data',
            help = 'Add text description to album (txt file)' )
    parser.add_option( '-t', '--tags',
            dest = 'taglist',
            help = 'List of tags (\',\' separated) to apply' )
    parser.add_option( '-T', '--newtags',
            dest = 'taglist_new',
            help = 'Same as -t, but creates tags if they don\'t exist' )
    parser.add_option( '-n', '--name-policy',
            dest = 'name_policy',
            help = 'Policy for persisting names ("noreg", "noset", "setundef", "setall")' )
    opts, files = parser.parse_args()
    if( len( files ) < 1 ):
        parser.print_help()
        sys.exit( 0 )
    # Initialise the library from an explicit config, or with defaults.
    if( opts.config is not None ):
        cfg = higu.config.init( opts.config )
        hdbfs.init( cfg.get_path( 'library' ) )
    else:
        hdbfs.init()
    h = hdbfs.Database()
    h.enable_write_access()
    if( opts.recovery ):
        h.recover_files( files )
        sys.exit( 0 )
    tags = opts.taglist.split( ',' ) if( opts.taglist is not None ) else []
    tags_new = opts.taglist_new.split( ',' ) if( opts.taglist_new is not None ) else []
    create_album = opts.album is not None
    album_name = opts.album if( opts.album != '-' ) else None
    # Optional album description, capped at MAX_TEXT_LEN bytes.
    if( create_album and opts.text_data is not None ):
        textfile = open( opts.text_data, 'r' )
        text_data = unicode( textfile.read( MAX_TEXT_LEN ), 'utf-8' )
        assert textfile.read( 1 ) == '', 'Text file too long'
    else:
        text_data = None
    name_policy = hdbfs.NAME_POLICY_SET_IF_UNDEF
    if( opts.name_policy == "noreg" ):
        name_policy = hdbfs.NAME_POLICY_DONT_REGISTER
    elif( opts.name_policy == "noset" ):
        name_policy = hdbfs.NAME_POLICY_DONT_SET
    elif( opts.name_policy == "setundef" ):
        name_policy = hdbfs.NAME_POLICY_SET_IF_UNDEF
    elif( opts.name_policy == "setall" ):
        name_policy = hdbfs.NAME_POLICY_SET_ALWAYS
    h.batch_add_files( files, tags, tags_new, name_policy,
                       create_album, album_name, text_data )
| 29.542056 | 87 | 0.597912 | #!/usr/bin/python
import sys
import os
import logging
log = logging.getLogger( __name__ )
logging.basicConfig()
import hdbfs
import higu.config
MAX_TEXT_LEN = 2**18
def create_album( name, text, tags ):
    """Create a new album in the module-global hdbfs database *h*, applying
    an optional name, optional text description, and a list of tags.

    NOTE(review): the __main__ block below rebinds the name create_album
    to a boolean, shadowing this function -- confirm whether it is still
    meant to be called.
    """
    album = h.create_album()
    if( name is not None ):
        album.add_name( name )
    if( text is not None ):
        album.set_text( text )
    for t in tags:
        album.assign( t )
    return album
# Command-line driver: parse options and batch-add the given files to the
# hdbfs library. (Python 2 script -- note the use of unicode().)
# Indentation restored: the pasted copy had all leading whitespace
# stripped, which made this block a syntax error.
if( __name__ == '__main__' ):
    import optparse
    parser = optparse.OptionParser( usage = 'Usage: %prog [options] files...' )
    parser.add_option( '-c', '--config',
            dest = 'config',
            help = 'Configuration File' )
    parser.add_option( '-p', '--pretend',
            dest = 'pretend', action = 'store_true', default = False,
            help = 'Pretend, don\'t actually do anything' )
    parser.add_option( '-r', '--recovery',
            dest = 'recovery', action = 'store_true', default = False,
            help = 'Recovery mode' )
    parser.add_option( '-a', '--album',
            dest = 'album',
            help = 'Create album and add files to album' )
    parser.add_option( '-x', '--text',
            dest = 'text_data',
            help = 'Add text description to album (txt file)' )
    parser.add_option( '-t', '--tags',
            dest = 'taglist',
            help = 'List of tags (\',\' separated) to apply' )
    parser.add_option( '-T', '--newtags',
            dest = 'taglist_new',
            help = 'Same as -t, but creates tags if they don\'t exist' )
    parser.add_option( '-n', '--name-policy',
            dest = 'name_policy',
            help = 'Policy for persisting names ("noreg", "noset", "setundef", "setall")' )
    opts, files = parser.parse_args()
    if( len( files ) < 1 ):
        parser.print_help()
        sys.exit( 0 )
    # Initialise the library from an explicit config, or with defaults.
    if( opts.config is not None ):
        cfg = higu.config.init( opts.config )
        hdbfs.init( cfg.get_path( 'library' ) )
    else:
        hdbfs.init()
    h = hdbfs.Database()
    h.enable_write_access()
    if( opts.recovery ):
        h.recover_files( files )
        sys.exit( 0 )
    tags = opts.taglist.split( ',' ) if( opts.taglist is not None ) else []
    tags_new = opts.taglist_new.split( ',' ) if( opts.taglist_new is not None ) else []
    # NOTE(review): this rebinds create_album to a boolean, shadowing the
    # create_album() helper defined above.
    create_album = opts.album is not None
    album_name = opts.album if( opts.album != '-' ) else None
    # Optional album description, capped at MAX_TEXT_LEN bytes.
    if( create_album and opts.text_data is not None ):
        textfile = open( opts.text_data, 'r' )
        text_data = unicode( textfile.read( MAX_TEXT_LEN ), 'utf-8' )
        assert textfile.read( 1 ) == '', 'Text file too long'
    else:
        text_data = None
    name_policy = hdbfs.NAME_POLICY_SET_IF_UNDEF
    if( opts.name_policy == "noreg" ):
        name_policy = hdbfs.NAME_POLICY_DONT_REGISTER
    elif( opts.name_policy == "noset" ):
        name_policy = hdbfs.NAME_POLICY_DONT_SET
    elif( opts.name_policy == "setundef" ):
        name_policy = hdbfs.NAME_POLICY_SET_IF_UNDEF
    elif( opts.name_policy == "setall" ):
        name_policy = hdbfs.NAME_POLICY_SET_ALWAYS
    h.batch_add_files( files, tags, tags_new, name_policy,
                       create_album, album_name, text_data )
| 230 | 0 | 23 |
09d48716c35ce56a4cefc285681818b94df72bef | 18,035 | py | Python | tests/test_alias.py | cameron/datahog | 815178ae576bc4b4e1994ca9fcdc0c1f854bfccf | [
"BSD-3-Clause"
] | 4 | 2015-09-09T23:05:39.000Z | 2016-10-20T15:24:58.000Z | tests/test_alias.py | cameron/datahog | 815178ae576bc4b4e1994ca9fcdc0c1f854bfccf | [
"BSD-3-Clause"
] | null | null | null | tests/test_alias.py | cameron/datahog | 815178ae576bc4b4e1994ca9fcdc0c1f854bfccf | [
"BSD-3-Clause"
] | null | null | null | # vim: fileencoding=utf8:et:sw=4:ts=8:sts=4
import hashlib
import hmac
import os
import sys
import unittest
import datahog
from datahog import error
import psycopg2
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import base
from pgmock import *
# Run the test suite when executed directly.
# Indentation restored: the pasted copy had the body's leading
# whitespace stripped, which made this a syntax error.
if __name__ == '__main__':
    unittest.main()
| 24.175603 | 79 | 0.522318 | # vim: fileencoding=utf8:et:sw=4:ts=8:sts=4
import hashlib
import hmac
import os
import sys
import unittest
import datahog
from datahog import error
import psycopg2
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import base
from pgmock import *
class AliasTests(base.TestCase):
def setUp(self):
super(AliasTests, self).setUp()
datahog.set_context(1, datahog.NODE)
datahog.set_context(2, datahog.ALIAS, {'base_ctx': 1})
def test_set(self):
add_fetch_result([])
add_fetch_result([None])
self.assertEqual(
datahog.alias.set(self.p, 123, 2, 'value'),
True)
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with selectquery (base_id) as (
select base_id
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
),
insertquery as (
insert into alias_lookup (hash, ctx, base_id, flags)
select %s, %s, %s, %s
where not exists (select 1 from selectquery)
)
select base_id
from selectquery
""", (h, 2, h, 2, 123, 0)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
insert into alias (base_id, ctx, value, pos, flags)
select %s, %s, %s, coalesce((
select pos + 1
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), 1), %s
where exists (
select 1 from node
where
time_removed is null
and id=%s
and ctx=%s
)
""", (123, 2, 'value', 123, 2, 0, 123, 1)),
ROWCOUNT,
COMMIT,
TPC_COMMIT])
def test_set_failure_already_exists(self):
add_fetch_result([(123,)])
self.assertEqual(
datahog.alias.set(self.p, 123, 2, 'value'),
False)
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with selectquery (base_id) as (
select base_id
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
),
insertquery as (
insert into alias_lookup (hash, ctx, base_id, flags)
select %s, %s, %s, %s
where not exists (select 1 from selectquery)
)
select base_id
from selectquery
""", (h, 2, h, 2, 123, 0)),
ROWCOUNT,
FETCH_ONE,
TPC_ROLLBACK])
def test_set_failure_claimed(self):
add_fetch_result([(124,)])
self.assertRaises(error.AliasInUse,
datahog.alias.set, self.p, 123, 2, 'value')
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with selectquery (base_id) as (
select base_id
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
),
insertquery as (
insert into alias_lookup (hash, ctx, base_id, flags)
select %s, %s, %s, %s
where not exists (select 1 from selectquery)
)
select base_id
from selectquery
""", (h, 2, h, 2, 123, 0)),
ROWCOUNT,
FETCH_ONE,
TPC_ROLLBACK])
def test_set_race_condition_fallback(self):
@query_fail
def qf():
query_fail(None)
return psycopg2.IntegrityError()
add_fetch_result([(123, 0)])
self.assertEqual(
datahog.alias.set(self.p, 123, 2, 'value'),
False)
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE_FAILURE("""
with selectquery (base_id) as (
select base_id
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
),
insertquery as (
insert into alias_lookup (hash, ctx, base_id, flags)
select %s, %s, %s, %s
where not exists (select 1 from selectquery)
)
select base_id
from selectquery
""", (h, 2, h, 2, 123, 0)),
TPC_ROLLBACK,
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
ROLLBACK])
def test_lookup(self):
add_fetch_result([(123, 0)])
self.assertEqual(
datahog.alias.lookup(self.p, 'value', 2),
{'base_id': 123, 'ctx': 2, 'value': 'value', 'flags': set([])})
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT])
def test_lookup_failure(self):
add_fetch_result([])
self.assertEqual(
datahog.alias.lookup(self.p, 'value', 2),
None)
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
COMMIT])
def test_list(self):
add_fetch_result([(0, 'val1', 0), (0, 'val2', 1), (0, 'val3', 2)])
self.assertEqual(
datahog.alias.list(self.p, 123, 2),
([
{'base_id': 123, 'ctx': 2, 'value': 'val1',
'flags': set([])},
{'base_id': 123, 'ctx': 2, 'value': 'val2',
'flags': set([])},
{'base_id': 123, 'ctx': 2, 'value': 'val3',
'flags': set([])},
], 3))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select flags, value, pos
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
and pos >= %s
order by pos asc
limit %s
""", (123, 2, 0, 100)),
FETCH_ALL,
COMMIT])
def test_list_empty(self):
add_fetch_result([])
self.assertEqual(
datahog.alias.list(self.p, 123, 2),
([], 0))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select flags, value, pos
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
and pos >= %s
order by pos asc
limit %s
""", (123, 2, 0, 100)),
FETCH_ALL,
COMMIT])
def test_batch(self):
add_fetch_result([
(123, 0, 2, 'val1'),
(124, 0, 2, 'val2'),
(126, 0, 2, 'val3')])
self.assertEqual(
datahog.alias.batch(self.p,
[(123, 2), (124, 2), (125, 2), (126, 2)]),
[
{'base_id': 123, 'flags': set([]), 'ctx': 2,
'value': 'val1'},
{'base_id': 124, 'flags': set([]), 'ctx': 2,
'value': 'val2'},
None,
{'base_id': 126, 'flags': set([]), 'ctx': 2,
'value': 'val3'}])
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
with window_query as (
select base_id, flags, ctx, value, rank() over (
partition by base_id, ctx
order by pos
) as r
from alias
where
time_removed is null
and (base_id, ctx) in ((%s, %s),(%s, %s),(%s, %s),(%s, %s))
)
select base_id, flags, ctx, value
from window_query
where r=1
""", (123, 2, 124, 2, 125, 2, 126, 2)),
FETCH_ALL,
COMMIT])
def test_add_flags(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [1, 3], []),
set([1, 3]))
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set flags=flags | %s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (5, 2, h)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update alias
set flags=flags | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (5, 2, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_add_flags_no_alias(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [1, 3], []),
None)
def test_clear_flags(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([(1,)])
add_fetch_result([(1,)])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [], [2, 3]),
set([1]))
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set flags=flags & ~%s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (6, 2, h)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update alias
set flags=flags & ~%s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 2, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_clear_flags_no_alias(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [], [1, 3]),
None)
def test_set_flags_add(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [1, 3], []),
set([1, 3]))
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set flags=flags | %s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (5, 2, h)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update alias
set flags=flags | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (5, 2, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_set_flags_clear(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([(4,)])
add_fetch_result([(4,)])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [], [1, 2]),
set([3]))
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set flags=flags & ~%s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (3, 2, h)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update alias
set flags=flags & ~%s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (3, 2, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
    def test_set_flags_both(self):
        """Adding and clearing in one call uses a combined AND-NOT/OR update."""
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)
        # Lookup row, then the flags value returned by each TPC update.
        add_fetch_result([(123, 5)])
        add_fetch_result([(5,)])
        add_fetch_result([(5,)])
        # Result 5 == 0b101 -> flags {1, 3} set after adding [1, 3], clearing [2].
        self.assertEqual(
            datahog.alias.set_flags(self.p, 123, 2, 'value', [1, 3], [2]),
            set([1, 3]))
        h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
        # Clear mask for {2} is 2; add mask for {1, 3} is 5.
        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
            ROWCOUNT,
            FETCH_ONE,
            COMMIT,
            TPC_BEGIN,
            GET_CURSOR,
            EXECUTE("""
update alias_lookup
set flags=(flags & ~%s) | %s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (2, 5, 2, h)),
            FETCH_ALL,
            TPC_PREPARE,
            RESET,
            GET_CURSOR,
            EXECUTE("""
update alias
set flags=(flags & ~%s) | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (2, 5, 2, 'value', 123)),
            FETCH_ALL,
            COMMIT,
            TPC_COMMIT])
    def test_set_flags_no_alias(self):
        """set_flags returns None when the update matches no alias row."""
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)
        # Lookup succeeds, but the subsequent update returns no rows.
        add_fetch_result([(123, 5)])
        add_fetch_result([])
        self.assertEqual(
            datahog.alias.set_flags(self.p, 123, 2, 'value', [], [1, 2]),
            None)
    def test_shift(self):
        """shift() reorders an alias with a single CTE-based statement."""
        # The query's final SELECT reports whether the move happened.
        add_fetch_result([(True,)])
        self.assertEqual(
            datahog.alias.shift(self.p, 123, 2, 'value', 3),
            True)
        # One statement does it all: find the old position (oldpos), shuffle
        # the rows between old and new positions (bump), clamp the target to
        # the current maximum (maxpos), then move the alias itself (move).
        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
with oldpos as (
select pos
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
), bump as (
update alias
set pos=pos + (case
when (select pos from oldpos) < pos
then -1
else 1
end)
where
exists (select 1 from oldpos)
and time_removed is null
and base_id=%s
and ctx=%s
and pos between symmetric (select pos from oldpos) and %s
), maxpos(n) as (
select pos
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), move as (
update alias
set pos=(case
when %s > (select n from maxpos)
then (select n from maxpos)
else %s
end)
where
exists (select 1 from oldpos)
and time_removed is null
and base_id=%s
and ctx=%s
and value=%s
returning 1
)
select exists (select 1 from move)
""", (123, 2, 'value', 123, 2, 3, 123, 2, 3, 3, 123, 2, 'value')),
            FETCH_ONE,
            COMMIT])
    def test_remove(self):
        """remove() soft-deletes the alias on both tables under TPC."""
        # Lookup row, then one (dummy) row per successful update statement.
        add_fetch_result([(123, 0)])
        add_fetch_result([()])
        add_fetch_result([()])
        self.assertEqual(
            datahog.alias.remove(self.p, 123, 2, 'value'),
            True)
        h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
        # Lookup, then a two-phase commit: mark the lookup row removed, then
        # mark the alias row removed and close the gap in the pos ordering.
        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
            ROWCOUNT,
            FETCH_ONE,
            COMMIT,
            TPC_BEGIN,
            GET_CURSOR,
            EXECUTE("""
update alias_lookup
set time_removed=now()
where
time_removed is null
and hash=%s
and ctx=%s
and base_id=%s
""", (h, 2, 123)),
            ROWCOUNT,
            TPC_PREPARE,
            RESET,
            GET_CURSOR,
            EXECUTE("""
with removal as (
update alias
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
returning pos
), bump as (
update alias
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and base_id=%s
and ctx=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 2, 'value', 123, 2)),
            ROWCOUNT,
            COMMIT,
            TPC_COMMIT])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 17,138 | 11 | 574 |
c5b3ed774afd9eff5e1c28fcc842ea52c7084590 | 3,031 | py | Python | LCAnumpy/lca.py | asudeeaydin/LCAversions | dd4c8abfabc45ac8af61647e48d8b115042af4fb | [
"MIT"
] | null | null | null | LCAnumpy/lca.py | asudeeaydin/LCAversions | dd4c8abfabc45ac8af61647e48d8b115042af4fb | [
"MIT"
] | null | null | null | LCAnumpy/lca.py | asudeeaydin/LCAversions | dd4c8abfabc45ac8af61647e48d8b115042af4fb | [
"MIT"
] | null | null | null | #
#
# Jesse Livezey 2014-04-19
#
import numpy as np
#Initialize settings for inference
def infer(basis, stimuli, eta, lamb, nIter, adapt, coeffs=None, softThresh=0):
    """Infer sparse coefficients representing the stimuli with LCA dynamics.

    Implements the locally competitive algorithm (Rozell et al., 2008):
    units are driven by their overlap with the stimulus, inhibit each other
    in proportion to their mutual overlap, and are thresholded by a
    per-stimulus threshold that anneals towards ``lamb``.

    Args:
        basis: Dictionary, one element per row, shape (numDict, dataSize).
        stimuli: Targets to represent, one per row, shape (numStim, dataSize).
        eta: Inference rate (equal to 1/tau in the 2018 Olshausen paper).
        lamb: Floor value for the adaptive threshold.
        nIter: Number of inference iterations to run.
        adapt: Multiplicative factor applied each iteration to thresholds
            that are still above ``lamb``.
        coeffs: Optional initial pre-threshold coefficients for all stimuli.
        softThresh: If 1 use soft (shrinkage) thresholding, else hard.

    Returns:
        Tuple ``(a, u)`` of post-threshold coefficients and pre-threshold
        internal state, both of shape (numStim, numDict).
    """
    numDict = basis.shape[0]
    numStim = stimuli.shape[0]

    # Pre-threshold internal state, optionally seeded with caller values.
    u = np.zeros((numStim, numDict))
    if coeffs is not None:
        u[:] = np.atleast_2d(coeffs)
    a = np.zeros_like(u)
    ci = np.zeros((numStim, numDict))

    # G: overlap between dictionary elements, with self-overlap removed.
    G = basis.dot(basis.T) - np.eye(numDict)
    # b[i, j]: overlap between stimulus i and dictionary element j.
    b = stimuli.dot(basis.T)
    # Per-stimulus adaptive threshold, initialised from the mean drive.
    thresh = np.absolute(b).mean(1)

    for _ in range(nIter):
        # Lateral inhibition: active units suppress overlapping units.
        ci[:] = a.dot(G)
        # Leaky-integrator update of the internal state.
        u[:] = eta * (b - ci) + (1 - eta) * u
        if softThresh == 1:
            # Soft threshold: shrink magnitudes towards zero.
            a[:] = np.sign(u) * np.maximum(0., np.absolute(u) - thresh[:, np.newaxis])
        else:
            # Hard threshold: copy u, then zero entries below the threshold.
            a[:] = u
            a[np.absolute(a) < thresh[:, np.newaxis]] = 0.
        # Anneal thresholds that are still above the floor ``lamb``.
        thresh[thresh > lamb] = adapt * thresh[thresh > lamb]

    return (a, u)
| 39.881579 | 124 | 0.606071 | #
#
# Jesse Livezey 2014-04-19
#
import numpy as np
#Initialize settings for inference
def infer(basis, stimuli, eta, lamb, nIter, adapt, coeffs=None, softThresh=0):
    """Sparse-code ``stimuli`` against ``basis`` with LCA dynamics.

    Each row of ``basis`` is a dictionary element and each row of
    ``stimuli`` is a target to represent.  Runs ``nIter`` steps of the
    locally competitive algorithm and returns the tuple ``(a, u)`` of
    post-threshold coefficients and pre-threshold internal state.
    """
    n_dict = basis.shape[0]
    n_stim = stimuli.shape[0]
    elem_size = basis.shape[1]  # size of one dictionary element

    # Internal (pre-threshold) state, optionally seeded from ``coeffs``.
    u = np.zeros((n_stim, n_dict))
    if coeffs is not None:
        u[:] = np.atleast_2d(coeffs)
    a = np.zeros_like(u)
    inhibition = np.zeros((n_stim, n_dict))

    # Pairwise overlap of dictionary elements with the self-terms zeroed.
    G = basis.dot(basis.T) - np.eye(n_dict)
    # Drive of every stimulus onto every dictionary element.
    b = stimuli.dot(basis.T)
    # One adaptive threshold per stimulus, seeded from the mean drive.
    thresh = np.absolute(b).mean(1)

    for _step in range(nIter):
        # How strongly the currently-active units suppress each unit.
        inhibition[:] = a.dot(G)
        # Leaky integration towards the inhibition-corrected drive.
        u[:] = eta * (b - inhibition) + (1 - eta) * u
        if softThresh == 1:
            # Shrinkage (soft) thresholding.
            a[:] = np.sign(u) * np.maximum(0., np.absolute(u) - thresh[:, np.newaxis])
        else:
            # Hard thresholding: keep u, zero out sub-threshold entries.
            a[:] = u
            a[np.absolute(a) < thresh[:, np.newaxis]] = 0.
        # Decay every threshold that still sits above the floor ``lamb``.
        above_floor = thresh > lamb
        thresh[above_floor] = adapt * thresh[above_floor]

    return (a, u)
| 0 | 0 | 0 |
923e0b22ef247f6c85556ee41810da61a395384a | 2,053 | py | Python | wb/main/models/environment_model.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 23 | 2022-03-17T12:24:09.000Z | 2022-03-31T09:13:30.000Z | wb/main/models/environment_model.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 18 | 2022-03-21T08:17:44.000Z | 2022-03-30T12:42:30.000Z | wb/main/models/environment_model.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 16 | 2022-03-17T12:24:14.000Z | 2022-03-31T12:15:12.000Z | """
OpenVINO DL Workbench
Class for ORM model described a Environment
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
from typing import List, Dict
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.orm import Session
from config.constants import ENVIRONMENTS_FOLDER
from wb.main.models.base_model import BaseModel
| 32.078125 | 99 | 0.717487 | """
OpenVINO DL Workbench
Class for ORM model described a Environment
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
from typing import List, Dict
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.orm import Session
from config.constants import ENVIRONMENTS_FOLDER
from wb.main.models.base_model import BaseModel
class EnvironmentModel(BaseModel):
    """ORM record describing a Python environment and its readiness state."""

    __tablename__ = 'environments'

    id = Column(Integer, primary_key=True, autoincrement=True)
    path = Column(String, nullable=True)
    manifest_path = Column(String, nullable=True)
    is_ready = Column(Boolean, nullable=False, default=False)

    # Type-only declaration; presumably populated by an ORM relationship
    # defined on DependencyModel -- confirm against that model.
    dependencies: List['DependencyModel']

    def __init__(self, manifest_path: Path = None, path: Path = None):
        self.manifest_path = str(manifest_path) if manifest_path else None
        self.path = str(path)

    def json(self) -> dict:
        """Serialisable view of the environment for API responses."""
        return dict(
            isReady=self.is_ready,
            path=self.path,
            manifest=self.manifest_path,
        )

    @property
    def python_executable(self) -> Path:
        """Location of the interpreter inside this environment."""
        return Path(self.path) / 'bin' / 'python'

    @property
    def installed_packages(self) -> Dict:
        """Mapping of lower-cased package name to installed version."""
        packages = {}
        for dependency in self.dependencies:
            packages[dependency.package.lower()] = dependency.version
        return packages

    def build_environment_path(self) -> Path:
        """Default on-disk location derived from the primary key."""
        return Path(ENVIRONMENTS_FOLDER, str(self.id))

    def mark_as_not_ready(self, session: Session):
        """Flag the environment as unusable and persist the change."""
        self.is_ready = False
        self.write_record(session)
| 636 | 500 | 23 |
0e14fd906390d882a01ca62e4572286ae93a57c5 | 896 | py | Python | test.py | nuukedo29/mifs | 5a52cc456a95a3560a4d308d0d4cfde6231c531a | [
"Unlicense"
] | 18 | 2020-02-25T23:36:20.000Z | 2022-01-22T00:52:58.000Z | test.py | nuukedo29/mifs | 5a52cc456a95a3560a4d308d0d4cfde6231c531a | [
"Unlicense"
] | 5 | 2020-03-24T19:45:06.000Z | 2021-09-03T14:39:22.000Z | test.py | nuukedo29/mifs | 5a52cc456a95a3560a4d308d0d4cfde6231c531a | [
"Unlicense"
] | 1 | 2020-05-29T23:34:25.000Z | 2020-05-29T23:34:25.000Z | import os
import glob
import subprocess
import re
# Compress every file under _test/ with mifs.py and report size ratios
# plus any key/value statistics printed by the tool.
if __name__ == "__main__":
    glob_remove("_test/*_mifs.*")
    for file in glob.glob("_test/*"):
        # Drop leftovers from the previous iteration before re-running.
        glob_remove("_test/*_mifs.*")
        output = run(f'py mifs.py "{file}"')
        filename, extension = os.path.splitext(os.path.basename(file))
        # The tool writes exactly one *_mifs.* output file per input.
        output_file = glob.glob(f'_test/*_mifs.*')[0]
        size_input = os.stat(file).st_size
        size_output = os.stat(output_file).st_size
        print( f'{file} {size_input}=>{size_output} ({(1 if size_input < size_output else -1)*round((size_input/size_output)*100, 2)}%)' )
        # Echo "key: value" lines from the tool's output, indented.
        for key, value in re.findall(r"(?m)^(.*?)\: ([^\s]+)", output):
            print(f' {key}: {value}')
import glob
import subprocess
import re
def run(command):
    """Execute *command* and return its combined stdout+stderr as text."""
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        encoding="utf8",
    )
    # stderr is merged into stdout above, so one read captures everything.
    return process.stdout.read()
def glob_remove(pattern):
    """Delete every file matching the glob *pattern*."""
    for match in glob.glob(pattern):
        os.remove(match)
# Driver: run mifs.py over every file in _test/ and print, per file, the
# input/output sizes and the tool's reported statistics.
if __name__ == "__main__":
    glob_remove("_test/*_mifs.*")
    for file in glob.glob("_test/*"):
        # Clean previous outputs so the glob below finds only this run's file.
        glob_remove("_test/*_mifs.*")
        output = run(f'py mifs.py "{file}"')
        filename, extension = os.path.splitext(os.path.basename(file))
        output_file = glob.glob(f'_test/*_mifs.*')[0]
        size_input = os.stat(file).st_size
        size_output = os.stat(output_file).st_size
        # Ratio is printed negative when the output grew instead of shrinking.
        print( f'{file} {size_input}=>{size_output} ({(1 if size_input < size_output else -1)*round((size_input/size_output)*100, 2)}%)' )
        for key, value in re.findall(r"(?m)^(.*?)\: ([^\s]+)", output):
            print(f' {key}: {value}')
ef7de3adb0d7c1e4243288d966644861df8a7b96 | 7,040 | py | Python | drf_firebase_token_auth/authentication.py | ronhe/drf-firebase-token-auth | c696569be5afd307a8d8ede75e1a64b099c568cf | [
"MIT"
] | 9 | 2020-10-11T14:11:43.000Z | 2022-03-24T19:25:33.000Z | drf_firebase_token_auth/authentication.py | Arka-cell/drf-firebase-token-auth | 061c074598a8cfbe3cea1405f8607c6e1a7a7e55 | [
"MIT"
] | 2 | 2021-03-04T15:20:35.000Z | 2022-03-02T15:18:16.000Z | drf_firebase_token_auth/authentication.py | Arka-cell/drf-firebase-token-auth | 061c074598a8cfbe3cea1405f8607c6e1a7a7e55 | [
"MIT"
] | 3 | 2021-03-03T20:26:39.000Z | 2021-12-28T05:50:02.000Z | """Firebase token authentication classes"""
from typing import *
from django.contrib import auth
from django.utils import timezone
from rest_framework import authentication, exceptions
import firebase_admin
from firebase_admin import auth as firebase_auth
from .settings import api_settings
from . import models
FIREBASE_APP_NAME = 'drf_firebase_token_auth'
_User = auth.get_user_model()
class FirebaseTokenAuthentication(authentication.TokenAuthentication):
    """Firebase token authentication class"""

    # Expected prefix of the "Authorization" header value.
    keyword = api_settings.AUTH_HEADER_TOKEN_KEYWORD

    @staticmethod
    def _extract_email_from_firebase_user(
            firebase_user: firebase_auth.UserRecord,
            ignore_unverified_email=api_settings.IGNORE_FIREBASE_UNVERIFIED_EMAIL,
    ) -> Union[str, None]:
        """Extract user email from a Firebase user.

        Args:
            firebase_user: A Firebase user.
            ignore_unverified_email: Is a verified email required.

        Returns:
            User's email address or None if not found.
        """
        if ignore_unverified_email:
            if firebase_user.email_verified and firebase_user.email:
                return firebase_user.email
            else:
                return None

        # Make best effort to extract an email address.
        # NOTE(review): firebase_user.email may be None here, in which case
        # emails[0] below is None even if a provider_data entry carries an
        # address -- confirm whether None entries should be filtered first.
        emails = [firebase_user.email] if firebase_user else []
        emails += [data.email for data in firebase_user.provider_data if data.email]
        return emails[0] if emails else None

    def authenticate_firebase_user(self,
                                   token: str) -> firebase_auth.UserRecord:
        """Authenticate a Firebase user using a given token

        Args:
            token: A Firebase token.

        Returns:
            A firebase user
        """
        # NOTE(review): self._firebase_app is expected to be initialised
        # elsewhere (e.g. in __init__, not shown in this excerpt).
        try:
            decoded_token = firebase_auth.verify_id_token(
                token,
                app=self._firebase_app,
                check_revoked=api_settings.VERIFY_FIREBASE_TOKEN_NOT_REVOKED
            )
        except ValueError:
            raise exceptions.AuthenticationFailed(
                'JWT was found to be invalid, or the App’s project ID cannot '
                'be determined.'
            )
        except (firebase_auth.InvalidIdTokenError,
                firebase_auth.ExpiredIdTokenError,
                firebase_auth.RevokedIdTokenError,
                firebase_auth.CertificateFetchError) as exc:
            # Distinguish revocation (user must re-auth) from other failures.
            if exc.code == 'ID_TOKEN_REVOKED':
                raise exceptions.AuthenticationFailed(
                    'Token revoked, inform the user to reauthenticate or '
                    'signOut().'
                )
            else:
                raise exceptions.AuthenticationFailed(
                    'Token is invalid.'
                )
        return firebase_auth.get_user(decoded_token['uid'],
                                      app=self._firebase_app)

    def get_local_user(self, firebase_user: firebase_auth.UserRecord) -> _User:
        """Get a local user from a Firebase user.

        Args:
            firebase_user: A Firebase user.

        Returns:
            A local user model object.

        Raises:
            User.DoesNotExist: Could not find a local user matching to the
                given Firebase user.
        """
        # Try getting from a local firebase user.
        try:
            return models.FirebaseUser.objects.select_related('user').\
                get(uid=firebase_user.uid).user
        except models.FirebaseUser.DoesNotExist:
            pass

        # Try getting user by email.
        email = self._extract_email_from_firebase_user(firebase_user)
        if email:
            try:
                return _User.objects.get(**{_User.EMAIL_FIELD: email})
            except _User.DoesNotExist:
                pass

        # Try getting user by uid, and let User.DoesNotExist raise if not found.
        return _User.objects.get(**{_User.USERNAME_FIELD: firebase_user.uid})

    def create_local_user(self,
                          firebase_user: firebase_auth.UserRecord) -> _User:
        """Create a local user for a given Firebase user

        Args:
            firebase_user: A Firebase user.

        Returns:
            The created local user model object.
        """
        email = self._extract_email_from_firebase_user(firebase_user)
        # Fall back to the Firebase uid when no usable email exists.
        username = email if email else firebase_user.uid
        user = _User.objects.create_user(**{_User.USERNAME_FIELD: username})
        if email:
            user.email = email
        if firebase_user.display_name:
            # Last word becomes the last name; everything before it the first.
            words = firebase_user.display_name.split(' ')
            user.first_name = ' '.join(words[:-1])
            user.last_name = words[-1]
        user.save()
        return user

    @staticmethod
    def get_or_create_local_firebase_user(
            firebase_user: firebase_auth.UserRecord,
            local_user
    ) -> models.FirebaseUser:
        """Get or create a local firebase user.

        Args:
            firebase_user: A Firebase user.
            local_user: User model object.

        Returns:
            The created local Firebase user.
        """
        local_firebase_user, created = \
            models.FirebaseUser.objects.get_or_create(
                uid=firebase_user.uid,
                defaults={'user': local_user}
            )
        return local_firebase_user

    def authenticate_credentials(self, token: str) -> Tuple[_User, None]:
        """Authenticate the token against Firebase

        Args:
            token: Firebase authentication token.

        Returns:
            The local user matching the Firebase authenticated user.
        """
        # Authenticate the Firebase token.
        firebase_user = self.authenticate_firebase_user(token)

        # Get or create local user that matches the Firebase user.
        try:
            local_user = self.get_local_user(firebase_user)
        except _User.DoesNotExist:
            if api_settings.SHOULD_CREATE_LOCAL_USER:
                local_user = self.create_local_user(firebase_user)
            else:
                raise exceptions.AuthenticationFailed(
                    'User is not registered to the application.'
                )

        # Update user last login.
        local_user.last_login = timezone.now()
        local_user.save()

        # Get or create a local Firebase user.
        self.get_or_create_local_firebase_user(firebase_user=firebase_user,
                                               local_user=local_user)
        return local_user, None
| 33.684211 | 84 | 0.608239 | """Firebase token authentication classes"""
from typing import *
from django.contrib import auth
from django.utils import timezone
from rest_framework import authentication, exceptions
import firebase_admin
from firebase_admin import auth as firebase_auth
from .settings import api_settings
from . import models
FIREBASE_APP_NAME = 'drf_firebase_token_auth'
_User = auth.get_user_model()
class FirebaseTokenAuthentication(authentication.TokenAuthentication):
    """Firebase token authentication class"""

    # Expected prefix of the "Authorization" header value.
    keyword = api_settings.AUTH_HEADER_TOKEN_KEYWORD

    def __init__(self) -> None:
        # Reuse the named Firebase app if this process already initialised
        # it; otherwise create it from the service-account credentials.
        try:
            self._firebase_app = firebase_admin.get_app(FIREBASE_APP_NAME)
        except ValueError:
            firebase_credentials = firebase_admin.credentials.Certificate(
                api_settings.FIREBASE_SERVICE_ACCOUNT_KEY_FILE_PATH
            )
            self._firebase_app = firebase_admin.initialize_app(
                firebase_credentials,
                name=FIREBASE_APP_NAME
            )

    @staticmethod
    def _extract_email_from_firebase_user(
            firebase_user: firebase_auth.UserRecord,
            ignore_unverified_email=api_settings.IGNORE_FIREBASE_UNVERIFIED_EMAIL,
    ) -> Union[str, None]:
        """Extract user email from a Firebase user.

        Args:
            firebase_user: A Firebase user.
            ignore_unverified_email: Is a verified email required.

        Returns:
            User's email address or None if not found.
        """
        if ignore_unverified_email:
            if firebase_user.email_verified and firebase_user.email:
                return firebase_user.email
            else:
                return None

        # Make best effort to extract an email address.
        # NOTE(review): firebase_user.email may be None, in which case
        # emails[0] is None even when provider_data holds an address --
        # confirm whether None entries should be filtered out here.
        emails = [firebase_user.email] if firebase_user else []
        emails += [data.email for data in firebase_user.provider_data if data.email]
        return emails[0] if emails else None

    def authenticate_firebase_user(self,
                                   token: str) -> firebase_auth.UserRecord:
        """Authenticate a Firebase user using a given token

        Args:
            token: A Firebase token.

        Returns:
            A firebase user
        """
        try:
            decoded_token = firebase_auth.verify_id_token(
                token,
                app=self._firebase_app,
                check_revoked=api_settings.VERIFY_FIREBASE_TOKEN_NOT_REVOKED
            )
        except ValueError:
            raise exceptions.AuthenticationFailed(
                'JWT was found to be invalid, or the App’s project ID cannot '
                'be determined.'
            )
        except (firebase_auth.InvalidIdTokenError,
                firebase_auth.ExpiredIdTokenError,
                firebase_auth.RevokedIdTokenError,
                firebase_auth.CertificateFetchError) as exc:
            # A revoked token requires the client to re-authenticate; other
            # failures are reported generically.
            if exc.code == 'ID_TOKEN_REVOKED':
                raise exceptions.AuthenticationFailed(
                    'Token revoked, inform the user to reauthenticate or '
                    'signOut().'
                )
            else:
                raise exceptions.AuthenticationFailed(
                    'Token is invalid.'
                )
        return firebase_auth.get_user(decoded_token['uid'],
                                      app=self._firebase_app)

    def get_local_user(self, firebase_user: firebase_auth.UserRecord) -> _User:
        """Get a local user from a Firebase user.

        Args:
            firebase_user: A Firebase user.

        Returns:
            A local user model object.

        Raises:
            User.DoesNotExist: Could not find a local user matching to the
                given Firebase user.
        """
        # Try getting from a local firebase user.
        try:
            return models.FirebaseUser.objects.select_related('user').\
                get(uid=firebase_user.uid).user
        except models.FirebaseUser.DoesNotExist:
            pass

        # Try getting user by email.
        email = self._extract_email_from_firebase_user(firebase_user)
        if email:
            try:
                return _User.objects.get(**{_User.EMAIL_FIELD: email})
            except _User.DoesNotExist:
                pass

        # Try getting user by uid, and let User.DoesNotExist raise if not found.
        return _User.objects.get(**{_User.USERNAME_FIELD: firebase_user.uid})

    def create_local_user(self,
                          firebase_user: firebase_auth.UserRecord) -> _User:
        """Create a local user for a given Firebase user

        Args:
            firebase_user: A Firebase user.

        Returns:
            The created local user model object.
        """
        email = self._extract_email_from_firebase_user(firebase_user)
        # Fall back to the Firebase uid when no usable email exists.
        username = email if email else firebase_user.uid
        user = _User.objects.create_user(**{_User.USERNAME_FIELD: username})
        if email:
            user.email = email
        if firebase_user.display_name:
            # Last word becomes the last name; everything before it the first.
            words = firebase_user.display_name.split(' ')
            user.first_name = ' '.join(words[:-1])
            user.last_name = words[-1]
        user.save()
        return user

    @staticmethod
    def get_or_create_local_firebase_user(
            firebase_user: firebase_auth.UserRecord,
            local_user
    ) -> models.FirebaseUser:
        """Get or create a local firebase user.

        Args:
            firebase_user: A Firebase user.
            local_user: User model object.

        Returns:
            The created local Firebase user.
        """
        local_firebase_user, created = \
            models.FirebaseUser.objects.get_or_create(
                uid=firebase_user.uid,
                defaults={'user': local_user}
            )
        return local_firebase_user

    def authenticate_credentials(self, token: str) -> Tuple[_User, None]:
        """Authenticate the token against Firebase

        Args:
            token: Firebase authentication token.

        Returns:
            The local user matching the Firebase authenticated user.
        """
        # Authenticate the Firebase token.
        firebase_user = self.authenticate_firebase_user(token)

        # Get or create local user that matches the Firebase user.
        try:
            local_user = self.get_local_user(firebase_user)
        except _User.DoesNotExist:
            if api_settings.SHOULD_CREATE_LOCAL_USER:
                local_user = self.create_local_user(firebase_user)
            else:
                raise exceptions.AuthenticationFailed(
                    'User is not registered to the application.'
                )

        # Update user last login.
        local_user.last_login = timezone.now()
        local_user.save()

        # Get or create a local Firebase user.
        self.get_or_create_local_firebase_user(firebase_user=firebase_user,
                                               local_user=local_user)
        return local_user, None
| 433 | 0 | 27 |
28461ea07e161a5864fe9b05a35a864f7f0eb1c7 | 1,146 | py | Python | NeuNorm/loader.py | ornlneutronimaging/normalization | e38b3023145e8c7af69d0854cc2ad27921f1b436 | [
"BSD-3-Clause"
] | 2 | 2022-03-16T02:15:37.000Z | 2022-03-16T15:41:39.000Z | NeuNorm/loader.py | ornlneutronimaging/normalization | e38b3023145e8c7af69d0854cc2ad27921f1b436 | [
"BSD-3-Clause"
] | 7 | 2018-07-31T12:31:04.000Z | 2022-03-15T20:11:03.000Z | NeuNorm/loader.py | ornlneutronimaging/normalization | e38b3023145e8c7af69d0854cc2ad27921f1b436 | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
from astropy.io import fits
import numpy as np
from PIL import Image
def load_hdf(file_name):
    """Load an HDF5 image stack.

    Parameters
    ----------
    file_name : str
        Full file name of the HDF5 file.

    Returns
    -------
    list
        One entry per scan found under ``entry/data/data``.
    """
    # Local import: h5py is only needed for HDF input and was never
    # imported at module level in the original file.
    import h5py

    # Fixes the original, which referenced an undefined name ``path`` and
    # leaked the file handle; ``[()]`` replaces the removed ``.value``.
    with h5py.File(file_name, 'r') as hdf5_file:
        data = hdf5_file['entry']['data']['data'][()]
    return [scan for scan in data]
def load_fits(file_name):
    """Load a FITS image.

    Parameters
    ----------
    file_name : str
        Full file name of the FITS image.

    Returns
    -------
    numpy.ndarray
        Image data of the primary HDU, squeezed to 2-D when the leading
        axis is a singleton.

    Raises
    ------
    OSError
        If the file cannot be read as FITS.
    """
    try:
        data = fits.open(file_name, ignore_missing_end=True)[0].data
        if len(data.shape) == 3:
            # Collapse the leading axis: (1, h, w) -> (h, w). Assumes the
            # first dimension is 1; reshape raises otherwise.
            data = data.reshape(data.shape[1:])
        return data
    except OSError as err:
        # Re-raise with a friendly message while keeping the original
        # error available as __cause__ for debugging.
        raise OSError("Unable to read the FITS file provided!") from err
def load_tiff(file_name):
    """Load a TIFF image and its metadata.

    Parameters
    ----------
    file_name : str
        Full file name of the TIFF image.

    Returns
    -------
    list
        ``[data, metadata]`` where ``data`` is a numpy array and
        ``metadata`` maps TIFF tag ids to tag values.

    Raises
    ------
    OSError
        If the file cannot be read as TIFF.
    """
    try:
        # Context manager guarantees the file is closed even on failure
        # (the original leaked the handle when an exception was raised).
        with Image.open(file_name) as image:
            metadata = dict(image.tag_v2)
            data = np.asarray(image)
        return [data, metadata]
    except Exception as err:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate; original cause is chained for debugging.
        raise OSError("Unable to read the TIFF file provided!") from err
from astropy.io import fits
import numpy as np
from PIL import Image
def load_hdf(file_name):
    '''load HDF image

    Parameters
    ----------
    full file name of HDF5 file
    '''
    # NOTE(review): this references an undefined name ``path`` instead of
    # ``file_name``, and ``h5py`` is never imported in this module, so the
    # function cannot run as written; ``.value`` is also removed in h5py 3.
    hdf = h5py.File(path,'r')['entry']['data']['data'].value
    tmp = []
    for iScan in hdf:
        tmp.append(iScan)
    return tmp
def load_fits(file_name):
    '''load fits image

    Parameters
    ----------
    full file name of fits image
    '''
    tmp = []  # NOTE(review): this initial value is never used
    try:
        tmp = fits.open(file_name,ignore_missing_end=True)[0].data
        if len(tmp.shape) == 3:
            # Collapse the leading axis: (1, h, w) -> (h, w). Assumes the
            # first dimension is 1 -- reshape would fail otherwise.
            tmp = tmp.reshape(tmp.shape[1:])
        return tmp
    except OSError:
        # Re-raised with a friendly message; the original cause is dropped.
        raise OSError("Unable to read the FITS file provided!")
def load_tiff(file_name):
    '''load tiff image

    Parameters:
    -----------
    full file name of tiff image
    '''
    try:
        _image = Image.open(file_name)
        metadata = dict(_image.tag_v2)  # raw TIFF tag-id -> value mapping
        data = np.asarray(_image)
        _image.close()
        return [data, metadata]
    except:
        # NOTE(review): a bare except also swallows KeyboardInterrupt and
        # SystemExit, and the handle leaks if Image.open succeeded before
        # the failure; consider narrowing to Exception and using ``with``.
        raise OSError("Unable to read the TIFF file provided!")
cb9d29cd627e2e3a9d520cd64506b7db86d79a2d | 5,086 | py | Python | Minimizing-Financial-Loss/scripts/xgboost_search.py | PaulAdams4361/Quantifying-the-World | d923f482941c8fd4269ff474125d509f591fd2d6 | [
"MIT"
] | null | null | null | Minimizing-Financial-Loss/scripts/xgboost_search.py | PaulAdams4361/Quantifying-the-World | d923f482941c8fd4269ff474125d509f591fd2d6 | [
"MIT"
] | null | null | null | Minimizing-Financial-Loss/scripts/xgboost_search.py | PaulAdams4361/Quantifying-the-World | d923f482941c8fd4269ff474125d509f591fd2d6 | [
"MIT"
] | 2 | 2020-12-12T16:01:23.000Z | 2021-01-05T04:54:09.000Z |
import warnings
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import KNNImputer, SimpleImputer
from sklearn.feature_selection import RFECV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (
confusion_matrix,
classification_report,
make_scorer,
accuracy_score
)
def cost_score(y, y_pred, fp_cost=25, fn_cost=125):
    """Total dollar cost of misclassifications.

    Parameters
    ----------
    y : array-like of {0, 1}
        True labels.
    y_pred : array-like of {0, 1}
        Predicted labels.
    fp_cost : int, optional
        Cost of each false positive (default 25).
    fn_cost : int, optional
        Cost of each false negative (default 125).

    Returns
    -------
    int
        ``fp_cost * (#false positives) + fn_cost * (#false negatives)``.
    """
    # Accept plain lists/Series as well as arrays (the original's fancy
    # indexing required y_pred to already be an ndarray).
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    # Indices where prediction and truth disagree.
    misclass_idx = np.where(~np.equal(y, y_pred))[0]
    # Among the misclassified: predictions of 1 are false positives,
    # predictions of 0 are false negatives.
    fp_count = int(np.count_nonzero(y_pred[misclass_idx] == 1))
    fn_count = int(np.count_nonzero(y_pred[misclass_idx] == 0))
    return fp_count * fp_cost + fn_count * fn_cost
# Silence sklearn/xgboost convergence and deprecation chatter during search.
warnings.filterwarnings('ignore')
# pd.options.display.max_columns = 100

# Fixed seed so the split and the randomized search are reproducible.
random_state = 42
random_generator = np.random.RandomState(random_state)
# Business metric: $25 per false positive, $125 per false negative
# (lower is better, hence greater_is_better=False).
cost_scorer = make_scorer(cost_score, greater_is_better=False)

data = pd.read_csv('../final_project.csv')
y = data['y']
X = data.drop(['y'], axis=1)
# Stratified 80/20 split preserves the class balance in both partitions.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=random_state, stratify=y)
print('X_train: ', X_train.shape,
      '\ny_train: ', y_train.shape,
      '\nX_test: ', X_test.shape,
      '\ny_test: ', y_test.shape)

# --- Cleaning: fix a typo and strip unit symbols so columns parse numeric.
# fix spelling error
X_test['x24'] = X_test['x24'].str.replace('euorpe', 'europe')
# remove %
X_test['x32'] = pd.to_numeric(X_test['x32'].str.replace('%', ''))
# remove $
X_test['x37'] = pd.to_numeric(X_test['x37'].str.replace('$', ''))
# repeat process for training set
X_train['x24'] = X_train['x24'].str.replace('euorpe', 'europe')
X_train['x32'] = pd.to_numeric(X_train['x32'].str.replace('%', ''))
X_train['x37'] = pd.to_numeric(X_train['x37'].str.replace('$', ''))

# remake objects (categorical columns only)
objects = X_train.select_dtypes(['O'])
objects_test = X_test.select_dtypes(['O'])

# imputing with mode from training data
X_train['x24'].fillna('asia', inplace=True)
X_train['x29'].fillna('July', inplace=True)
X_train['x30'].fillna('wednesday', inplace=True)
X_test['x24'].fillna('asia', inplace=True)
X_test['x29'].fillna('July', inplace=True)
X_test['x30'].fillna('wednesday', inplace=True)

# Label-encode each categorical column; fit on training values only.
names = [i for i in list(objects.columns)]
le = LabelEncoder()
for i in names:
    le.fit(objects[i].astype(str))
    X_train[i] = le.transform(X_train[i])
    X_test[i] = le.transform(X_test[i])

# Fill remaining numeric gaps with 3-nearest-neighbour imputation.
KNNimp = KNNImputer(n_neighbors=3)
X_train = KNNimp.fit_transform(X_train)
X_test = KNNimp.transform(X_test)

# define the estimator used to rank features for recursive elimination
logistic = LogisticRegression()
# provide the parameters of the feature selection process
feature_selector = RFECV(logistic,
                         step = 1,
                         min_features_to_select= 1,
                         cv = 5,
                         n_jobs = -1)
feature_selector = feature_selector.fit(X_train, y_train)
X_train = feature_selector.transform(X_train)
X_test = feature_selector.transform(X_test)
print('X_train shape: ', X_train.shape,
      '\nX_test shape: ', X_test.shape)

# Random-search space over the XGBoost hyper-parameters.
xgb_params = {
    'n_estimators': np.arange(100, 500, 10, dtype='int'),
    'learning_rate': np.linspace(0.01, 1, num=1000, dtype='float'),
    'gamma':np.geomspace(0.001, 10, num=1000, dtype='float'),
    'max_depth':[d for d in range(1, 11)],
    'subsample':np.linspace(0.1, 1, num=100, dtype='float'),
    'colsample_bytree':[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    'colsample_bylevel':[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    'colsample_bynode':[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    'lambda': np.geomspace(0.001, 10, num=100, dtype='float'),
    'alpha': np.geomspace(0.001, 10, num=100, dtype='float')
}
xgb = XGBClassifier(booster='gbtree',
                    early_stopping_rounds=10,
                    random_state=random_state)
# 100 random draws, 5-fold CV, scored by the dollar-cost metric above.
xgb_search = RandomizedSearchCV(xgb,
                                xgb_params,
                                random_state=random_state,
                                scoring=cost_scorer,
                                n_iter=100,
                                cv=5,
                                verbose=0,
                                n_jobs=-1)
xgb_search.fit(X_train, y_train)

y_pred = xgb_search.best_estimator_.predict(X_train)
print('\n\n\nTraining Performance')
print('Best model Score:', -xgb_search.best_score_) # negate since 'greater_is_better=False'
print('Best model Accuracy:', accuracy_score(y_train, y_pred) )

y_pred = xgb_search.best_estimator_.predict(X_test)
test_cost = cost_score(y_test, y_pred)
test_acc = accuracy_score(y_test, y_pred)
print('\n\n\nTest Performance')
print('Best Model Test Cost', test_cost)
print('Best Model Test Accuracy', test_acc)

print('\n\n\nBest Parameters')
print(xgb_search.best_params_)
| 30.45509 | 111 | 0.67381 |
import warnings
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import KNNImputer, SimpleImputer
from sklearn.feature_selection import RFECV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (
confusion_matrix,
classification_report,
make_scorer,
accuracy_score
)
def cost_score(y, y_pred, fp_cost=25, fn_cost=125):
    """Asymmetric misclassification cost for a binary (0/1) classifier.

    Parameters
    ----------
    y : array-like
        True 0/1 labels.
    y_pred : array-like
        Predicted 0/1 labels, same length as ``y``.
    fp_cost : int, optional
        Cost charged per false positive (default 25).
    fn_cost : int, optional
        Cost charged per false negative (default 125).

    Returns
    -------
    int
        ``fp_cost * #false_positives + fn_cost * #false_negatives``.
        Lower is better; pair with ``make_scorer(..., greater_is_better=False)``.
    """
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    # A prediction is wrong where the labels disagree; among the wrong ones,
    # predicted 1 => false positive, predicted 0 => false negative.
    # (The original built index arrays via ``np.equal(...) == False``; a
    # boolean mask with count_nonzero is equivalent and idiomatic.)
    misclassified = y != y_pred
    n_fp = int(np.count_nonzero(misclassified & (y_pred == 1)))
    n_fn = int(np.count_nonzero(misclassified & (y_pred == 0)))
    return n_fp * fp_cost + n_fn * fn_cost
# Silence sklearn/xgboost warnings so the search output stays readable.
warnings.filterwarnings('ignore')
# pd.options.display.max_columns = 100
# Fixed seed so the split and the randomized search are reproducible.
random_state = 42
random_generator = np.random.RandomState(random_state)
# Lower cost is better, hence greater_is_better=False (best_score_ comes back negated).
cost_scorer = make_scorer(cost_score, greater_is_better=False)
data = pd.read_csv('../final_project.csv')
y = data['y']
X = data.drop(['y'], axis=1)
# Stratified 80/20 split keeps the class balance in both partitions.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=random_state, stratify=y)
print('X_train: ', X_train.shape,
      '\ny_train: ', y_train.shape,
      '\nX_test: ', X_test.shape,
      '\ny_test: ', y_test.shape)
# fix spelling error
X_test['x24'] = X_test['x24'].str.replace('euorpe', 'europe')
# remove %
X_test['x32'] = pd.to_numeric(X_test['x32'].str.replace('%', ''))
# remove $
X_test['x37'] = pd.to_numeric(X_test['x37'].str.replace('$', ''))
# repeat process for training set
X_train['x24'] = X_train['x24'].str.replace('euorpe', 'europe')
X_train['x32'] = pd.to_numeric(X_train['x32'].str.replace('%', ''))
X_train['x37'] = pd.to_numeric(X_train['x37'].str.replace('$', ''))
# remake objects
objects = X_train.select_dtypes(['O'])
objects_test = X_test.select_dtypes(['O'])
# imputing with mode from training data
X_train['x24'].fillna('asia', inplace=True)
X_train['x29'].fillna('July', inplace=True)
X_train['x30'].fillna('wednesday', inplace=True)
X_test['x24'].fillna('asia', inplace=True)
X_test['x29'].fillna('July', inplace=True)
X_test['x30'].fillna('wednesday', inplace=True)
# Label-encode every categorical column; each encoder is fit on the training
# values only.  NOTE(review): le.transform on X_test will raise if the test
# set contains a label unseen in training — confirm that cannot happen here.
names = [i for i in list(objects.columns)]
le = LabelEncoder()
for i in names:
    le.fit(objects[i].astype(str))
    X_train[i] = le.transform(X_train[i])
    X_test[i] = le.transform(X_test[i])
# Impute remaining numeric NaNs from the 3 nearest neighbours (fit on train only).
KNNimp = KNNImputer(n_neighbors=3)
X_train = KNNimp.fit_transform(X_train)
X_test = KNNimp.transform(X_test)
# define the estimator
logistic = LogisticRegression()
# provide the parameters of the feature selection process
# (recursive feature elimination with 5-fold CV, dropping 1 feature per step)
feature_selector = RFECV(logistic,
                         step = 1,
                         min_features_to_select= 1,
                         cv = 5,
                         n_jobs = -1)
feature_selector = feature_selector.fit(X_train, y_train)
X_train = feature_selector.transform(X_train)
X_test = feature_selector.transform(X_test)
print('X_train shape: ', X_train.shape,
      '\nX_test shape: ', X_test.shape)
# Hyper-parameter distributions sampled by the randomized search below.
xgb_params = {
    'n_estimators': np.arange(100, 500, 10, dtype='int'),
    'learning_rate': np.linspace(0.01, 1, num=1000, dtype='float'),
    'gamma':np.geomspace(0.001, 10, num=1000, dtype='float'),
    'max_depth':[d for d in range(1, 11)],
    'subsample':np.linspace(0.1, 1, num=100, dtype='float'),
    'colsample_bytree':[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    'colsample_bylevel':[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    'colsample_bynode':[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    'lambda': np.geomspace(0.001, 10, num=100, dtype='float'),
    'alpha': np.geomspace(0.001, 10, num=100, dtype='float')
}
xgb = XGBClassifier(booster='gbtree',
                    early_stopping_rounds=10,
                    random_state=random_state)
# 100 random parameter draws, 5-fold CV, scored by the custom cost scorer.
xgb_search = RandomizedSearchCV(xgb,
                                xgb_params,
                                random_state=random_state,
                                scoring=cost_scorer,
                                n_iter=100,
                                cv=5,
                                verbose=0,
                                n_jobs=-1)
xgb_search.fit(X_train, y_train)
y_pred = xgb_search.best_estimator_.predict(X_train)
print('\n\n\nTraining Performance')
print('Best model Score:', -xgb_search.best_score_) # negate since 'greater_is_better=False'
print('Best model Accuracy:', accuracy_score(y_train, y_pred) )
# Evaluate the selected model on the held-out test split.
y_pred = xgb_search.best_estimator_.predict(X_test)
test_cost = cost_score(y_test, y_pred)
test_acc = accuracy_score(y_test, y_pred)
print('\n\n\nTest Performance')
print('Best Model Test Cost', test_cost)
print('Best Model Test Accuracy', test_acc)
print('\n\n\nBest Parameters')
print(xgb_search.best_params_)
| 0 | 0 | 0 |
0a0fa115e7f6e6e42463efd615e422c2f9db3236 | 2,789 | py | Python | manipularDadosCovid/ManDadosCovid/sistema.py | SkiereszDiego/Learning-Python | 883f561ca5fc2eb4d4c74c7337d52c13e193db9f | [
"MIT"
] | null | null | null | manipularDadosCovid/ManDadosCovid/sistema.py | SkiereszDiego/Learning-Python | 883f561ca5fc2eb4d4c74c7337d52c13e193db9f | [
"MIT"
] | null | null | null | manipularDadosCovid/ManDadosCovid/sistema.py | SkiereszDiego/Learning-Python | 883f561ca5fc2eb4d4c74c7337d52c13e193db9f | [
"MIT"
] | null | null | null | import interface
import arquivo
import classes
from time import sleep
#Ler os Arquivos txt.
arquivo.carregarArqEst()
arquivo.carregarArqCid()
#Funcoes
#Menu
while True:
resposta = interface.menu(['Finalizar o Programa', 'Cadastrar Estados', 'Cadastrar Cidades', 'Relatório de Estados', 'Relatório de Cidades', 'Atualizar números de casos'])
if resposta == 1:
interface.cabecalho('Saindo do Sistema.....Até logo e lave as mãos!')
break
elif resposta == 2:
cadastroDeEstado()
elif resposta == 3:
cadastroDeCidade()
elif resposta == 4:
relatorioEstado()
elif resposta == 5:
relatorioCidade()
elif resposta == 6:
atualizacaoCasos()
else:
print('\033[31mOpção inválida. Digite novamente!\033[m')
sleep(2)
| 32.057471 | 176 | 0.603084 | import interface
import arquivo
import classes
from time import sleep
# Load the persisted state and city records from their .txt files before the
# menu loop starts (populates arquivo.lst_estado / arquivo.lst_cidade).
arquivo.carregarArqEst()
arquivo.carregarArqCid()
# --- helper functions -------------------------------------------------------
def cadastroDeEstado():
    """Prompt for a new state and register it, rejecting duplicate names."""
    interface.cabecalho('Novo Estado')
    novo = classes.Estado(input('Estado: '), input('Sigla: '))
    ja_cadastrado = False  # flag: a state with the same name already exists
    for existente in arquivo.lst_estado:
        if existente.getNomeEstado() == novo.getNomeEstado():
            print('\033[31mOpção inválida. Estado já cadastrado!\033[m')
            ja_cadastrado = True
    if not ja_cadastrado:
        arquivo.lst_estado.append(novo)
def cadastroDeCidade():
    """Prompt for a new city (name, case count, state) and register it,
    rejecting duplicate city names."""
    interface.cabecalho('Nova Cidade')
    nova = classes.Cidade(input('Cidade: '),
                          interface.leiaInt('Quantidade de casos: '),
                          input('Estado(Sigla): '))
    ja_cadastrada = False  # flag: a city with the same name already exists
    for existente in arquivo.lst_cidade:
        if existente.getCidade() == nova.getCidade():
            print('\033[31mOpção inválida. Cidade já cadastrada!\033[m')
            ja_cadastrada = True
    if not ja_cadastrada:
        arquivo.lst_cidade.append(nova)
def relatorioEstado():
    """Print one report line per registered state."""
    print('=-=-=-=-=- Relatório dos Estados:\n')
    for registro in arquivo.lst_estado:
        linha = registro.retornaAtributos()
        print(linha)
def relatorioCidade():
    """Print one report line per registered city."""
    print('=-=-=-=-=- Relatório das Cidades:\n')
    for registro in arquivo.lst_cidade:
        linha = registro.retornaAtributos()
        print(linha)
def atualizacaoCasos():
    """List every city, ask the user to pick one, update its case count,
    and refresh the case counter of the city's state."""
    print('=-=-=-=-=- Atualização de casos:\n')
    # Show all cities so the user can type one of them.
    for cidade in arquivo.lst_cidade:
        print(cidade.retornaAtributos())
    cidadeEscolhida = input("Escolha uma das cidades: ").upper()
    a = False  # set to True once the typed name matches a registered city
    for cidade in arquivo.lst_cidade:
        if cidade.getCidade() == cidadeEscolhida:
            cidade.atualizaCasos(int(input("Digite a quantidade atualizada de casos: ")))
            soma = 0
            # NOTE(review): `soma` only accumulates the chosen city's count,
            # not the counts of all cities in that state — if the state total
            # is meant to be the sum over all of its cities this looks like a
            # bug; confirm the intended semantics of Estado.atualizarCasos.
            for sigla in arquivo.lst_estado:
                if sigla.getSigla() == cidade.getEstado():
                    soma = soma + cidade.getCasos()
                    sigla.atualizarCasos(soma)
            a = True
    if not a:
        print('\033[31mOpção inválida. Cidade não consta no sistema!\033[m')
# --- main menu loop ---------------------------------------------------------
# Repeats until option 1 (quit) is chosen; each numbered option dispatches to
# one of the helpers defined above.
while True:
    resposta = interface.menu(['Finalizar o Programa', 'Cadastrar Estados', 'Cadastrar Cidades', 'Relatório de Estados', 'Relatório de Cidades', 'Atualizar números de casos'])
    if resposta == 1:
        # Farewell banner, then leave the loop and end the program.
        interface.cabecalho('Saindo do Sistema.....Até logo e lave as mãos!')
        break
    elif resposta == 2:
        cadastroDeEstado()
    elif resposta == 3:
        cadastroDeCidade()
    elif resposta == 4:
        relatorioEstado()
    elif resposta == 5:
        relatorioCidade()
    elif resposta == 6:
        atualizacaoCasos()
    else:
        # Anything else: warn in red ANSI text and pause so the user sees it.
        print('\033[31mOpção inválida. Digite novamente!\033[m')
        sleep(2)
| 1,814 | 0 | 123 |
38182b04869d458fec66bdda280dda46870fee4c | 8,288 | py | Python | hw3/code/A3a.py | bobbydyr/CSE546-Machine-Learning | c3f7e487b60506acfa7886d7cc64dfa61550ee4b | [
"MIT"
] | null | null | null | hw3/code/A3a.py | bobbydyr/CSE546-Machine-Learning | c3f7e487b60506acfa7886d7cc64dfa61550ee4b | [
"MIT"
] | null | null | null | hw3/code/A3a.py | bobbydyr/CSE546-Machine-Learning | c3f7e487b60506acfa7886d7cc64dfa61550ee4b | [
"MIT"
] | null | null | null | # A3.b1
import numpy as np
import matplotlib.pyplot as plt
n = 30
# np.random.seed(1)
x = np.random.uniform(0,1,n)
x_mean = np.mean(x)
x_sd = np.std(x)
# x = (x-x_mean) # x after standardization
y = 4*np.sin(np.pi*x)*np.cos(6*np.pi*(x**2)) + np.random.standard_normal(n)
# y = (y - x_mean) / x_sd
error_validation_list = []
lamb = 500
lamb_list = []
d_list = []
for lamb in list(500 * (1/2)**(np.arange(0,20))):
for d in list(range(0, 51)):
error_validation = 0
print("Lam: ", lamb, ", d: ", d)
for i in range(n):
x_train = np.append(x[0:i], x[i+1:n])
y_train = np.append(y[0:i], y[i+1:n])
x_validation = x[i]
y_validation = y[i]
K = k_poly(x_train[:, np.newaxis], x_train[:, np.newaxis], d)
alpha = np.linalg.pinv(K + lamb) @ y_train
# in predicted y formula
k_xi_x = (1 + x_validation * x_train[np.newaxis, :]) ** d # use this when polynomial kernel
# k_xi_x = np.exp(-gamma*np.linalg.norm(x_validation - x_train[np.newaxis, :], 2))
y_predicted = alpha @ k_xi_x.T
error_validation += (y_predicted - y_validation).T @ (y_predicted- y_validation)
# error_validation = error_validation[0][0]
error_validation /= n
print("error_validation: ", error_validation)
error_validation_list.append(error_validation)
lamb_list.append(lamb)
d_list.append(d)
min_error = min(error_validation_list)
index_boostrap_sample_min_error = error_validation_list.index(min(error_validation_list))
lamb_best_poly = lamb_list[index_boostrap_sample_min_error]
d_best = d_list[index_boostrap_sample_min_error]
print("Best lamb: ", lamb_best_poly, ", Best d: ", d_best)
# lamb_best_poly = 0.48828125
d_best = 30
# plots the comparaison
# np.random.seed(1)
x_fine = np.array(list(np.arange(min(x),max(x), 0.01)) )
n = len(x_fine)
y_fine_true = 4*np.sin(np.pi*x_fine)*np.cos(6*np.pi*(x_fine**2))
y_fine_grid = y_fine_true + np.random.standard_normal(n)
f_poly_predicted = []
for xi in x_fine:
K = k_poly(x_fine[:, np.newaxis], x_fine[:, np.newaxis], d_best)
alpha = np.linalg.pinv(K + lamb_best_poly) @ y_fine_grid
k_xi_x = (1 + xi * x_fine[np.newaxis, :]) ** d_best # use this when polynomial kernel
y_predicted = alpha @ k_xi_x.T
f_poly_predicted.append(y_predicted)
plt.plot(x_fine, y_fine_true, label='True')
plt.plot(x_fine, f_poly_predicted, label='Poly Kernel')
plt.plot(x, y,'bo', label='Observed')
plt.xlabel("X")
plt.ylabel("Y")
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3b_1_test.png")
plt.show()
# A3.c1
B = 300
n = 30
n_fine = len(x_fine)
# np.random.seed(0)
boostrap_predicted_poly_matrix = []
for j in range(B):
index_boostrap_sample = np.random.choice(n,n)
x_training = x[index_boostrap_sample]
y_training = y[index_boostrap_sample]
K = k_poly(x_training[:,np.newaxis],x_training[:,np.newaxis], d_best)
alpha = np.linalg.solve((K + lamb_best_poly*np.eye(n, n)), y_training)
y_predicted_boostrap_ploy = []
for xi in x_fine:
y_predicted_boostrap_ploy.append(np.sum((1+xi*x_training[np.newaxis,:]) ** d_best @ alpha))
boostrap_predicted_poly_matrix.append(y_predicted_boostrap_ploy)
boostrap_predicted_poly_matrix = np.array(boostrap_predicted_poly_matrix)
percent_5_list_poly = []
percent_95_list_poly = []
for i in range(n_fine):
sorted_xi_from_300_B_sample = np.sort(boostrap_predicted_poly_matrix[:, i])
x_percentile_5 = sorted_xi_from_300_B_sample[int(B * 0.05)]
x_percentile_95 = sorted_xi_from_300_B_sample[int(B * 0.95)]
percent_5_list_poly.append(x_percentile_5)
percent_95_list_poly.append(x_percentile_95)
plt.plot(x_fine, y_fine_true, label = 'True Model')
plt.plot(x_fine, f_poly_predicted, label = 'Poly Kernel Prediction')
plt.plot(x, y,'bo', label ='Observed data')
plt.fill_between(x_fine, percent_5_list_poly, percent_95_list_poly, alpha=0.3, label="90% CI")
plt.ylim(-6, 6)
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3c_1_test.png")
plt.show()
#######################################################################################################################
# A3.b2
n = 30
# np.random.seed(0)
# x = np.random.rand(n)
x = np.random.uniform(0,1,n)
y_true = 4*np.sin(np.pi*x)*np.cos(6*np.pi*(x**2))
y = y_true + np.random.randn(n)
error_validation_list = []
lamb_list = []
gamma_list = []
d_list =[]
lamb = 1
for lamb in list(500 * (1/2)**(np.arange(0,30))):
for gamma in list(50 * (1/1.1)**(np.arange(0,30))):
print("Lam: ", lamb, ", gamma: ", gamma)
error_validation = 0
for i in range(n):
x_train = np.append(x[0:i], x[i+1:n])
y_train = np.append(y[0:i], y[i+1:n])
x_validation = x[i]
y_validation = y[i]
K = k_rbf(x_train[:,np.newaxis],x_train[np.newaxis,:], gamma)
alpha = np.linalg.pinv(K + lamb) @ y_train
k_xi_x = np.exp(-gamma*(x_validation-x_train[np.newaxis,:])**2)
error_validation += (k_xi_x@alpha - y_validation).T@(k_xi_x@alpha - y_validation)
error_validation_list.append(error_validation)
print("error_validation: ", error_validation)
lamb_list.append(lamb)
gamma_list.append(gamma)
min_error = min(error_validation_list)
index_boostrap_sample_min_error = error_validation_list.index(min_error)
lamb_best_rbf = lamb_list[index_boostrap_sample_min_error]
gamma_best = gamma_list[index_boostrap_sample_min_error]
print('Best gamma for RBF kernel is : ', gamma_best)
print('Best Lambda for RBF kernel is :', lamb_best_rbf)
gamma_best= 10.175399541327897
lamb_best_rbf= 9.313225746154785e-07
# np.random.seed(10)
x_fine = np.arange(min(x),max(x),0.001)
n = len(x_fine)
y_fine_true = 4*np.sin(np.pi*x_fine)*np.cos(6*np.pi*(x_fine**2))
y_fine_grid = y_fine_true + np.random.standard_normal(n)
f_rbf_predicted = []
K_rbf = k_rbf(x_fine[:,np.newaxis],x_fine[np.newaxis,:], gamma_best)
alpha = np.linalg.solve((K_rbf + lamb_best_rbf*np.eye(n, n)), y_fine_grid)
for xi in x_fine:
f_rbf_predicted.append(np.sum(alpha * np.exp(-gamma_best*(xi-x_fine)**2)))
plt.plot(x_fine, y_fine_true, label = 'True Model')
plt.plot(x_fine, f_rbf_predicted, label = 'RBF Kernel Prediction')
plt.plot(x, y,'bo', label ='Observed data')
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3b_2.png")
plt.show()
# A3.c2
B = 300
n=30
n_fine = len(x_fine)
# np.random.seed(0)
boostrap_predicted_rbf_matrix = []
# user x, y from previous
for j in range(B):
index_boostrap_sample = np.random.choice(n,n)
x_training = x[index_boostrap_sample]
y_training = y[index_boostrap_sample]
K_rbf = k_rbf(x_training[:, np.newaxis], x_training[np.newaxis, :], gamma_best)
alpha = np.linalg.solve((K_rbf + lamb_best_rbf * np.eye(n, n)), y_training)
y_predicted_boostrap_rbf = []
for xi in x_fine:
y_predicted_boostrap_rbf.append(np.sum(alpha * np.exp(-gamma_best*(xi-x_training)**2)))
boostrap_predicted_rbf_matrix.append(y_predicted_boostrap_rbf)
boostrap_predicted_rbf_matrix = np.array(boostrap_predicted_rbf_matrix)
percent_5_list_rbf = []
percent_95_list_rbf = []
for i in range(n_fine):
sorted_xi_from_300_B_sample = np.sort(boostrap_predicted_rbf_matrix[:, i])
x_percentile_5 = sorted_xi_from_300_B_sample[int(B * 0.05)]
x_percentile_95 = sorted_xi_from_300_B_sample[int(B * 0.95)]
percent_5_list_rbf.append(x_percentile_5)
percent_95_list_rbf.append(x_percentile_95)
plt.plot(x_fine, y_fine_true, label = 'True Model')
plt.plot(x_fine, f_rbf_predicted, label = 'rbf Kernel Prediction')
plt.plot(x, y,'bo', label ='Observed data')
plt.fill_between(x_fine, percent_5_list_rbf, percent_95_list_rbf, alpha=0.3, label="90% CI")
plt.ylim(-6, 6)
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3c_2_test.png")
plt.show()
#######################################################################################################################
| 37.165919 | 119 | 0.671573 | # A3.b1
import numpy as np
import matplotlib.pyplot as plt
# Generate n noisy observations of f(x) = 4 sin(pi x) cos(6 pi x^2) on [0, 1),
# with standard-normal noise added to the targets.
n = 30
# np.random.seed(1)
x = np.random.uniform(0,1,n)
x_mean = np.mean(x)
x_sd = np.std(x)
# x = (x-x_mean) # x after standardization
y = 4*np.sin(np.pi*x)*np.cos(6*np.pi*(x**2)) + np.random.standard_normal(n)
# y = (y - x_mean) / x_sd
def k_poly(x, z, d):
    """Polynomial kernel Gram matrix ``K[i, j] = (1 + x_i . z_j) ** d``.

    Parameters
    ----------
    x, z : ndarray
        Sample arrays; call sites pass column vectors of shape (n, 1) and
        (m, 1) so that ``x @ z.T`` is the (n, m) outer-product matrix.
    d : int
        Polynomial degree.

    Returns
    -------
    ndarray
        The (n, m) kernel matrix.
    """
    # Single matmul; the original computed x @ z.T twice, once into an
    # unused temporary.
    return (1 + x @ z.T) ** d
# Leave-one-out cross-validation over a grid of (lambda, degree) pairs for
# kernel ridge regression with the polynomial kernel.
error_validation_list = []
lamb = 500
lamb_list = []
d_list = []
# lambda is halved 20 times starting from 500; degree d sweeps 0..50.
for lamb in list(500 * (1/2)**(np.arange(0,20))):
    for d in list(range(0, 51)):
        error_validation = 0
        print("Lam: ", lamb, ", d: ", d)
        for i in range(n):
            # Hold out sample i, fit on the remaining n-1 points.
            x_train = np.append(x[0:i], x[i+1:n])
            y_train = np.append(y[0:i], y[i+1:n])
            x_validation = x[i]
            y_validation = y[i]
            K = k_poly(x_train[:, np.newaxis], x_train[:, np.newaxis], d)
            alpha = np.linalg.pinv(K + lamb) @ y_train
            # in predicted y formula
            k_xi_x = (1 + x_validation * x_train[np.newaxis, :]) ** d # use this when polynomial kernel
            # k_xi_x = np.exp(-gamma*np.linalg.norm(x_validation - x_train[np.newaxis, :], 2))
            y_predicted = alpha @ k_xi_x.T
            error_validation += (y_predicted - y_validation).T @ (y_predicted- y_validation)
        # error_validation = error_validation[0][0]
        error_validation /= n
        print("error_validation: ", error_validation)
        error_validation_list.append(error_validation)
        lamb_list.append(lamb)
        d_list.append(d)
# Pick the (lambda, d) pair with the lowest LOO error.
min_error = min(error_validation_list)
index_boostrap_sample_min_error = error_validation_list.index(min(error_validation_list))
lamb_best_poly = lamb_list[index_boostrap_sample_min_error]
d_best = d_list[index_boostrap_sample_min_error]
print("Best lamb: ", lamb_best_poly, ", Best d: ", d_best)
# lamb_best_poly = 0.48828125
# NOTE(review): d_best is overwritten with a hard-coded 30 right after the
# search, discarding the selected degree above — confirm this is intended.
d_best = 30
# plots the comparaison
# np.random.seed(1)
x_fine = np.array(list(np.arange(min(x),max(x), 0.01)) )
n = len(x_fine)
y_fine_true = 4*np.sin(np.pi*x_fine)*np.cos(6*np.pi*(x_fine**2))
y_fine_grid = y_fine_true + np.random.standard_normal(n)
f_poly_predicted = []
# Refit on the fine grid and evaluate the fitted function at each grid point.
for xi in x_fine:
    K = k_poly(x_fine[:, np.newaxis], x_fine[:, np.newaxis], d_best)
    alpha = np.linalg.pinv(K + lamb_best_poly) @ y_fine_grid
    k_xi_x = (1 + xi * x_fine[np.newaxis, :]) ** d_best # use this when polynomial kernel
    y_predicted = alpha @ k_xi_x.T
    f_poly_predicted.append(y_predicted)
plt.plot(x_fine, y_fine_true, label='True')
plt.plot(x_fine, f_poly_predicted, label='Poly Kernel')
plt.plot(x, y,'bo', label='Observed')
plt.xlabel("X")
plt.ylabel("Y")
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3b_1_test.png")
plt.show()
# A3.c1
# Bootstrap (B resamples with replacement) to get pointwise 5th/95th
# percentile bands for the polynomial-kernel fit.
B = 300
n = 30
n_fine = len(x_fine)
# np.random.seed(0)
boostrap_predicted_poly_matrix = []
for j in range(B):
    index_boostrap_sample = np.random.choice(n,n)
    x_training = x[index_boostrap_sample]
    y_training = y[index_boostrap_sample]
    K = k_poly(x_training[:,np.newaxis],x_training[:,np.newaxis], d_best)
    alpha = np.linalg.solve((K + lamb_best_poly*np.eye(n, n)), y_training)
    y_predicted_boostrap_ploy = []
    for xi in x_fine:
        y_predicted_boostrap_ploy.append(np.sum((1+xi*x_training[np.newaxis,:]) ** d_best @ alpha))
    boostrap_predicted_poly_matrix.append(y_predicted_boostrap_ploy)
boostrap_predicted_poly_matrix = np.array(boostrap_predicted_poly_matrix)
percent_5_list_poly = []
percent_95_list_poly = []
# Column-wise empirical percentiles across the B bootstrap curves.
for i in range(n_fine):
    sorted_xi_from_300_B_sample = np.sort(boostrap_predicted_poly_matrix[:, i])
    x_percentile_5 = sorted_xi_from_300_B_sample[int(B * 0.05)]
    x_percentile_95 = sorted_xi_from_300_B_sample[int(B * 0.95)]
    percent_5_list_poly.append(x_percentile_5)
    percent_95_list_poly.append(x_percentile_95)
plt.plot(x_fine, y_fine_true, label = 'True Model')
plt.plot(x_fine, f_poly_predicted, label = 'Poly Kernel Prediction')
plt.plot(x, y,'bo', label ='Observed data')
plt.fill_between(x_fine, percent_5_list_poly, percent_95_list_poly, alpha=0.3, label="90% CI")
plt.ylim(-6, 6)
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3c_1_test.png")
plt.show()
#######################################################################################################################
# A3.b2
def k_rbf(x, z, gamma):
    """RBF (Gaussian) kernel exp(-gamma * (x - z)^2), evaluated elementwise
    with NumPy broadcasting between ``x`` and ``z``."""
    diff = x - z
    return np.exp(-gamma * diff * diff)
# Fresh noisy sample of the same target function for the RBF-kernel part.
n = 30
# np.random.seed(0)
# x = np.random.rand(n)
x = np.random.uniform(0,1,n)
y_true = 4*np.sin(np.pi*x)*np.cos(6*np.pi*(x**2))
y = y_true + np.random.randn(n)
# Leave-one-out cross-validation over a grid of (lambda, gamma) pairs for
# kernel ridge regression with the RBF kernel.
error_validation_list = []
lamb_list = []
gamma_list = []
d_list =[]
lamb = 1
for lamb in list(500 * (1/2)**(np.arange(0,30))):
    for gamma in list(50 * (1/1.1)**(np.arange(0,30))):
        print("Lam: ", lamb, ", gamma: ", gamma)
        error_validation = 0
        for i in range(n):
            # Hold out sample i, fit on the remaining n-1 points.
            x_train = np.append(x[0:i], x[i+1:n])
            y_train = np.append(y[0:i], y[i+1:n])
            x_validation = x[i]
            y_validation = y[i]
            K = k_rbf(x_train[:,np.newaxis],x_train[np.newaxis,:], gamma)
            alpha = np.linalg.pinv(K + lamb) @ y_train
            k_xi_x = np.exp(-gamma*(x_validation-x_train[np.newaxis,:])**2)
            error_validation += (k_xi_x@alpha - y_validation).T@(k_xi_x@alpha - y_validation)
        error_validation_list.append(error_validation)
        print("error_validation: ", error_validation)
        lamb_list.append(lamb)
        gamma_list.append(gamma)
# Pick the (lambda, gamma) pair with the lowest LOO error.
min_error = min(error_validation_list)
index_boostrap_sample_min_error = error_validation_list.index(min_error)
lamb_best_rbf = lamb_list[index_boostrap_sample_min_error]
gamma_best = gamma_list[index_boostrap_sample_min_error]
print('Best gamma for RBF kernel is : ', gamma_best)
print('Best Lambda for RBF kernel is :', lamb_best_rbf)
# NOTE(review): the searched values are immediately overwritten with these
# hard-coded constants from a previous run — confirm this is intended.
gamma_best= 10.175399541327897
lamb_best_rbf= 9.313225746154785e-07
# np.random.seed(10)
# Refit on a fine grid and evaluate the fitted function at each grid point.
x_fine = np.arange(min(x),max(x),0.001)
n = len(x_fine)
y_fine_true = 4*np.sin(np.pi*x_fine)*np.cos(6*np.pi*(x_fine**2))
y_fine_grid = y_fine_true + np.random.standard_normal(n)
f_rbf_predicted = []
K_rbf = k_rbf(x_fine[:,np.newaxis],x_fine[np.newaxis,:], gamma_best)
alpha = np.linalg.solve((K_rbf + lamb_best_rbf*np.eye(n, n)), y_fine_grid)
for xi in x_fine:
    f_rbf_predicted.append(np.sum(alpha * np.exp(-gamma_best*(xi-x_fine)**2)))
plt.plot(x_fine, y_fine_true, label = 'True Model')
plt.plot(x_fine, f_rbf_predicted, label = 'RBF Kernel Prediction')
plt.plot(x, y,'bo', label ='Observed data')
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3b_2.png")
plt.show()
# A3.c2
# Bootstrap (B resamples with replacement) to get pointwise 5th/95th
# percentile bands for the RBF-kernel fit.
B = 300
n=30
n_fine = len(x_fine)
# np.random.seed(0)
boostrap_predicted_rbf_matrix = []
# user x, y from previous
for j in range(B):
    index_boostrap_sample = np.random.choice(n,n)
    x_training = x[index_boostrap_sample]
    y_training = y[index_boostrap_sample]
    K_rbf = k_rbf(x_training[:, np.newaxis], x_training[np.newaxis, :], gamma_best)
    alpha = np.linalg.solve((K_rbf + lamb_best_rbf * np.eye(n, n)), y_training)
    y_predicted_boostrap_rbf = []
    for xi in x_fine:
        y_predicted_boostrap_rbf.append(np.sum(alpha * np.exp(-gamma_best*(xi-x_training)**2)))
    boostrap_predicted_rbf_matrix.append(y_predicted_boostrap_rbf)
boostrap_predicted_rbf_matrix = np.array(boostrap_predicted_rbf_matrix)
percent_5_list_rbf = []
percent_95_list_rbf = []
# Column-wise empirical percentiles across the B bootstrap curves.
for i in range(n_fine):
    sorted_xi_from_300_B_sample = np.sort(boostrap_predicted_rbf_matrix[:, i])
    x_percentile_5 = sorted_xi_from_300_B_sample[int(B * 0.05)]
    x_percentile_95 = sorted_xi_from_300_B_sample[int(B * 0.95)]
    percent_5_list_rbf.append(x_percentile_5)
    percent_95_list_rbf.append(x_percentile_95)
plt.plot(x_fine, y_fine_true, label = 'True Model')
plt.plot(x_fine, f_rbf_predicted, label = 'rbf Kernel Prediction')
plt.plot(x, y,'bo', label ='Observed data')
plt.fill_between(x_fine, percent_5_list_rbf, percent_95_list_rbf, alpha=0.3, label="90% CI")
plt.ylim(-6, 6)
plt.legend()
plt.savefig("/Users/yinruideng/Desktop/senior_spring/cse546/hw/hw3/latex/plots/A3c_2_test.png")
plt.show()
#######################################################################################################################
| 93 | 0 | 45 |
c0ac61a4e74441ae23403208689b3a59c5bea08f | 6,745 | py | Python | CI/machinery/mksenv.py | siconos/siconos-deb | 2739a23f23d797dbfecec79d409e914e13c45c67 | [
"Apache-2.0"
] | null | null | null | CI/machinery/mksenv.py | siconos/siconos-deb | 2739a23f23d797dbfecec79d409e914e13c45c67 | [
"Apache-2.0"
] | null | null | null | CI/machinery/mksenv.py | siconos/siconos-deb | 2739a23f23d797dbfecec79d409e914e13c45c67 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Make software environment from a yml database.
#
import getopt
import yaml
import sys
import shlex
def get_entry(spec=None, distrib=None, distrib_version=None, pkg=None,
section=None):
"""
Get one entry with precedence distrib with version > distrib >
match distrib > wildcard.
"""
distrib_full = '{0}-{1}'.format(distrib, distrib_version)
if pkg in spec[section]:
if distrib_full in spec[section][pkg]:
return spec[section][pkg][distrib_full]
elif distrib in spec[section][pkg]:
return spec[section][pkg][distrib]
elif distrib in spec['match']:
match_distrib = spec['match'][distrib]
if match_distrib in spec[section][pkg]:
return spec[section][pkg][match_distrib]
if wildcard in spec[section][pkg]:
return spec[section][pkg][wildcard]
else:
return None
def pkg_entries(spec=None, distrib=None, distrib_version=None, pkg=None):
"""
Find recursively entries for pkg and distribution distrib in a
specification spec.
"""
result = None
if pkg in spec['pkgs']:
result = get_entry(spec, distrib, distrib_version, pkg, 'pkgs')
if result is None:
return [result]
elif is_dict(result):
return [result]
else:
if is_atom(result):
result = [result]
r = list()
for e in result:
if e != pkg:
ne = pkg_entries(spec=spec, distrib=distrib,
distrib_version=distrib_version, pkg=e)
if ne == [None]:
r.append(e)
else:
r += ne
return r
def begin(distrib=None, distrib_version=None, output_mode=None):
"""
Distribution preamble.
"""
if output_mode == OutputMode.Docker:
sys.stdout.write('FROM {0}:{1}\n'.format(distrib, distrib_version))
def env(definitions=None, output_mode=None):
"""
Environment specification.
"""
if len(definitions) > 0:
items = list()
if output_mode == OutputMode.Docker:
items.append('ENV')
items += definitions
sys.stdout.write('{0}\n'.format(' \\ \n '.join(items)))
def run(installer=None, command=None, pkg=None, pkgs=None,
output_mode=OutputMode.Script):
"""
Format an install command according to output mode.
"""
if output_mode == OutputMode.Docker:
items = ['RUN']
else:
if output_mode == OutputMode.Script:
items = []
else:
sys.stderr.write('output mode {0} is not implemented\n'.format(
output_mode))
exit(1)
if installer is not None:
items.append(installer)
if command is not None:
if '&&' in command:
coms = command.split('&&')
items += ['{0} &&'.format(c.lstrip().rstrip()) for c in coms[:-1]]
items.append(coms[-1].lstrip().rstrip())
else:
items.append(command)
if pkg is not None:
items.append(pkg)
if pkgs is not None:
items += pkgs
sys.stdout.write('{0}\n'.format(' \\ \n '.join(items)))
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
['pkg=',
'pkgs=',
'script',
'docker',
'vagrant',
'split=',
'distrib='])
except getopt.GetoptError as err:
sys.stderr.write('{0}\n'.format(str(err)))
usage()
exit(2)
distrib = None
distrib_version = None
pkgs = list()
output_mode = OutputMode.Script
split = False
for o, a in opts:
if o == '--distrib':
if ':' in a:
distrib, distrib_version = a.split(':')
else:
distrib = a
elif o == '--pkg':
pkgs.append(a)
elif o == '--pkgs':
pkgs += a.split(',')
elif o == '--script':
output_mode = OutputMode.Script
elif o == '--docker':
output_mode = OutputMode.Docker
elif o == '--vagrant':
output_mode = OutputMode.Vagrant
elif o == '--split':
split = a.lower() in ['true', 'yes', '1']
specfilename = args[0]
with open(specfilename) as specfile:
spec = yaml.load(specfile.read())
wildcard = None
if 'wildcard' in spec:
wildcard = spec['wildcard']
else:
wildcard = 'any'
by_installer = list()
by_command = list()
definitions = list()
for pkg in pkgs:
definition = get_entry(spec, distrib, distrib_version, pkg, 'env')
if definition is not None:
if is_list(definition):
for iter_def in definition:
definitions.append(iter_def)
else:
definitions.append(definition)
entries = pkg_entries(spec=spec, distrib=distrib,
distrib_version=distrib_version, pkg=pkg)
for entry in entries:
if entry is not None:
if hasattr(entry, 'has_key'):
if 'command' in entry:
by_command.append(entry['command'])
elif hasattr(entry, 'sort'):
by_installer += entry
else:
by_installer.append(entry)
else:
by_installer.append(pkg)
begin(distrib=distrib, distrib_version=distrib_version,
output_mode=output_mode)
installer = get_entry(spec, distrib, distrib_version, wildcard,
'installer')
assert installer is not None
updater = get_entry(spec, distrib, distrib_version, wildcard, 'updater')
if updater:
installer = '{0} && {1}'.format(updater, installer)
if split:
for pkg in by_installer:
run(installer=installer,
pkg=pkg, output_mode=output_mode)
else:
run(installer=installer,
pkgs=by_installer, output_mode=output_mode)
for command in by_command:
run(command=command, output_mode=output_mode)
env(definitions, output_mode)
| 24.70696 | 78 | 0.540549 | #!/usr/bin/env python
#
# Make software environment from a yml database.
#
import getopt
import yaml
import sys
import shlex
def usage():
    """Print the command-line usage string for this script to stdout."""
    message = """
{0} [--pkg=<pkg>] [--pkgs=<pkg1,pkg2>,...] [--script] \
[--docker] [--vagrant] [--split=...] [--distrib=...] \
/path/to/<example>.yml""".format(sys.argv[0])
    print(message)
class OutputMode:
    """Enumeration of supported output formats (plain ints, compared with ==)."""
    Script = 0
    Docker = 1
    Vagrant = 2
def is_list(a):
    """Return True when *a* is a ``list`` instance."""
    result = isinstance(a, list)
    return result
def is_dict(a):
    """Return True when *a* is a ``dict`` instance."""
    result = isinstance(a, dict)
    return result
def is_atom(a):
    """Return True when *a* is not iterable (no ``__iter__`` attribute).

    Note that strings have ``__iter__`` and therefore do NOT count as atoms.
    """
    iterable = hasattr(a, '__iter__')
    return not iterable
def get_entry(spec=None, distrib=None, distrib_version=None, pkg=None,
              section=None):
    """
    Get one entry with precedence distrib with version > distrib >
    match distrib > wildcard.

    spec is the parsed yaml database and section the top-level key to look
    in (e.g. 'pkgs', 'env', 'installer').  Lookup order inside
    spec[section][pkg]:
      1. '<distrib>-<distrib_version>'
      2. '<distrib>'
      3. the distribution that spec['match'] maps <distrib> to
      4. the wildcard key (NOTE: read from the module-level global
         `wildcard`, which is set only after the yaml file is loaded)
    Returns None when pkg is absent from the section; also falls through to
    an implicit None when none of the keys above are present.
    """
    distrib_full = '{0}-{1}'.format(distrib, distrib_version)
    if pkg in spec[section]:
        if distrib_full in spec[section][pkg]:
            return spec[section][pkg][distrib_full]
        elif distrib in spec[section][pkg]:
            return spec[section][pkg][distrib]
        elif distrib in spec['match']:
            match_distrib = spec['match'][distrib]
            if match_distrib in spec[section][pkg]:
                return spec[section][pkg][match_distrib]
        # Wildcard fallback; also reached when the 'match' lookup found no
        # entry for the matched distribution.
        if wildcard in spec[section][pkg]:
            return spec[section][pkg][wildcard]
    else:
        return None
def pkg_entries(spec=None, distrib=None, distrib_version=None, pkg=None):
    """
    Find recursively entries for pkg and distribution distrib in a
    specification spec.

    Returns a list:
      [None]      -- pkg is unknown or has no entry for this distribution;
                     callers treat this as "install pkg under its own name".
      [dict]      -- one structured entry (e.g. {'command': ...}).
      [str, ...]  -- flattened leaf package names, with aliases expanded
                     recursively (a name that maps to itself stops the
                     recursion to avoid an infinite loop).
    """
    result = None
    if pkg in spec['pkgs']:
        result = get_entry(spec, distrib, distrib_version, pkg, 'pkgs')
    if result is None:
        return [result]
    elif is_dict(result):
        return [result]
    else:
        # The entry is a name or a list of names: expand each recursively.
        if is_atom(result):
            result = [result]
        r = list()
        for e in result:
            if e != pkg:
                ne = pkg_entries(spec=spec, distrib=distrib,
                                 distrib_version=distrib_version, pkg=e)
                if ne == [None]:
                    # e has no further definition: keep it as a leaf name.
                    r.append(e)
                else:
                    r += ne
        return r
def begin(distrib=None, distrib_version=None, output_mode=None):
    """Emit the preamble for the target distribution.

    Only the Docker output mode needs one (the ``FROM`` base-image line);
    every other mode produces no preamble.
    """
    if output_mode != OutputMode.Docker:
        return
    sys.stdout.write('FROM {0}:{1}\n'.format(distrib, distrib_version))
def env(definitions=None, output_mode=None):
    """Write the environment-variable block to stdout.

    Parameters
    ----------
    definitions : list of str or None
        Environment definitions (e.g. ``NAME=value``).  Nothing is written
        when the list is empty or ``None``.
    output_mode : OutputMode
        In Docker mode the block is prefixed with the ``ENV`` keyword;
        other modes emit the raw definitions.
    """
    # Guard against the default None as well as an empty list: the original
    # called len(definitions) and crashed with TypeError when the default
    # None was used.
    if not definitions:
        return
    items = list()
    if output_mode == OutputMode.Docker:
        items.append('ENV')
    items += definitions
    sys.stdout.write('{0}\n'.format(' \\ \n '.join(items)))
def run(installer=None, command=None, pkg=None, pkgs=None,
        output_mode=OutputMode.Script):
    """
    Format one install command according to the output mode.

    :param installer: installer invocation, e.g. 'apt-get install -y'
    :param command: free-form shell command; '&&'-separated parts are
        placed on continuation lines
    :param pkg: a single package name to append
    :param pkgs: a list of package names to append
    :param output_mode: OutputMode.Script or OutputMode.Docker (Vagrant
        is not implemented and aborts the program)

    Bug fix: the continuation separator used to leave a trailing space
    after the backslash (``' \\ \n '``), breaking shell/Dockerfile line
    continuation; the backslash now immediately precedes the newline.
    """
    if output_mode == OutputMode.Docker:
        items = ['RUN']
    elif output_mode == OutputMode.Script:
        items = []
    else:
        sys.stderr.write('output mode {0} is not implemented\n'.format(
            output_mode))
        exit(1)
    if installer is not None:
        items.append(installer)
    if command is not None:
        if '&&' in command:
            # Split chained commands so each step lands on its own
            # continuation line, re-appending '&&' to all but the last.
            coms = command.split('&&')
            items += ['{0} &&'.format(c.strip()) for c in coms[:-1]]
            items.append(coms[-1].strip())
        else:
            items.append(command)
    if pkg is not None:
        items.append(pkg)
    if pkgs is not None:
        items += pkgs
    sys.stdout.write('{0}\n'.format(' \\\n '.join(items)))
# ---- command-line parsing -------------------------------------------------
try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], '',
                                   ['pkg=', 'pkgs=', 'script', 'docker',
                                    'vagrant', 'split=', 'distrib='])
except getopt.GetoptError as err:
    sys.stderr.write('{0}\n'.format(str(err)))
    usage()
    exit(2)
distrib = None
distrib_version = None
pkgs = list()
output_mode = OutputMode.Script
split = False
for o, a in opts:
    if o == '--distrib':
        # --distrib accepts either 'name' or 'name:version'.
        if ':' in a:
            distrib, distrib_version = a.split(':')
        else:
            distrib = a
    elif o == '--pkg':
        pkgs.append(a)
    elif o == '--pkgs':
        pkgs += a.split(',')
    elif o == '--script':
        output_mode = OutputMode.Script
    elif o == '--docker':
        output_mode = OutputMode.Docker
    elif o == '--vagrant':
        output_mode = OutputMode.Vagrant
    elif o == '--split':
        split = a.lower() in ['true', 'yes', '1']
# ---- load the YAML specification ------------------------------------------
specfilename = args[0]
with open(specfilename) as specfile:
    # Bug fix: yaml.load() without a Loader is unsafe and rejected by
    # PyYAML >= 6; the spec is plain data, so safe_load is correct.
    spec = yaml.safe_load(specfile.read())
if 'wildcard' in spec:
    wildcard = spec['wildcard']
else:
    wildcard = 'any'
# ---- collect env definitions and install entries --------------------------
by_installer = list()
by_command = list()
definitions = list()
for pkg in pkgs:
    definition = get_entry(spec, distrib, distrib_version, pkg, 'env')
    if definition is not None:
        if is_list(definition):
            definitions.extend(definition)
        else:
            definitions.append(definition)
    entries = pkg_entries(spec=spec, distrib=distrib,
                          distrib_version=distrib_version, pkg=pkg)
    for entry in entries:
        if entry is not None:
            # Bug fix: the old code probed hasattr(entry, 'has_key') /
            # hasattr(entry, 'sort'), Python 2 idioms that are always
            # False for dicts on Python 3, so 'command' entries were
            # silently misrouted; use explicit type checks instead.
            if is_dict(entry):
                if 'command' in entry:
                    by_command.append(entry['command'])
            elif is_list(entry):
                by_installer += entry
            else:
                by_installer.append(entry)
        else:
            # No spec entry: install the package under its own name.
            by_installer.append(pkg)
# ---- emit the output -------------------------------------------------------
begin(distrib=distrib, distrib_version=distrib_version,
      output_mode=output_mode)
installer = get_entry(spec, distrib, distrib_version, wildcard,
                      'installer')
if installer is None:
    # Explicit check instead of assert: asserts are stripped under -O.
    sys.stderr.write('no installer entry found for {0}\n'.format(distrib))
    exit(1)
updater = get_entry(spec, distrib, distrib_version, wildcard, 'updater')
if updater:
    # Prepend the package-index update to every install invocation.
    installer = '{0} && {1}'.format(updater, installer)
if split:
    # One install command per package.
    for pkg in by_installer:
        run(installer=installer,
            pkg=pkg, output_mode=output_mode)
else:
    run(installer=installer,
        pkgs=by_installer, output_mode=output_mode)
for command in by_command:
    run(command=command, output_mode=output_mode)
env(definitions, output_mode)
| 251 | 35 | 115 |
83537dff1b05b68d9e65ac5753b5bce5987f3a53 | 2,821 | py | Python | sdk/python/pulumi_controltower/config/outputs.py | VMGVentures/pulumi-provider-controltower | e6ff7ad77abb71ae6d88e1f59e80ea2fd66913ae | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_controltower/config/outputs.py | VMGVentures/pulumi-provider-controltower | e6ff7ad77abb71ae6d88e1f59e80ea2fd66913ae | [
"ECL-2.0",
"Apache-2.0"
] | 13 | 2022-02-28T18:54:28.000Z | 2022-03-31T13:41:28.000Z | sdk/python/pulumi_awscontroltower/config/outputs.py | jaxxstorm/pulumi-awscontroltower | 1e8af1c6b0add6bb0cbf54b321e4903b954562dc | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AssumeRole',
]
@pulumi.output_type
| 33.583333 | 87 | 0.633463 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AssumeRole',
]
@pulumi.output_type
class AssumeRole(dict):
    """Provider configuration for assuming an AWS IAM role.

    Generated code (Pulumi Terraform Bridge) — do not edit by hand.
    Each keyword argument is stored via pulumi.set only when supplied,
    and exposed through a matching read-only property.
    """
    def __init__(__self__, *,
                 duration_seconds: Optional[int] = None,
                 external_id: Optional[str] = None,
                 policy: Optional[str] = None,
                 policy_arns: Optional[Sequence[str]] = None,
                 role_arn: Optional[str] = None,
                 session_name: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 transitive_tag_keys: Optional[Sequence[str]] = None):
        # Only set keys that were actually provided so the output dict
        # mirrors the user's configuration.
        if duration_seconds is not None:
            pulumi.set(__self__, "duration_seconds", duration_seconds)
        if external_id is not None:
            pulumi.set(__self__, "external_id", external_id)
        if policy is not None:
            pulumi.set(__self__, "policy", policy)
        if policy_arns is not None:
            pulumi.set(__self__, "policy_arns", policy_arns)
        if role_arn is not None:
            pulumi.set(__self__, "role_arn", role_arn)
        if session_name is not None:
            pulumi.set(__self__, "session_name", session_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if transitive_tag_keys is not None:
            pulumi.set(__self__, "transitive_tag_keys", transitive_tag_keys)
    # Read-only accessors; the pulumi.getter names are the camelCase keys
    # used on the wire.
    @property
    @pulumi.getter(name="durationSeconds")
    def duration_seconds(self) -> Optional[int]:
        return pulumi.get(self, "duration_seconds")
    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> Optional[str]:
        return pulumi.get(self, "external_id")
    @property
    @pulumi.getter
    def policy(self) -> Optional[str]:
        return pulumi.get(self, "policy")
    @property
    @pulumi.getter(name="policyArns")
    def policy_arns(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "policy_arns")
    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> Optional[str]:
        return pulumi.get(self, "role_arn")
    @property
    @pulumi.getter(name="sessionName")
    def session_name(self) -> Optional[str]:
        return pulumi.get(self, "session_name")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="transitiveTagKeys")
    def transitive_tag_keys(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "transitive_tag_keys")
| 1,779 | 632 | 22 |
0655a4c447c4ef9a6d1b4fa93c734e3a2ba6774d | 13,555 | py | Python | src/pygamescratch/sprite.py | jetwang/pygame-scratch | acb109437e444f3d628a49b67073a99608e23871 | [
"MIT"
] | 1 | 2021-11-13T14:39:11.000Z | 2021-11-13T14:39:11.000Z | src/pygamescratch/sprite.py | jetwang/pygame-scratch | acb109437e444f3d628a49b67073a99608e23871 | [
"MIT"
] | null | null | null | src/pygamescratch/sprite.py | jetwang/pygame-scratch | acb109437e444f3d628a49b67073a99608e23871 | [
"MIT"
] | null | null | null | import random
from pygamescratch.pygs import *
| 27.948454 | 143 | 0.55699 | import random
from pygamescratch.pygs import *
class Sprite(object):
    """A Scratch-like sprite: an image with position, direction, costumes,
    speech text, a timer and a per-sprite event system, registered in the
    global pygs game loop on creation."""
    def __init__(self, sprite_name, center_x=0, center_y=0):
        """
        Define a sprite object.

        :param sprite_name: sprite name; also the folder (directly, or under
            default_sprite_image_folder) containing this sprite's costume images
        :param center_x: initial x coordinate of the sprite's center
        :param center_y: initial y coordinate of the sprite's center
        """
        pygs._sprites_max_id = pygs._sprites_max_id + 1
        self.id = sprite_name + str(pygs._sprites_max_id)  # unique id: name + counter
        self.sprite_name = sprite_name
        self.size = 100  # percent of the native image size
        self.direction = 0  # degrees; 0 = right, clockwise positive
        self.timer_start = time.perf_counter()
        self.event_watcher = {}  # event name -> list of callbacks
        self.costume = {}  # costume name (file stem) -> image path
        self.text = None  # current speech bubble data, if any
        self.text_end_time = None
        self.showing = True
        sprite_image_name = sprite_name
        if not os.path.exists(sprite_image_name):
            sprite_image_name = pygs.default_sprite_image_folder + sprite_image_name
        for file_name in os.listdir(sprite_image_name):
            file_name_key = os.path.splitext(file_name)[0]
            self.costume[file_name_key] = os.path.join(sprite_image_name, file_name)  # costume name -> image file path
        current_costume = list(self.costume.items())[0]
        self.current_costume_key = current_costume[0]
        self.current_costume_value = current_costume[1]
        self.image = pygame.image.load(self.current_costume_value).convert_alpha()
        self.rect = self.image.get_rect()  # pygame rect (top-left x/y, width, height)
        width = self.rect.width
        height = self.rect.height
        self.rect.x = center_x - width / 2
        self.rect.y = center_y - height / 2
        self.center_x = center_x  # kept as float: pygame rect coords are ints, so sub-pixel moves would be lost
        self.center_y = center_y  # also: rect holds the top-left corner, while angle math uses the center
        self.rotate_angle = 0
        pygs.sprites_in_game[self.id] = self
        self.event(EVENT_SPRITE_CREATED, self)
def move(self, steps):
"""
根据角色的direction(这是一个角度)移动,会根据direction计算出x和y分别移动的像素值
:param steps:
:return:
"""
direction_pi = math.pi * (self.direction / 180) # to π
steps_x = steps * math.cos(direction_pi)
steps_y = steps * math.sin(direction_pi)
self.go_to(self.center_x + steps_x, self.center_y + steps_y)
def turn_right(self, degrees):
"""
向右旋转
:param degrees:
:return:
"""
self.turn(-degrees)
def turn_left(self, degrees):
"""
向左旋转
:param degrees:
:return:
"""
self.turn(degrees)
def go_to(self, new_x, new_y):
"""
移到新的坐标
:param new_x:
:param new_y:
:return:
"""
self.set_x_to(new_x)
self.set_y_to(new_y)
def go_to_random_position(self):
"""
移到窗口内随机位置
:return:
"""
random_x = random.randint(0, pygs.max_x)
random_y = random.randint(0, pygs.max_y)
self.go_to(random_x, random_y)
def go_to_mouse_pointer(self):
"""
移到鼠标所在位置
:return:
"""
self.go_to(pygs.mouse_position[0], pygs.mouse_position[1])
def point(self, direction):
"""
指向特定角度,正右为0度,按照顺时针累加,正上为-90度,正下90度,正左为180度或-180度。
:param direction:
:return:
"""
self.direction = direction
def point_to(self, center_x, center_y):
"""
指向特定坐标
:param center_x:
:param center_y:
:return:
"""
direction_pi = math.atan2(center_y - self.center_y, center_x - self.center_x)
self.direction = (direction_pi * 180) / math.pi
def point_to_sprite(self, target_sprite):
"""
指定特定角色
:param target_sprite:
:return:
"""
self.point_to(target_sprite.center_x, target_sprite.center_y)
def point_towards_mouse_pointer(self):
"""
指向鼠标所在位置
:return:
"""
mouse_x = pygs.mouse_position[0]
mouse_y = pygs.mouse_position[1]
self.point_to(mouse_x, mouse_y)
def change_x_by(self, change_x):
"""
调整x坐标
:param change_x: 要调整的值
:return:
"""
self.center_x = self.center_x + change_x
self._adjust_position()
def set_x_to(self, new_x):
"""
设置x坐标
:param new_x: 要设置的新值
:return:
"""
self.center_x = new_x
self._adjust_position()
def change_y_by(self, change_y):
"""
调整y坐标
:param change_y: 要调整的值
:return:
"""
self.center_y = self.center_y + change_y
self._adjust_position()
def set_y_to(self, new_y):
"""
设置y坐标
:param new_y: 要设置的新值
:return:
"""
self.center_y = new_y
self._adjust_position()
def touching_edge(self):
"""
判断是否在边缘
:return:
"""
if self.rect.x >= pygs.max_x - self.rect.width or self.rect.x <= 0 or self.rect.y >= pygs.max_y - self.rect.height or self.rect.y <= 0:
return True
return False
def bounce_if_on_edge(self):
"""
如果碰到边缘就反弹
:return:
"""
if self.rect.x >= pygs.max_x - self.rect.width:
self.direction = 180 - self.direction
elif self.rect.x <= 0:
self.direction = 180 - self.direction
elif self.rect.y >= pygs.max_y - self.rect.height:
self.direction = - self.direction
elif self.rect.y <= 0:
self.direction = - self.direction
    def _adjust_position(self):
        """Clamp the float center into the window and sync the pygame rect.

        The center is kept within [half-size, max - half-size] on both
        axes so the sprite never leaves the window, then the (integer)
        rect top-left corner is recomputed from the clamped center.
        """
        max_center_x = pygs.max_x - self.rect.width / 2
        max_center_y = pygs.max_y - self.rect.height / 2
        if self.center_x > max_center_x:
            self.center_x = max_center_x
        if self.center_x < self.rect.width / 2:
            self.center_x = self.rect.width / 2
        if self.center_y > max_center_y:
            self.center_y = max_center_y
        if self.center_y < self.rect.height / 2:
            self.center_y = self.rect.height / 2
        self.rect.x = self.center_x - self.rect.width / 2
        self.rect.y = self.center_y - self.rect.height / 2
def flip(self):
"""
翻转
:return:
"""
self.sprite = pygame.transform.flip(self.sprite, True, False)
def turn(self, degrees):
self.rotate_angle += degrees
self.direction = self.direction + degrees
# Looks
def say(self, text_str, size=20, color=(128, 128, 128), bg_color=None):
"""
角色标注,可以在角色旁边显示一段文字
:param text_str: 文字内容
:param size: 字体大小
:param color: 字体颜色
:param bg_color: 字体背景颜色
:return:
"""
self.say_for_seconds(text_str, None, size, color, bg_color)
def say_for_seconds(self, text_str, secs=2, size=20, color=(128, 128, 128), bg_color=None):
"""
角色标注,可以在角色旁边显示一段文字, 若干秒后会消失
:param text_str: 文字内容
:param secs: 存在秒数
:param size: 字体大小
:param color: 字体颜色
:param bg_color: 字体背景颜色
:return:
"""
font = pygame.font.Font(pygs.default_font_name, size)
text_image = font.render(str(text_str), True, color) # ,(128,128,128)
self.text = {"text": text_str, "size": size, "text_image": text_image, "bg_color": bg_color}
if secs is not None:
self.text_end_time = time.perf_counter() + secs
else:
self.text_end_time = None
def switch_costume_to(self, name):
"""
切换造型
:param name: 造型名称(也就是图片去掉扩展名的名称)
:return:
"""
if name != self.current_costume_key:
self.current_costume_key = name
self.current_costume_value = self.costume.get(name)
new_sprite = pygame.image.load(self.current_costume_value).convert_alpha()
self.image = new_sprite
self.set_size_to(self.size)
def next_costume(self):
"""
下一个造型
:return:
"""
keys = list(self.costume.keys())
size = len(keys)
index = keys.index(self.current_costume_key)
if index >= size - 1:
index = 0
else:
index = index + 1
self.switch_costume_to(keys[index])
    def set_size_to(self, num):
        """Scale the sprite to *num* percent (100 = full size, 1 = 1%).

        NOTE(review): the scale is applied to the *current* image surface,
        not a pristine copy, so repeated calls compound quality loss —
        confirm whether that is intended.
        """
        proto_rect = self.image.get_rect()
        width = proto_rect.width
        height = proto_rect.height
        new_width = int(width * (num / 100))
        new_height = int(height * (num / 100))
        self.image = pygame.transform.smoothscale(self.image, (new_width, new_height))
        # Keep the rect centered on the (unchanged) float center point.
        self.rect.width = new_width
        self.rect.height = new_height
        self.rect.x = self.center_x - new_width / 2
        self.rect.y = self.center_y - new_height / 2
        self.size = num
def change_size_by(self, size_by):
"""
调整大小
:param size_by: 调整的数量
:return:
"""
new_size = self.size + size_by
if new_size > 0:
self.set_size_to(new_size)
def show(self):
"""
显示
:return:
"""
self.showing = True
def hide(self):
"""
隐藏
:return:
"""
self.showing = False
    def action(self):
        """Per-frame hook; subclasses override this to animate the sprite.

        For example, call ``self.move(1)`` here to advance one step on
        every frame of the game loop.
        """
        pass
def goto_front_layer(self):
"""
显示在前面
:return:
"""
s = pygs.sprites_in_game[self.id]
del pygs.sprites_in_game[self.id]
pygs.sprites_in_game[self.id] = s
def goto_back_layer(self):
"""
显示在后面
:return:
"""
s = pygs.sprites_in_game[self.id]
del pygs.sprites_in_game[self.id]
new_dict = OrderedDict()
new_dict[self.id] = s
for k, v in list(pygs.sprites_in_game.items()):
new_dict[k] = v
sprites_in_game = new_dict
# Events
def regist_event(self, event_name, func):
"""
监听事件
:param event_name: 事件名称
:param func: 事件发生时,调用的函数
:return:
"""
if event_name in self.event_watcher:
functions = self.event_watcher.get(event_name)
functions.append(func)
else:
self.event_watcher[event_name] = [func]
def when_start(self, func):
"""
监听游戏启动事件
:param func:
:return:
"""
self.regist_event(EVENT_START, func)
def when_key_pressed(self, key_name, func):
"""
监听键盘按住事件
:param key_name: 键名
:param func:
:return:
"""
self.regist_event(pygs._get_key_down_event_name(key_name), func)
def when_key_up(self, key_name, func):
"""
监听键盘松开事件
:param key_name: 键名
:param func:
:return:
"""
self.regist_event(pygs._get_key_up_event_name(key_name), func)
def when_created(self, func):
"""
监听角色创建事件
:param func:
:return:
"""
self.regist_event(EVENT_SPRITE_CREATED, func)
def broadcast(self, event_name):
"""
广播事件
:param event_name:
:return:
"""
pygs.global_event(event_name)
# Sensing
    def get_touching_sprite(self, sprite_name=None):
        """Return the list of other sprites currently touching this one.

        :param sprite_name: when given, only sprites with this name are
            considered; None matches every sprite.
        :return: list of touching Sprite objects (never includes self).
        """
        sprites = []
        for sprite in list(pygs.sprites_in_game.values()):
            if sprite.id != self.id:
                if sprite_name is None or sprite.sprite_name == sprite_name:
                    # Cheap bounding-box test first, then the precise
                    # pixel-mask collision.
                    if pygame.Rect.colliderect(self.rect, sprite.rect) and pygame.sprite.collide_mask(self, sprite):
                        sprites.append(sprite)
        return sprites
def get_closest_sprite_by_name(self, sprite_name):
"""
获取最近的特定角色
:param sprite_name: 角色名称
:return:
"""
sprites = pygs.get_sprites_by_name(sprite_name)
return self.get_closest_sprite(sprites)
def get_closest_sprite(self, sprites):
"""
从角色列表中找出离自己最近的
:param sprites: 角色列表
:return:
"""
min_distance = 9999
closest_sprite = None
self_point = (self.center_x, self.center_y)
for sprite in sprites:
distance = pygs.get_distance(self_point, (sprite.center_x, sprite.center_y))
if min_distance > distance:
min_distance = distance
closest_sprite = sprite
return closest_sprite
def reset_timer(self):
"""
重置定时器
:return:
"""
self.timer_start = time.perf_counter()
def timer(self):
"""
上次定时后到目前的秒数
:return:
"""
return time.perf_counter() - self.timer_start
def event(self, event_name, *args, **kwargs):
"""
触发事件
:param event_name:
:param args:
:param kwargs:
:return:
"""
if event_name in self.event_watcher:
functions = self.event_watcher.get(event_name)
for func in functions:
func(*args, **kwargs)
def delete(self):
"""
删除自己
:return:
"""
self.hide()
if self.id in pygs.sprites_in_game.keys():
del pygs.sprites_in_game[self.id]
pygs.delete_delay_function_by_object(self)
| 684 | 14,048 | 23 |
78a38fa2f190b597d2a9d9f2355b5ecb608c896f | 6,287 | py | Python | cdk/python/sample/more.py | bilardi/aws-tool-comparison | 0baf265be2e4690f966047a10b5e9ca2107ed7b8 | [
"MIT"
] | null | null | null | cdk/python/sample/more.py | bilardi/aws-tool-comparison | 0baf265be2e4690f966047a10b5e9ca2107ed7b8 | [
"MIT"
] | null | null | null | cdk/python/sample/more.py | bilardi/aws-tool-comparison | 0baf265be2e4690f966047a10b5e9ca2107ed7b8 | [
"MIT"
] | null | null | null | """The class for managing more environment
The class requires the follow properties:
'id' (str): the suffix name of resource created
'ec2_params' (dict): the dictionary of the EC2 custom parameters
'lambda_params' (dict): the dictionary of the Lambda custom parameters
All properties are mandatory. See the unit tests for an example.
The class extendes the class named Basic.
# license MIT
# support https://github.com/bilardi/aws-simple-pipeline/issues
"""
from aws_cdk import (core, aws_ec2 as ec2,
aws_cloudwatch as cloudwatch)
from sample.basic import Basic
import json | 41.091503 | 112 | 0.60299 | """The class for managing more environment
The class requires the following properties:
'id' (str): the suffix name of resource created
'ec2_params' (dict): the dictionary of the EC2 custom parameters
'lambda_params' (dict): the dictionary of the Lambda custom parameters
All properties are mandatory. See the unit tests for an example.
The class extendes the class named Basic.
# license MIT
# support https://github.com/bilardi/aws-simple-pipeline/issues
"""
from aws_cdk import (core, aws_ec2 as ec2,
aws_cloudwatch as cloudwatch)
from sample.basic import Basic
import json
class More(Basic):
    """CDK stack extending Basic with CloudWatch alarms and dashboards."""
    # Alarms are kept as attributes so subclasses/tests can attach actions.
    ec2_alarm = None
    lambda_alarm = None
    def __init__(self, scope: core.Construct, id: str, ec2_params: dict, lambda_params: dict, **kwargs) -> None:
        """
        deploys all AWS resources for more environment
        Resources:
            AWS::EC2::Instance with your details
            AWS::Lambda::Function with your policies
            AWS::Cloudwatch::Alarm for EC2 and Lambda
            AWS::Cloudwatch::Dashboard for EC2 and Lambda
        """
        super().__init__(scope, id, ec2_params, lambda_params, **kwargs)
        # ec2 — alarm + dashboard keyed on the EC2 resource name from Basic
        ec2_params['name'] = self.ec2_name
        self.ec2_alarm = self.get_alarm(ec2_params)
        ec2_dashboard = self.get_dashboard(ec2_params)
        # lambda — same pair for the Lambda function
        lambda_params['name'] = self.lambda_name
        self.lambda_alarm = self.get_alarm(lambda_params)
        lambda_dashboard = self.get_dashboard(lambda_params)
    def get_vpc(self, ec2_params):
        """Resolve the VPC: explicit object > lookup by id > default VPC."""
        vpc = None
        if 'vpc' in ec2_params:
            vpc = ec2_params['vpc']
        else:
            if 'vpc_id' in ec2_params and ec2_params['vpc_id']:
                vpc = ec2.Vpc.from_lookup(self, "vpc", vpc_id=ec2_params['vpc_id'])
            else:
                vpc = ec2.Vpc.from_lookup(self, "vpc", is_default=True)
        return vpc
    def get_security_group(self, ec2_params):
        """Resolve or create the security group for the EC2 instance.

        Precedence: explicit object > import by id (immutable) > new group
        with one UDP ingress rule over ec2_params['from_port'..'to_port'].
        NOTE(review): the ingress rule is open to any IPv4 source.
        """
        security_group = None
        if 'security_group' in ec2_params:
            security_group = ec2_params['security_group']
        else:
            if 'security_group_id' in ec2_params and ec2_params['security_group_id']:
                security_group = ec2.SecurityGroup.from_security_group_id(self, "SecurityGroup",
                    security_group_id=ec2_params['security_group_id'],
                    mutable=False
                )
            else:
                security_group = ec2.SecurityGroup(self, "SecurityGroup",
                    vpc=ec2_params['vpc']
                )
                security_group.add_ingress_rule(
                    peer=ec2.Peer.any_ipv4(),
                    connection=ec2.Port(
                        string_representation="sr",
                        protocol=ec2.Protocol("UDP"),
                        from_port=ec2_params['from_port'],
                        to_port=ec2_params['to_port']
                    )
                )
        return security_group
    def get_block_device(self, params):
        """Build the EBS block device mapping from the params dict."""
        volume = ec2.BlockDeviceVolume.ebs(
            delete_on_termination=params['delete_on_termination'],
            volume_size=params['volume_size'],
            volume_type=ec2.EbsDeviceVolumeType(params['volume_type'])
        )
        block_device = ec2.BlockDevice(
            device_name=params['device_name'],
            volume=volume
        )
        return block_device
    def get_instance(self, ec2_params):
        """Create the EC2 instance described by ec2_params.

        Fills in missing 'vpc'/'security_group' entries in-place, attaches
        the block device, user data commands and tags.
        """
        if 'vpc' not in ec2_params:
            ec2_params['vpc'] = self.get_vpc(ec2_params)
        if 'security_group' not in ec2_params:
            ec2_params['security_group'] = self.get_security_group(ec2_params)
        block_device = self.get_block_device(ec2_params)
        ec2_instance = ec2.Instance(self, self.ec2_name,
            machine_image=ec2.MachineImage.generic_linux(
                ami_map={ec2_params['region']:ec2_params['ami_id']}
            ),
            vpc=ec2_params['vpc'],
            security_group=ec2_params['security_group'],
            instance_type=ec2.InstanceType(ec2_params['instance_type']),
            key_name=ec2_params['key_name'],
            block_devices=[block_device]
        )
        ec2_instance.user_data.add_commands(ec2_params['user_data'])
        self.add_tags(self.ec2_name, ec2_instance, ec2_params['tags'])
        return ec2_instance
    def get_alarm(self, params):
        """Create a >=-threshold CloudWatch alarm on the metric in params.

        params['description'] is a format string receiving the resource
        name and the account id.
        """
        description = params['description'].format(params['name'], self.account)
        metric = cloudwatch.Metric(
            metric_name=params['metric_name'],
            namespace=params['namespace'],
            dimensions={params['dimension']:params['name']}
        )
        alarm = cloudwatch.Alarm(self, "{}Alarm".format(params['name']),
            alarm_description=description,
            alarm_name=description,
            comparison_operator=cloudwatch.ComparisonOperator('GREATER_THAN_OR_EQUAL_TO_THRESHOLD'),
            metric=metric,
            evaluation_periods=params['evaluation_periods'],
            period=core.Duration.seconds(params['period']),
            statistic=params['statistic'],
            threshold=params['threshold'],
            treat_missing_data=cloudwatch.TreatMissingData('MISSING')
        )
        return alarm
    def get_dashboard(self, params):
        """Create a CloudWatch dashboard from the widget layout JSON file.

        NOTE(review): only the first metric of each widget
        (widget['properties']['metrics'][0]) is rendered — confirm that
        multi-metric widgets are not expected.
        """
        with open(params['dashboard_file']) as json_file:
            params['dashboard_widgets'] = json.load(json_file)
        graph_widgets = []
        for widget in params['dashboard_widgets']:
            metric = [cloudwatch.Metric(
                namespace=widget['properties']['metrics'][0][0],
                metric_name=widget['properties']['metrics'][0][1],
                dimensions={widget['properties']['metrics'][0][2]: params['name']}
            )]
            graph_widget = cloudwatch.GraphWidget(
                height=widget['height'],
                width=widget['width'],
                left=metric
            )
            graph_widget.position(widget['x'], widget['y'])
            graph_widgets.append(graph_widget)
        dashboard = cloudwatch.Dashboard(self, "{}Dashboard".format(params['name']),
            dashboard_name=params['name'],
            widgets=[graph_widgets]
        )
        return dashboard
e4168a690bb9436fa0955ac0ac0951214d2f625a | 7,188 | py | Python | nbgrader/apps/listapp.py | ellisonbg/nbgrader | 038d381d82925ae60a71596e29bbd867c85afe7f | [
"BSD-3-Clause-Clear"
] | null | null | null | nbgrader/apps/listapp.py | ellisonbg/nbgrader | 038d381d82925ae60a71596e29bbd867c85afe7f | [
"BSD-3-Clause-Clear"
] | null | null | null | nbgrader/apps/listapp.py | ellisonbg/nbgrader | 038d381d82925ae60a71596e29bbd867c85afe7f | [
"BSD-3-Clause-Clear"
] | null | null | null | import os
import glob
import shutil
import re
import json
from traitlets import Bool
from .baseapp import TransferApp, transfer_aliases, transfer_flags
aliases = {}
aliases.update(transfer_aliases)
aliases.update({
})
flags = {}
flags.update(transfer_flags)
flags.update({
'inbound': (
{'ListApp' : {'inbound': True}},
"List inbound files rather than outbound."
),
'cached': (
{'ListApp' : {'cached': True}},
"List cached files rather than inbound/outbound."
),
'remove': (
{'ListApp' : {'remove': True}},
"Remove an assignment from the exchange."
),
'json': (
{'ListApp' : {'as_json': True}},
"Print out assignments as json."
),
})
| 32.378378 | 126 | 0.586394 | import os
import glob
import shutil
import re
import json
from traitlets import Bool
from .baseapp import TransferApp, transfer_aliases, transfer_flags
aliases = {}
aliases.update(transfer_aliases)
aliases.update({
})
flags = {}
flags.update(transfer_flags)
flags.update({
'inbound': (
{'ListApp' : {'inbound': True}},
"List inbound files rather than outbound."
),
'cached': (
{'ListApp' : {'cached': True}},
"List cached files rather than inbound/outbound."
),
'remove': (
{'ListApp' : {'remove': True}},
"Remove an assignment from the exchange."
),
'json': (
{'ListApp' : {'as_json': True}},
"Print out assignments as json."
),
})
class ListApp(TransferApp):
    """nbgrader app that lists (and optionally removes) exchange assignments."""
    name = u'nbgrader-list'
    description = u'List assignments in the nbgrader exchange'
    aliases = aliases
    flags = flags
    examples = """
    List assignments in the nbgrader exchange. For the usage of instructors
    and students.
    Students
    ========
    To list assignments for a course, you must first know the `course_id` for
    your course. If you don't know it, ask your instructor.
    To list the released assignments for the course `phys101`:
        nbgrader list phys101
    Instructors
    ===========
    To list outbound (released) or inbound (submitted) assignments for a course,
    you must configure the `course_id` in your config file or the command line.
    To see all of the released assignments, run
        nbgrader list # course_id in the config file
    or
        nbgrader list --course phys101 # course_id provided
    To see the inbound (submitted) assignments:
        nbgrader list --inbound
    You can use the `--student` and `--assignment` options to filter the list
    by student or assignment:
        nbgrader list --inbound --student=student1 --assignment=assignment1
    If a student has submitted an assignment multiple times, the `list` command
    will show all submissions with their timestamps.
    The `list` command can optionally remove listed assignments by providing the
    `--remove` flag:
        nbgrader list --inbound --remove --student=student1
    """
    inbound = Bool(False, config=True, help="List inbound files rather than outbound.")
    cached = Bool(False, config=True, help="List assignments in submission cache.")
    remove = Bool(False, config=True, help="Remove, rather than list files.")
    as_json = Bool(False, config=True, help="Print out assignments as json")
    def init_src(self):
        # Listing has no source directory to prepare.
        pass
    def init_dest(self):
        """Build the glob pattern for the requested listing and resolve it
        into self.assignments (sorted paths). Unset ids act as wildcards."""
        course_id = self.course_id if self.course_id else '*'
        assignment_id = self.assignment_id if self.assignment_id else '*'
        student_id = self.student_id if self.student_id else '*'
        if self.inbound:
            pattern = os.path.join(self.exchange_directory, course_id, 'inbound', '{}+{}+*'.format(student_id, assignment_id))
        elif self.cached:
            pattern = os.path.join(self.cache_directory, course_id, '{}+{}+*'.format(student_id, assignment_id))
        else:
            pattern = os.path.join(self.exchange_directory, course_id, 'outbound', '{}'.format(assignment_id))
        self.assignments = sorted(glob.glob(pattern))
    def parse_assignment(self, assignment):
        """Split an exchange path into its course/student/assignment parts.

        :return: dict of the named regexp groups
        :raises RuntimeError: when the path does not match the expected layout
        """
        if self.inbound:
            regexp = r".*/(?P<course_id>.*)/inbound/(?P<student_id>.*)\+(?P<assignment_id>.*)\+(?P<timestamp>.*)"
        elif self.cached:
            regexp = r".*/(?P<course_id>.*)/(?P<student_id>.*)\+(?P<assignment_id>.*)\+(?P<timestamp>.*)"
        else:
            regexp = r".*/(?P<course_id>.*)/outbound/(?P<assignment_id>.*)"
        m = re.match(regexp, assignment)
        if m is None:
            # Bug fix: the message was passed logging-style
            # (RuntimeError("...%s...", a, b)), so the placeholders were
            # never interpolated; format the message explicitly.
            raise RuntimeError("Could not match '%s' with regexp '%s'" % (assignment, regexp))
        return m.groupdict()
    def format_inbound_assignment(self, info):
        """One-line human-readable summary of a submitted assignment."""
        return "{course_id} {student_id} {assignment_id} {timestamp}".format(**info)
    def format_outbound_assignment(self, info):
        """One-line human-readable summary of a released assignment."""
        msg = "{course_id} {assignment_id}".format(**info)
        if os.path.exists(info['assignment_id']):
            msg += " (already downloaded)"
        return msg
    def copy_files(self):
        # Listing never copies anything; required by the TransferApp API.
        pass
    def parse_assignments(self):
        """Return a list of info dicts (status, path, notebooks, ...)
        for every assignment matched by init_dest()."""
        assignments = []
        for path in self.assignments:
            info = self.parse_assignment(path)
            if self.path_includes_course:
                root = os.path.join(info['course_id'], info['assignment_id'])
            else:
                root = info['assignment_id']
            if self.inbound or self.cached:
                info['status'] = 'submitted'
                info['path'] = path
            elif os.path.exists(root):
                # Released assignment already fetched to the local directory.
                info['status'] = 'fetched'
                info['path'] = os.path.abspath(root)
            else:
                info['status'] = 'released'
                info['path'] = path
            if self.remove:
                info['status'] = 'removed'
            info['notebooks'] = []
            for notebook in sorted(glob.glob(os.path.join(info['path'], '*.ipynb'))):
                info['notebooks'].append({
                    'notebook_id': os.path.splitext(os.path.split(notebook)[1])[0],
                    'path': os.path.abspath(notebook)
                })
            assignments.append(info)
        return assignments
    def list_files(self):
        """List files."""
        assignments = self.parse_assignments()
        if self.as_json:
            print(json.dumps(assignments))
        else:
            if self.inbound or self.cached:
                self.log.info("Submitted assignments:")
                for info in assignments:
                    self.log.info(self.format_inbound_assignment(info))
            else:
                self.log.info("Released assignments:")
                for info in assignments:
                    self.log.info(self.format_outbound_assignment(info))
    def remove_files(self):
        """List and remove files."""
        assignments = self.parse_assignments()
        if self.as_json:
            print(json.dumps(assignments))
        else:
            if self.inbound or self.cached:
                self.log.info("Removing submitted assignments:")
                for info in assignments:
                    self.log.info(self.format_inbound_assignment(info))
            else:
                self.log.info("Removing released assignments:")
                for info in assignments:
                    self.log.info(self.format_outbound_assignment(info))
        for assignment in self.assignments:
            shutil.rmtree(assignment)
    def start(self):
        """Validate flags, run the transfer machinery, then list or remove."""
        if self.inbound and self.cached:
            self.fail("Options --inbound and --cached are incompatible.")
        if len(self.extra_args) == 0:
            self.extra_args = ["*"]  # allow user to not put in assignment
        super(ListApp, self).start()
        if self.remove:
            self.remove_files()
        else:
            self.list_files()
| 3,040 | 3,391 | 23 |
b7c49394c6adc665a2f9cfffd797b04259b5eb28 | 2,311 | py | Python | bot.py | tusharmverma/bot_for_twitch | 4bf41ff741d272570c717cf41652514e48ba4942 | [
"MIT"
] | null | null | null | bot.py | tusharmverma/bot_for_twitch | 4bf41ff741d272570c717cf41652514e48ba4942 | [
"MIT"
] | null | null | null | bot.py | tusharmverma/bot_for_twitch | 4bf41ff741d272570c717cf41652514e48ba4942 | [
"MIT"
] | null | null | null | import socket
import re
import yaml
from time import sleep
config = yaml.safe_load(open('config.yml', 'rb'))
HOST = config['HOST']
PORT = config['PORT']
NICK = config['NICK']
PASS = config['PASS']
class Bot(object):
""""""
| 30.012987 | 81 | 0.566854 | import socket
import re
import yaml
from time import sleep
# Twitch IRC connection settings, loaded once at import time.
# Bug fix: the file handle returned by open() was never closed; the
# context manager closes it deterministically.
with open('config.yml', 'rb') as _config_file:
    config = yaml.safe_load(_config_file)
HOST = config['HOST']
PORT = config['PORT']
NICK = config['NICK']
PASS = config['PASS']
class Bot(object):
    """Minimal Twitch IRC chat bot.

    Connects to the Twitch IRC server on construction and joins
    *channel*. Subclasses implement action() to react to chat messages;
    run() polls the socket forever, throttled to *n_msg_per_sec*
    iterations per second.
    """
    def __init__(self, channel, n_msg_per_sec=100):
        super(Bot, self).__init__()
        self._nickname = NICK
        self.channel = channel
        self.connect(channel)
        # print(NICK, channel, '\n', '-' * (len(NICK + channel) + 1))
        print("{} {}\n{}".format(NICK, channel, '-' * (len(NICK + channel) + 1)))
        self._msg_count = 0
        self.n_msg_per_sec = n_msg_per_sec
    def connect(self, channel):
        """Open the IRC socket, authenticate with PASS/NICK and join *channel*."""
        self._socket = socket.socket()
        self._socket.connect((HOST, PORT))
        self._socket.send("PASS {}\r\n".format(PASS).encode("utf-8"))
        self._socket.send("NICK {}\r\n".format(NICK).encode("utf-8"))
        self._socket.send("JOIN {}\r\n".format(channel).encode("utf-8"))
    def chat(self, msg):
        """Send the chat message *msg* to the joined channel.

        Bug fix: the payload is now encoded to bytes like every other
        send in this class; the original passed a str, which raises
        TypeError on Python 3 sockets.
        """
        self._socket.send("PRIVMSG {} :{}\r\n".format(self.channel, msg).encode("utf-8"))
    def _ping_pong(self, response):
        """Answer a server PING; return True when *response* was a PING."""
        if response == "PING :tmi.twitch.tv\r\n":
            # send pong back to prevent timeout
            self._socket.send("PONG :tmi.twitch.tv\r\n".encode("utf-8"))
            return True
        else:
            return False
    def _get_response(self):
        """Read one chunk from the socket.

        Returns the decoded text for ordinary chat traffic, or False for
        PINGs, server notices and undecodable bytes.
        """
        try:
            response = self._socket.recv(1024).decode("utf-8")
        except UnicodeDecodeError as e:
            print('\n\n%s\n\n' % e)
            return False
        if self._ping_pong(response):
            return False
        elif ':tmi.twitch.tv' in response:
            return False
        else:
            return response
    def _process_msg(self, response):
        """Extract (username, message) from a raw PRIVMSG line."""
        username = re.search(r"\w+", response).group(0)
        mask = re.compile(r"^:\w+!\w+@\w+\.tmi\.twitch\.tv PRIVMSG #\w+ :")
        message = mask.sub("", response).strip('\r\n')
        return username, message
    def action(self, username, msg):
        """React to *msg* sent by *username*; subclasses must override.

        Bug fix: the original *returned* NotImplementedError() instead of
        raising it, so a missing override went completely unnoticed.
        """
        raise NotImplementedError()
    def run(self):
        """Poll the socket forever, dispatching chat messages to action()."""
        while True:
            response = self._get_response()
            if response:
                username, msg = self._process_msg(response)
                self.action(username, msg)
            sleep(1 / float(self.n_msg_per_sec))
| 1,864 | 0 | 215 |
ab05609a631f784c6cf8c377f350401d4e824216 | 585 | py | Python | setup.py | Aratz/arteria-runfolder | 1b77bc1995443fc8394d7dfe24f5b8d26240f8a2 | [
"MIT"
] | 5 | 2016-06-10T04:03:57.000Z | 2018-10-10T14:31:45.000Z | setup.py | Aratz/arteria-runfolder | 1b77bc1995443fc8394d7dfe24f5b8d26240f8a2 | [
"MIT"
] | 16 | 2015-08-27T12:55:14.000Z | 2022-01-25T12:47:11.000Z | setup.py | Aratz/arteria-runfolder | 1b77bc1995443fc8394d7dfe24f5b8d26240f8a2 | [
"MIT"
] | 12 | 2015-08-24T14:53:58.000Z | 2022-01-21T16:09:56.000Z | from setuptools import setup, find_packages
from runfolder import __version__
import os
setup(
name='runfolder',
version=__version__,
description="Microservice for managing runfolders",
long_description=read_file('README.md'),
keywords='bioinformatics',
author='SNP&SEQ Technology Platform, Uppsala University',
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': ['runfolder-ws = runfolder.app:start']
}
)
| 27.857143 | 70 | 0.726496 | from setuptools import setup, find_packages
from runfolder import __version__
import os
def read_file(fname):
    """Return the text of *fname*, resolved relative to this file's directory.

    Fix: close the file deterministically with a context manager instead of
    leaking the handle to the garbage collector.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as handle:
        return handle.read()
# Package metadata; the console_scripts entry point installs `runfolder-ws`,
# which starts the service via runfolder.app:start.
setup(
    name='runfolder',
    version=__version__,
    description="Microservice for managing runfolders",
    # Reuse the repository README as the long description.
    long_description=read_file('README.md'),
    keywords='bioinformatics',
    author='SNP&SEQ Technology Platform, Uppsala University',
    packages=find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': ['runfolder-ws = runfolder.app:start']
    }
)
| 71 | 0 | 23 |
5909b0006168cddba964eb1409d2370a6aa4977f | 48 | py | Python | django_jekyll/__init__.py | steinbachr/django-jekyll | 91ec91d20b68d36d7ebdf2894ac8cbc6ab5f434e | [
"MIT"
] | 2 | 2017-04-30T11:13:13.000Z | 2017-04-30T14:32:10.000Z | django_jekyll/__init__.py | MeanPug/django-jekyll | 91ec91d20b68d36d7ebdf2894ac8cbc6ab5f434e | [
"MIT"
] | 2 | 2017-04-30T17:59:57.000Z | 2017-04-30T18:02:13.000Z | django_jekyll/__init__.py | steinbachr/django-jekyll | 91ec91d20b68d36d7ebdf2894ac8cbc6ab5f434e | [
"MIT"
] | 2 | 2017-06-28T12:49:09.000Z | 2018-09-21T20:53:04.000Z | from django_jekyll.lib.configparse import config | 48 | 48 | 0.895833 | from django_jekyll.lib.configparse import config | 0 | 0 | 0 |
bc9ce8b80cc8609b5751135948e4cc3e637373e2 | 2,678 | py | Python | jazzband/admin.py | tipabu/jazzband-website | 30102e87348924eb56b610e74609a3475d3a14de | [
"MIT"
] | null | null | null | jazzband/admin.py | tipabu/jazzband-website | 30102e87348924eb56b610e74609a3475d3a14de | [
"MIT"
] | null | null | null | jazzband/admin.py | tipabu/jazzband-website | 30102e87348924eb56b610e74609a3475d3a14de | [
"MIT"
] | null | null | null | from flask import redirect, url_for, request, session
from flask_admin import Admin, AdminIndexView, expose
from flask_admin.contrib import sqla
from flask_login import current_user
from .auth import current_user_is_roadie
from .db import postgres
from .account.models import OAuth
from .members.models import User, EmailAddress
from .projects.models import (
Project,
ProjectCredential,
ProjectUpload,
ProjectMembership,
)
| 27.895833 | 75 | 0.684093 | from flask import redirect, url_for, request, session
from flask_admin import Admin, AdminIndexView, expose
from flask_admin.contrib import sqla
from flask_login import current_user
from .auth import current_user_is_roadie
from .db import postgres
from .account.models import OAuth
from .members.models import User, EmailAddress
from .projects.models import (
Project,
ProjectCredential,
ProjectUpload,
ProjectMembership,
)
class JazzbandModelView(sqla.ModelView):
    """Base SQLAlchemy admin view restricted to roadies (users with is_roadie)."""
    def is_accessible(self):
        # Gate every admin model view behind the roadie check from .auth.
        return current_user_is_roadie()
    def inaccessible_callback(self, name, **kwargs):
        # redirect to login page if user doesn't have access
        session["next_url"] = request.url
        return redirect(url_for("github.login"))
class JazzbandAdminIndexView(AdminIndexView):
    """Admin landing page that forces a GitHub login before rendering."""
    @expose("/")
    def index(self):
        if not current_user.is_authenticated:
            # Send anonymous visitors through the GitHub OAuth flow first,
            # returning them here afterwards via the `next` parameter.
            return redirect(url_for("github.login", next=request.url))
        return super().index()
class UserAdmin(JazzbandModelView):
    """Admin view for users: searchable by login, filterable by status/consent flags."""
    column_searchable_list = ("login",)
    column_filters = (
        "is_member",
        "is_roadie",
        "is_banned",
        "is_restricted",
        "has_2fa",
        "joined_at",
        "left_at",
        "consented_at",
        "profile_consent",
        "org_consent",
        "cookies_consent",
        "age_consent",
    )
    # Edit related OAuth tokens, email addresses and memberships inline.
    inline_models = (OAuth, EmailAddress, ProjectMembership)
class OAuthAdmin(JazzbandModelView):
    """Admin view for stored OAuth tokens."""
    column_searchable_list = ("token", "user_id")
    column_filters = ("created_at", "provider")
class EmailAddressAdmin(JazzbandModelView):
    """Admin view for user email addresses."""
    column_searchable_list = ("email",)
    column_filters = ("verified", "primary")
class ProjectAdmin(JazzbandModelView):
    """Admin view for projects, with inline credentials, uploads and members."""
    column_searchable_list = ("name", "description")
    column_filters = ("is_active", "created_at", "updated_at", "pushed_at")
    inline_models = (ProjectCredential, ProjectUpload, ProjectMembership)
class ProjectUploadAdmin(JazzbandModelView):
    """Admin view for package uploads, searchable by filename and digest."""
    column_searchable_list = ("filename", "sha256_digest")
    column_filters = ("uploaded_at", "released_at")
def init_app(app):
    """Attach the Flask-Admin interface to *app* and register all model views."""
    admin_site = Admin(
        app,
        name="jazzband",
        template_mode="bootstrap3",
        index_view=JazzbandAdminIndexView(),
    )
    # (model, view class) pairs; models without a dedicated view use the base.
    registrations = (
        (User, UserAdmin),
        (OAuth, OAuthAdmin),
        (EmailAddress, EmailAddressAdmin),
        (Project, ProjectAdmin),
        (ProjectMembership, JazzbandModelView),
        (ProjectUpload, ProjectUploadAdmin),
        (ProjectCredential, JazzbandModelView),
    )
    for model, view_cls in registrations:
        admin_site.add_view(view_cls(model, postgres.session))
| 917 | 1,075 | 237 |
c7a999c40576c5ff6e73081dfe8a7e715790c23f | 84 | py | Python | Python-Django-2016/Python/Clase3/exceptions.py | juandc/platzi-courses | 27443bdb40a35100b9c839e2d598bc3cf4e43032 | [
"MIT"
] | 3 | 2017-09-14T23:14:21.000Z | 2018-03-22T20:07:37.000Z | Python-Django-2016/Python/Clase3/exceptions.py | juandc/platzi-courses | 27443bdb40a35100b9c839e2d598bc3cf4e43032 | [
"MIT"
] | null | null | null | Python-Django-2016/Python/Clase3/exceptions.py | juandc/platzi-courses | 27443bdb40a35100b9c839e2d598bc3cf4e43032 | [
"MIT"
] | null | null | null | try:
6/0
except Exception as e:
print "%s" % e
raise e
print "Despues"
| 10.5 | 22 | 0.571429 | try:
6/0
except Exception as e:
print "%s" % e
raise e
print "Despues"
| 0 | 0 | 0 |
06cd33af3283f896360712c3372c2c52913a5d19 | 786 | py | Python | Serveur.py | Divulgacheur/ScanIP | 99eb1113c3d51fb115e074f67aa690ba0a8bdf39 | [
"MIT"
] | null | null | null | Serveur.py | Divulgacheur/ScanIP | 99eb1113c3d51fb115e074f67aa690ba0a8bdf39 | [
"MIT"
] | null | null | null | Serveur.py | Divulgacheur/ScanIP | 99eb1113c3d51fb115e074f67aa690ba0a8bdf39 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import socket
import platform
import getpass
import psutil | 22.457143 | 137 | 0.71883 | # -*- coding: utf-8 -*-
import socket
import platform
import getpass
import psutil
def lanceServeur(Nport):
    """Open a TCP server socket listening on all interfaces at port *Nport*.

    NOTE(review): servv() below reads `port` and `connexion_principale`,
    which are locals of this function -- the two were presumably nested in
    the original source; verify the indentation before relying on this.
    """
    hote = ''
    port = int(Nport)
    connexion_principale = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connexion_principale.bind((hote, port))
    connexion_principale.listen(5)
def servv():
    """Accept one client, send it host information, then close the connection.

    NOTE(review): as written this references `port` and
    `connexion_principale`, which are locals of lanceServeur() above, so it
    would raise NameError at top level -- likely an indentation artifact.
    """
    print("Le serveur écoute à présent sur le port {}".format(port))
    connexion_avec_client, infos_connexion = connexion_principale.accept()
    infotest=[]
    # Loop rebinds infotest to each process; it ends as the last process
    # psutil reports and is never used afterwards.
    for proc in psutil.process_iter():
        infotest=proc
    r = ("Serveur "+connexion_avec_client.getsockname()[0]+' '+"OS : "+platform.platform()+' '+"Utilisateur : "+getpass.getuser()).encode()
    connexion_avec_client.send(r)
    print("Fermeture de la connexion")
    connexion_avec_client.close()
while True:
servv() | 683 | 0 | 23 |
b1e413cf03e0068f368e65c76488677a9ace07c7 | 1,421 | py | Python | aca/tests/test_client.py | didx-xyz/yoma-oidc-bridge | 7e3ff6ab3ea4fed01cd7d4c113c7c3b3244356eb | [
"Apache-2.0"
] | 4 | 2020-10-08T07:36:22.000Z | 2022-02-04T12:31:31.000Z | aca/tests/test_client.py | didx-xyz/yoma-oidc-bridge | 7e3ff6ab3ea4fed01cd7d4c113c7c3b3244356eb | [
"Apache-2.0"
] | 1 | 2021-04-10T08:32:42.000Z | 2021-04-10T08:32:42.000Z | aca/tests/test_client.py | didx-xyz/yoma-oidc-bridge | 7e3ff6ab3ea4fed01cd7d4c113c7c3b3244356eb | [
"Apache-2.0"
] | 7 | 2021-02-10T16:04:29.000Z | 2022-03-16T08:04:48.000Z | from aca.client import ACAClient
| 35.525 | 88 | 0.655172 | from aca.client import ACAClient
class TestAcaClient:
    """Unit tests for ACAClient, driven through the requests_mock fixture."""

    url = "http://127.0.0.1"
    client = ACAClient(url, url, "token")

    def test_create_proof_request(self, requests_mock):
        """The raw JSON body of the create-request endpoint is returned as-is."""
        payload = {"result": 0}
        requests_mock.post(f"{self.url}/present-proof/create-request", json=payload)
        assert self.client.create_proof_request({}) == payload

    def test_get_public_did(self, requests_mock):
        """Only the `result` field of the wallet DID response is returned."""
        payload = {"result": 0}
        requests_mock.get(f"{self.url}/wallet/did/public", json=payload)
        assert self.client.get_public_did() == 0

    def test_get_credential_definition(self, requests_mock):
        """The nested `credential_definition` object is unwrapped."""
        payload = {"definition": 0}
        requests_mock.get(
            f"{self.url}/credential-definitions/some_def_id",
            json={"credential_definition": payload},
        )
        assert self.client.get_credential_definition("some_def_id") == payload

    def test_get_schema(self, requests_mock):
        """The nested `schema_json` object is unwrapped."""
        payload = {"schema": 0}
        requests_mock.get(
            f"{self.url}/schemas/some_schema_id", json={"schema_json": payload}
        )
        assert self.client.get_schema("some_schema_id") == payload

    def test_get_endpoint_url(self):
        """The endpoint URL is the one the client was constructed with."""
        assert self.client.get_endpoint_url() == self.url
| 1,159 | 205 | 23 |
72ca50e072d353c8adb6b6f504e93496b765e4e5 | 3,146 | py | Python | tests/hamcrest_unit_test/core/allof_test.py | rbalint/PyHamcrest | 713aa08e313dba997fd8e4b7e0d3d599a72bdd72 | [
"BSD-3-Clause"
] | null | null | null | tests/hamcrest_unit_test/core/allof_test.py | rbalint/PyHamcrest | 713aa08e313dba997fd8e4b7e0d3d599a72bdd72 | [
"BSD-3-Clause"
] | null | null | null | tests/hamcrest_unit_test/core/allof_test.py | rbalint/PyHamcrest | 713aa08e313dba997fd8e4b7e0d3d599a72bdd72 | [
"BSD-3-Clause"
] | null | null | null | if __name__ == "__main__":
import sys
sys.path.insert(0, "..")
sys.path.insert(0, "../..")
import unittest
from hamcrest.core.core.allof import *
from hamcrest.core.core.isequal import equal_to
from hamcrest_unit_test.matcher_test import MatcherTest
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
if __name__ == "__main__":
unittest.main()
| 32.43299 | 96 | 0.595677 | if __name__ == "__main__":
import sys
sys.path.insert(0, "..")
sys.path.insert(0, "../..")
import unittest
from hamcrest.core.core.allof import *
from hamcrest.core.core.isequal import equal_to
from hamcrest_unit_test.matcher_test import MatcherTest
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
class AllOfTest(MatcherTest):
    """Tests for the all_of/AllOf matcher: match semantics and descriptions."""
    def testMatchesIfArgumentSatisfiesBothOfTwoOtherMatchers(self):
        self.assert_matches("both matchers", all_of(equal_to("good"), equal_to("good")), "good")
    def testProvidesConvenientShortcutForMatchingWithEqualTo(self):
        # Bare values are implicitly wrapped in equal_to().
        self.assert_matches("both matchers", all_of("good", "good"), "good")
    def testNoMatchIfArgumentFailsToSatisfyEitherOfTwoOtherMatchers(self):
        self.assert_does_not_match(
            "first matcher", all_of(equal_to("bad"), equal_to("good")), "good"
        )
        self.assert_does_not_match(
            "second matcher", all_of(equal_to("good"), equal_to("bad")), "good"
        )
        self.assert_does_not_match(
            "either matcher", all_of(equal_to("bad"), equal_to("bad")), "good"
        )
    def testMatchesIfArgumentSatisfiesAllOfManyOtherMatchers(self):
        self.assert_matches(
            "all matchers",
            all_of(
                equal_to("good"),
                equal_to("good"),
                equal_to("good"),
                equal_to("good"),
                equal_to("good"),
            ),
            "good",
        )
    def testNoMatchIfArgumentFailsToSatisfyAllOfManyOtherMatchers(self):
        self.assert_does_not_match(
            "matcher in the middle",
            all_of(
                equal_to("good"),
                equal_to("good"),
                equal_to("good"),
                equal_to("bad"),
                equal_to("good"),
                equal_to("good"),
            ),
            "good",
        )
    def testHasAReadableDescription(self):
        self.assert_description(
            "('good' and 'bad' and 'ugly')",
            all_of(equal_to("good"), equal_to("bad"), equal_to("ugly")),
        )
    def testSuccessfulMatchDoesNotGenerateMismatchDescription(self):
        self.assert_no_mismatch_description(all_of(equal_to("good"), equal_to("good")), "good")
    def testMismatchDescriptionDescribesFirstFailingMatch(self):
        self.assert_mismatch_description(
            "'good' was 'bad'", all_of(equal_to("bad"), equal_to("good")), "bad"
        )
    def testDescribeMismatch(self):
        self.assert_describe_mismatch(
            "'good' was 'bad'", all_of(equal_to("bad"), equal_to("good")), "bad"
        )
    def testMismatchDescriptionOptionallyDescribesMultipleFailingMatches(self):
        # Constructed via the AllOf class directly to pass the keyword flag.
        self.assert_mismatch_description(
            "'bad' was 'indifferent' and 'good' was 'indifferent'",
            AllOf(
                equal_to("bad"),
                equal_to("indifferent"),
                equal_to("good"),
                describe_all_mismatches=True,
            ),
            "indifferent",
        )
unittest.main()
| 2,423 | 8 | 292 |
161c35ae115f73fe788f0aaea950e88e183e8b91 | 20,071 | py | Python | tools/gn.py | kagouda/sdk | a9d212b94aa73472824f4218a567a8e004575112 | [
"BSD-3-Clause"
] | null | null | null | tools/gn.py | kagouda/sdk | a9d212b94aa73472824f4218a567a8e004575112 | [
"BSD-3-Clause"
] | null | null | null | tools/gn.py | kagouda/sdk | a9d212b94aa73472824f4218a567a8e004575112 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 The Dart project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import shutil
import subprocess
import sys
import time
import utils
HOST_OS = utils.GuessOS()
HOST_ARCH = utils.GuessArchitecture()
SCRIPT_DIR = os.path.dirname(sys.argv[0])
DART_ROOT = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
AVAILABLE_ARCHS = utils.ARCH_FAMILY.keys()
GN = os.path.join(DART_ROOT, 'buildtools', 'gn')
# Environment variables for default settings.
DART_USE_TOOLCHAIN = "DART_USE_TOOLCHAIN" # Use instread of --toolchain-prefix
DART_USE_SYSROOT = "DART_USE_SYSROOT" # Use instead of --target-sysroot
DART_USE_CRASHPAD = "DART_USE_CRASHPAD" # Use instead of --use-crashpad
# use instead of --platform-sdk
DART_MAKE_PLATFORM_SDK = "DART_MAKE_PLATFORM_SDK"
DART_GN_ARGS = "DART_GN_ARGS"
# The C compiler's target.
# The Dart compiler's target.
# Where string_map is formatted as X1=Y1,X2=Y2 etc.
# If key is X1, returns Y1.
def AddCommonGnOptionArgs(parser):
"""Adds arguments that will change the default GN arguments."""
parser.add_argument('--goma', help='Use goma', action='store_true')
parser.add_argument('--no-goma',
help='Disable goma',
dest='goma',
action='store_false')
parser.set_defaults(goma=True)
parser.add_argument('--verify-sdk-hash',
help='Enable SDK hash checks (default)',
dest='verify_sdk_hash',
action='store_true')
parser.add_argument('-nvh',
'--no-verify-sdk-hash',
help='Disable SDK hash checks',
dest='verify_sdk_hash',
action='store_false')
parser.set_defaults(verify_sdk_hash=True)
parser.add_argument('--clang', help='Use Clang', action='store_true')
parser.add_argument('--no-clang',
help='Disable Clang',
dest='clang',
action='store_false')
parser.set_defaults(clang=True)
parser.add_argument(
'--platform-sdk',
help='Directs the create_sdk target to create a smaller "Platform" SDK',
default=MakePlatformSDK(),
action='store_true')
parser.add_argument('--use-crashpad',
default=False,
dest='use_crashpad',
action='store_true')
parser.add_argument('--use-qemu',
default=False,
dest='use_qemu',
action='store_true')
parser.add_argument('--exclude-kernel-service',
help='Exclude the kernel service.',
default=False,
dest='exclude_kernel_service',
action='store_true')
parser.add_argument('--arm-float-abi',
type=str,
help='The ARM float ABI (soft, softfp, hard)',
metavar='[soft,softfp,hard]',
default='')
parser.add_argument('--code-coverage',
help='Enable code coverage for the standalone VM',
default=False,
dest="code_coverage",
action='store_true')
parser.add_argument('--debug-opt-level',
'-d',
help='The optimization level to use for debug builds',
type=str)
parser.add_argument('--gn-args',
help='Set extra GN args',
dest='gn_args',
action='append')
parser.add_argument(
'--toolchain-prefix',
'-t',
type=str,
help='Comma-separated list of arch=/path/to/toolchain-prefix mappings')
parser.add_argument('--ide',
help='Generate an IDE file.',
default=os_has_ide(HOST_OS),
action='store_true')
parser.add_argument(
'--target-sysroot',
'-s',
type=str,
help='Comma-separated list of arch=/path/to/sysroot mappings')
def AddCommonConfigurationArgs(parser):
"""Adds arguments that influence which configuration will be built."""
parser.add_argument("-a",
"--arch",
type=str,
help='Target architectures (comma-separated).',
metavar='[all,' + ','.join(AVAILABLE_ARCHS) + ']',
default=utils.GuessArchitecture())
parser.add_argument('--mode',
'-m',
type=str,
help='Build variants (comma-separated).',
metavar='[all,debug,release,product]',
default='debug')
parser.add_argument('--os',
type=str,
help='Target OSs (comma-separated).',
metavar='[all,host,android,fuchsia]',
default='host')
parser.add_argument('--sanitizer',
type=str,
help='Build variants (comma-separated).',
metavar='[all,none,asan,lsan,msan,tsan,ubsan]',
default='none')
def AddOtherArgs(parser):
"""Adds miscellaneous arguments to the parser."""
parser.add_argument("-v",
"--verbose",
help='Verbose output.',
default=False,
action="store_true")
if __name__ == '__main__':
sys.exit(Main(sys.argv))
| 34.845486 | 83 | 0.584973 | #!/usr/bin/env python
# Copyright 2016 The Dart project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import shutil
import subprocess
import sys
import time
import utils
HOST_OS = utils.GuessOS()
HOST_ARCH = utils.GuessArchitecture()
SCRIPT_DIR = os.path.dirname(sys.argv[0])
DART_ROOT = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
AVAILABLE_ARCHS = utils.ARCH_FAMILY.keys()
GN = os.path.join(DART_ROOT, 'buildtools', 'gn')
# Environment variables for default settings.
DART_USE_TOOLCHAIN = "DART_USE_TOOLCHAIN" # Use instread of --toolchain-prefix
DART_USE_SYSROOT = "DART_USE_SYSROOT" # Use instead of --target-sysroot
DART_USE_CRASHPAD = "DART_USE_CRASHPAD" # Use instead of --use-crashpad
# use instead of --platform-sdk
DART_MAKE_PLATFORM_SDK = "DART_MAKE_PLATFORM_SDK"
DART_GN_ARGS = "DART_GN_ARGS"
def ToolchainPrefix(args):
    """Toolchain prefix: the --toolchain-prefix flag wins over the env var."""
    # `or` falls back to $DART_USE_TOOLCHAIN exactly when the flag is unset.
    return args.toolchain_prefix or os.environ.get(DART_USE_TOOLCHAIN)
def TargetSysroot(args):
    """Target sysroot: the --target-sysroot flag wins over the env var."""
    return args.target_sysroot or os.environ.get(DART_USE_SYSROOT)
def MakePlatformSDK():
    """True when the DART_MAKE_PLATFORM_SDK environment variable is set."""
    return DART_MAKE_PLATFORM_SDK in os.environ
def GetGNArgs(args):
    """Extra GN arguments: the --gn-args flag wins over $DART_GN_ARGS."""
    if args.gn_args is not None:
        return args.gn_args
    env_value = os.environ.get(DART_GN_ARGS) or ""
    return env_value.split()
def GetOutDir(mode, arch, target_os, sanitizer):
    """Return the build output directory for one build configuration."""
    return utils.GetBuildRoot(HOST_OS, mode, arch, target_os, sanitizer)
def ToCommandLine(gn_args):
    """Render a {name: value} dict as a list of GN `name=value` strings.

    Bools become true/false, ints are formatted as decimals, everything
    else is quoted.
    """
    def merge(key, value):
        # `type(...) is bool` must be checked before int: bool subclasses int.
        if type(value) is bool:
            return '%s=%s' % (key, 'true' if value else 'false')
        elif type(value) is int:
            return '%s=%d' % (key, value)
        return '%s="%s"' % (key, value)
    # Fix: dict.iteritems() is Python-2 only; items() works on both 2 and 3.
    return [merge(x, y) for x, y in gn_args.items()]
def HostCpuForArch(arch):
    """Return the host CPU GN should use when building for *arch*.

    Architectures outside the two groups below fall through and yield None,
    matching the original behavior.
    """
    x86_hosts = ('ia32', 'arm', 'armv6', 'simarm', 'simarmv6', 'simarm_x64')
    x64_hosts = ('x64', 'arm64', 'simarm64', 'arm_x64')
    if arch in x86_hosts:
        return 'x86'
    if arch in x64_hosts:
        return 'x64'
    return None
# The C compiler's target.
def TargetCpuForArch(arch, target_os):
    """Return the C compiler's target CPU for *arch* (*target_os* is unused)."""
    translations = {
        'ia32': 'x86', 'simarm': 'x86', 'simarmv6': 'x86',
        'x64': 'x64', 'simarm64': 'x64', 'simarm_x64': 'x64',
        'arm_x64': 'arm',
    }
    # Anything unmapped (e.g. arm, armv6, arm64) is already the C target.
    return translations.get(arch, arch)
# The Dart compiler's target.
def DartTargetCpuForArch(arch):
    """Return the architecture the Dart VM itself targets for *arch*."""
    for dart_cpu, spellings in (
        ('ia32', ('ia32',)),
        ('x64', ('x64',)),
        ('arm', ('arm', 'simarm', 'simarm_x64', 'arm_x64')),
        ('armv6', ('armv6', 'simarmv6')),
        ('arm64', ('arm64', 'simarm64')),
    ):
        if arch in spellings:
            return dart_cpu
    # Unknown spellings pass through unchanged, as in the original.
    return arch
def HostOsForGn(host_os):
    """Normalize a host OS name to the spelling GN expects."""
    for prefix, gn_name in (('macos', 'mac'), ('win', 'win')):
        if host_os.startswith(prefix):
            return gn_name
    return host_os
# Where string_map is formatted as X1=Y1,X2=Y2 etc.
# If key is X1, returns Y1.
def ParseStringMap(key, string_map):
    """Look up *key* in a "K1=V1,K2=V2" mapping string; None when absent."""
    for entry in string_map.split(','):
        parts = entry.split('=')
        if parts[0] == key:
            return parts[1]
    return None
def UseSysroot(args, gn_args):
    """Decide whether the bundled Debian sysroot should be used."""
    # Don't try to use a Linux sysroot if we aren't on Linux.
    if gn_args['target_os'] != 'linux' and HOST_OS != 'linux':
        return False
    # Don't use the sysroot if we're given another sysroot.
    if TargetSysroot(args):
        return False
    # Our Debian Jesse sysroot doesn't work with GCC 9
    if not gn_args['is_clang']:
        return False
    # Our Debian Jesse sysroot has incorrect annotations on realloc.
    if gn_args['is_ubsan']:
        return False
    # Otherwise use the sysroot.
    return True
def ToGnArgs(args, mode, arch, target_os, sanitizer, verify_sdk_hash):
gn_args = {}
host_os = HostOsForGn(HOST_OS)
if target_os == 'host':
gn_args['target_os'] = host_os
else:
gn_args['target_os'] = target_os
gn_args['host_cpu'] = HostCpuForArch(arch)
gn_args['target_cpu'] = TargetCpuForArch(arch, target_os)
gn_args['dart_target_arch'] = DartTargetCpuForArch(arch)
# Configure Crashpad library if it is used.
gn_args['dart_use_crashpad'] = (args.use_crashpad or
DART_USE_CRASHPAD in os.environ)
if gn_args['dart_use_crashpad']:
# Tell Crashpad's BUILD files which checkout layout to use.
gn_args['crashpad_dependencies'] = 'dart'
if arch != HostCpuForArch(arch):
# Training an app-jit snapshot under a simulator is slow. Use script
# snapshots instead.
gn_args['dart_snapshot_kind'] = 'kernel'
else:
gn_args['dart_snapshot_kind'] = 'app-jit'
# We only want the fallback root certs in the standalone VM on
# Linux and Windows.
if gn_args['target_os'] in ['linux', 'win']:
gn_args['dart_use_fallback_root_certificates'] = True
# Use tcmalloc only when targeting Linux and when not using ASAN.
gn_args['dart_use_tcmalloc'] = ((gn_args['target_os'] == 'linux') and
sanitizer == 'none')
if gn_args['target_os'] == 'linux':
if gn_args['target_cpu'] == 'arm':
# Default to -mfloat-abi=hard and -mfpu=neon for arm on Linux as we're
# specifying a gnueabihf compiler in //build/toolchain/linux/BUILD.gn.
floatabi = 'hard' if args.arm_float_abi == '' else args.arm_float_abi
gn_args['arm_version'] = 7
gn_args['arm_float_abi'] = floatabi
gn_args['arm_use_neon'] = True
elif gn_args['target_cpu'] == 'armv6':
floatabi = 'softfp' if args.arm_float_abi == '' else args.arm_float_abi
gn_args['target_cpu'] = 'arm'
gn_args['arm_version'] = 6
gn_args['arm_float_abi'] = floatabi
gn_args['is_debug'] = mode == 'debug'
gn_args['is_release'] = mode == 'release'
gn_args['is_product'] = mode == 'product'
gn_args['dart_debug'] = mode == 'debug'
# This setting is only meaningful for Flutter. Standalone builds of the VM
# should leave this set to 'develop', which causes the build to defer to
# 'is_debug', 'is_release' and 'is_product'.
if mode == 'product':
gn_args['dart_runtime_mode'] = 'release'
else:
gn_args['dart_runtime_mode'] = 'develop'
gn_args['exclude_kernel_service'] = args.exclude_kernel_service
gn_args['is_clang'] = args.clang
enable_code_coverage = args.code_coverage and gn_args['is_clang']
gn_args['dart_vm_code_coverage'] = enable_code_coverage
gn_args['is_asan'] = sanitizer == 'asan'
gn_args['is_lsan'] = sanitizer == 'lsan'
gn_args['is_msan'] = sanitizer == 'msan'
gn_args['is_tsan'] = sanitizer == 'tsan'
gn_args['is_ubsan'] = sanitizer == 'ubsan'
gn_args['is_qemu'] = args.use_qemu
if not args.platform_sdk and not gn_args['target_cpu'].startswith('arm'):
gn_args['dart_platform_sdk'] = args.platform_sdk
# We don't support stripping on Windows
if host_os != 'win':
gn_args['dart_stripped_binary'] = 'exe.stripped/dart'
gn_args['dart_precompiled_runtime_stripped_binary'] = (
'exe.stripped/dart_precompiled_runtime_product')
gn_args['gen_snapshot_stripped_binary'] = (
'exe.stripped/gen_snapshot_product')
# Setup the user-defined sysroot.
if UseSysroot(args, gn_args):
gn_args['dart_use_debian_sysroot'] = True
else:
sysroot = TargetSysroot(args)
if sysroot:
gn_args['target_sysroot'] = ParseStringMap(arch, sysroot)
toolchain = ToolchainPrefix(args)
if toolchain:
gn_args['toolchain_prefix'] = ParseStringMap(arch, toolchain)
goma_dir = os.environ.get('GOMA_DIR')
# Search for goma in depot_tools in path
goma_depot_tools_dir = None
for path in os.environ.get('PATH', '').split(os.pathsep):
if os.path.basename(path) == 'depot_tools':
cipd_bin = os.path.join(path, '.cipd_bin')
if os.path.isfile(os.path.join(cipd_bin, 'gomacc')):
goma_depot_tools_dir = cipd_bin
break
# Otherwise use goma from home directory.
# TODO(whesse): Remove support for goma installed in home directory.
# Goma will only be distributed through depot_tools.
goma_home_dir = os.path.join(os.getenv('HOME', ''), 'goma')
if args.goma and goma_dir:
gn_args['use_goma'] = True
gn_args['goma_dir'] = goma_dir
elif args.goma and goma_depot_tools_dir:
gn_args['use_goma'] = True
gn_args['goma_dir'] = goma_depot_tools_dir
elif args.goma and os.path.exists(goma_home_dir):
gn_args['use_goma'] = True
gn_args['goma_dir'] = goma_home_dir
else:
gn_args['use_goma'] = False
gn_args['goma_dir'] = None
if gn_args['target_os'] == 'mac' and gn_args['use_goma']:
gn_args['mac_use_goma_rbe'] = True
# Code coverage requires -O0 to be set.
if enable_code_coverage:
gn_args['dart_debug_optimization_level'] = 0
gn_args['debug_optimization_level'] = 0
elif args.debug_opt_level:
gn_args['dart_debug_optimization_level'] = args.debug_opt_level
gn_args['debug_optimization_level'] = args.debug_opt_level
gn_args['verify_sdk_hash'] = verify_sdk_hash
return gn_args
def ProcessOsOption(os_name):
    """Resolve the special OS name 'host' to the machine's actual OS."""
    return HOST_OS if os_name == 'host' else os_name
def ProcessOptions(args):
if args.arch == 'all':
args.arch = 'ia32,x64,simarm,simarm64'
if args.mode == 'all':
args.mode = 'debug,release,product'
if args.os == 'all':
args.os = 'host,android,fuchsia'
if args.sanitizer == 'all':
args.sanitizer = 'none,asan,lsan,msan,tsan,ubsan'
args.mode = args.mode.split(',')
args.arch = args.arch.split(',')
args.os = args.os.split(',')
args.sanitizer = args.sanitizer.split(',')
for mode in args.mode:
if not mode in ['debug', 'release', 'product']:
print("Unknown mode %s" % mode)
return False
for i, arch in enumerate(args.arch):
if not arch in AVAILABLE_ARCHS:
# Normalise to lower case form to make it less case-picky.
arch_lower = arch.lower()
if arch_lower in AVAILABLE_ARCHS:
args.arch[i] = arch_lower
continue
print("Unknown arch %s" % arch)
return False
oses = [ProcessOsOption(os_name) for os_name in args.os]
for os_name in oses:
if not os_name in [
'android', 'freebsd', 'linux', 'macos', 'win32', 'fuchsia'
]:
print("Unknown os %s" % os_name)
return False
if os_name == 'android':
if not HOST_OS in ['linux', 'macos']:
print("Cross-compilation to %s is not supported on host os %s."
% (os_name, HOST_OS))
return False
if not arch in [
'ia32', 'x64', 'arm', 'arm_x64', 'armv6', 'arm64'
]:
print(
"Cross-compilation to %s is not supported for architecture %s."
% (os_name, arch))
return False
elif os_name == 'fuchsia':
if HOST_OS != 'linux':
print("Cross-compilation to %s is not supported on host os %s."
% (os_name, HOST_OS))
return False
if arch != 'x64':
print(
"Cross-compilation to %s is not supported for architecture %s."
% (os_name, arch))
return False
elif os_name != HOST_OS:
print("Unsupported target os %s" % os_name)
return False
if HOST_OS != 'win' and args.use_crashpad:
print("Crashpad is only supported on Windows")
return False
return True
def os_has_ide(host_os):
    """True on platforms (Windows, macOS) where IDE files are generated by default."""
    # str.startswith accepts a tuple of prefixes.
    return host_os.startswith(('win', 'mac'))
def ide_switch(host_os):
    """Return the `gn gen` IDE flag matching *host_os*."""
    for prefix, flag in (('win', '--ide=vs'), ('mac', '--ide=xcode')):
        if host_os.startswith(prefix):
            return flag
    return '--ide=json'
def AddCommonGnOptionArgs(parser):
"""Adds arguments that will change the default GN arguments."""
parser.add_argument('--goma', help='Use goma', action='store_true')
parser.add_argument('--no-goma',
help='Disable goma',
dest='goma',
action='store_false')
parser.set_defaults(goma=True)
parser.add_argument('--verify-sdk-hash',
help='Enable SDK hash checks (default)',
dest='verify_sdk_hash',
action='store_true')
parser.add_argument('-nvh',
'--no-verify-sdk-hash',
help='Disable SDK hash checks',
dest='verify_sdk_hash',
action='store_false')
parser.set_defaults(verify_sdk_hash=True)
parser.add_argument('--clang', help='Use Clang', action='store_true')
parser.add_argument('--no-clang',
help='Disable Clang',
dest='clang',
action='store_false')
parser.set_defaults(clang=True)
parser.add_argument(
'--platform-sdk',
help='Directs the create_sdk target to create a smaller "Platform" SDK',
default=MakePlatformSDK(),
action='store_true')
parser.add_argument('--use-crashpad',
default=False,
dest='use_crashpad',
action='store_true')
parser.add_argument('--use-qemu',
default=False,
dest='use_qemu',
action='store_true')
parser.add_argument('--exclude-kernel-service',
help='Exclude the kernel service.',
default=False,
dest='exclude_kernel_service',
action='store_true')
parser.add_argument('--arm-float-abi',
type=str,
help='The ARM float ABI (soft, softfp, hard)',
metavar='[soft,softfp,hard]',
default='')
parser.add_argument('--code-coverage',
help='Enable code coverage for the standalone VM',
default=False,
dest="code_coverage",
action='store_true')
parser.add_argument('--debug-opt-level',
'-d',
help='The optimization level to use for debug builds',
type=str)
parser.add_argument('--gn-args',
help='Set extra GN args',
dest='gn_args',
action='append')
parser.add_argument(
'--toolchain-prefix',
'-t',
type=str,
help='Comma-separated list of arch=/path/to/toolchain-prefix mappings')
parser.add_argument('--ide',
help='Generate an IDE file.',
default=os_has_ide(HOST_OS),
action='store_true')
parser.add_argument(
'--target-sysroot',
'-s',
type=str,
help='Comma-separated list of arch=/path/to/sysroot mappings')
def AddCommonConfigurationArgs(parser):
"""Adds arguments that influence which configuration will be built."""
parser.add_argument("-a",
"--arch",
type=str,
help='Target architectures (comma-separated).',
metavar='[all,' + ','.join(AVAILABLE_ARCHS) + ']',
default=utils.GuessArchitecture())
parser.add_argument('--mode',
'-m',
type=str,
help='Build variants (comma-separated).',
metavar='[all,debug,release,product]',
default='debug')
parser.add_argument('--os',
type=str,
help='Target OSs (comma-separated).',
metavar='[all,host,android,fuchsia]',
default='host')
parser.add_argument('--sanitizer',
type=str,
help='Build variants (comma-separated).',
metavar='[all,none,asan,lsan,msan,tsan,ubsan]',
default='none')
def AddOtherArgs(parser):
"""Adds miscellaneous arguments to the parser."""
parser.add_argument("-v",
"--verbose",
help='Verbose output.',
default=False,
action="store_true")
def parse_args(args):
    """Build the argument parser, parse *args*, and validate the result.

    Returns the parsed namespace, or None (after printing usage) when
    ProcessOptions rejects the combination.
    """
    # Drop the program name.
    args = args[1:]
    parser = argparse.ArgumentParser(
        description='A script to run `gn gen`.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    config_group = parser.add_argument_group('Configuration Related Arguments')
    AddCommonConfigurationArgs(config_group)
    gn_group = parser.add_argument_group('GN Related Arguments')
    AddCommonGnOptionArgs(gn_group)
    other_group = parser.add_argument_group('Other Arguments')
    AddOtherArgs(other_group)
    options = parser.parse_args(args)
    if not ProcessOptions(options):
        parser.print_help()
        return None
    return options
def BuildGnCommand(args, mode, arch, target_os, sanitizer, out_dir):
    """Compose the `gn gen` command line for one build configuration."""
    gn = os.path.join(DART_ROOT, 'buildtools',
                      'gn.exe' if utils.IsWindows() else 'gn')
    if not os.path.isfile(gn):
        raise Exception("Couldn't find the gn binary at path: " + gn)
    # TODO(infra): Re-enable --check. Many targets fail to use
    # public_deps to re-expose header files to their dependents.
    # See dartbug.com/32364
    command = [gn, 'gen', out_dir]
    gn_args = ToCommandLine(
        ToGnArgs(args, mode, arch, target_os, sanitizer, args.verify_sdk_hash))
    # Extra args from --gn-args / $DART_GN_ARGS are appended last so they win.
    gn_args += GetGNArgs(args)
    if args.ide:
        command.append(ide_switch(HOST_OS))
    command.append('--args=%s' % ' '.join(gn_args))
    return command
def RunGnOnConfiguredConfigurations(args):
    """Run `gn gen` in parallel for every configured (os, mode, arch, sanitizer).

    Returns 0 on success, 1 if any gn invocation fails to start or exits
    non-zero; in the failure case all still-running invocations are
    terminated.
    """
    commands = []
    for target_os in args.os:
        for mode in args.mode:
            for arch in args.arch:
                for sanitizer in args.sanitizer:
                    out_dir = GetOutDir(mode, arch, target_os, sanitizer)
                    commands.append(
                        BuildGnCommand(args, mode, arch, target_os, sanitizer,
                                       out_dir))
                    if args.verbose:
                        print("gn gen --check in %s" % out_dir)

    active_commands = []

    def cleanup(command):
        # Report the failing command and stop all other in-flight gn runs.
        print("Command failed: " + ' '.join(command))
        for (_, process) in active_commands:
            process.terminate()

    for command in commands:
        try:
            process = subprocess.Popen(command, cwd=DART_ROOT)
            active_commands.append([command, process])
        except Exception as e:
            print('Error: %s' % e)
            cleanup(command)
            return 1
    while active_commands:
        time.sleep(0.1)
        # Fix: iterate over a snapshot. Removing from the list being
        # iterated silently skips the element following each removal, so
        # finished processes could be missed until a later pass.
        for active_command in list(active_commands):
            (command, process) = active_command
            if process.poll() is not None:
                active_commands.remove(active_command)
                if process.returncode != 0:
                    cleanup(command)
                    return 1
    return 0
def Main(argv):
    """Entry point: parse argv, run gn for all configurations, report timing.

    Returns the exit code (0 on success, 1 on any failure).
    """
    starttime = time.time()
    args = parse_args(argv)
    if args is None:
        # Fix: parse_args returns None when option validation fails; the
        # old code then crashed with AttributeError on args.verbose.
        return 1
    result = RunGnOnConfiguredConfigurations(args)
    endtime = time.time()
    if args.verbose:
        print("GN Time: %.3f seconds" % (endtime - starttime))
    return result


if __name__ == '__main__':
    sys.exit(Main(sys.argv))
| 13,723 | 0 | 480 |
db12233688360f22dcf1d4689bdedc09c0930566 | 4,581 | py | Python | lean_proof_recording/modifier.py | dselsam/lean_proof_recording | bbae0e08609f56f4203b48ef21277d9fc509c34d | [
"Apache-2.0"
] | 7 | 2020-10-12T20:09:07.000Z | 2022-03-14T14:09:07.000Z | lean_proof_recording/modifier.py | dselsam/lean_proof_recording | bbae0e08609f56f4203b48ef21277d9fc509c34d | [
"Apache-2.0"
] | 1 | 2021-09-30T02:35:27.000Z | 2021-09-30T10:11:27.000Z | lean_proof_recording/modifier.py | dselsam/lean_proof_recording | bbae0e08609f56f4203b48ef21277d9fc509c34d | [
"Apache-2.0"
] | 3 | 2022-01-12T19:39:38.000Z | 2022-03-28T10:15:38.000Z | from pathlib import Path
from typing import Dict, List, Optional, Set
| 39.491379 | 91 | 0.557957 | from pathlib import Path
from typing import Dict, List, Optional, Set
class LeanModifier:
    """Accumulates line-level edits to a Lean source file and applies them.

    Deleted lines are kept as ``--PR REMOVE LINE:`` comments and inserted
    lines are wrapped in ``--PR BEGIN/END CODE INSERT`` markers, so every
    modification stays visible (and reversible) in the rewritten file.
    """

    lean_path: Path
    # 0-indexed original line numbers to comment out.
    deletions: Set[int]
    # Original line index -> lines inserted immediately before that line.
    additions: Dict[int, List[str]]
    # Lines appended at the end of the file, if any.
    end_addition: Optional[List[str]]

    def __init__(self, lean_path: Path):
        self.lean_path = lean_path
        self.deletions = set()
        self.additions = {}
        self.end_addition = None

    def delete_lines(self, start_line_ix: int, end_line_ix: int) -> None:
        """
        Delete (comment out) lines of code
        start_line_ix: Line index in the original file. Inclusive. 0-indexed.
        end_line_ix: Line index in the original file. Exclusive. 0-indexed.
        """
        self._comment_out_lines(start_line_ix, end_line_ix)

    def replace_lines(self, start_line_ix: int, end_line_ix: int, new_lines: str) -> None:
        """
        Replace lines of code
        start_line_ix: Line index in the original file. Inclusive. 0-indexed.
        end_line_ix: Line index in the original file. Exclusive. 0-indexed.
        new_lines: New code lines to add to file.
                   Single string with new lines. Must end in a newline
        """
        assert new_lines.endswith("\n")
        self._comment_out_lines(start_line_ix, end_line_ix)
        # The replacement text is inserted just after the commented-out span.
        self._insert_lines(end_line_ix, new_lines)

    def add_lines(self, start_line_ix: int, new_lines: str) -> None:
        """
        Add lines of code
        start_line_ix: Line index in the original file to begin insert.
        new_lines: New code lines to add to file.
                   Single string with new lines. Must end in a newline
        """
        assert new_lines.endswith("\n")
        self._insert_lines(start_line_ix, new_lines)

    def add_lines_at_end(self, new_lines: str) -> None:
        """
        Add lines of code to the end of the file.
        new_lines: New code lines to add to file.
                   Single string with new lines. Must end in a newline
        """
        assert new_lines.endswith("\n")
        self._insert_lines_at_end(new_lines)

    def _comment_out_lines(self, start: int, end: int):
        # Record each index exactly once; overlapping edits are a caller bug.
        for ix in range(start, end):
            assert ix not in self.deletions, f"Can't make multiple deletions to line {ix}."
            self.deletions.add(ix)

    def _insert_lines(self, ix: int, lines: str):
        assert ix not in self.additions, f"Can't make multiple additions to line {ix}."
        # Drop the trailing newline, then store individual lines.
        self.additions[ix] = lines[:-1].split("\n")

    def _insert_lines_at_end(self, lines: str):
        # Fix: this was an f-string with no placeholders.
        assert self.end_addition is None, "Can't make multiple additions to end of file."
        self.end_addition = lines[:-1].split("\n")

    def build_file(self, dryrun: bool = False, verbose=False):
        """
        Apply all edits and replace the current lean file.
        dryrun: only print the modifications, do not write (implies verbose).
        verbose: echo every inserted/removed line to stdout.
        """
        if dryrun:
            verbose = True
        if verbose:
            print(f"Modifications to file {self.lean_path}:")
        new_lines = []

        def emit_insert(lines):
            # Wrap an insertion in PR markers, echoing when verbose.
            # (Factored out: this sequence previously appeared twice.)
            new_lines.append("--PR BEGIN CODE INSERT\n")
            if verbose:
                print("--PR BEGIN CODE INSERT")
            for new_line in lines:
                new_lines.append(new_line + "\n")
                if verbose:
                    print(new_line)
            new_lines.append("--PR END CODE INSERT\n")
            if verbose:
                print("--PR END CODE INSERT")

        with open(self.lean_path, "r") as f:
            for ix, line in enumerate(f):
                if ix in self.additions:
                    emit_insert(self.additions[ix])
                if ix in self.deletions:
                    new_lines.append("--PR REMOVE LINE: " + line)
                    if verbose:
                        print("--PR REMOVE LINE: " + line.rstrip())
                else:
                    new_lines.append(line)
        if self.end_addition is not None:
            emit_insert(self.end_addition)
        if not dryrun:
            self.lean_path.chmod(0o644)  # set file permissions to -rw-r--r--
            with open(self.lean_path, "w") as f:
                f.writelines(new_lines)
| 664 | 3,823 | 23 |
c901dac9a8cb1afaa1e65b45c374f87b1bba735c | 2,845 | py | Python | pages/process.py | ewuerfel66/medicare_app | 3e515481b5882ce74762198faa42d8fc64813305 | [
"MIT"
] | null | null | null | pages/process.py | ewuerfel66/medicare_app | 3e515481b5882ce74762198faa42d8fc64813305 | [
"MIT"
] | null | null | null | pages/process.py | ewuerfel66/medicare_app | 3e515481b5882ce74762198faa42d8fc64813305 | [
"MIT"
] | null | null | null | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Process
This web app uses a random forest regressor to estimate out-of-pocket medical costs based on [Medicare Payment Data](https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/Medicare-Provider-Charge-Data/Inpatient.html).
Consumers often have no clue how much a diagnosis or procedure could end up costing them beforehand. The complicated nature of the human body usually means that doctors don't even have a clue how much it might end up costing you. This uncertainty and lack of transparency is just one of the reasons our health care costs in the United States are so high (about 17% of our GDP compared with the average of about 10-12%).
This tool takes in a category of medical diagnosis and your health care provider's state, then returns an estimate of the out-of-pocket costs one would expect with Medicare coverage.
To create this, I sorted the top 100 most common diagnoses and procedures into the general categories:
* Cardiac/Circulatory
* Cranial/Neurological
* Digestive
* Orthopedic
* Respiratory
* Other
These categories were tuned to be general enough so that one could reasonably guess which category your diagnosis might fall into without a doctor's opinion, yet detailed enough to capture the wide variance among out-of-pocket costs.
Out-of-pocket costs can be found by subtracting medicare coverage from net price for each diagnosis. Using the category of diagnosis and the provider's state, I trained a random forest regressor to predict the out-of-pocket costs.
Due to the simplicity of the inputs, this model has a mean absolute error of about $680. So take these estimates with a grain of salt. This tool does well to get you in the ballpark of what you might expect to pay. Models trained with uncategorized diagnoses (assumes the consumer knows their exact diagnosis) only reduced mean absolute error to about $630, so we didn't lose much resolution by categorizing the diagnoses.
There's a remarkable lack of transparency in the health care system, which is further confounded by the inherent uncertainty in the nature of medicine. Hopefully this tool can provide a bit more information to Medicare beneficiaries about their expected costs.
"""
),
],
)
layout = dbc.Row([column1]) | 64.659091 | 434 | 0.70123 | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Process
This web app uses a random forest regressor to estimate out-of-pocket medical costs based on [Medicare Payment Data](https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/Medicare-Provider-Charge-Data/Inpatient.html).
Consumers often have no clue how much a diagnosis or procedure could end up costing them beforehand. The complicated nature of the human body usually means that doctors don't even have a clue how much it might end up costing you. This uncertainty and lack of transparency is just one of the reasons our health care costs in the United States are so high (about 17% of our GDP compared with the average of about 10-12%).
This tool takes in a category of medical diagnosis and your health care provider's state, then returns an estimate of the out-of-pocket costs one would expect with Medicare coverage.
To create this, I sorted the top 100 most common diagnoses and procedures into the general categories:
* Cardiac/Circulatory
* Cranial/Neurological
* Digestive
* Orthopedic
* Respiratory
* Other
These categories were tuned to be general enough so that one could reasonably guess which category your diagnosis might fall into without a doctor's opinion, yet detailed enough to capture the wide variance among out-of-pocket costs.
Out-of-pocket costs can be found by subtracting medicare coverage from net price for each diagnosis. Using the category of diagnosis and the provider's state, I trained a random forest regressor to predict the out-of-pocket costs.
Due to the simplicity of the inputs, this model has a mean absolute error of about $680. So take these estimates with a grain of salt. This tool does well to get you in the ballpark of what you might expect to pay. Models trained with uncategorized diagnoses (assumes the consumer knows their exact diagnosis) only reduced mean absolute error to about $630, so we didn't lose much resolution by categorizing the diagnoses.
There's a remarkable lack of transparency in the health care system, which is further confounded by the inherent uncertainty in the nature of medicine. Hopefully this tool can provide a bit more information to Medicare beneficiaries about their expected costs.
"""
),
],
)
layout = dbc.Row([column1]) | 0 | 0 | 0 |
53f55b9d3820636cb1d465d6cf8047ed4193435c | 3,294 | py | Python | nor2qml/validation/validationTools.py | MrCubanfrog/nor2qml | ad20ac086c191e7a2e57a7da1b71df192743554c | [
"MIT"
] | null | null | null | nor2qml/validation/validationTools.py | MrCubanfrog/nor2qml | ad20ac086c191e7a2e57a7da1b71df192743554c | [
"MIT"
] | null | null | null | nor2qml/validation/validationTools.py | MrCubanfrog/nor2qml | ad20ac086c191e7a2e57a7da1b71df192743554c | [
"MIT"
] | null | null | null | import math
import logging
from datetime import date
nTypes = {0: "Nordic Event",
1: "Nordic Main Header",
2: "Nordic Macroseismic Header",
3: "Nordic Comment Header",
5: "Nordic Error Header",
6: "Nordic Waveform Header",
8: "Nordic Phase Data"}
| 28.643478 | 94 | 0.660595 | import math
import logging
from datetime import date
# Maps Nordic-format line-type codes to human-readable names; used by the
# validate* helpers below to label error messages. (Codes 4 and 7 are not
# used by this module.)
nTypes = {0: "Nordic Event",
          1: "Nordic Main Header",
          2: "Nordic Macroseismic Header",
          3: "Nordic Comment Header",
          5: "Nordic Error Header",
          6: "Nordic Waveform Header",
          8: "Nordic Phase Data"}
class values():
    """Namespace for shared validation constants."""

    # Largest 64-bit signed integer (2**63 - 1); usable as an effectively
    # unbounded upper limit when calling validateInteger.
    maxInt = 9223372036854775807
def validateInteger(val, valueName, low, high, limits, nType):
    """Validate that *val* is "" or an integer string within [low, high].

    val       -- string value to check ("" is treated as valid/absent)
    valueName -- human-readable field name used in error messages
    low, high -- inclusive bounds, enforced only when *limits* is truthy
    nType     -- key into nTypes identifying the Nordic line type
    Returns True when valid; otherwise logs the reason and returns False.
    """
    if val == "":
        return True
    try:
        number = int(val)
    except (TypeError, ValueError):
        # Fix: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        msg = "Validation Error - {0}: {1} is not an integer! ({2})"
        logging.error(msg.format(nTypes[nType], valueName, val))
        return False
    if number < low and limits:
        msg = "Validation Error - {0}: {1} is smaller than {2}! ({3})"
        logging.error(msg.format(nTypes[nType], valueName, low, val))
        return False
    if number > high and limits:
        msg = "Validation Error - {0}: {1} is larger than {2}! ({3})"
        logging.error(msg.format(nTypes[nType], valueName, high, val))
        return False
    return True
def validateFloat(val, valueName, low, high, limits, nType):
    """Validate that *val* is "" or a finite float string within [low, high].

    NaN and infinities are always rejected; bounds are enforced only when
    *limits* is truthy. Returns True when valid; otherwise logs the reason
    and returns False.
    """
    if val == "":
        return True
    try:
        number = float(val)
    except (TypeError, ValueError):
        # Fix: was a bare `except:`. Conversion is also done once now
        # instead of re-parsing val on every subsequent check.
        msg = "Validation Error - {0}: {1} is not an float! ({2})"
        logging.error(msg.format(nTypes[nType], valueName, val))
        return False
    if math.isnan(number):
        msg = "Validation Error - {0}: {1} is {2} which is not allowed!"
        logging.error(msg.format(nTypes[nType], valueName, val))
        return False
    if math.isinf(number):
        msg = "Validation Error - {0}: {1} is {2} which is not allowed!"
        logging.error(msg.format(nTypes[nType], valueName, val))
        return False
    if number < low and limits:
        msg = "Validation Error - {0}: {1} is smaller than {2}! ({3})"
        logging.error(msg.format(nTypes[nType], valueName, low, val))
        return False
    if number > high and limits:
        msg = "Validation Error - {0}: {1} is larger than {2}! ({3})"
        logging.error(msg.format(nTypes[nType], valueName, high, val))
        return False
    return True
def validateString(string, stringName, minlen, maxlen, listOfAllowed, isList, nType):
    """Validate a string field; "" is treated as valid/absent.

    minlen, maxlen -- length bounds; a value of -1 disables that bound
    listOfAllowed  -- permitted values, enforced only when isList is True
    nType          -- key into nTypes identifying the Nordic line type
    Returns True when valid; otherwise logs the reason and returns False.
    """
    # Fix: was `string is ""` — identity comparison against a literal is
    # unreliable and raises SyntaxWarning on modern Python.
    if string == "":
        return True
    if string not in listOfAllowed and isList:
        msg = "Validation Error - {0}: {1} not in the list of allowed strings! ({2})\nAllowed:\n"
        for allowed in listOfAllowed:
            msg += " -" + allowed + "\n"
        logging.error(msg.format(nTypes[nType], stringName, string))
        return False
    if minlen > -1 and len(string) < minlen:
        msg = "Validation Error - {0}: {1} is shorter than the minimum allowed length {2}! ({3})"
        logging.error(msg.format(nTypes[nType], stringName, minlen, string))
        return False
    # Fix: the guard previously tested `minlen > -1` for the max-length
    # check, so maxlen was ignored whenever minlen was disabled.
    if maxlen > -1 and len(string) > maxlen:
        msg = "Validation Error - {0}: {1} is longer than the maximum allowed length {2}! ({3})"
        logging.error(msg.format(nTypes[nType], stringName, maxlen, string))
        return False
    return True
def validateDate(dateS, dateName, nType):
    """Validate a 'YYYY?MM?DD' date string (fields at fixed columns).

    "" is treated as valid/absent. Returns True when the fields parse into
    a real calendar date; otherwise logs the reason and returns False.
    """
    if dateS == "":
        return True
    try:
        date(year=int(dateS[:4].strip()), month=int(dateS[5:7].strip()), day=int(dateS[8:].strip()))
    except (TypeError, ValueError):
        # Fix: was a bare `except:`; int() and date() only raise
        # TypeError/ValueError for malformed input.
        msg = "Validation Error - {0}: {1} is not parsable into date!({2})"
        logging.error(msg.format(nTypes[nType], dateName, dateS))
        return False
    return True
def fixDate(nordic):
    """Zero-pad single-digit month/day fields of nordic.date in place.

    Positions 5 and 8 hold the tens digit of month and day; a blank there
    means the value was written without a leading zero.
    """
    for pos in (5, 8):
        if nordic.date[pos] == " ":
            nordic.date = nordic.date[:pos] + "0" + nordic.date[pos + 1:]
| 2,869 | 24 | 139 |
d5758350f447ec11f34faed8eb03aea196a39321 | 6,249 | py | Python | conanfile.py | TUM-CONAN/conan-generators | 227d838b12d197f41a2cb25298bc0a31c142f13c | [
"MIT"
] | null | null | null | conanfile.py | TUM-CONAN/conan-generators | 227d838b12d197f41a2cb25298bc0a31c142f13c | [
"MIT"
] | null | null | null | conanfile.py | TUM-CONAN/conan-generators | 227d838b12d197f41a2cb25298bc0a31c142f13c | [
"MIT"
] | null | null | null | import glob
import os
import shutil
from conans import ConanFile
from conans.model import Generator
| 32.21134 | 117 | 0.538166 | import glob
import os
import shutil
from conans import ConanFile
from conans.model import Generator
class env(Generator):
    """Conan generator emitting an ``env.sh`` that exports the package env.

    Multi-valued variables are joined with the platform path separator and
    appended to any pre-existing shell value via ``${VAR:+:$VAR}``.
    """

    def __init__(self, conanfile):
        super().__init__(conanfile)

    @property
    def filename(self):
        return "env.sh"

    @property
    def content(self):
        # First line points pkg-config at the generator output directory.
        lines = ['export PKG_CONFIG_PATH="{}"'.format(self.output_path)]
        for var, val in self.conanfile.env.items():
            values = [val] if isinstance(val, str) else val
            if len(values) > 1 or (var in os.environ and os.pathsep in os.environ[var]):
                joined = os.pathsep.join('"%s"' % p for p in values)
                lines.append('export {0}={1}"${{{0}:+:${0}}}"'.format(var, joined))
            else:
                lines.append('export {0}={1}'.format(var, '"%s"' % values[0]))
        return "\n".join(lines) + "\n"
class direnv(Generator):
    """Conan generator emitting a ``.envrc`` for direnv with the package env.

    Same export format as the ``env`` generator, minus the PKG_CONFIG_PATH
    preamble.
    """

    def __init__(self, conanfile):
        super().__init__(conanfile)

    @property
    def filename(self):
        return ".envrc"

    @property
    def content(self):
        exports = []
        for var, val in self.conanfile.env.items():
            values = [val] if isinstance(val, str) else val
            if len(values) > 1 or (var in os.environ and os.pathsep in os.environ[var]):
                joined = os.pathsep.join('"%s"' % p for p in values)
                exports.append('export {0}={1}"${{{0}:+:${0}}}"\n'.format(var, joined))
            else:
                exports.append('export {0}={1}\n'.format(var, '"%s"' % values[0]))
        return "".join(exports)
class gdb(Generator):
    """Conan generator emitting a ``.gdbinit`` with source path substitutions.

    Reads "old|new" pairs from the SOURCE_MAP env entry; entries without a
    '|' separator are skipped. Returns {} when SOURCE_MAP is absent.
    """

    def __init__(self, conanfile):
        super().__init__(conanfile)

    @property
    def filename(self):
        pass

    @property
    def content(self):
        if "SOURCE_MAP" not in self.conanfile.env:
            return {}
        rules = ""
        for mapping in self.conanfile.env["SOURCE_MAP"]:
            if "|" in mapping:
                rules += "set substitute-path %s %s\n" % tuple(mapping.split("|"))
        return {".gdbinit": rules}
class tools(Generator):
    """Conan generator writing wrapper scripts for a package's executables.

    For every file in the dependency's ``bin`` directory it creates a
    same-named POSIX shell script in the output dir that exports the
    package's env vars and then exec()s the real binary.
    NOTE(review): the file writing happens as a side effect of reading the
    ``content`` property, which always returns {}.
    """
    def __init__(self, conanfile):
        super().__init__(conanfile)
    @property
    def filename(self):
        pass
    @property
    def content(self):
        if not os.path.isdir(self.output_path):
            os.makedirs(self.output_path)
        # Generate wrapper bins
        # Build one shell snippet exporting every env_info variable;
        # multi-valued vars are ':'-joined and appended to any existing value.
        env_vars = ""
        for var, val in self.conanfile.env.items():
            if isinstance(val, str):
                val = [val]
            if len(val) > 1 or (var in os.environ and os.pathsep in os.environ[var]):
                env_vars += 'export {0}={1}"${{{0}:+:${0}}}"\n'.format(var, os.pathsep.join('"%s"' % p for p in val))
            else:
                env_vars += 'export {0}={1}\n'.format(var, '"%s"' % val[0])
        # Find rootpath
        # 'dependencies' is not indexable
        # NOTE(review): only the FIRST dependency's rootpath is used —
        # presumably this generator is applied to single-package recipes;
        # verify before reusing with multiple requirements.
        for _, cpp_info in self.deps_build_info.dependencies:
            rootpath = cpp_info.rootpath
            break
        # Generate executable wrappers
        bin_path = os.path.join(rootpath, "bin")
        if not os.path.isdir(bin_path):
            return {}
        for exe_name in os.listdir(bin_path):
            exe_path = os.path.join(bin_path, exe_name)
            exe_out_path = os.path.join(self.output_path, exe_name)
            with open(exe_out_path, "w") as exe:
                exe.write("#!/usr/bin/env sh\n")
                exe.write(env_vars)
                exe.write('exec %s "$@"' % exe_path)
            # Mark the wrapper executable (rwxrwxr-x).
            os.chmod(exe_out_path, 0o775)
        return {}
def replace_prefix_in_pc_file(pc_file, prefix):
    """Return the text of *pc_file* with its install prefix replaced.

    The old prefix is taken from the ``prefix=`` line, or derived from
    ``libdir=``/``includedir=`` when ``prefix=`` is absent. Raises
    Exception when no prefix can be determined.
    """
    # Read once; the original iterated the file handle and then called
    # f.read() on the exhausted handle, returning "" in the bare-prefix case.
    with open(pc_file) as f:
        content = f.read()
    old_prefix = ""
    for line in content.splitlines():
        if line == "prefix=":
            # Empty prefix variable: fill it in directly.
            # Fix: was `"prefix=%s".format(prefix)` — str.format on a
            # %-style template is a no-op.
            return content.replace("prefix=", "prefix=%s" % prefix)
        if "prefix=" in line:
            old_prefix = line.split("=")[1]
            break
    if not old_prefix:
        for line in content.splitlines():
            if "libdir=" in line:
                old_prefix = line.split("=")[1][:-4]  # strip trailing "/lib"
                break
            if "includedir=" in line:
                old_prefix = line.split("=")[1][:-8]  # strip trailing "/include"
                break
    if not old_prefix:
        raise Exception("Could not find package prefix in '%s'" % pc_file)
    return content.replace(old_prefix, prefix)
class pkgconf(Generator):
    """Conan generator collecting .pc files for all dependencies.

    Dependency .pc files are rewritten so their prefix points at the conan
    package rootpath. If the recipe declares ``system_pcs``, those files
    are additionally copied verbatim from PKG_CONFIG_SYSTEM_PATH.
    Returns a mapping of pc filename -> file content.
    """
    def __init__(self, conanfile):
        super().__init__(conanfile)
    @property
    def filename(self):
        pass
    @property
    def content(self):
        files = {}
        # Generate pc files
        for _, cpp_info in self.deps_build_info.dependencies:
            # Both conventional locations for pkg-config metadata.
            pc_paths = [
                os.path.join(cpp_info.rootpath, "lib", "pkgconfig"),
                os.path.join(cpp_info.rootpath, "share", "pkgconfig"),
            ]
            for pc_path in pc_paths:
                if not os.path.isdir(pc_path):
                    continue
                for pc in os.listdir(pc_path):
                    files[pc] = replace_prefix_in_pc_file(os.path.join(pc_path, pc), cpp_info.rootpath)
        # Generate pc files from PKG_CONFIG_SYSTEM_PATH
        if hasattr(self.conanfile, "system_pcs") and "PKG_CONFIG_SYSTEM_PATH" in os.environ:
            # NOTE(review): normalizing a str to a set mutates the recipe's
            # own attribute, not just a local copy.
            if isinstance(self.conanfile.system_pcs, str):
                self.conanfile.system_pcs = set([self.conanfile.system_pcs])
            # Track which requested pcs were found; leftovers are an error.
            system_pcs = set(self.conanfile.system_pcs)
            for pc_path in os.environ["PKG_CONFIG_SYSTEM_PATH"].split(os.pathsep):
                for pc in os.listdir(pc_path):
                    pc_name = os.path.splitext(pc)[0]
                    if not pc_name in self.conanfile.system_pcs:
                        continue
                    system_pcs.remove(pc_name)
                    with open(os.path.join(pc_path, pc), "r") as pc_file:
                        files[pc] = pc_file.read()
            if len(system_pcs):
                raise Exception("'%s' not available in system pkg-config directories" % ", ".join(system_pcs))
        return files
class GeneratorsPackage(ConanFile):
name = "generators"
version = "1.0.0"
license = "MIT"
description = "Conan generators"
| 5,314 | 666 | 161 |
db2e6b0e5e562c808cd68e220181d5607da5a025 | 209 | py | Python | src/python/twitter/common/zookeeper/group/kazoo_cli.py | zhouyijiaren/commons | 10df6fb63547baa9047782aa7ad4edf354914b10 | [
"Apache-2.0"
] | 1,143 | 2015-01-05T04:19:24.000Z | 2019-12-11T12:02:23.000Z | src/python/twitter/common/zookeeper/group/kazoo_cli.py | zhouyijiaren/commons | 10df6fb63547baa9047782aa7ad4edf354914b10 | [
"Apache-2.0"
] | 144 | 2015-01-06T05:05:07.000Z | 2019-12-12T18:02:37.000Z | src/python/twitter/common/zookeeper/group/kazoo_cli.py | zhouyijiaren/commons | 10df6fb63547baa9047782aa7ad4edf354914b10 | [
"Apache-2.0"
] | 426 | 2015-01-08T08:33:41.000Z | 2019-12-09T13:15:40.000Z | from twitter.common import app
from kazoo.client import KazooClient
from twitter.common.zookeeper.group.kazoo_group import KazooGroup
app.main()
| 16.076923 | 65 | 0.784689 | from twitter.common import app
from kazoo.client import KazooClient
from twitter.common.zookeeper.group.kazoo_group import KazooGroup
def main():
  """Drop into an interactive Python shell with this module's globals
  (app, KazooClient, KazooGroup) in scope for ad-hoc exploration."""
  import code
  code.interact(local=globals())
# Hand control to the twitter.common app framework, which invokes main().
app.main()
| 37 | 0 | 23 |
a875657ad67d487ebed163d89301717c4340b8d3 | 948 | py | Python | dev/Gems/CloudGemComputeFarm/v1/AWS/lambda-code/ServiceLambda/api/cancel.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | dev/Gems/CloudGemComputeFarm/v1/AWS/lambda-code/ServiceLambda/api/cancel.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | dev/Gems/CloudGemComputeFarm/v1/AWS/lambda-code/ServiceLambda/api/cancel.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | from __future__ import print_function
import boto3
import json
import CloudCanvas
import service
from cgf_utils import aws_utils
from cgf_utils import custom_resource_utils
# import errors
#
# raise errors.ClientError(message) - results in HTTP 400 response with message
# raise errors.ForbiddenRequestError(message) - results in 403 response with message
# raise errors.NotFoundError(message) - results in HTTP 404 response with message
#
# Any other exception results in HTTP 500 with a generic internal service error message.
workflow = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('Workflow'))
workflow_domain_name = workflow + '-domain'
swf_client = boto3.client('swf', region_name=aws_utils.current_region)
@service.api
| 27.882353 | 94 | 0.794304 | from __future__ import print_function
import boto3
import json
import CloudCanvas
import service
from cgf_utils import aws_utils
from cgf_utils import custom_resource_utils
# import errors
#
# raise errors.ClientError(message) - results in HTTP 400 response with message
# raise errors.ForbiddenRequestError(message) - results in 403 response with message
# raise errors.NotFoundError(message) - results in HTTP 404 response with message
#
# Any other exception results in HTTP 500 with a generic internal service error message.
workflow = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('Workflow'))
workflow_domain_name = workflow + '-domain'
swf_client = boto3.client('swf', region_name=aws_utils.current_region)
@service.api
def post(request, workflow_id):
response = swf_client.terminate_workflow_execution(
domain=workflow_domain_name,
workflowId=workflow_id
)
return "success"
| 166 | 0 | 22 |
d2159342c7d35eb7023c5f218af35daef0f7dfda | 241 | py | Python | src/devices/esp32-test02/deploy.py | hwinther/lanot | f6700cacb3946535081624467b746fdfd38e021d | [
"Apache-2.0"
] | null | null | null | src/devices/esp32-test02/deploy.py | hwinther/lanot | f6700cacb3946535081624467b746fdfd38e021d | [
"Apache-2.0"
] | null | null | null | src/devices/esp32-test02/deploy.py | hwinther/lanot | f6700cacb3946535081624467b746fdfd38e021d | [
"Apache-2.0"
] | null | null | null | import prometheus.tftpd
'''
prime the device with:
nc64 -c 192.168.4.1 9195
connect ssid:password
to run:
set PYTHONPATH=p:\lanot\src\core
'''
files = [
'main.py',
'test02.py'
]
prometheus.tftpd.tftp_client('10.20.2.116', *files)
| 14.176471 | 51 | 0.680498 | import prometheus.tftpd
'''
prime the device with:
nc64 -c 192.168.4.1 9195
connect ssid:password
to run:
set PYTHONPATH=p:\lanot\src\core
'''
files = [
'main.py',
'test02.py'
]
prometheus.tftpd.tftp_client('10.20.2.116', *files)
| 0 | 0 | 0 |
6579a4fad4984096942abbe25e1bda1822405836 | 2,158 | py | Python | tests/dataverk/connectors/storage/test_storage_connector_factory.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 3 | 2019-09-29T20:48:46.000Z | 2021-03-31T10:16:07.000Z | tests/dataverk/connectors/storage/test_storage_connector_factory.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 148 | 2019-02-08T12:30:58.000Z | 2021-03-11T15:31:55.000Z | tests/dataverk/connectors/storage/test_storage_connector_factory.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 1 | 2020-11-18T14:10:05.000Z | 2020-11-18T14:10:05.000Z | import os
import shutil
import tempfile
import unittest
from unittest import mock
from dataverk.connectors import NaisS3Connector, GoogleStorageConnector
from dataverk.connectors.storage import storage_connector_factory
from dataverk.connectors.storage.file_storage import FileStorageConnector
from dataverk.connectors.storage.storage_connector_factory import StorageType
from tests.dataverk.connectors.storage.test_resources.google_storage_common import GOOGLE_SERVICE_ACCOUNT, GCS_BUCKET_NAME
from tests.dataverk.connectors.storage.test_resources.mock_google_cloud_api import MockGoogleClient, MockGoogleCredentials
| 44.958333 | 122 | 0.656163 | import os
import shutil
import tempfile
import unittest
from unittest import mock
from dataverk.connectors import NaisS3Connector, GoogleStorageConnector
from dataverk.connectors.storage import storage_connector_factory
from dataverk.connectors.storage.file_storage import FileStorageConnector
from dataverk.connectors.storage.storage_connector_factory import StorageType
from tests.dataverk.connectors.storage.test_resources.google_storage_common import GOOGLE_SERVICE_ACCOUNT, GCS_BUCKET_NAME
from tests.dataverk.connectors.storage.test_resources.mock_google_cloud_api import MockGoogleClient, MockGoogleCredentials
class TestStorageConnectorFactory(unittest.TestCase):
    """Tests for storage_connector_factory.get_storage_connector."""
    def setUp(self):
        # Endpoint required by the NaisS3Connector path.
        os.environ["DATAVERK_BUCKET_ENDPOINT"] = "https://bucket-endpoint.something.com"
        self.storage_dir = tempfile.mkdtemp()
        self.settings = {
            "bucket_storage": {
                "local": {
                    "path": f"{self.storage_dir}"
                },
                "gcs": {
                    "credentials": GOOGLE_SERVICE_ACCOUNT
                }
            }
        }
    def tearDown(self):
        shutil.rmtree(self.storage_dir)
    # Fix: stacked mock.patch decorators inject mocks bottom-up — the first
    # argument after self comes from the Credentials patch (the decorator
    # closest to the function), the second from the storage.Client patch.
    # The parameter names were swapped (harmless here since both are
    # unused, but misleading).
    @mock.patch("google.cloud.storage.Client", side_effect=MockGoogleClient)
    @mock.patch("google.oauth2.service_account.Credentials", side_effect=MockGoogleCredentials)
    def test_get_storage_connector(self, mock_creds, mock_client):
        # (storage type string, bucket name, expected connector class)
        connector_types = [("nais", "opendata", NaisS3Connector),
                           ("gcs", GCS_BUCKET_NAME, GoogleStorageConnector),
                           ("local", None, FileStorageConnector)]
        for connector_type in connector_types:
            with self.subTest(msg="Testing bucket connector factory method", _input=connector_type):
                self.assertIsInstance(
                    storage_connector_factory.get_storage_connector(storage_type=StorageType(connector_type[0]),
                                                                    bucket_name=connector_type[1],
                                                                    settings=self.settings),
                    connector_type[2])
| 1,229 | 286 | 23 |
fb2886108f82965094c45267d699bc69164f0202 | 11,853 | py | Python | visuafy.py | aryanbhajanka/visuafy | 407db9b9219cd7849f9a756d5123de61bb85f415 | [
"MIT"
] | 1 | 2021-11-26T15:48:54.000Z | 2021-11-26T15:48:54.000Z | visuafy.py | aryanbhajanka/visuafy | 407db9b9219cd7849f9a756d5123de61bb85f415 | [
"MIT"
] | null | null | null | visuafy.py | aryanbhajanka/visuafy | 407db9b9219cd7849f9a756d5123de61bb85f415 | [
"MIT"
] | null | null | null | #Visuafy a Spotify visualiser by Aryan Bhajanka
from os import name
from flask import Flask, request, url_for, session, redirect, render_template
from flask.helpers import get_template_attribute
import spotipy
from spotipy.exceptions import SpotifyException
from spotipy.oauth2 import SpotifyOAuth, SpotifyOauthError
app = Flask(__name__, static_folder='static')
app.secret_key = "7490017841visuafy7490017841"
app.config['SESSION_COOKIE_NAME'] = 'spotify-user-read-currently-playing'
scope = "user-read-currently-playing"
@app.route('/', methods =["GET", "POST"])
@app.route('/colours')
@app.route('/moonlight')
@app.route('/leo_the_cat')
@app.route('/ship')
@app.route('/homework')
@app.route('/by_the_window')
@app.route('/on_the_road')
@app.route('/comfy_night')
@app.route('/custom_image', methods =["GET", "POST"])
@app.route('/custom_theme')
@app.route('/la')
@app.route('/nyc')
@app.route('/info')
@app.route('/select_theme')
@app.route('/help')
@app.route('/credits')
if __name__ == '__main__':
app.run() | 41.589474 | 144 | 0.633173 | #Visuafy a Spotify visualiser by Aryan Bhajanka
from os import name
from flask import Flask, request, url_for, session, redirect, render_template
from flask.helpers import get_template_attribute
import spotipy
from spotipy.exceptions import SpotifyException
from spotipy.oauth2 import SpotifyOAuth, SpotifyOauthError
app = Flask(__name__, static_folder='static')
app.secret_key = "7490017841visuafy7490017841"
app.config['SESSION_COOKIE_NAME'] = 'spotify-user-read-currently-playing'
scope = "user-read-currently-playing"
@app.route('/', methods =["GET", "POST"])
def login():
    """Show the token-entry page; on POST store the OAuth token in session."""
    if request.method != "POST":
        return render_template("token.html")
    # The form field carries the user's Spotify OAuth token.
    session['token'] = request.form.get("oauth")
    return redirect(url_for('select_theme', _external=True))
@app.route('/colours')
def colours():
    """Render the 'colours' theme with the user's currently playing track.

    Falls back to the 'none' page when nothing is playing or no artist
    image is found, and redirects to login when no valid token is stored.
    """
    try:
        # Fix: a missing session token raised an uncaught KeyError (HTTP 500
        # on first visit); it is now redirected to login below.
        token = session['token']
        spotify = spotipy.Spotify(token)
        current_song = spotify.currently_playing(market="ES")
        song_item = current_song.get("item")
        song_name = (song_item['name'])
        song_artist = (song_item['artists'][0]['name'])
        # Look up the artist to obtain a display image.
        image_result = spotify.search(q='artist:'+song_artist, type='artist')
        items = image_result['artists']['items']
        artist = items[0]
        artist_image = (artist['images'][0]['url'])
        return render_template ("index.html", name = song_name, artist = song_artist, image = artist_image)
    except (AttributeError, IndexError):
        # AttributeError: nothing is playing (currently_playing -> None).
        # IndexError (fix): artist search returned no items / no images.
        return render_template ('none.html')
    except (SpotifyException, KeyError, NameError):
        return redirect (url_for('login', _external = True))
@app.route('/moonlight')
def moon():
    """Render the 'moonlight' (city) theme with the current track.

    Falls back to the 'none' page when nothing is playing or no artist
    image is found, and redirects to login when no valid token is stored.
    """
    try:
        # Fix: a missing session token raised an uncaught KeyError.
        token = session['token']
        spotify = spotipy.Spotify(token)
        current_song = spotify.currently_playing(market="ES")
        song_item = current_song.get("item")
        song_name = (song_item['name'])
        song_artist = (song_item['artists'][0]['name'])
        image_result = spotify.search(q='artist:'+song_artist, type='artist')
        items = image_result['artists']['items']
        artist = items[0]
        artist_image = (artist['images'][0]['url'])
        return render_template ("city.html", name = song_name, artist = song_artist, image = artist_image)
    except (AttributeError, IndexError):
        # AttributeError: nothing playing; IndexError (fix): empty search.
        return render_template ('none.html')
    except (SpotifyException, KeyError, NameError):
        return redirect (url_for('login', _external = True))
@app.route('/leo_the_cat')
def leo_the_cat():
    """Render the 'leo the cat' theme with the current track.

    Falls back to the 'none' page when nothing is playing or no artist
    image is found, and redirects to login when no valid token is stored.
    """
    try:
        # Fix: a missing session token raised an uncaught KeyError.
        token = session['token']
        spotify = spotipy.Spotify(token)
        current_song = spotify.currently_playing(market="ES")
        song_item = current_song.get("item")
        song_name = (song_item['name'])
        song_artist = (song_item['artists'][0]['name'])
        image_result = spotify.search(q='artist:'+song_artist, type='artist')
        items = image_result['artists']['items']
        artist = items[0]
        artist_image = (artist['images'][0]['url'])
        return render_template ("leo_the_cat.html", name = song_name, artist = song_artist, image = artist_image)
    except (AttributeError, IndexError):
        # AttributeError: nothing playing; IndexError (fix): empty search.
        return render_template ('none.html')
    except (SpotifyException, KeyError, NameError):
        return redirect (url_for('login', _external = True))
@app.route('/ship')
def lofi_ship():
    """Render the 'ship' theme with the current track.

    Falls back to the 'none' page when nothing is playing or no artist
    image is found, and redirects to login when no valid token is stored.
    """
    try:
        # Fix: a missing session token raised an uncaught KeyError.
        token = session['token']
        spotify = spotipy.Spotify(token)
        current_song = spotify.currently_playing(market="ES")
        song_item = current_song.get("item")
        song_name = (song_item['name'])
        song_artist = (song_item['artists'][0]['name'])
        image_result = spotify.search(q='artist:'+song_artist, type='artist')
        items = image_result['artists']['items']
        artist = items[0]
        artist_image = (artist['images'][0]['url'])
        return render_template ("ship.html", name = song_name, artist = song_artist, image = artist_image)
    except (AttributeError, IndexError):
        # AttributeError: nothing playing; IndexError (fix): empty search.
        return render_template ('none.html')
    except (SpotifyException, KeyError, NameError):
        return redirect (url_for('login', _external = True))
@app.route('/homework')
def homework():
    """Homework theme showing the user's currently playing track."""
    try:
        client = spotipy.Spotify(session['token'])
        playing = client.currently_playing(market="ES")
        track = playing.get("item")
        title = track['name']
        artist_name = track['artists'][0]['name']
        search = client.search(q='artist:' + artist_name, type='artist')
        top_artist = search['artists']['items'][0]
        portrait = top_artist['images'][0]['url']
        return render_template("homework.html", name=title, artist=artist_name, image=portrait)
    except AttributeError:
        # Nothing playing: the API returns None and .get() blows up.
        return render_template('none.html')
    except (SpotifyException, NameError):
        return redirect(url_for('login', _external=True))
@app.route('/by_the_window')
def by_the_window():
    """By-the-window theme showing the user's currently playing track."""
    try:
        client = spotipy.Spotify(session['token'])
        playing = client.currently_playing(market="ES")
        track = playing.get("item")
        title = track['name']
        artist_name = track['artists'][0]['name']
        search = client.search(q='artist:' + artist_name, type='artist')
        top_artist = search['artists']['items'][0]
        portrait = top_artist['images'][0]['url']
        return render_template("by_the_window.html", name=title, artist=artist_name, image=portrait)
    except AttributeError:
        # Nothing playing: the API returns None and .get() blows up.
        return render_template('none.html')
    except (SpotifyException, NameError):
        return redirect(url_for('login', _external=True))
@app.route('/on_the_road')
def on_the_road():
    """On-the-road theme showing the user's currently playing track."""
    try:
        client = spotipy.Spotify(session['token'])
        playing = client.currently_playing(market="ES")
        track = playing.get("item")
        title = track['name']
        artist_name = track['artists'][0]['name']
        search = client.search(q='artist:' + artist_name, type='artist')
        top_artist = search['artists']['items'][0]
        portrait = top_artist['images'][0]['url']
        return render_template("on_the_road.html", name=title, artist=artist_name, image=portrait)
    except AttributeError:
        # Nothing playing: the API returns None and .get() blows up.
        return render_template('none.html')
    except (SpotifyException, NameError):
        return redirect(url_for('login', _external=True))
@app.route('/comfy_night')
def comfy_night():
    """Comfy-night theme showing the user's currently playing track."""
    try:
        client = spotipy.Spotify(session['token'])
        playing = client.currently_playing(market="ES")
        track = playing.get("item")
        title = track['name']
        artist_name = track['artists'][0]['name']
        search = client.search(q='artist:' + artist_name, type='artist')
        top_artist = search['artists']['items'][0]
        portrait = top_artist['images'][0]['url']
        return render_template("comfy_night.html", name=title, artist=artist_name, image=portrait)
    except AttributeError:
        # Nothing playing: the API returns None and .get() blows up.
        return render_template('none.html')
    except (SpotifyException, NameError):
        return redirect(url_for('login', _external=True))
@app.route('/custom_image', methods=["GET", "POST"])
def custom_image():
    """Form for a custom background image; the URL is stored in the session."""
    if request.method != "POST":
        return render_template("custom_image.html")
    session['image_address'] = request.form.get("imagead")
    return redirect(url_for('custom_theme', _external=True))
@app.route('/custom_theme')
def custom_theme():
    """Theme page using the user-supplied background image from the session."""
    try:
        client = spotipy.Spotify(session['token'])
        playing = client.currently_playing(market="ES")
        track = playing.get("item")
        title = track['name']
        artist_name = track['artists'][0]['name']
        search = client.search(q='artist:' + artist_name, type='artist')
        top_artist = search['artists']['items'][0]
        portrait = top_artist['images'][0]['url']
        background = session['image_address']
        return render_template("custom_theme.html", name=title, artist=artist_name,
                               image=portrait, custom_image=background)
    except AttributeError:
        # Nothing playing: the API returns None and .get() blows up.
        return render_template('none.html')
    except (SpotifyException, NameError):
        return redirect(url_for('login', _external=True))
@app.route('/la')
def la():
    """LA theme showing the user's currently playing track."""
    try:
        client = spotipy.Spotify(session['token'])
        playing = client.currently_playing(market="ES")
        track = playing.get("item")
        title = track['name']
        artist_name = track['artists'][0]['name']
        search = client.search(q='artist:' + artist_name, type='artist')
        top_artist = search['artists']['items'][0]
        portrait = top_artist['images'][0]['url']
        return render_template("la.html", name=title, artist=artist_name, image=portrait)
    except AttributeError:
        # Nothing playing: the API returns None and .get() blows up.
        return render_template('none.html')
    except (SpotifyException, NameError):
        return redirect(url_for('login', _external=True))
@app.route('/nyc')
def nyc():
    """NYC theme showing the user's currently playing track."""
    try:
        client = spotipy.Spotify(session['token'])
        playing = client.currently_playing(market="ES")
        track = playing.get("item")
        title = track['name']
        artist_name = track['artists'][0]['name']
        search = client.search(q='artist:' + artist_name, type='artist')
        top_artist = search['artists']['items'][0]
        portrait = top_artist['images'][0]['url']
        return render_template("nyc.html", name=title, artist=artist_name, image=portrait)
    except AttributeError:
        # Nothing playing: the API returns None and .get() blows up.
        return render_template('none.html')
    except (SpotifyException, NameError):
        return redirect(url_for('login', _external=True))
@app.route('/info')
def info():
    """Static information page."""
    return render_template ("info.html")
@app.route('/select_theme')
def select_theme():
    """Static page listing the available themes."""
    return render_template ("select_theme.html")
@app.route('/help')
def help():
    """Static help page.

    NOTE(review): this view shadows the built-in ``help`` at module scope;
    renaming it would change the Flask endpoint name, so it is left as-is.
    """
    return render_template ("help.html")
@app.route('/credits')
def credits():
    """Static credits page.

    NOTE(review): shadows the built-in ``credits``; left as-is because the
    function name doubles as the Flask endpoint name.
    """
    return render_template ("credits.html")
def create_spotify_oauth():
    """Build the SpotifyOAuth helper used by the login flow.

    SECURITY FIX: the client id/secret were committed in plain text. They are
    now read from the standard SPOTIPY_* environment variables, falling back
    to the historical values so existing deployments keep working.
    """
    import os  # local import keeps this fix self-contained
    return SpotifyOAuth(
        client_id = os.environ.get("SPOTIPY_CLIENT_ID",
                                   "28b4cc3000d54dcd86e32d2d59719787"),
        client_secret = os.environ.get("SPOTIPY_CLIENT_SECRET",
                                       "119bd035eea54111aa915b747ab2e204"),
        redirect_uri = url_for('redirectpage', _external = True),
        scope = "user-read-currently-playing")
# Run the Flask development server when this file is executed directly.
if __name__ == '__main__':
    app.run()
7faf24164edba88fb71f7c27d65e25b1c665e48b | 388 | py | Python | terracommon/events/migrations/0002_eventhandler_priority.py | Terralego/terra-back | 32779117ee3613b9d2e476cf445f94dbdb0f114f | [
"MIT"
] | 4 | 2019-05-07T12:34:35.000Z | 2019-11-14T10:52:11.000Z | terracommon/events/migrations/0002_eventhandler_priority.py | Terralego/terra-back | 32779117ee3613b9d2e476cf445f94dbdb0f114f | [
"MIT"
] | 16 | 2019-08-14T11:09:39.000Z | 2022-02-10T07:55:31.000Z | terracommon/events/migrations/0002_eventhandler_priority.py | Terralego/terra-back | 32779117ee3613b9d2e476cf445f94dbdb0f114f | [
"MIT"
] | 1 | 2019-04-17T09:03:02.000Z | 2019-04-17T09:03:02.000Z | # Generated by Django 2.0.9 on 2018-10-31 09:05
from django.db import migrations, models
| 20.421053 | 58 | 0.600515 | # Generated by Django 2.0.9 on 2018-10-31 09:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``priority`` field (PositiveIntegerField, default 10) to
    ``EventHandler``."""

    dependencies = [
        ('events', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='eventhandler',
            name='priority',
            field=models.PositiveIntegerField(default=10),
        ),
    ]
| 0 | 274 | 23 |
31cb417beee09bbcf9e26529462b9495f2bb012a | 3,170 | py | Python | tenable/nessus/settings.py | tecnobabble/pyTenable | 29e7ee65cdee70fbaaade295a9a5055c4d80b17f | [
"MIT"
] | 1 | 2022-03-01T17:17:19.000Z | 2022-03-01T17:17:19.000Z | tenable/nessus/settings.py | tecnobabble/pyTenable | 29e7ee65cdee70fbaaade295a9a5055c4d80b17f | [
"MIT"
] | null | null | null | tenable/nessus/settings.py | tecnobabble/pyTenable | 29e7ee65cdee70fbaaade295a9a5055c4d80b17f | [
"MIT"
] | 1 | 2022-03-01T17:17:30.000Z | 2022-03-01T17:17:30.000Z | '''
Settings
========
Methods described in this section relate to the the Settings API.
These methods can be accessed at ``Nessus.settings``.
.. rst-class:: hide-signature
.. autoclass:: SettingsAPI
:members:
'''
from typing import List, Dict, Optional
from typing_extensions import Literal
from restfly.utils import dict_clean
from tenable.base.endpoint import APIEndpoint
from .schema.settings import SettingsListSchema
| 27.565217 | 79 | 0.539432 | '''
Settings
========
Methods described in this section relate to the the Settings API.
These methods can be accessed at ``Nessus.settings``.
.. rst-class:: hide-signature
.. autoclass:: SettingsAPI
:members:
'''
from typing import List, Dict, Optional
from typing_extensions import Literal
from restfly.utils import dict_clean
from tenable.base.endpoint import APIEndpoint
from .schema.settings import SettingsListSchema
class SettingsAPI(APIEndpoint):
    """Endpoints under ``/settings`` of the Nessus scanner API."""
    _path = 'settings'

    def modify(self, settings: List[Dict]) -> Dict:
        '''
        Modifies the advanced scanner settings.

        Each settings object carries an ``action`` and a ``name``; depending
        on the change it may also need a ``value`` and/or an ``id``.

        Args:
            settings (list[dict]): List of settings change objects

        Examples:
            >>> nessus.settings.modify([{
            ...     'action': 'edit',
            ...     'name': 'allow_post_scan_editing',
            ...     'value': 'no'
            ... }])
        '''
        validator = SettingsListSchema()
        loaded = validator.load({'settings': settings})
        return self._put('advanced', json=validator.dump(loaded))

    def list(self) -> List[Dict]:
        '''
        Returns the list of advanced settings.

        Returns:
            List[Dict]:
                List of settings objects.

        Example:
            >>> nessus.settings.list()
        '''
        response = self._get('advanced')
        return response['preferences']

    def health(self) -> Dict:
        '''
        Returns the current health statistics for the Nessus scanner.

        Returns:
            Dict:
                Health stats information

        Example:
            >>> nessus.settings.health()
        '''
        return self._get('health/stats')

    def alerts(self,
               start_time: Optional[int] = None,
               end_time: Optional[int] = None
               ) -> List[Dict]:
        '''
        Returns the health alerts generated by the scanner.

        Args:
            start_time (int, optional):
                Start of the historical window (defaults to 24hrs ago).
            end_time (int, optional):
                End of the historical window (defaults to now).

        Returns:
            List[Dict]:
                Alert objects matching the specified time range

        Example:
            >>> nessus.settings.alerts()
        '''
        window = {
            'start_time': start_time,
            'end_time': end_time
        }
        return self._get('health/alerts', params=dict_clean(window))
| 0 | 2,718 | 23 |
6ff30e1703915da6d05542e6733883640be99706 | 3,488 | py | Python | frontend/lambda_function.py | lylus/tesla_cmds_aws_lambda_functions | b7778bb6f0172e96eba8e99916f123ba573114ee | [
"BSD-2-Clause"
] | 7 | 2021-01-31T00:27:10.000Z | 2022-03-10T07:49:06.000Z | frontend/lambda_function.py | lylus/tesla_cmds_aws_lambda_functions | b7778bb6f0172e96eba8e99916f123ba573114ee | [
"BSD-2-Clause"
] | 1 | 2021-05-21T01:35:31.000Z | 2021-05-22T14:34:23.000Z | frontend/lambda_function.py | lylus/tesla_cmds_aws_lambda_functions | b7778bb6f0172e96eba8e99916f123ba573114ee | [
"BSD-2-Clause"
] | 4 | 2021-02-07T00:09:34.000Z | 2022-03-28T14:36:25.000Z | #!/usr/bin/env python3
import boto3,json,os,urllib3
| 32.598131 | 138 | 0.640195 | #!/usr/bin/env python3
import boto3,json,os,urllib3
def lambda_handler(event, context):
    """Front-end Lambda for queueing Tesla commands.

    Verifies the vehicle is reachable via the Tesla owner API (unless the
    command is ``test_command``), then publishes the request to an SNS topic
    for the back-end worker. Returns an API-Gateway style response; the body
    echoes the request plus the vehicle state. ``statusCode`` is 200 when the
    command was queued, 400 when the Tesla API could not be reached.

    Fixes over the previous revision: removed the unused
    ``MESSAGE_BODY_STR_ENC`` and ``MESSAGE_ID`` locals and deduplicated the
    return-payload construction (the two branches differed only in status).
    """
    ########################################### Global Variables #####################################################
    BASE_URL = "https://owner-api.teslamotors.com/api/1/vehicles/"
    EVENT_BODY = json.loads(event["body"])
    EVENT_HEADERS = event["headers"]
    TOKEN = EVENT_BODY["TOKEN"]
    VEHICLE_ID = EVENT_BODY["VEHICLE_ID"]
    INPUT_CMD = EVENT_BODY["INPUT_CMD"]
    PARAMETER_1 = EVENT_BODY["PARAMETER_1"]
    PARAMETER_2 = EVENT_BODY["PARAMETER_2"]
    SNS_CLIENT = boto3.client('sns')
    TOPIC_ARN = os.environ.get('SNS_TOPIC_ARN')
    # The proxy header carries the real client IP when present.
    CLIENT_IP_ADDRESS = EVENT_HEADERS.get("X-Forwarded-For", "127.0.0.1")
    ##################################################################################################################

    def GetVehicleState(BASE_URL, VEHICLE_ID):
        """Return the vehicle's state string, or None when the API call fails."""
        HEADERS = {
            'Authorization': "Bearer " + TOKEN,
            'Content-Type': 'application/json',
            'User-Agent': 'None'
        }
        URL = BASE_URL + VEHICLE_ID
        HTTP = urllib3.PoolManager()
        HTTP_REQUEST = HTTP.request(
            'GET',
            URL,
            headers=HEADERS
        )
        if HTTP_REQUEST.status == 200:
            VEHICLE_DATA = json.loads(HTTP_REQUEST.data.decode('utf-8'))
            return VEHICLE_DATA["response"]["state"]
        # Explicit: callers treat None as "vehicle unreachable".
        return None

    if INPUT_CMD == "test_command":
        # Test commands skip the Tesla API round-trip entirely.
        INITIAL_VEHICLE_STATE = "testing"
    else:
        INITIAL_VEHICLE_STATE = GetVehicleState(BASE_URL, VEHICLE_ID)

    if INITIAL_VEHICLE_STATE is not None:
        MESSAGE_BODY = {
            "TOKEN": TOKEN,
            "BASE_URL": BASE_URL,
            "VEHICLE_ID": VEHICLE_ID,
            "INPUT_CMD": INPUT_CMD,
            "PARAMETER_1": PARAMETER_1,
            "PARAMETER_2": PARAMETER_2,
            "INITIAL_VEHICLE_STATE": INITIAL_VEHICLE_STATE,
            "CLIENT_IP_ADDRESS": CLIENT_IP_ADDRESS
        }
        SNS_CLIENT.publish(
            TopicArn=TOPIC_ARN,
            Message=json.dumps(MESSAGE_BODY),
        )
        print("Queueing the " + INPUT_CMD + " command for vehicle ID #" + VEHICLE_ID + " on behalf of " + CLIENT_IP_ADDRESS)
        STATUS_CODE = 200
    else:
        print("ERROR: Exiting as communication with Tesla's APIs failed for vehicle ID #" + VEHICLE_ID + " on behalf of " + CLIENT_IP_ADDRESS)
        STATUS_CODE = 400

    # Both outcomes return the same payload shape; only statusCode differs.
    RETURN_DATA = {
        "statusCode": STATUS_CODE,
        "BASE_URL": BASE_URL,
        "VEHICLE_ID": VEHICLE_ID,
        "INPUT_CMD": INPUT_CMD,
        "PARAMETER_1": PARAMETER_1,
        "PARAMETER_2": PARAMETER_2,
        "INITIAL_VEHICLE_STATE": INITIAL_VEHICLE_STATE
    }
    return {
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps(RETURN_DATA)
    }
| 3,412 | 0 | 23 |
abc9d9fc06d605b36b30567b88698cc842fd9d4b | 594 | py | Python | example/echo_service.py | nightfade/protobuf-RPC | 5c6084f6d5a6b9affc56cddab6413b4b662e973b | [
"MIT"
] | 15 | 2015-04-02T08:48:18.000Z | 2021-08-04T08:28:35.000Z | example/echo_service.py | nightfade/protobuf-RPC | 5c6084f6d5a6b9affc56cddab6413b4b662e973b | [
"MIT"
] | 2 | 2016-09-12T01:39:07.000Z | 2021-07-22T09:31:43.000Z | example/echo_service.py | nightfade/protobuf-RPC | 5c6084f6d5a6b9affc56cddab6413b4b662e973b | [
"MIT"
] | 5 | 2018-02-09T01:41:59.000Z | 2020-10-12T06:06:06.000Z | __author__ = 'nightfade'
from example.echo_service_pb2 import IEchoService, IEchoClient_Stub
import logger
| 33 | 73 | 0.718855 | __author__ = 'nightfade'
from example.echo_service_pb2 import IEchoService, IEchoClient_Stub
import logger
class EchoService(IEchoService):
    """Server-side implementation of the protobuf ``IEchoService``."""

    def echo(self, rpc_controller, echo_string, callback):
        """ called by RpcChannel.receive when a complete request reached.
        """
        logger.get_logger('EchoService').info('echo service is called')
        # NOTE(review): self-assignment is a no-op for the payload; presumably
        # some transformation of the message was intended here -- confirm.
        # (Left untouched: for proto2 messages the assignment sets the
        # has-bit, so removing it could change serialization.)
        echo_string.message = echo_string.message
        # Send the (unchanged) message back to the caller via the client stub.
        client_stub = IEchoClient_Stub(rpc_controller.rpc_channel)
        client_stub.respond(rpc_controller, echo_string, callback=None)
        if callback:
            callback()
| 0 | 462 | 23 |
cdf2f2fa1ca8c7e8fdb87369a49e10280581f20e | 533 | py | Python | core_admin/des/migrations/0033_auto_20210722_1449.py | linea-it/tno | f973381280504ceb1b606b5b3ccc79b6b8c2aa4f | [
"MIT"
] | null | null | null | core_admin/des/migrations/0033_auto_20210722_1449.py | linea-it/tno | f973381280504ceb1b606b5b3ccc79b6b8c2aa4f | [
"MIT"
] | 112 | 2018-04-24T19:10:55.000Z | 2022-02-26T16:55:02.000Z | core_admin/des/migrations/0033_auto_20210722_1449.py | linea-it/tno | f973381280504ceb1b606b5b3ccc79b6b8c2aa4f | [
"MIT"
] | null | null | null | # Generated by Django 2.2.13 on 2021-07-22 14:49
from django.db import migrations, models
| 28.052632 | 193 | 0.589118 | # Generated by Django 2.2.13 on 2021-07-22 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('des', '0032_auto_20210713_2127'),
]
operations = [
migrations.AlterField(
model_name='astrometryjob',
name='status',
field=models.IntegerField(choices=[(1, 'Idle'), (2, 'Running'), (3, 'Completed'), (4, 'Failed'), (5, 'Aborted'), (6, 'Warning'), (7, 'Launched')], default=1, verbose_name='Status'),
),
]
| 0 | 418 | 23 |
605d26591aae91ae8eef7279efb53fd4073fdf69 | 1,994 | py | Python | unreleased/azure-keyvault/azure/keyvault/models/key_create_parameters.py | CharaD7/azure-sdk-for-python | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | [
"MIT"
] | 1 | 2017-10-29T15:14:35.000Z | 2017-10-29T15:14:35.000Z | unreleased/azure-keyvault/azure/keyvault/models/key_create_parameters.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | unreleased/azure-keyvault/azure/keyvault/models/key_create_parameters.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyCreateParameters(Model):
    """The key create parameters.

    :param kty: The type of key to create. Valid key types, see
     JsonWebKeyType. Supported JsonWebKey key types (kty) for Elliptic Curve,
     RSA, HSM, Octet. Possible values include: 'EC', 'RSA', 'RSA-HSM', 'oct'
    :type kty: str or :class:`JsonWebKeyType
     <azure.keyvault.models.JsonWebKeyType>`
    :param key_size: The key size in bytes. e.g. 1024 or 2048.
    :type key_size: int
    :param key_ops:
    :type key_ops: list of str or :class:`JsonWebKeyOperation
     <azure.keyvault.models.JsonWebKeyOperation>`
    :param key_attributes:
    :type key_attributes: :class:`KeyAttributes
     <azure.keyvault.models.KeyAttributes>`
    :param tags: Application-specific metadata in the form of key-value pairs
    :type tags: dict
    """

    # Client-side validation rules msrest applies before serialization.
    _validation = {
        'kty': {'required': True, 'min_length': 1},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type codes.
    _attribute_map = {
        'kty': {'key': 'kty', 'type': 'str'},
        'key_size': {'key': 'key_size', 'type': 'int'},
        'key_ops': {'key': 'key_ops', 'type': '[JsonWebKeyOperation]'},
        'key_attributes': {'key': 'attributes', 'type': 'KeyAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
| 37.622642 | 89 | 0.607322 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyCreateParameters(Model):
    """The key create parameters.

    :param kty: The type of key to create. Valid key types, see
     JsonWebKeyType. Supported JsonWebKey key types (kty) for Elliptic Curve,
     RSA, HSM, Octet. Possible values include: 'EC', 'RSA', 'RSA-HSM', 'oct'
    :type kty: str or :class:`JsonWebKeyType
     <azure.keyvault.models.JsonWebKeyType>`
    :param key_size: The key size in bytes. e.g. 1024 or 2048.
    :type key_size: int
    :param key_ops:
    :type key_ops: list of str or :class:`JsonWebKeyOperation
     <azure.keyvault.models.JsonWebKeyOperation>`
    :param key_attributes:
    :type key_attributes: :class:`KeyAttributes
     <azure.keyvault.models.KeyAttributes>`
    :param tags: Application-specific metadata in the form of key-value pairs
    :type tags: dict
    """

    # Client-side validation rules msrest applies before serialization.
    _validation = {
        'kty': {'required': True, 'min_length': 1},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type codes.
    _attribute_map = {
        'kty': {'key': 'kty', 'type': 'str'},
        'key_size': {'key': 'key_size', 'type': 'int'},
        'key_ops': {'key': 'key_ops', 'type': '[JsonWebKeyOperation]'},
        'key_attributes': {'key': 'attributes', 'type': 'KeyAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    # NOTE(review): no super().__init__() call -- generated msrest code of
    # this vintage appears to rely on that; confirm against the Model base
    # before changing.
    def __init__(self, kty, key_size=None, key_ops=None, key_attributes=None, tags=None):
        self.kty = kty
        self.key_size = key_size
        self.key_ops = key_ops
        self.key_attributes = key_attributes
        self.tags = tags
| 221 | 0 | 27 |
5f9c3fde3437bdd5ca9486c84e0d914fc40c75bf | 12,044 | py | Python | surveyor/podman.py | paradise-fi/surveyor | 227a678b4f64bc79d44f77a3799bfc00a04ec9fa | [
"MIT"
] | null | null | null | surveyor/podman.py | paradise-fi/surveyor | 227a678b4f64bc79d44f77a3799bfc00a04ec9fa | [
"MIT"
] | null | null | null | surveyor/podman.py | paradise-fi/surveyor | 227a678b4f64bc79d44f77a3799bfc00a04ec9fa | [
"MIT"
] | null | null | null | import os
import json
import time
import subprocess
import contextlib
import dateutil.parser
import datetime
import logging
from tempfile import TemporaryDirectory
# See https://github.com/containers/podman/issues/10173
CGROUP_WORKAROUND = False
RUNTIME = "crun"
def invokePodmanCommandPoll(command, output):
    """
    Invoke podman command and continuously stream stdout+stderr via the
    ``output`` callback. Raises PodmanError on a non-zero exit.
    """
    command = podmanBaseCommand(command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    collected = []
    for line in iter(p.stdout.readline, b''):
        text = line.decode("utf-8")
        collected.append(text)
        output(text)
    rest = p.stdout.read().decode("utf-8")
    collected.append(rest)
    output(rest)
    exitcode = p.wait()
    if exitcode != 0:
        # BUG FIX: PodmanError.__init__ requires (message, log); the previous
        # one-argument raise crashed with TypeError instead of reporting the
        # failure. Pass the captured output as the log.
        raise PodmanError(f"{' '.join(command)}", "".join(collected))
def imageExists(name):
    """
    Return True when ``podman image exists <name>`` succeeds.
    """
    return not subprocess.run(["podman", "image", "exists", name],
                              capture_output=True).returncode
def buildImage(dockerfile, tag, args, cpuLimit=None, memLimit=None, noCache=False, onOutput=None):
    """
    Build an image from the given dockerfile (string), tagged *tag*.
    Returns the build log (or streams it to *onOutput* when given).
    """
    with TemporaryDirectory() as workdir:
        dockerfilePath = os.path.join(workdir, "Dockerfile")
        with open(dockerfilePath, "w") as f:
            f.write(dockerfile)
        cmd = ["build", "-t", tag]
        for key, value in args.items():
            cmd += ["--build-arg", f"{key}={value}"]
        if memLimit is not None:
            cmd += ["--memory", str(memLimit)]
        if cpuLimit is not None:
            cmd += ["--cpu-period", "100000", "--cpu-quota", str(100000 * cpuLimit)]
        if noCache:
            cmd.append("--no-cache")
        # Use docker format to support extensions like SHELL
        cmd += ["--format", "docker", "-f", dockerfilePath, workdir]
        if onOutput is None:
            return invokePodmanCommand(cmd)[0]
        return invokePodmanCommandPoll(cmd, onOutput)
def createContainer(image, command, mounts=[], cpuLimit=None, memLimit=None,
                    cgroup=None, name=None):
    """
    Create container, return its identifier

    ``mounts`` is a list of ``{'source': ..., 'target': ...}`` bind mounts;
    ``cpuLimit``/``memLimit`` map to podman's --cpus/--memory flags.
    NOTE(review): ``mounts=[]`` is a mutable default -- harmless here since it
    is never mutated, but worth confirming.
    """
    podmanCmd = ["container", "create", "--runtime", RUNTIME]
    for m in mounts:
        podmanCmd.extend(["--mount", f"type=bind,src={m['source']},target={m['target']}"])
    if cpuLimit is not None:
        podmanCmd.extend(["--cpus", str(cpuLimit)])
    if memLimit is not None:
        podmanCmd.extend(["--memory", str(memLimit)])
        podmanCmd.extend(["--memory-swap", str(memLimit)])
    if cgroup is not None:
        podmanCmd.extend(["--cgroup-parent", cgroup.path])
    if name is not None:
        podmanCmd.extend(["--name", name])
    podmanCmd.append(image)
    podmanCmd.extend(command)
    if CGROUP_WORKAROUND:
        # Workaround (see module-level link): run podman from a forked child
        # placed in the target cgroup, and pipe the container id back to the
        # parent.
        r, w = os.pipe()
        pid = os.fork()
        if pid > 0:
            # Parent: wait for the child and read the container id it wrote.
            os.close(w)
            with os.fdopen(r) as r:
                os.waitpid(pid, 0)
                s = r.read()
                return s.strip()
        else:
            # Child: join the cgroup, create the container, report the id.
            os.close(r)
            cgroup.addProcess(pid)
            with os.fdopen(w, 'w') as w:
                res = invokePodmanCommand(podmanCmd)[0]
                w.write(res)
                w.close()
                os._exit(0)
    else:
        return invokePodmanCommand(podmanCmd)[0].strip()
def containerRunTime(inspection):
    """
    Return container runtime in microseconds
    """
    started = dateutil.parser.parse(inspection["State"]["StartedAt"])
    finished = dateutil.parser.parse(inspection["State"]["FinishedAt"])
    # Podman reports an epoch-zero FinishedAt while the container is still
    # running; treat that as "now" so the elapsed time keeps growing.
    if datetime.datetime.timestamp(finished) < 0:
        finished = datetime.datetime.now(datetime.timezone.utc)
    delta = finished - started
    # BUG FIX: the previous `seconds * 1e6 + microseconds` arithmetic dropped
    # ``delta.days``, under-reporting any run longer than 24 hours. Floor
    # division by a 1-microsecond timedelta yields total microseconds.
    return delta // datetime.timedelta(microseconds=1)
def runAndWatch(container, cgroup, watchCgroup, notify=None, wallClockLimit=None,
                cpuClockLimit=None, pollInterval=1, notifyInterval=10):
    """
    Run a container and watch it for time limits. Returns a dictionary with
    container statistics.

    ``wallClockLimit`` / ``cpuClockLimit`` are in seconds; BUG FIX: a None
    limit now means "unlimited" instead of raising TypeError in the
    comparison below.
    """
    inspection = inspectContainer(container)
    command = ["container", "start", "--runtime", RUNTIME, container]
    if CGROUP_WORKAROUND:
        # Start podman from a child first moved into the target cgroup
        # (see the issue linked at the top of this file).
        pid = os.fork()
        if pid > 0:
            os.waitpid(pid, 0)
        else:
            cgroup.addProcess(pid)
            invokePodmanCommand(command)
            os._exit(0)
    else:
        invokePodmanCommand(command)
    timeout = False
    ticks = 0
    maxMemoryUsage = 0
    while True:
        time.sleep(pollInterval)
        ticks += 1
        if ticks % notifyInterval == 0 and notify is not None:
            notify()
        inspection = inspectContainer(container)
        if containerStatus(inspection) != "running":
            break
        wTime = containerRunTime(inspection)
        maxMemoryUsage = max(maxMemoryUsage, watchCgroup.currentMemoryUsage())
        cTime = watchCgroup.cpuStats()["usage_usec"]
        wallExceeded = wallClockLimit is not None and wTime >= wallClockLimit * 1000000
        cpuExceeded = cpuClockLimit is not None and cTime >= cpuClockLimit * 1000000
        if wallExceeded or cpuExceeded:
            # Stop the container; the next poll observes the stopped state
            # and exits the loop.
            stopContainer(container, timeout=1)
            timeout = True
    inspection = inspectContainer(container)
    stats = {
        "cpuStat": watchCgroup.cpuStats(),
        "memStat": watchCgroup.memoryStats(),
        "maxMemory": maxMemoryUsage,
        "wallTime": containerRunTime(inspection),
        "exitCode": containerExitCode(inspection),
        "outOfMemory": containerOomKilled(inspection),
        "timeout": timeout,
        "output": containerLogs(container)
    }
    return stats
| 33.455556 | 151 | 0.603454 | import os
import json
import time
import subprocess
import contextlib
import dateutil.parser
import datetime
import logging
from tempfile import TemporaryDirectory
# See https://github.com/containers/podman/issues/10173
CGROUP_WORKAROUND = False
RUNTIME = "crun"
class Cgroup:
    """Thin wrapper around a cgroup-v2 node identified by its hierarchy path.

    ``path`` is the cgroup path as reported in ``/proc/<pid>/cgroup``
    (e.g. ``/user.slice/...``); :attr:`fsPath` maps it onto the
    ``/sys/fs/cgroup`` filesystem.

    Fixes over the previous revision: ``enableControllers`` referenced an
    undefined name ``group`` in its error path (NameError while handling the
    original exception), ``newGroup`` referenced an unbound ``group`` in its
    ``finally`` when ``os.mkdir`` failed, and ``release`` swallowed *all*
    exceptions instead of only OSError.
    """

    def __init__(self, path=None):
        self.path = path
        self.dummyProc = None  # keep-alive process created by occupy()

    def __str__(self):
        return f"<Cgroup {self.path}>"

    def occupy(self):
        """
        Add a dummy process to the group, so it does not die
        """
        self.dummyProc = subprocess.Popen(["sleep", "infinity"])
        self.addProcess(self.dummyProc.pid)

    def release(self):
        """
        Release the group - kill the dummy process and remove the directory
        """
        if self.dummyProc is not None:
            self.dummyProc.kill()
            self.dummyProc.wait()
        try:
            os.rmdir(self.fsPath)
        except OSError:
            # The group was already cleaned up
            pass

    @property
    def fsPath(self):
        """Filesystem location of this group under /sys/fs/cgroup."""
        return os.path.join("/sys/fs/cgroup/", self.path[1:])

    def addProcess(self, pid):
        """
        Add a process to the group
        """
        with open(os.path.join(self.fsPath, "cgroup.procs"), "w") as f:
            f.write(str(pid) + "\n")

    def enableControllers(self, controllers):
        """Enable the given controllers (e.g. "cpu", "memory") for children."""
        try:
            with open(os.path.join(self.fsPath, "cgroup.subtree_control"), "w") as f:
                f.write(" ".join([f"+{x}" for x in controllers]))
        except FileNotFoundError as e:
            logging.info(f"Attempt: {os.path.join(self.fsPath, 'cgroup.subtree_control')}")
            logging.info(f"Exception: {e}")
            logging.info(f"Filename {e.filename}, {e.strerror}, {e.errno}")
            # BUG FIX: this line referenced an undefined ``group``, raising
            # NameError while handling the original exception.
            logging.info(f"{self.fsPath}")
            raise

    @staticmethod
    def processGroup():
        """
        Get a handle for process cgroup
        """
        with open(f"/proc/{os.getpid()}/cgroup") as f:
            path = f.read().split("::")[1].strip()
        return Cgroup(path)

    @staticmethod
    def createScope(scopeName):
        """
        Use systemd to create a new user cgroup scope. Put the current process
        into it under subgroup 'manager' (when the process is directly in the
        scope, you cannot set cgroup.subtree_control). Return Cgroup object of
        the scope.
        """
        # Inspiration: https://unix.stackexchange.com/questions/525740/how-do-i-create-a-systemd-scope-for-an-already-existing-process-from-the-command
        command = ["busctl", "call", "--user",
                   "org.freedesktop.systemd1", "/org/freedesktop/systemd1",
                   "org.freedesktop.systemd1.Manager", "StartTransientUnit",
                   "ssa(sv)a(sa(sv))", scopeName + ".scope",
                   "fail", "4", "PIDs", "au", "1",
                   str(os.getpid()), "Delegate", "b", "1",
                   "MemoryAccounting", "b", "1", "CPUAccounting", "b", "1",
                   "0"]
        p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        if p.returncode != 0:
            raise RuntimeError(p.stdout.decode("utf-8"))
        with open(f"/proc/{os.getpid()}/cgroup") as f:
            path = f.read().split("::")[1].strip()
        cgroup = Cgroup(path)
        managerPath = os.path.join(cgroup.path, "manager")
        managerDirPath = os.path.join(cgroup.fsPath, "manager")
        os.mkdir(managerDirPath)
        managerGroup = Cgroup(path=managerPath)
        managerGroup.addProcess(os.getpid())
        return cgroup

    @contextlib.contextmanager
    def newGroup(self, name, controllers=["cpu", "memory"]):
        """
        Context manager for creating new sub-groups.
        """
        groupPath = os.path.join(self.path, name)
        dirPath = os.path.join(self.fsPath, name)
        group = None
        try:
            os.mkdir(dirPath)
            group = Cgroup(path=groupPath)
            group.enableControllers(controllers)
            yield group
        finally:
            # BUG FIX: guard against mkdir failing before ``group`` is bound,
            # which previously raised NameError and masked the real error.
            if group is not None:
                group.release()

    def moveIntoSubgroup(self, name):
        """
        Move current process into a subgroup
        """
        subPath = os.path.join(self.path, name)
        subDirPath = os.path.join(self.fsPath, name)
        os.mkdir(subDirPath)
        subGroup = Cgroup(path=subPath)
        subGroup.addProcess(os.getpid())
        return subGroup

    def _readGroupfile(self, filename):
        """Parse a whitespace-separated key/value cgroup file into a dict."""
        with open(os.path.join(self.fsPath, filename)) as f:
            lines = f.readlines()
        d = {}
        for l in lines:
            s = [x.strip() for x in l.split()]
            d[s[0]] = s[1]
        return d

    def cpuStats(self):
        """Return cpu.stat as a dict of ints (e.g. ``usage_usec``)."""
        s = self._readGroupfile("cpu.stat")
        return {k: int(v) for k, v in s.items()}

    def memoryStats(self):
        """Return memory.stat as a dict of ints."""
        s = self._readGroupfile("memory.stat")
        return {k: int(v) for k, v in s.items()}

    def currentMemoryUsage(self):
        """Return the group's current memory usage in bytes."""
        with open(os.path.join(self.fsPath, "memory.current")) as f:
            return int(f.read())
class PodmanError(RuntimeError):
    """Raised when a podman invocation fails.

    ``log`` carries the captured stdout/stderr of the failed command.
    BUG FIX: ``log`` is now optional -- invokePodmanCommandPoll raises with
    only a message (output already streamed to its callback), which used to
    crash with TypeError here.
    """
    def __init__(self, message, log=""):
        super().__init__(message)
        self.log = log
def podmanBaseCommand(command):
    """Return *command* prefixed with the common podman CLI flags."""
    prefix = ["podman", "--cgroup-manager", "cgroupfs", "--log-level", "error"]
    return prefix + command
def invokePodmanCommand(command, **kwargs):
    """Run a podman command; return (stdout, stderr) or raise PodmanError."""
    command = podmanBaseCommand(command)
    proc = subprocess.run(command, capture_output=True, **kwargs)
    out = proc.stdout.decode("utf-8")
    err = proc.stderr.decode("utf-8")
    if proc.returncode:
        raise PodmanError(f"{' '.join(command)}", out + "\n" + err)
    return out, err
def invokePodmanCommandPoll(command, output):
    """
    Invoke podman command and continuously stream stdout+stderr via the
    ``output`` callback. Raises PodmanError on a non-zero exit.
    """
    command = podmanBaseCommand(command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    collected = []
    for line in iter(p.stdout.readline, b''):
        text = line.decode("utf-8")
        collected.append(text)
        output(text)
    rest = p.stdout.read().decode("utf-8")
    collected.append(rest)
    output(rest)
    exitcode = p.wait()
    if exitcode != 0:
        # BUG FIX: PodmanError.__init__ requires (message, log); the previous
        # one-argument raise crashed with TypeError instead of reporting the
        # failure. Pass the captured output as the log.
        raise PodmanError(f"{' '.join(command)}", "".join(collected))
def imageExists(name):
    """
    Return True when ``podman image exists <name>`` succeeds.
    """
    return not subprocess.run(["podman", "image", "exists", name],
                              capture_output=True).returncode
def containerExists(name):
    """Return True when a podman container with the given name exists."""
    completed = subprocess.run(["podman", "container", "exists", name],
                               capture_output=True)
    return completed.returncode == 0
def buildImage(dockerfile, tag, args, cpuLimit=None, memLimit=None, noCache=False, onOutput=None):
    """
    Build image for given dockerfile (string). Return the logs of the build.

    dockerfile - Dockerfile contents as a string
    tag        - tag for the resulting image
    args       - dict of --build-arg key/value pairs
    cpuLimit   - CPU limit in cores (translated to period/quota)
    memLimit   - memory limit for the build
    noCache    - disable the layer cache
    onOutput   - optional callback; when given, build output is streamed to it
    """
    with TemporaryDirectory() as workDir:
        dockerfilePath = os.path.join(workDir, "Dockerfile")
        with open(dockerfilePath, "w") as handle:
            handle.write(dockerfile)
        command = ["build", "-t", tag]
        for key, value in args.items():
            command += ["--build-arg", f"{key}={value}"]
        if memLimit is not None:
            command += ["--memory", str(memLimit)]
        if cpuLimit is not None:
            command += ["--cpu-period", "100000",
                        "--cpu-quota", str(100000 * cpuLimit)]
        if noCache:
            command.append("--no-cache")
        # Use docker format to support extensions like SHELL
        command += ["--format", "docker", "-f", dockerfilePath, workDir]
        if onOutput is not None:
            return invokePodmanCommandPoll(command, onOutput)
        return invokePodmanCommand(command)[0]
def createContainer(image, command, mounts=None, cpuLimit=None, memLimit=None,
                    cgroup=None, name=None):
    """
    Create container, return its identifier.

    image    - image to instantiate
    command  - command (list of args) the container will run
    mounts   - iterable of {"source": ..., "target": ...} bind mounts
    cpuLimit - CPU limit in cores
    memLimit - memory (and swap) limit
    cgroup   - optional Cgroup used as the container's cgroup parent
    name     - optional container name

    Fix: *mounts* previously defaulted to a shared mutable list (mutable
    default argument anti-pattern); it now defaults to None with unchanged
    behavior.
    """
    if mounts is None:
        mounts = []
    podmanCmd = ["container", "create", "--runtime", RUNTIME]
    for m in mounts:
        podmanCmd.extend(["--mount", f"type=bind,src={m['source']},target={m['target']}"])
    if cpuLimit is not None:
        podmanCmd.extend(["--cpus", str(cpuLimit)])
    if memLimit is not None:
        podmanCmd.extend(["--memory", str(memLimit)])
        podmanCmd.extend(["--memory-swap", str(memLimit)])
    if cgroup is not None:
        podmanCmd.extend(["--cgroup-parent", cgroup.path])
    if name is not None:
        podmanCmd.extend(["--name", name])
    podmanCmd.append(image)
    podmanCmd.extend(command)
    if CGROUP_WORKAROUND:
        # Fork so the podman invocation itself runs inside the target cgroup;
        # the container id is handed back to the parent through a pipe.
        r, w = os.pipe()
        pid = os.fork()
        if pid > 0:
            os.close(w)
            with os.fdopen(r) as r:
                os.waitpid(pid, 0)
                s = r.read()
                return s.strip()
        else:
            os.close(r)
            # NOTE(review): in the child, pid == 0; this relies on the cgroup
            # treating pid 0 as "the writing process" -- confirm addProcess.
            cgroup.addProcess(pid)
            with os.fdopen(w, 'w') as w:
                res = invokePodmanCommand(podmanCmd)[0]
                w.write(res)
                w.close()
            os._exit(0)
    else:
        return invokePodmanCommand(podmanCmd)[0].strip()
def inspectContainer(container):
    """Return the parsed `podman inspect` record for *container*."""
    raw = invokePodmanCommand(["inspect", container])[0]
    return json.loads(raw)[0]
def containerRunTime(inspection):
    """
    Return container runtime in microseconds.

    A still-running container reports a zero-epoch FinishedAt (negative
    timestamp); in that case the current UTC time is used instead.

    Bug fix: the previous version used delta.seconds, which ignores whole
    days, so containers running longer than 24h reported a wrapped-around
    runtime; the days component is now included.
    """
    started = dateutil.parser.parse(inspection["State"]["StartedAt"])
    finished = dateutil.parser.parse(inspection["State"]["FinishedAt"])
    if datetime.datetime.timestamp(finished) < 0:
        finished = datetime.datetime.now(datetime.timezone.utc)
    delta = finished - started
    return (delta.days * 86400 + delta.seconds) * 1000000 + delta.microseconds
def containerStatus(inspection):
    """Return the container's state string (e.g. "running", "exited")."""
    state = inspection["State"]
    return state["Status"]
def containerExitCode(inspection):
    """Return the container's exit code from an inspect record."""
    state = inspection["State"]
    return state["ExitCode"]
def containerOomKilled(inspection):
    """Return True when the container was killed by the OOM killer."""
    state = inspection["State"]
    return state["OOMKilled"]
def containerCgroup(inspection):
    """Return a Cgroup handle for the container's configured cgroup parent."""
    parent = inspection["HostConfig"]["CgroupParent"]
    return Cgroup(path=parent)
def stopContainer(container, timeout=None):
    """Stop *container*; when *timeout* is given, force the kill after that
    many seconds."""
    command = ["stop", container]
    if timeout is not None:
        command += ["--timeout", str(timeout)]
    return invokePodmanCommand(command)[0]
def removeContainer(container):
    """Force-remove *container*."""
    return invokePodmanCommand(["container", "rm", "-f", container])[0]
def containerLogs(container):
    """Return the captured logs of *container*."""
    return invokePodmanCommand(["logs", container])[0]
def runAndWatch(container, cgroup, watchCgroup, notify=None, wallClockLimit=None,
                cpuClockLimit=None, pollInterval=1, notifyInterval=10):
    """
    Run a container and watch it for time limits. Returns a dictionary with
    container statistics.

    container      - container id to start
    cgroup         - cgroup the podman invocation is moved into (workaround)
    watchCgroup    - cgroup whose cpu/memory statistics are sampled
    notify         - optional keep-alive callback, invoked every
                     notifyInterval polls
    wallClockLimit - wall-clock limit in seconds, or None for no limit
    cpuClockLimit  - CPU-time limit in seconds, or None for no limit

    Bug fix: both limits default to None but were previously multiplied
    unconditionally, raising TypeError whenever a caller relied on the
    defaults; a None limit is now treated as "unlimited".
    """
    # Result unused, but presumably validates the container exists before
    # starting it (inspect raises on an unknown container) -- TODO confirm.
    inspection = inspectContainer(container)
    command = ["container", "start", "--runtime", RUNTIME, container]
    if CGROUP_WORKAROUND:
        pid = os.fork()
        if pid > 0:
            os.waitpid(pid, 0)
        else:
            # NOTE(review): pid == 0 in the child; relies on the cgroup
            # treating 0 as "the writing process" -- confirm addProcess.
            cgroup.addProcess(pid)
            invokePodmanCommand(command)
            os._exit(0)
    else:
        invokePodmanCommand(command)
    timeout = False
    ticks = 0
    maxMemoryUsage = 0
    while True:
        time.sleep(pollInterval)
        ticks += 1
        if ticks % notifyInterval == 0 and notify is not None:
            notify()
        inspection = inspectContainer(container)
        if containerStatus(inspection) != "running":
            break
        wTime = containerRunTime(inspection)
        maxMemoryUsage = max(maxMemoryUsage, watchCgroup.currentMemoryUsage())
        cTime = watchCgroup.cpuStats()["usage_usec"]
        exceededWall = wallClockLimit is not None and wTime >= wallClockLimit * 1000000
        exceededCpu = cpuClockLimit is not None and cTime >= cpuClockLimit * 1000000
        if exceededWall or exceededCpu:
            stopContainer(container, timeout=1)
            timeout = True
    inspection = inspectContainer(container)
    stats = {
        "cpuStat": watchCgroup.cpuStats(),
        "memStat": watchCgroup.memoryStats(),
        "maxMemory": maxMemoryUsage,
        "wallTime": containerRunTime(inspection),
        "exitCode": containerExitCode(inspection),
        "outOfMemory": containerOomKilled(inspection),
        "timeout": timeout,
        "output": containerLogs(container)
    }
    return stats
| 2,547 | 3,530 | 325 |
5b030e5d90f4f4e54a1b5cf396607fe4b37dbf22 | 809 | py | Python | striped/common/importer.py | ivmfnal/striped | eef1a4d544fa1b97fde39d7ee5ef779071218891 | [
"BSD-3-Clause"
] | 1 | 2019-07-01T15:19:43.000Z | 2019-07-01T15:19:43.000Z | striped/common/importer.py | ivmfnal/striped | eef1a4d544fa1b97fde39d7ee5ef779071218891 | [
"BSD-3-Clause"
] | null | null | null | striped/common/importer.py | ivmfnal/striped | eef1a4d544fa1b97fde39d7ee5ef779071218891 | [
"BSD-3-Clause"
] | 1 | 2020-04-21T21:18:01.000Z | 2020-04-21T21:18:01.000Z | import sys, time, os
| 35.173913 | 95 | 0.520396 | import sys, time, os
def import_from_text(module_text, names, module_name=None, tmp="/tmp"):
    """
    Write *module_text* to a temporary module file under *tmp*, import it,
    clean up, and return the imported module object.

    module_text - Python source of the module
    names       - names requested from __import__ (its fromlist)
    module_name - optional module name; a unique one is generated by default
    tmp         - directory the temporary module file is written to

    Fixes over the previous version:
      * the temporary file is written through a context manager (the file
        handle was previously leaked),
      * cleanup failures are limited to OSError instead of a bare except,
      * the sys.modules entry is removed with pop(), so a failed import no
        longer raises KeyError from the finally block, masking the real error.
    """
    saved_path = sys.path[:]
    imported = None
    module_name = module_name or "m_%s_%d" % (os.getpid(), int(time.time() * 1000.0))
    assert not "/" in module_name
    try:
        if not tmp in sys.path:
            sys.path.insert(0, tmp)
        module_file = "%s/%s.py" % (tmp, module_name)
        with open(module_file, "w") as handle:
            handle.write(module_text)
        imported = __import__(module_name, {}, {}, names)
        # Best-effort removal of the source file and a legacy .pyc sibling.
        for leftover in (module_file, module_file + "c"):
            try:
                os.unlink(leftover)
            except OSError:
                pass
    finally:
        sys.path = saved_path
        sys.modules.pop(module_name, None)
    return imported
| 765 | 0 | 23 |
cc30f6cf29ac186a3c2f042722711e0d22c572f4 | 1,216 | py | Python | script_2_inverse_optimization_step_different_costs.py | sallen7/IO_GNEP | e6f207113c857690a1d6d7b68673dba09a2dcf2a | [
"BSD-3-Clause"
] | 2 | 2021-02-18T23:48:02.000Z | 2021-10-11T09:04:09.000Z | script_2_inverse_optimization_step_different_costs.py | sallen7/IO_GNEP | e6f207113c857690a1d6d7b68673dba09a2dcf2a | [
"BSD-3-Clause"
] | null | null | null | script_2_inverse_optimization_step_different_costs.py | sallen7/IO_GNEP | e6f207113c857690a1d6d7b68673dba09a2dcf2a | [
"BSD-3-Clause"
] | null | null | null | ########## Script 2: Different Costs ###################
import sys
from RK_IO_model import RK_IO_methods
from Generalized_RK_Framework import generalized_RK_framework
import pdb #for debugging
import numpy as np
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
from pyomo.opt import SolverStatus, TerminationCondition
import pyomo.mpec as pyompec #for the complementarity
import math
from scipy.io import savemat, loadmat
import pandas
import time
import matplotlib.pyplot as plt
import pickle
############### Step 1: Importing the class object ######################
#https://www.datacamp.com/community/tutorials/pickle-python-tutorial
# Load the framework object that the previous script pickled to disk.
file_to_be_read = open("class_object_1","rb")
generalized_framework_object = pickle.load(file_to_be_read)
file_to_be_read.close()

############# Step 2: Inverse Optimization Step #####################
# Invoke the inverse-optimization routine on the loaded object.
generalized_framework_object.running_IO_code_to_obtain_costs_different_costs()

########### Step 3: Saving the Object Again ###################
#https://www.datacamp.com/community/tutorials/pickle-python-tutorial
# Pickle the updated object for the next script in the pipeline.
name_of_file = "class_object_2"
test = open(name_of_file,'wb')
pickle.dump(generalized_framework_object,test)
test.close()
| 33.777778 | 78 | 0.728618 | ########## Script 2: Different Costs ###################
import sys
from RK_IO_model import RK_IO_methods
from Generalized_RK_Framework import generalized_RK_framework
import pdb #for debugging
import numpy as np
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
from pyomo.opt import SolverStatus, TerminationCondition
import pyomo.mpec as pyompec #for the complementarity
import math
from scipy.io import savemat, loadmat
import pandas
import time
import matplotlib.pyplot as plt
import pickle
############### Step 1: Importing the class object ######################
#https://www.datacamp.com/community/tutorials/pickle-python-tutorial
# Load the framework object that the previous script pickled to disk.
file_to_be_read = open("class_object_1","rb")
generalized_framework_object = pickle.load(file_to_be_read)
file_to_be_read.close()

############# Step 2: Inverse Optimization Step #####################
# Invoke the inverse-optimization routine on the loaded object.
generalized_framework_object.running_IO_code_to_obtain_costs_different_costs()

########### Step 3: Saving the Object Again ###################
#https://www.datacamp.com/community/tutorials/pickle-python-tutorial
# Pickle the updated object for the next script in the pipeline.
name_of_file = "class_object_2"
test = open(name_of_file,'wb')
pickle.dump(generalized_framework_object,test)
test.close()
| 0 | 0 | 0 |
cd022bc18d1e82e11d9448620c3bdf9ea96f5329 | 722 | py | Python | snake_game/sprites/game_over.py | carlosjasso/pygame-snake | 417f3515b2343a9ed3a1e39f7ca22159faaf3731 | [
"Unlicense"
] | null | null | null | snake_game/sprites/game_over.py | carlosjasso/pygame-snake | 417f3515b2343a9ed3a1e39f7ca22159faaf3731 | [
"Unlicense"
] | null | null | null | snake_game/sprites/game_over.py | carlosjasso/pygame-snake | 417f3515b2343a9ed3a1e39f7ca22159faaf3731 | [
"Unlicense"
] | null | null | null | import pygame
from pygame.font import Font
from sprites import Sprite
from utils.types import Color, WindowSize, SpritePosition
| 32.818182 | 77 | 0.635734 | import pygame
from pygame.font import Font
from sprites import Sprite
from utils.types import Color, WindowSize, SpritePosition
class GameOver(Sprite):
    """Sprite that renders a "GAME OVER" banner centered in the window."""
    _font : Font          # font used to render the banner text
    _field : WindowSize   # window dimensions the banner is centered in

    @property
    def font_color(self) -> Color:
        """Color of the banner text."""
        return Color(0, 0, 0) # HEX color #000000 (black); earlier comment wrongly said #ffffff

    def __init__(self, window_size: WindowSize) -> None:
        """Render the banner text and center it within *window_size*."""
        super().__init__(None)
        self._field = window_size
        self._font = pygame.font.SysFont("arial", 180, bold=True)
        self.surface = self._font.render("GAME OVER", True, self.font_color)
        # Center the rendered surface in the window.
        self.position = SpritePosition(
            X = (window_size.WIDTH - self.width) / 2,
            Y = (window_size.HEIGHT - self.height) / 2)
| 448 | 118 | 24 |
f3afc6269851ae3c48dc3f2cc3108ca06286db59 | 2,044 | py | Python | apps/mancave_lights.py | jougs/yaada | 5ec13e2b3828e772d4b3ca2827152d51788e7b81 | [
"MIT"
] | null | null | null | apps/mancave_lights.py | jougs/yaada | 5ec13e2b3828e772d4b3ca2827152d51788e7b81 | [
"MIT"
] | null | null | null | apps/mancave_lights.py | jougs/yaada | 5ec13e2b3828e772d4b3ca2827152d51788e7b81 | [
"MIT"
] | 1 | 2021-10-01T13:58:22.000Z | 2021-10-01T13:58:22.000Z | from scene_manager import SceneManager
| 39.307692 | 108 | 0.521037 | from scene_manager import SceneManager
class MancaveLights(SceneManager):
area = "Mancave"
lights = {
"light.mancave_ceiling_long_wall": {"state": "on", "brightness": 255},
"light.mancave_ceiling_short_wall": {"state": "on", "brightness": 255},
"light.mancave_corner_left": {"state": "on", "brightness": 255},
"light.mancave_corner_right": {"state": "on", "brightness": 255},
"light.mancave_boulder": {"state": "on", "brightness": 255},
"light.mancave_base_camp": {"state": "on", "brightness": 255},
"light.mancave_death_zone": {"state": "on", "brightness": 255},
"light.mancave_table": {"state": "on", "brightness": 255},
}
scenes = {
"ambient": {
'name': 'ambient',
'icon': 'mdi:weather-night',
'replaces': [],
'lights': {}
},
"rehearsal": {
'name': 'rehearsal',
'icon': 'mdi:guitar-electric',
'replaces': [],
'lights': {
"light.mancave_corner_left": {"state": "on", "brightness": 255, "rgbw_color": (0,0,255,0)},
"light.mancave_corner_right": {"state": "on", "brightness": 255, "rgbw_color": (0,255,0,0)},
"light.mancave_table": {"state": "on", "brightness": 72},
"light.mancave_death_zone": {"state": "on", "brightness": 255, "rgbw_color": (255,0,0,0)},
}
},
"climbing": {
'name': 'climbing',
'icon': 'mdi:carabiner',
'replaces': [],
'lights': {
"light.mancave_ceiling_long_wall": {"state": "on", "brightness": 255},
"light.mancave_ceiling_short_wall": {"state": "on", "brightness": 255},
"light.mancave_death_zone": {"state": "on", "brightness": 255, "rgbw_color": (255,0,0,255)},
}
},
}
buttons = {
"mancave_hallway_door_right_short": "rehearsal",
"mancave_hallway_door_right_long": "climbing",
}
| 0 | 1,982 | 23 |
63ba86a5e46cd7da257f1283982d4260af4086a9 | 1,330 | py | Python | bn_rpc.py | usc-isi-bass/bn_rpc | 7f896327cc8b28e0dc181989440b173429df720e | [
"MIT"
] | null | null | null | bn_rpc.py | usc-isi-bass/bn_rpc | 7f896327cc8b28e0dc181989440b173429df720e | [
"MIT"
] | null | null | null | bn_rpc.py | usc-isi-bass/bn_rpc | 7f896327cc8b28e0dc181989440b173429df720e | [
"MIT"
] | null | null | null | import json
import os
import socket
import sys
# Exactly one argument: the path of the script to run remotely.
if len(sys.argv) != 2:
    print("Usage: python bn_rpyc.py <script>")
    exit(1)
script = sys.argv[1]
if os.path.exists(script):
    script = os.path.abspath(script)
else:
    print("Can't find: %s" % script)
    exit(1)
# Python 2/3 compatibility shims.
py3 = sys.version_info[0] >= 3
if not py3:
    input = raw_input
# Connect to the RPC server's unix domain socket.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(os.path.expanduser('~/.bn_rpc.sock'))
if py3:
    sin = s.makefile('r', buffering=1, encoding='utf8')
else:
    sin = s.makefile('r', bufsize=1)
# NOTE(review): send()/recv() are referenced below but are not defined in
# this copy of the file -- they appear to have been stripped.
done = False
while True:
    m = recv()
    cmd = m['cmd']
    if cmd == 'prompt':
        # After the single exec line was submitted, disconnect on the
        # next prompt.
        if done:
            s.shutdown(socket.SHUT_RDWR)
            break
        prompt = m['prompt']
        try:
            line = "exec(open(\"%s\").read())" % script
            send('input', text=line + '\n')
        except KeyboardInterrupt:
            send('reset')
        except EOFError:
            s.shutdown(socket.SHUT_RDWR)
            break
        done = True
    elif cmd == 'print':
        print(m['text'].rstrip('\n'))
    elif cmd == 'exit':
        break
# NOTE(review): bare 'print' is a no-op on Python 3 (Python 2 leftover).
print
| 21.111111 | 55 | 0.554135 | import json
import os
import socket
import sys
# Exactly one argument: the path of the script to run remotely.
if len(sys.argv) != 2:
    print("Usage: python bn_rpyc.py <script>")
    exit(1)
script = sys.argv[1]
if os.path.exists(script):
    script = os.path.abspath(script)
else:
    print("Can't find: %s" % script)
    exit(1)
# Python 2/3 compatibility shims.
py3 = sys.version_info[0] >= 3
if not py3:
    input = raw_input
# Connect to the RPC server's unix domain socket; sin is a line-buffered
# text reader over the socket.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(os.path.expanduser('~/.bn_rpc.sock'))
if py3:
    sin = s.makefile('r', buffering=1, encoding='utf8')
else:
    sin = s.makefile('r', bufsize=1)
def send(cmd, **m):
    """Send one newline-delimited JSON message with command *cmd* and the
    keyword arguments as its fields.

    Fix: uses sendall instead of send -- socket.send may transmit only part
    of the payload, which would corrupt the newline-delimited JSON stream.
    """
    m['cmd'] = cmd
    s.sendall((json.dumps(m) + '\n').encode('utf8'))
def recv():
    """Read one newline-delimited JSON message from the socket and parse it."""
    raw = sin.readline()
    if not py3:
        raw = raw.decode('utf8')
    return json.loads(raw)
done = False
# Protocol loop: the server drives the exchange; respond to 'prompt',
# echo 'print' messages, and stop on 'exit'.
while True:
    m = recv()
    cmd = m['cmd']
    if cmd == 'prompt':
        # After the single exec line was submitted, disconnect on the
        # next prompt.
        if done:
            s.shutdown(socket.SHUT_RDWR)
            break
        prompt = m['prompt']
        try:
            line = "exec(open(\"%s\").read())" % script
            send('input', text=line + '\n')
        except KeyboardInterrupt:
            send('reset')
        except EOFError:
            s.shutdown(socket.SHUT_RDWR)
            break
        done = True
    elif cmd == 'print':
        print(m['text'].rstrip('\n'))
    elif cmd == 'exit':
        break
# NOTE(review): bare 'print' is a no-op on Python 3 (Python 2 leftover).
print
| 162 | 0 | 46 |
657662e3cf82f5f63c82a7a653c0337660555931 | 1,098 | py | Python | server.py | maneeshd/todo-list | e8576c121f1e60d58046812d76cd8cb944d8ad59 | [
"MIT"
] | null | null | null | server.py | maneeshd/todo-list | e8576c121f1e60d58046812d76cd8cb944d8ad59 | [
"MIT"
] | 4 | 2021-03-10T03:50:38.000Z | 2022-02-18T18:07:12.000Z | server.py | maneeshd/todo-list | e8576c121f1e60d58046812d76cd8cb944d8ad59 | [
"MIT"
] | null | null | null | # ASGI server using starlette & uvicorn
from starlette.applications import Starlette
from starlette.responses import FileResponse
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.staticfiles import StaticFiles
from os import path, getenv
from uvicorn import run as serve_app
# Directory containing this module; used to resolve served files.
CUR_DIR = path.realpath(path.dirname(__file__))
# Gzip-compress responses via middleware; /static serves the built SPA assets.
app = Starlette(debug=True, middleware=[Middleware(GZipMiddleware)])
app.mount("/static", StaticFiles(directory="dist/static", html=True), name="static")
# NOTE(review): the handler functions these decorators applied to are
# missing from this copy of the file -- as written the decorators precede
# an 'if' statement, which is a SyntaxError.
@app.route("/favicon.ico", methods=["GET"])
@app.route("/", methods=["GET"])
if __name__ == "__main__":
    # NOTE(review): getenv returns a str when PORT is set in the
    # environment; uvicorn expects an int port -- consider int(...).
    serve_app(app, host="127.0.0.1", port=getenv("PORT", 8080))
| 28.153846 | 84 | 0.710383 | # ASGI server using starlette & uvicorn
from starlette.applications import Starlette
from starlette.responses import FileResponse
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.staticfiles import StaticFiles
from os import path, getenv
from uvicorn import run as serve_app
# Directory containing this module; used to resolve served files.
CUR_DIR = path.realpath(path.dirname(__file__))
# Gzip-compress responses via middleware; /static serves the built SPA assets.
app = Starlette(debug=True, middleware=[Middleware(GZipMiddleware)])
app.mount("/static", StaticFiles(directory="dist/static", html=True), name="static")
@app.route("/favicon.ico", methods=["GET"])
async def favicon(request):
    """Serve the site favicon.

    Consistency fix: the path is now resolved relative to this module via
    CUR_DIR (like the homepage handler) instead of the process working
    directory, so the server works regardless of where it is launched from.
    """
    return FileResponse(
        path=path.join(CUR_DIR, "dist", "favicon.ico"),
        headers={"Content-Type": "image/x-icon"},
        media_type="image/x-icon"
    )
@app.route("/", methods=["GET"])
async def homepage(request):
    """Serve the single-page-app entry point (dist/index.html)."""
    index_path = path.join(CUR_DIR, "dist", "index.html")
    return FileResponse(
        index_path,
        media_type="text/html",
        headers={"Content-Type": "text/html;charset=UTF-8"},
    )
if __name__ == "__main__":
    # Bug fix: os.getenv returns a string when PORT is set in the
    # environment, but uvicorn expects an integer port -- convert explicitly.
    serve_app(app, host="127.0.0.1", port=int(getenv("PORT", 8080)))
| 335 | 0 | 44 |
8510c56d0874d2ec6ff8f75f20dd9376177e88ae | 6,879 | py | Python | datagristle/file_io.py | sidhu177/DataGristle | d9dd383e146c13a2a9a8cd265330122a9c565609 | [
"BSD-3-Clause"
] | null | null | null | datagristle/file_io.py | sidhu177/DataGristle | d9dd383e146c13a2a9a8cd265330122a9c565609 | [
"BSD-3-Clause"
] | null | null | null | datagristle/file_io.py | sidhu177/DataGristle | d9dd383e146c13a2a9a8cd265330122a9c565609 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
""" Contains standard io reading & writing code
See the file "LICENSE" for the full license governing this code.
Copyright 2017 Ken Farmer
"""
import os
import sys
import csv
import io
import random
import errno
from os.path import isfile
from pprint import pprint
from typing import Union, Dict, List, Tuple, Any, Optional
from pprint import pprint as pp
import datagristle.csvhelper as csvhelper
import datagristle.file_type as file_type
class OutputHandler(object):
    """ Handles all aspects of writing to output files: opening file,
        writing records, managing random writes, keeping counts,
        closing the file, etc.
    """
    def write_rec(self,
                  record: List[str]) -> None:
        """ Write a record to output.
            If silent arg was provided, then write is suppressed.
            If randomout arg was provided, then randomly determine
            whether or not to write record.
        """
        # Dry-run mode suppresses all writes.
        if self.dry_run:
            return
        # random_out < 1.0 means "write only that fraction of records".
        if self.random_out != 1.0:
            if random.random() > self.random_out:
                return
        try:
            self.writer.writerow(record)
        except csv.Error:
            # Surface the offending record before re-raising.
            print('Invalid record: %s' % record)
            raise
| 30.986486 | 110 | 0.565635 | #!/usr/bin/env python
""" Contains standard io reading & writing code
See the file "LICENSE" for the full license governing this code.
Copyright 2017 Ken Farmer
"""
import os
import sys
import csv
import io
import random
import errno
from os.path import isfile
from pprint import pprint
from typing import Union, Dict, List, Tuple, Any, Optional
from pprint import pprint as pp
import datagristle.csvhelper as csvhelper
import datagristle.file_type as file_type
class InputHandler(object):
    """Reads csv records from one or more input files -- or from stdin when
    the first entry in *files* is '-' -- presenting them as one iterator.

    Tracks per-file and total record counts and derives a csv dialect from
    the first analyzable input file, applying caller-supplied overrides.
    """
    def __init__(self,
                 files: List[str],
                 delimiter: Optional[str],
                 quoting: Optional[str],
                 quotechar: Optional[str],
                 has_header: Optional[bool]) -> None:
        self.files = files
        self.files_read = 0
        self.rec_cnt = 0                # total records read across all files
        self.curr_file_rec_cnt = 0      # records read from the current file
        self.infile = None
        self.dialect = self._get_dialect(files,
                                         delimiter,
                                         quoting,
                                         quotechar,
                                         has_header)
        self._open_next_input_file()

    def _open_next_input_file(self):
        # Open stdin (when files == ['-']) or the next file; raises
        # StopIteration once all inputs are exhausted.
        if self.files[0] == '-' and self.files_read == 0:
            if os.isatty(0):  # checks if data was pipped into stdin
                #raise ValueError, "No files or stdin provided"
                sys.exit(errno.ENODATA)
            input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8', newline='')
            self.csv_reader = csv.reader(input_stream, dialect=self.dialect)
            self.infile = sys.stdin
            self.files_read = 1
            self.curr_file_rec_cnt = 1
        elif self.files_read < len(self.files):
            # NOTE(review): indexes with files_read - 1, so the first call
            # (files_read == 0) opens files[-1], i.e. the *last* file first;
            # looks like an off-by-one -- confirm the intended order.
            self.infile = open(self.files[self.files_read - 1], 'rt', newline='', encoding='utf-8')
            self.csv_reader = csv.reader(self.infile, dialect=self.dialect)
            self.files_read += 1
            self.curr_file_rec_cnt = 1
        else:
            raise StopIteration

    def _get_dialect(self,
                     infiles: List[str],
                     delimiter: Optional[str],
                     quoting: Optional[str],
                     quotechar: Optional[str],
                     has_header: Optional[bool]) -> csvhelper.Dialect:
        # Build the dialect: analyze the first readable file (or start from
        # the default Dialect for stdin) and apply caller overrides.
        def overrider(dialect):
            # Caller-supplied values win; otherwise the analyzed values stay.
            dialect.delimiter = delimiter or dialect.delimiter
            if quoting:
                dialect.quoting = file_type.get_quote_number(quoting) if quoting else dialect.quoting
            elif dialect.quoting:
                pass
            else:
                dialect.quoting = file_type.get_quote_number('quote_none')
            dialect.quotechar = quotechar or dialect.quotechar
            try:
                dialect.has_header = has_header if has_header is not None else dialect.has_header
            except AttributeError:
                dialect.has_header = False
            dialect.lineterminator = '\n'
            return dialect
        if infiles[0] == '-':
            # NOTE(review): passes the Dialect *class* (not an instance) for
            # stdin, so the overrides mutate class attributes -- confirm.
            dialect = overrider(csvhelper.Dialect)
        else:
            for infile in infiles:
                my_file = file_type.FileTyper(infile)
                try:
                    dialect = my_file.analyze_file()
                    dialect = overrider(dialect)
                    break
                except file_type.IOErrorEmptyFile:
                    continue
            else:
                # No file could be analyzed (all empty).
                raise EOFError
        return dialect

    # correct behavior?
    #    if has_header - generally ignore header in processing:
    #        slicer
    #        freaker
    #        determinator
    #        differ
    #    if has_header - then record counts generally start at 2nd record
    #    if has_header - then ignore header in each file if multiple files
    #    if has_header - then copy it on output when output is also a csv:
    #        slicer?  not unless we have an output-header column?
    #        differ?  not unless we have a output-header column?

    def __iter__(self):
        return self

    def __next__(self):
        """ Returns the next input record.  Can handle data piped in as well
            as multiple input files.  All data is assumed to be csv files.
        """
        while True:
            try:
                rec = self._read_next_rec()
                return rec
            except StopIteration:  # end of file, loop around and get another
                self.infile.close()
                self._open_next_input_file()  # will raise StopIteration if out of files

    def _read_next_rec(self):
        # NOTE(review): curr_file_rec_cnt is set to 1 when a file is opened,
        # so this == 0 check never fires and a header row is returned as
        # data -- confirm whether header skipping is expected to work.
        if self.curr_file_rec_cnt == 0 and self.dialect.has_header:
            self.header = self.csv_reader.__next__()
        rec = self.csv_reader.__next__()
        self.rec_cnt += 1
        self.curr_file_rec_cnt += 1
        return rec

    def close(self):
        # stdin is left open; only real files are closed.
        if self.files[0] != '-' and self.infile:
            self.infile.close()
class OutputHandler(object):
    """ Handles all aspects of writing to output files: opening file,
        writing records, managing random writes, keeping counts,
        closing the file, etc.
    """
    def __init__(self,
                 output_filename: str,
                 dialect: csvhelper.Dialect,
                 default_output = sys.stdout,
                 dry_run: bool = False,
                 random_out: float = 1.0):
        # Only the two standard streams are accepted as the fallback output.
        assert default_output in (sys.stdout, sys.stderr), "invalid default_output: {}".format(default_output)
        assert 0.0 <= random_out <= 1.0
        self.output_filename = output_filename
        self.dry_run = dry_run
        self.random_out = random_out
        self.dialect = dialect
        # '-' means "write to the default stream" rather than a real file.
        if self.output_filename == '-':
            self.outfile = default_output
        else:
            self.outfile = open(output_filename, "wt", encoding='utf-8')
        # Without a dialect there is nothing to configure a csv writer with.
        self.writer = csv.writer(self.outfile, dialect=dialect) if dialect else None

    def write_rec(self,
                  record: List[str]) -> None:
        """ Write a record to output.
            If silent arg was provided, then write is suppressed.
            If randomout arg was provided, then randomly determine
            whether or not to write record.
        """
        if self.dry_run:
            return
        # random_out < 1.0 means "keep only that fraction of records".
        if self.random_out != 1.0 and random.random() > self.random_out:
            return
        try:
            self.writer.writerow(record)
        except csv.Error:
            print('Invalid record: %s' % record)
            raise

    def write_csv_rec(self,
                      record: List[str]) -> None:
        """Alias for write_rec, for callers writing csv rows."""
        self.write_rec(record)

    def write_text_rec(self,
                       record: str) -> None:
        """Write an already-formatted text record verbatim."""
        self.outfile.write(record)

    def close(self):
        # The default stream ('-') is left open for the rest of the program.
        if self.output_filename != '-':
            self.outfile.close()
| 4,335 | 1,124 | 131 |
4cc703d7689c7cc6539c5461d3ef646de5443e09 | 1,609 | py | Python | src/apps/calendar/schema.py | creimers/graphene-advent | 0160ec9b02a73679825a3a57b9b373e0b071c6d7 | [
"MIT"
] | null | null | null | src/apps/calendar/schema.py | creimers/graphene-advent | 0160ec9b02a73679825a3a57b9b373e0b071c6d7 | [
"MIT"
] | null | null | null | src/apps/calendar/schema.py | creimers/graphene-advent | 0160ec9b02a73679825a3a57b9b373e0b071c6d7 | [
"MIT"
] | null | null | null | from graphene import relay, AbstractType, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Calendar, Day
class CalendarNode(DjangoObjectType):
"""
how does this work?
"""
class CalendarQuery(AbstractType):
"""
how does this work?
"""
calendar = relay.Node.Field(CalendarNode)
calendars = DjangoFilterConnectionField(CalendarNode)
class DayNode(DjangoObjectType):
"""
how does this work?
"""
image_large_url = String()
image_small_url = String()
def resolve_image_large_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_large_url()
)
def resolve_image_small_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_small_url()
)
class DayQuery(AbstractType):
"""
how does this work?
"""
day = relay.Node.Field(DayNode)
days = DjangoFilterConnectionField(DayNode)
| 24.378788 | 73 | 0.633313 | from graphene import relay, AbstractType, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Calendar, Day
class CalendarNode(DjangoObjectType):
"""
how does this work?
"""
class Meta:
model = Calendar
filter_fields = {
'uuid': ['exact', ]
}
interfaces = (relay.Node, )
class CalendarQuery(AbstractType):
"""
how does this work?
"""
calendar = relay.Node.Field(CalendarNode)
calendars = DjangoFilterConnectionField(CalendarNode)
class DayNode(DjangoObjectType):
"""
how does this work?
"""
class Meta:
model = Day
interfaces = (relay.Node, )
exclude_fields = ('image', 'image_small', 'image_large')
image_large_url = String()
image_small_url = String()
def resolve_image_large_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_large_url()
)
def resolve_image_small_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_small_url()
)
def get_absolute_image_url(context, relative_url):
return context.scheme + '://' + context.get_host() + relative_url
class DayQuery(AbstractType):
"""
how does this work?
"""
day = relay.Node.Field(DayNode)
days = DjangoFilterConnectionField(DayNode)
| 103 | 234 | 79 |
b0b855e6c97c9f7a10c53d2de21ef3f09b751c17 | 12,710 | py | Python | scholar_scraper.py | albornet/ntds_2019_team_32 | c5cff5ca40e9894287f30e02f74da84952ff7d6b | [
"MIT"
] | null | null | null | scholar_scraper.py | albornet/ntds_2019_team_32 | c5cff5ca40e9894287f30e02f74da84952ff7d6b | [
"MIT"
] | null | null | null | scholar_scraper.py | albornet/ntds_2019_team_32 | c5cff5ca40e9894287f30e02f74da84952ff7d6b | [
"MIT"
] | null | null | null | import random
import time
import re
from urllib.request import Request, urlopen
from urllib.error import HTTPError
from unidecode import unidecode
from nordvpn_randomizer import logIn, chooseRandom, getCountries
# Used for some scientists in the list that have special characters in their names
# Maps accented character -> (UTF-8 percent-encoding, entity-style encoding).
# NOTE(review): the second element looks like a dblp-style entity code -- confirm.
translator = {'á': ('%C3%A1', '=aacute='), #, '\xc3\xa1'),
              'é': ('%C3%A9', '=eacute='), #, '\xc3\xa9'),
              'í': ('%C3%AD', '=iacute='), #, '\xc3\xad'),
              'ó': ('%C3%B3', '=oacute='), #, '\xc3\xb3'),
              'ú': ('%C3%BA', '=uacute='), #, '\xc3\xba'),
              'ý': ('%C3%BD', '=yacute='), #, '\xc3\xbd'),
              'è': ('%C3%A8', '=egrave='), #, '\xc3\xa8'),
              'ò': ('%C3%B2', '=ograve='), #, '\xc3\xb2'),
              'ê': ('%C3%AA', '=ecirc=' ), #, '\xc3\xaa'),
              'ô': ('%C3%B4', '=ocirc=' ), #, '\xc3\xb4'),
              'ä': ('%C3%A4', '=auml='  ), #, '\xc3\xa4'),
              'ë': ('%C3%AB', '=euml='  ), #, '\xc3\xab'),
              'ï': ('%C3%AF', '=iuml='  ), #, '\xc3\xaf'),
              'ö': ('%C3%B6', '=ouml='  ), #, '\xc3\xb6'),
              'ü': ('%C3%BC', '=uuml='  ), #, '\xc3\xbc'),
              'ã': ('%C3%A3', '=atilde='), #, '\xc3\xa3'),
              'õ': ('%C3%B5', '=otilde='), #, '\xc3\xb5'),
              'ñ': ('%C3%B1', '=ntilde=')} #, '\xc3\xb1')}
# List of different "user agents" (mimics browser usage)
user_agents = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Safari/605.1.15',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0',
               'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0 Firefox 68.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:70.0) Gecko/20100101 Firefox/70.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36']
# Initialize some useful variables
user_agent = user_agents[0] # Initialize user agent (changed if human/robot test)
delays = [7, 4, 6, 2, 10, 19] # Random delays between url requests, to avoid overloading google servers (and avoid human/robot test)
countries = getCountries()[1:] # List of countries for different IPs (changed if human/robot test), using NordVPN
countries[0] = countries[0][1:] # The first country (Albania) starts with a weird character
# Get number of citations per each year, for any scientist featured on google scholar
# Try to find scientific has a dedicated scholar publication page
# Make sure names with special characters can be found
# NOTE(review): the functions the three comments above described are
# missing from this copy of the file.
| 51.877551 | 141 | 0.600079 | import random
import time
import re
from urllib.request import Request, urlopen
from urllib.error import HTTPError
from unidecode import unidecode
from nordvpn_randomizer import logIn, chooseRandom, getCountries
# Used for some scientists in the list that have special characters in their names
# Maps accented character -> (UTF-8 percent-encoding, entity-style encoding).
# NOTE(review): the second element looks like a dblp-style entity code -- confirm.
translator = {'á': ('%C3%A1', '=aacute='), #, '\xc3\xa1'),
              'é': ('%C3%A9', '=eacute='), #, '\xc3\xa9'),
              'í': ('%C3%AD', '=iacute='), #, '\xc3\xad'),
              'ó': ('%C3%B3', '=oacute='), #, '\xc3\xb3'),
              'ú': ('%C3%BA', '=uacute='), #, '\xc3\xba'),
              'ý': ('%C3%BD', '=yacute='), #, '\xc3\xbd'),
              'è': ('%C3%A8', '=egrave='), #, '\xc3\xa8'),
              'ò': ('%C3%B2', '=ograve='), #, '\xc3\xb2'),
              'ê': ('%C3%AA', '=ecirc=' ), #, '\xc3\xaa'),
              'ô': ('%C3%B4', '=ocirc=' ), #, '\xc3\xb4'),
              'ä': ('%C3%A4', '=auml='  ), #, '\xc3\xa4'),
              'ë': ('%C3%AB', '=euml='  ), #, '\xc3\xab'),
              'ï': ('%C3%AF', '=iuml='  ), #, '\xc3\xaf'),
              'ö': ('%C3%B6', '=ouml='  ), #, '\xc3\xb6'),
              'ü': ('%C3%BC', '=uuml='  ), #, '\xc3\xbc'),
              'ã': ('%C3%A3', '=atilde='), #, '\xc3\xa3'),
              'õ': ('%C3%B5', '=otilde='), #, '\xc3\xb5'),
              'ñ': ('%C3%B1', '=ntilde=')} #, '\xc3\xb1')}
# List of different "user agents" (mimics browser usage)
user_agents = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20100101 Firefox/71.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Safari/605.1.15',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0',
               'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0 Firefox 68.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:70.0) Gecko/20100101 Firefox/70.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36']
# Initialize some useful variables
user_agent = user_agents[0] # Initialize user agent (changed if human/robot test)
delays = [7, 4, 6, 2, 10, 19] # Random delays between url requests, to avoid overloading google servers (and avoid human/robot test)
countries = getCountries()[1:] # List of countries for different IPs (changed if human/robot test), using NordVPN
countries[0] = countries[0][1:] # The first country (Albania) starts with a weird character
# Get number of citations per each year, for any scientist featured on google scholar
def get_citation_statistics(real_name, dblp_url):
    """Collect per-year citation counts and h/i10 indexes for one scientist.

    real_name -- full name of the scientist as used on dblp.
    dblp_url  -- url of the scientist's dblp profile page.

    Returns a 6-tuple (h_id, h_id_2014, i_id, i_id_2014, cites_to_write,
    coauthors_list) where cites_to_write holds the citation count for each
    year 1980..2019 (0 when unknown); all indexes are 0 when the scientist
    has no Google Scholar profile.

    The parsing works by splitting the raw Scholar HTML on CSS class names
    (gsc_g_t, gsc_g_al, gsc_rsb_std), so it is tightly coupled to the
    2019-era Scholar page markup -- assume it breaks if the markup changes.
    Uses module-level state: user_agent, delays.
    """
    # Assess whether the scientific has a dedicated scholar publication page
    scientist_ID, coauthors_list = get_user_ID_and_coauthors(real_name, dblp_url)
    if scientist_ID is not None:
        # Go to the scholar publication page of the scientist
        scholar_url = 'https://scholar.google.com/citations?user=' + scientist_ID
        scholar_req = Request(scholar_url, headers={'User-Agent': user_agent})
        scholar_src = urlopen(scholar_req).read().decode('utf-8')
        # Random pause so the request pattern does not look automated.
        time.sleep(random.choice(delays))
        # Get [number of citations vs. year] from the citation histogram of the scientist's scholar page
        # The histogram bars carry a z-index that encodes the year offset from max_year.
        try:
            max_year = max([int(line.split('">' )[1].split('</')[0]) for line in scholar_src.split('gsc_g_t' )[3:]])
            citations = [int(line.split('">' )[1].split('</')[0]) for line in scholar_src.split('gsc_g_al')[6:]]
            z_indexes = [int(line.split('z-index:')[1].split('">')[0]) for line in scholar_src.split('gsc_g_al')[5:-1]]
            years = [max_year-z+1 for z in z_indexes]
        except ValueError:
            years, citations = [], []
        # Get h-indexes and i10-indexes (in total and from 2014 to now only)
        # cite_ctrl is the total citation count, used below as a sanity check.
        try:
            cite_ctrl = int(scholar_src.split('i10')[ 0].split('<td class="gsc_rsb_std">')[-4].split('</td')[0])
            h_id = int(scholar_src.split('i10')[ 0].split('<td class="gsc_rsb_std">')[-2].split('</td')[0])
            h_id_2014 = int(scholar_src.split('i10')[ 0].split('<td class="gsc_rsb_std">')[-1].split('</td')[0])
            i_id = int(scholar_src.split('i10')[-1].split('<td class="gsc_rsb_std">')[ 1].split('</td')[0])
            i_id_2014 = int(scholar_src.split('i10')[-1].split('<td class="gsc_rsb_std">')[ 2].split('</td')[0])
        except (ValueError, IndexError):
            cite_ctrl, h_id, h_id_2014, i_id, i_id_2014 = 0, 0, 0, 0, 0
        # Some young scientists don't have a citations histogram because they only have citations over the last year
        # NOTE(review): 2019 is hard-coded as "the last year" here -- confirm
        # this is still the intended current year before reusing the scraper.
        if cite_ctrl != 0 and citations == []:
            citations, years = [cite_ctrl], [2019]
    # Return empty list if the scientist is not famous enough for a scholar publication page
    else:
        h_id, h_id_2014, i_id, i_id_2014, years, citations = 0, 0, 0, 0, [], []
    # Build a list to be published in the tsv file
    print('%s citations to analyse for %s' % (sum(citations), real_name))
    years_to_write = range(1980, 2020)
    cites_to_write = [0 for y in years_to_write]
    for i, y in enumerate(years_to_write):
        if y in years:
            cites_to_write[i] = citations[years.index(y)]
    # Return the citations to the program
    return h_id, h_id_2014, i_id, i_id_2014, cites_to_write, coauthors_list
# Try to find scientific has a dedicated scholar publication page
def get_user_ID_and_coauthors(real_name, dblp_url):
    """Find the Google Scholar user ID and dblp coauthor list of a scientist.

    Scrapes the dblp profile at dblp_url, extracts the coauthor names, then
    follows a Scholar search link found on the dblp page to locate the
    scientist's Scholar profile.  When Scholar answers with HTTP 429 or a
    captcha, the module-level user_agent is rotated and the NordVPN country
    is changed (logIn/chooseRandom) before retrying.

    Returns (user_ID, coauthors): user_ID is the 12-character Scholar ID or
    None when no profile could be found; coauthors is a (possibly empty)
    list of names.  NOTE: may terminate the whole process via exit() after
    more than 5 blocked attempts or when NordVPN is unavailable.
    """
    # Initialize and go to the dblp page of the scientist (if it exists)
    global user_agent
    search_name, last_name, dblp_url = check_for_special_characters(real_name, dblp_url)
    dblp_req = Request(dblp_url, headers={'User-Agent': user_agent})
    try:
        dblp_src = urlopen(dblp_req).read().decode('utf-8')
    # In rare cases, the scientist name was changed and is linked in the 404 error page
    except HTTPError as error:
        error_src = error.read().decode('utf-8')
        key_string = 'Did you mean:</p><ul><li><a href="'
        if key_string in error_src:
            # Follow dblp's "Did you mean" suggestion link instead.
            dblp_url = error_src.split(key_string)[1].split('">')[0]
            dblp_req = Request(dblp_url, headers={'User-Agent': user_agent})
            dblp_src = urlopen(dblp_req).read().decode('utf-8')
        else:
            return None, []
    # Find all coauthors for this scientist
    try:
        coauthors = [p.split('">')[1].split('</')[0] for p in dblp_src.split('"coauthor-section"')[1].split('"person"')[1:]]
    except IndexError:
        coauthors = []
    # Search for this scientist's last publication with first authorship on google scholar
    try:
        scholar_url = 'https://scholar' + dblp_src.split('au=' + search_name)[1].split('https://scholar')[1].split('">')[0]
    except IndexError:
        # Fallback: take the first Scholar link of the publication section.
        try:
            scholar_url = 'https://scholar' + dblp_src.split('"publ-section"')[1].split('https://scholar')[1].split('">')[0]
        except IndexError:
            return None, []
    # Try to connect to google scholar, avoiding robot issues
    scholar_says_Im_robot = True
    stop_trying_count = 0
    while scholar_says_Im_robot:
        # Stops abusing google scholar in case it is too angry
        if stop_trying_count > 5:
            print('Already many IP were tried... Try to find a manual solution!')
            exit()
        stop_trying_count += 1
        # Tries to open the scholar url obtained from the dblp wep page
        try:
            scholar_req = Request(scholar_url + '+' + unidecode(last_name), headers={'User-Agent': user_agent})
            scholar_src = urlopen(scholar_req).read().decode('utf-8')
            time.sleep(random.choice(delays))
        except UnicodeEncodeError as error:
            # NOTE(review): scholar_src becomes [] here; the captcha check and
            # the parsing below then operate on a list -- confirm intended.
            print('The last name induced an url encoding error. Skipping this scientist...')
            scholar_src = []
        # Check for 429 error (too many requests), in which case the VPN IP address is changed
        except HTTPError:
            print('Google scholar HTTPError 429: IP address is changed...')
            user_agent = random.choice(user_agents)
            try:
                logIn(chooseRandom(countries))
                time.sleep(10)
            except:
                print('NordVPN is not installed on this computer. Try to find a manual solution!')
                exit()
            continue
        # Check for the human/robot captcha test, in which case the VPN IP address is changed
        if '"gs_captcha_f"' in scholar_src:
            print('Google scholar Human/Robot test: IP address is changed...')
            user_agent = random.choice(user_agents)
            try:
                logIn(chooseRandom(countries))
                time.sleep(10)
            except:
                print('NordVPN is not installed on this computer. Try to find a manual solution!')
                exit()
            continue
        # If everything went fine, continue below this while loop
        scholar_says_Im_robot = False
    # Search for an existing scholar ID of the scientist
    # The Scholar ID is the 12-character suffix of a '/citations?user=' link
    # appearing in the byline (<div class="gs_a">) before the last name.
    user_ID = None
    try:
        crucial_str = scholar_src.split('<div class="gs_a">')[1]
        crucial_str = re.split(last_name.lower(), crucial_str, flags=re.IGNORECASE)[0]
        crucial_str = crucial_str.split(',')[-1].split('">')[0].split('&hl=')[0]
        if len(crucial_str) > 28 and crucial_str[-28:-12] == '/citations?user=':
            user_ID = crucial_str[-12:]
    except:
        print('Error in the search, probably because the scientist has no scholar page.')
    # Return the user ID and the coauthors list to the program
    return user_ID, coauthors
# Make sure names with special characters can be found
def check_for_special_characters(real_name, dblp_url):
    """Normalize a name containing special characters for dblp/Scholar urls.

    Handles apostrophes (url-encoded to %27), dashes (the part after the dash
    of the last name is dropped), and other accented characters via the
    module-level `translator` mapping (defined elsewhere in this file).

    Returns (search_name, last_name, dblp_url):
    search_name -- "First+Last"-style query string with the last name moved
    to the end; last_name -- the cleaned last name; dblp_url -- the url with
    the corrected characters substituted in place.
    """
    # Strings cannot be modified, lists can
    last_name = list(real_name.split(' ')[-1])
    probe_name = real_name.split(' ')
    probe_name.insert(0, probe_name.pop(-1))
    probe_name = list(' '.join(probe_name))
    probe_dblp = list(dblp_url.replace('==','='))
    # Special case for apostrophies
    if "'" in real_name:
        probe_dblp[probe_dblp.index('=')+1] = probe_dblp[probe_dblp.index('=')+1].capitalize()
        probe_name[probe_name.index("'")+1] = probe_name[probe_name.index("'")+1].capitalize()
        probe_name[probe_name.index("'")] = '%27'
        # NOTE(review): the next two lines look mojibake-corrupted -- a
        # non-ASCII apostrophe character was most likely lost in an encoding
        # round-trip (the '' membership test is always False for a list of
        # single characters, and the bare ''' does not parse).  Restore the
        # original typographic-apostrophe literals from version control.
        if '' in last_name:
            last_name[ last_name .index("'")] = '''
    # Special case for dashes
    if '-' in real_name:
        probe_dblp[probe_dblp.index('=')+1] = probe_dblp[probe_dblp.index('=')+1].capitalize()
        probe_name[probe_name.index('-')+1] = probe_name[probe_name.index('-')+1].capitalize()
        if '-' in last_name:
            last_name = last_name[:last_name.index('-')]
    # Go through every characters and modify if necessary
    else:
        is_special = False
        for i, c_name in enumerate(probe_name):
            for c_special in translator.keys():
                if c_special == c_name:
                    # NOTE(review): the i+35 offset assumes a fixed url prefix
                    # length before the name part -- confirm against the dblp
                    # url format actually used by the callers.
                    probe_name[i ] = translator[c_special][0]
                    probe_dblp[i+35] = translator[c_special][1]
                    is_special = True
    # Regenerate the corrected name and url
    search_name = ''.join(probe_name).split(' ')
    search_name.append(search_name.pop(0))
    search_name = '+'.join(search_name)
    last_name = ''.join(last_name)
    dblp_url = ''. join(probe_dblp)
    return search_name, last_name, dblp_url
| 8,730 | 0 | 66 |
6e12a4570a68fba2464465c8a85e777bc5a1f367 | 2,080 | py | Python | seagulls-rpg-demo/src/seagulls/rpg_demo/_fit_to_screen.py | codeghetti/seagulls-py | fd406a762b63368130125547f53e30672cec6754 | [
"MIT"
] | 2 | 2021-10-17T22:06:30.000Z | 2022-02-10T03:15:56.000Z | seagulls-rpg-demo/src/seagulls/rpg_demo/_fit_to_screen.py | codeghetti/seagulls-py | fd406a762b63368130125547f53e30672cec6754 | [
"MIT"
] | 80 | 2021-10-10T23:45:30.000Z | 2022-03-24T05:18:38.000Z | seagulls-space-shooter-demo/src/seagulls/space_shooter/_fit_to_screen.py | codeghetti/seagulls-py | fd406a762b63368130125547f53e30672cec6754 | [
"MIT"
] | null | null | null | from functools import lru_cache
from typing import Tuple
import pygame
| 28.493151 | 89 | 0.650481 | from functools import lru_cache
from typing import Tuple
import pygame
class FitToScreen:
    """Fits a fixed 16:10 game surface onto the current display.

    The surface is letterboxed: when the window is wider than 16:10 it is
    padded horizontally, when thinner it is padded vertically.  Every result
    is memoized with lru_cache, so an instance assumes the window size does
    not change during its lifetime.
    """

    @lru_cache()
    def get_x_boundaries(self) -> Tuple[float, float]:
        """Return the (left, right) x-coordinates of the visible surface."""
        left = self.get_x_padding()
        return left, left + self.get_actual_surface_width()

    @lru_cache()
    def get_y_boundaries(self) -> Tuple[float, float]:
        """Return the (top, bottom) y-coordinates of the visible surface."""
        top = self.get_y_padding()
        return top, top + self.get_actual_surface_height()

    @lru_cache()
    def get_x_padding(self) -> float:
        """Horizontal letterbox size on each side of the surface."""
        unused = self._get_current_window_width() - self.get_actual_surface_width()
        return unused / 2

    @lru_cache()
    def get_y_padding(self) -> float:
        """Vertical letterbox size above and below the surface."""
        unused = self._get_current_window_height() - self.get_actual_surface_height()
        return unused / 2

    @lru_cache()
    def get_actual_surface_width(self) -> float:
        """Width of the fitted surface, derived from the height when the
        window is wider than 16:10."""
        if not self._is_too_wide():
            return self._get_current_window_width()
        return self.get_actual_surface_height() * 1.6

    @lru_cache()
    def get_actual_surface_height(self) -> float:
        """Height of the fitted surface, derived from the width when the
        window is thinner than 16:10."""
        if not self._is_too_thin():
            return self._get_current_window_height()
        return self._get_current_window_width() / 1.6

    @lru_cache()
    def _is_too_wide(self) -> bool:
        """True when the window's aspect ratio exceeds 16:10."""
        return self._get_current_aspect_ratio() > 1.6

    @lru_cache()
    def _is_too_thin(self) -> bool:
        """True when the window's aspect ratio is below 16:10."""
        return self._get_current_aspect_ratio() < 1.6

    @lru_cache()
    def _is_ideal_aspect_ratio(self) -> bool:
        # aspect ratio should be 16:10
        return self._get_current_aspect_ratio() == 1.6

    @lru_cache()
    def _get_current_aspect_ratio(self) -> float:
        """Window width divided by window height."""
        width = self._get_current_window_width()
        return width / self._get_current_window_height()

    @lru_cache()
    def _get_current_window_width(self) -> int:
        """Current display width in pixels, as reported by pygame."""
        return pygame.display.Info().current_w

    @lru_cache()
    def _get_current_window_height(self) -> int:
        """Current display height in pixels, as reported by pygame."""
        return pygame.display.Info().current_h
| 1,459 | 525 | 23 |
72ab7dc437598ea6023e67802d5de3132fc78b1c | 1,195 | py | Python | snapshot of face/sourcecode.py | The-SocialLion/FaceDetection_using_opencv | 9cea434a0f4dc15f2333afec171a51f7e88a3e08 | [
"Apache-2.0"
] | null | null | null | snapshot of face/sourcecode.py | The-SocialLion/FaceDetection_using_opencv | 9cea434a0f4dc15f2333afec171a51f7e88a3e08 | [
"Apache-2.0"
] | null | null | null | snapshot of face/sourcecode.py | The-SocialLion/FaceDetection_using_opencv | 9cea434a0f4dc15f2333afec171a51f7e88a3e08 | [
"Apache-2.0"
] | null | null | null | import cv2
import os # handling directories
# Capture face snapshots from the default webcam and save them as a dataset:
# each frame is scanned with a Haar cascade; every detected face is cropped,
# converted to 150x150 grayscale and written to dataset/<name>/<count>.jpg.
alg="haarcascade_frontalface_default.xml"# importing algorithm
# NOTE(review): the cascade file is resolved relative to the working
# directory -- confirm it sits next to this script.
har=cv2.CascadeClassifier(alg)# reading & storing the algorithm in a variable
cam=cv2.VideoCapture(0)
dataset="dataset"
name="sociallion"
path=os.path.join(dataset,name)
if not os.path.isdir(path):
    os.makedirs(path)#creates a new directory for the sequence of folder
# resizing image using cv2
(width,height)=(150,150)
count=0
n=int(input("enter number of pictures to be taken"))
# NOTE(review): the condition is count<=n, so up to n+1 images are saved --
# confirm whether exactly n was intended.
while (count<=n):
    _,img=cam.read()
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # detectMultiScale(scaleFactor=1.3, minNeighbors=4) yields (x,y,w,h) boxes
    faces=har.detectMultiScale(gray,1.3,4)#detecting the face and scakling the image
    for (x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
        only_face=gray[y:y+h,x:x+w]#only used to process and store the face part from image
        res=cv2.resize(only_face,(width,height))
        cv2.imwrite("%s/%s.jpg" %(path,count),res)# having thwo %s , 1%s-path,2.%s-represents the number(count)
        count+=1
        print(count)
    cv2.imshow("face detection",img)
    key=cv2.waitKey(10)
    if key == 27:# press escape button to exit
        break
cam.release()
cv2.destroyAllWindows()
| 37.34375 | 111 | 0.699582 | import cv2
import os # handling directories
# Capture face snapshots from the default webcam and save them as a dataset:
# each frame is scanned with a Haar cascade; every detected face is cropped,
# converted to 150x150 grayscale and written to dataset/<name>/<count>.jpg.
alg="haarcascade_frontalface_default.xml"# importing algorithm
# NOTE(review): the cascade file is resolved relative to the working
# directory -- confirm it sits next to this script.
har=cv2.CascadeClassifier(alg)# reading & storing the algorithm in a variable
cam=cv2.VideoCapture(0)
dataset="dataset"
name="sociallion"
path=os.path.join(dataset,name)
if not os.path.isdir(path):
    os.makedirs(path)#creates a new directory for the sequence of folder
# resizing image using cv2
(width,height)=(150,150)
count=0
n=int(input("enter number of pictures to be taken"))
# NOTE(review): the condition is count<=n, so up to n+1 images are saved --
# confirm whether exactly n was intended.
while (count<=n):
    _,img=cam.read()
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # detectMultiScale(scaleFactor=1.3, minNeighbors=4) yields (x,y,w,h) boxes
    faces=har.detectMultiScale(gray,1.3,4)#detecting the face and scakling the image
    for (x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
        only_face=gray[y:y+h,x:x+w]#only used to process and store the face part from image
        res=cv2.resize(only_face,(width,height))
        cv2.imwrite("%s/%s.jpg" %(path,count),res)# having thwo %s , 1%s-path,2.%s-represents the number(count)
        count+=1
        print(count)
    cv2.imshow("face detection",img)
    key=cv2.waitKey(10)
    if key == 27:# press escape button to exit
        break
cam.release()
cv2.destroyAllWindows()
| 0 | 0 | 0 |
d20827c1ce6558be02d9ecd5769be7c015018cca | 5,257 | py | Python | wrappers/python/indy/wallet.py | nveskovic/indy-sdk | 24a8b49165969cdb016c679b343eb169d004fd1d | [
"Apache-2.0"
] | null | null | null | wrappers/python/indy/wallet.py | nveskovic/indy-sdk | 24a8b49165969cdb016c679b343eb169d004fd1d | [
"Apache-2.0"
] | null | null | null | wrappers/python/indy/wallet.py | nveskovic/indy-sdk | 24a8b49165969cdb016c679b343eb169d004fd1d | [
"Apache-2.0"
] | null | null | null | from .libindy import do_call, create_cb
from typing import Optional
from ctypes import *
import logging
async def create_wallet(pool_name: str,
                        name: str,
                        xtype: Optional[str],
                        config: Optional[str],
                        credentials: str) -> None:
    """
    Create a new secure wallet with the given unique name.

    :param pool_name: Name of the pool this wallet belongs to.
    :param name: Unique name of the wallet to create.
    :param xtype: (optional) Wallet type; defaults to 'default'. Custom types
        can be registered with the indy_register_wallet_type call.
    :param config: (optional) Wallet configuration json whose supported keys
        are defined by the wallet type; the default config is used when None.
    :param credentials: Wallet credentials json: {"key": <wallet_key>}
    :return: None (a libindy error is surfaced by do_call).
    """
    logger = logging.getLogger(__name__)
    logger.debug("create_wallet: >>> pool_name: %r, name: %r, xtype: %r, config: %r, credentials: %r",
                 pool_name,
                 name,
                 xtype,
                 config,
                 credentials)

    # The C completion callback is created once and memoized on the function.
    cb = getattr(create_wallet, "cb", None)
    if cb is None:
        logger.debug("create_wallet: Creating callback")
        cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
        create_wallet.cb = cb

    def encode(value):
        # libindy expects NULL-able utf-8 C strings.
        return None if value is None else c_char_p(value.encode('utf-8'))

    await do_call('indy_create_wallet',
                  encode(pool_name),
                  encode(name),
                  encode(xtype),
                  encode(config),
                  encode(credentials),
                  cb)

    logger.debug("create_wallet: <<<")
async def open_wallet(name: str,
                      runtime_config: Optional[str],
                      credentials: str) -> int:
    """
    Open an existing wallet and return a handle to it.

    The wallet must have been created beforehand with create_wallet, and a
    wallet with the same name cannot be opened more than once.

    :param name: Name of the wallet to open.
    :param runtime_config: (optional) Runtime configuration json, e.g.
        {"freshness_time": minutes a wallet value is considered fresh
        (defaults to 24*60), ... plus wallet-type-specific keys}; the
        defaults are used when None.
    :param credentials: Wallet credentials json: {"key": <wallet_key>}
    :return: Handle to the opened wallet, for use by wallet-aware calls.
    """
    logger = logging.getLogger(__name__)
    logger.debug("open_wallet: >>> name: %r, runtime_config: %r, credentials: %r",
                 name,
                 runtime_config,
                 credentials)

    # The C completion callback is created once and memoized on the function.
    cb = getattr(open_wallet, "cb", None)
    if cb is None:
        logger.debug("open_wallet: Creating callback")
        cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32))
        open_wallet.cb = cb

    def encode(value):
        # libindy expects NULL-able utf-8 C strings.
        return None if value is None else c_char_p(value.encode('utf-8'))

    res = await do_call('indy_open_wallet',
                        encode(name),
                        encode(runtime_config),
                        encode(credentials),
                        cb)

    logger.debug("open_wallet: <<< res: %r", res)
    return res
async def close_wallet(handle: int) -> None:
    """
    Close a previously opened wallet and free its allocated resources.

    :param handle: Wallet handle returned by open_wallet.
    :return: None (a libindy error is surfaced by do_call).
    """
    logger = logging.getLogger(__name__)
    logger.debug("close_wallet: >>> handle: %i", handle)

    # The C completion callback is created once and memoized on the function.
    cb = getattr(close_wallet, "cb", None)
    if cb is None:
        logger.debug("close_wallet: Creating callback")
        cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
        close_wallet.cb = cb

    await do_call('indy_close_wallet',
                  c_int32(handle),
                  cb)

    logger.debug("close_wallet: <<<")
async def delete_wallet(name: str,
                        credentials: str) -> None:
    """
    Delete a previously created wallet.

    :param name: Name of the wallet to delete.
    :param credentials: Wallet credentials json: {"key": <wallet_key>}
    :return: None (a libindy error is surfaced by do_call).
    """
    logger = logging.getLogger(__name__)
    logger.debug("delete_wallet: >>> name: %r, credentials: %r",
                 name,
                 credentials)

    # The C completion callback is created once and memoized on the function.
    cb = getattr(delete_wallet, "cb", None)
    if cb is None:
        logger.debug("delete_wallet: Creating callback")
        cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
        delete_wallet.cb = cb

    await do_call('indy_delete_wallet',
                  c_char_p(name.encode('utf-8')),
                  c_char_p(credentials.encode('utf-8')),
                  cb)

    logger.debug("delete_wallet: <<<")
| 33.484076 | 120 | 0.610424 | from .libindy import do_call, create_cb
from typing import Optional
from ctypes import *
import logging
async def create_wallet(pool_name: str,
                        name: str,
                        xtype: Optional[str],
                        config: Optional[str],
                        credentials: str) -> None:
    """
    Create a new secure wallet with the given unique name.

    :param pool_name: Name of the pool this wallet belongs to.
    :param name: Unique name of the wallet to create.
    :param xtype: (optional) Wallet type; defaults to 'default'. Custom types
        can be registered with the indy_register_wallet_type call.
    :param config: (optional) Wallet configuration json whose supported keys
        are defined by the wallet type; the default config is used when None.
    :param credentials: Wallet credentials json: {"key": <wallet_key>}
    :return: None (a libindy error is surfaced by do_call).
    """
    logger = logging.getLogger(__name__)
    logger.debug("create_wallet: >>> pool_name: %r, name: %r, xtype: %r, config: %r, credentials: %r",
                 pool_name,
                 name,
                 xtype,
                 config,
                 credentials)

    # The C completion callback is created once and memoized on the function.
    cb = getattr(create_wallet, "cb", None)
    if cb is None:
        logger.debug("create_wallet: Creating callback")
        cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
        create_wallet.cb = cb

    def encode(value):
        # libindy expects NULL-able utf-8 C strings.
        return None if value is None else c_char_p(value.encode('utf-8'))

    await do_call('indy_create_wallet',
                  encode(pool_name),
                  encode(name),
                  encode(xtype),
                  encode(config),
                  encode(credentials),
                  cb)

    logger.debug("create_wallet: <<<")
async def open_wallet(name: str,
                      runtime_config: Optional[str],
                      credentials: str) -> int:
    """
    Open an existing wallet and return a handle to it.

    The wallet must have been created beforehand with create_wallet, and a
    wallet with the same name cannot be opened more than once.

    :param name: Name of the wallet to open.
    :param runtime_config: (optional) Runtime configuration json, e.g.
        {"freshness_time": minutes a wallet value is considered fresh
        (defaults to 24*60), ... plus wallet-type-specific keys}; the
        defaults are used when None.
    :param credentials: Wallet credentials json: {"key": <wallet_key>}
    :return: Handle to the opened wallet, for use by wallet-aware calls.
    """
    logger = logging.getLogger(__name__)
    logger.debug("open_wallet: >>> name: %r, runtime_config: %r, credentials: %r",
                 name,
                 runtime_config,
                 credentials)

    # The C completion callback is created once and memoized on the function.
    cb = getattr(open_wallet, "cb", None)
    if cb is None:
        logger.debug("open_wallet: Creating callback")
        cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32))
        open_wallet.cb = cb

    def encode(value):
        # libindy expects NULL-able utf-8 C strings.
        return None if value is None else c_char_p(value.encode('utf-8'))

    res = await do_call('indy_open_wallet',
                        encode(name),
                        encode(runtime_config),
                        encode(credentials),
                        cb)

    logger.debug("open_wallet: <<< res: %r", res)
    return res
async def close_wallet(handle: int) -> None:
    """
    Close a previously opened wallet and free its allocated resources.

    :param handle: Wallet handle returned by open_wallet.
    :return: None (a libindy error is surfaced by do_call).
    """
    logger = logging.getLogger(__name__)
    logger.debug("close_wallet: >>> handle: %i", handle)

    # The C completion callback is created once and memoized on the function.
    cb = getattr(close_wallet, "cb", None)
    if cb is None:
        logger.debug("close_wallet: Creating callback")
        cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
        close_wallet.cb = cb

    await do_call('indy_close_wallet',
                  c_int32(handle),
                  cb)

    logger.debug("close_wallet: <<<")
async def delete_wallet(name: str,
                        credentials: str) -> None:
    """
    Delete a previously created wallet.

    :param name: Name of the wallet to delete.
    :param credentials: Wallet credentials json: {"key": <wallet_key>}
    :return: None (a libindy error is surfaced by do_call).
    """
    logger = logging.getLogger(__name__)
    logger.debug("delete_wallet: >>> name: %r, credentials: %r",
                 name,
                 credentials)

    # The C completion callback is created once and memoized on the function.
    cb = getattr(delete_wallet, "cb", None)
    if cb is None:
        logger.debug("delete_wallet: Creating callback")
        cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
        delete_wallet.cb = cb

    await do_call('indy_delete_wallet',
                  c_char_p(name.encode('utf-8')),
                  c_char_p(credentials.encode('utf-8')),
                  cb)

    logger.debug("delete_wallet: <<<")
| 0 | 0 | 0 |
f7b326676f48a33a9787afac3c35bb90f7e33e0e | 23,888 | py | Python | aggregator_extraction.py | guillaume-thiry/OECD-Chatbot | 3c8c280b8a2cb9b1bd8314265aee933ba8abac1a | [
"MIT"
] | 1 | 2020-09-09T12:25:13.000Z | 2020-09-09T12:25:13.000Z | aggregator_extraction.py | guillaume-thiry/OECD-Chatbot | 3c8c280b8a2cb9b1bd8314265aee933ba8abac1a | [
"MIT"
] | null | null | null | aggregator_extraction.py | guillaume-thiry/OECD-Chatbot | 3c8c280b8a2cb9b1bd8314265aee933ba8abac1a | [
"MIT"
] | 1 | 2021-01-19T09:30:13.000Z | 2021-01-19T09:30:13.000Z | # @ Copyright Inria, Ecole Polytechnique
# Shared under the MIT license https://opensource.org/licenses/mit-license.php
# This file contains all the functions that are used in the comparison/aggregation detection
# The main part of the code is the function find_aggregators, that will be used elsewhere in the code
# The other functions are auxiliary that are being used in the main one
### IMPORT
# Python libraries import
import nltk
from nltk.parse import CoreNLPParser
from nltk.parse.corenlp import CoreNLPDependencyParser
from nltk.tag.stanford import StanfordNERTagger
import os
# Utils import
from parsing_analysis import get_nodes, get_subtrees
from utils import catch_words, cut_after, get_index
# Generic analysis functions import
from area_extraction import find_areas
from time_extraction import find_time, date_figures
### PARSERS
#Generic path
path = os.getcwd()
#Java path (to be changed)
java_path = "C:/Program Files (x86)/Java/jre1.8.0_251/bin/java.exe"
os.environ['JAVAHOME'] = java_path
#Files of the NER
jar = os.path.join(path, "Stanford_NER/stanford-ner-4.0.0/stanford-ner.jar")
model = os.path.join(path, "Stanford_NER/stanford-ner-4.0.0/classifiers/english.muc.7class.distsim.crf.ser.gz")
#Loading the parsers
parser = CoreNLPParser(url='http://localhost:9000')
dep_parser = CoreNLPDependencyParser(url='http://localhost:9000')
ner_tagger = StanfordNERTagger(model, jar, encoding='utf8')
pos_tagger = CoreNLPParser(url='http://localhost:9000', tagtype='pos')
### FUNCTIONS
# The function cut_in_clause take the whole sentence (as a list of tokens named tok)
# and the comparative words that have been detected so far (comp_words)
# and try to cut the sentence into as many clauses as there are comparative words
# so that each clause contain one and only one comparison.
# The cuts have to be made at specific words (specified in cut_words)
cut_words = ["and", "or", "but", "while"]
# The function get_threshold take as input a comparative word (cp_word)
# And look if the word is associated with a numerical value (a threshold)
# To do that, we look at the contextual words around cp_word to find a number
# We also make sure that the number is not already tagged as a date (in date_figures)
# Finally, we check if the number is potentially linked with a unit multiplier
unit_m = {"hundred" : 100, "hundreds" : 100, "thousand" : 1000, "thousands" : 1000, "million" : 1000000, "millions" : 1000000, "billion" : 1000000000, "billions" : 1000000000,
"k" : 1000, 'm' : 1000000, "b" : 1000000000, "bn" : 1000000000, "bil" : 1000000000}
# The function find_aggregators takes the parses of the sentence
# and try to find every comparison and aggregation in it.
# It also takes as input the type of return the user wants (list of countries or list of years)
# and the words in the sentence giving that information
| 43.670932 | 176 | 0.477604 | # @ Copyright Inria, Ecole Polytechnique
# Shared under the MIT license https://opensource.org/licenses/mit-license.php
# This file contains all the functions that are used in the comparison/aggregation detection
# The main part of the code is the function find_aggregators, that will be used elsewhere in the code
# The other functions are auxiliary that are being used in the main one
### IMPORT
# Python libraries import
import nltk
from nltk.parse import CoreNLPParser
from nltk.parse.corenlp import CoreNLPDependencyParser
from nltk.tag.stanford import StanfordNERTagger
import os
# Utils import
from parsing_analysis import get_nodes, get_subtrees
from utils import catch_words, cut_after, get_index
# Generic analysis functions import
from area_extraction import find_areas
from time_extraction import find_time, date_figures
### PARSERS
#Generic path
path = os.getcwd()
#Java path (to be changed)
# NOTE(review): hard-coded Windows JRE location -- must be adapted per machine.
java_path = "C:/Program Files (x86)/Java/jre1.8.0_251/bin/java.exe"
os.environ['JAVAHOME'] = java_path
#Files of the NER
jar = os.path.join(path, "Stanford_NER/stanford-ner-4.0.0/stanford-ner.jar")
model = os.path.join(path, "Stanford_NER/stanford-ner-4.0.0/classifiers/english.muc.7class.distsim.crf.ser.gz")
#Loading the parsers
# The three CoreNLP clients expect a CoreNLP server running at localhost:9000;
# the NER tagger runs locally from the jar/model paths above.
parser = CoreNLPParser(url='http://localhost:9000')
dep_parser = CoreNLPDependencyParser(url='http://localhost:9000')
ner_tagger = StanfordNERTagger(model, jar, encoding='utf8')
pos_tagger = CoreNLPParser(url='http://localhost:9000', tagtype='pos')
### FUNCTIONS
# The function cut_in_clause take the whole sentence (as a list of tokens named tok)
# and the comparative words that have been detected so far (comp_words)
# and try to cut the sentence into as many clauses as there are comparative words
# so that each clause contain one and only one comparison.
# The cuts have to be made at specific words (specified in cut_words)
cut_words = ["and", "or", "but", "while"]
def cut_in_clause(tok, comp_words, cut_words):
    """Split the token list `tok` into clauses, one comparison per clause.

    A clause boundary is placed at the first word from `cut_words` that
    follows a comparative word from `comp_words`; the cut word itself is not
    kept in any clause.

    Returns (clauses, cuts): the list of clauses (each a list of tokens) and
    the lower-cased cut words at which the sentence was split.
    """
    clauses, current, cuts = [], [], []
    seen_comparison = False
    for token in tok:
        if token in comp_words:
            seen_comparison = True
        lowered = token.lower()
        if seen_comparison and lowered in cut_words:
            # Close the current clause at this conjunction and start a new one.
            cuts.append(lowered)
            clauses.append(current)
            current = []
            seen_comparison = False
        else:
            current.append(token)
    if current:
        clauses.append(current)
    return (clauses, cuts)
# The function get_threshold take as input a comparative word (cp_word)
# And look if the word is associated with a numerical value (a threshold)
# To do that, we look at the contextual words around cp_word to find a number
# We also make sure that the number is not already tagged as a date (in date_figures)
# Finally, we check if the number is potentially linked with a unit multiplier
# Maps magnitude words and suffixes to their numeric factor (used by get_threshold).
unit_m = {"hundred" : 100, "hundreds" : 100, "thousand" : 1000, "thousands" : 1000, "million" : 1000000, "millions" : 1000000, "billion" : 1000000000, "billions" : 1000000000,
          "k" : 1000, 'm' : 1000000, "b" : 1000000000, "bn" : 1000000000, "bil" : 1000000000}
def get_threshold(tok,cp_word,date_figures):
    """Look for a numeric threshold attached to the comparative word cp_word.

    tok -- the clause as a list of tokens.
    cp_word -- the comparative word ("over", "more", ...) whose grammatical
    context is searched for a number.
    date_figures -- numbers already identified as dates, which must not be
    mistaken for thresholds.

    Returns the threshold as a float, scaled by a unit multiplier (unit_m)
    when a word such as "million" immediately follows the number, or None
    when no threshold is found.  Needs the CoreNLP server behind the
    module-level `parser`.
    """
    parse = next(parser.parse(tok)) #First, we parse the whole clause
    # And then we search the grammatical context of cp_word
    # This is most of the time a Prepositional Phrase (PP), a Nominal Phrase (NP) or a Quantifier Phrase (QP)
    # The candidates are tried from the most to the least specific; each loop
    # keeps the LAST matching subtree.
    pp = None
    sub = parse.subtrees()
    for s in sub:
        if (s.label() == "PP" and s.leaves()[0] == cp_word):
            pp = s
    if pp == None:
        pps = get_subtrees(parse, "PP")
        for p in pps:
            if cp_word in p.leaves():
                pp = p
    if pp == None:
        nps = get_subtrees(parse, "NP")
        for n in nps:
            if cp_word in n.leaves():
                pp = n
    if pp == None:
        qps = get_subtrees(parse, "QP")
        for q in qps:
            if cp_word in q.leaves():
                pp = q
    #If a context is found, we look for the first number appearing after cp_word and not being a date
    if pp != None:
        i = get_index(pp.leaves(),cp_word) #position of the comp word in the context
        fig = get_nodes(pp, "CD") #list of all numbers appearing in the context
        n = 0
        for f in fig:
            if (n==0 and get_index(pp.leaves(),f)>i and (f not in date_figures)):
                n=f
        #and if that number exists, we check if an unit multiplier is written just after
        if n != 0:
            k = get_index(tok, n) #position of the number in the clause
            mult = 1
            try:
                mult = unit_m[tok[k+1].lower()]
            except:
                # No token after the number, or not a known multiplier: keep 1.
                pass
            return(float(n)*mult)
    return None
# The function find_aggregators takes the parses of the sentence
# and try to find every comparison and aggregation in it.
# It also takes as input the type of return the user wants (list of countries or list of years)
# and the words in the sentence giving that information
def find_aggregators(parse,parse_d,returned,agg_words):
tok = parse.leaves()
ner = ner_tagger.tag(tok)
pos = pos_tagger.tag(tok)
dep = list(dep_parser.parse(tok))[0]
# We store the numbers in the sentence that are dates, as it is useful when looking for a threshold
figures = date_figures(ner, pos, dep)
# When a comparison or aggregation is in the sentence, the user normally wants a list of something
# But sometimes, there is not any words specifing the type of the list and so the return is set as a value by default
# Here, we set temporarly that return value to a list of countries
# Thus will be useful if a comparison/aggregation is found
# An example query for such a case would be "Highest GDPs in the world"
if returned == "Value":
returned = "Agr_Area"
## Comparative words
# Some comparative words are "threshold-only" and do not require a construction with "than"
th_words = ["over", "under", "below", "above"]
th_inf = ["under", "below"]
# We detect these words
th_ = catch_words(tok,th_words)
th = []
# And just make sure that a threshold is linked to each one (as these words can appear is other contexts)
for t in th_:
if get_threshold(tok, t, figures) != None:
th.append(t)
# The other comparative words (that we will name comp words) require a structure with "than"
# Some of them have to be specified (like "superior") but most of them are recognizied easily
# thanks to specific tags for comparison in the POS tags
cp_words = ["superior", "inferior"]
cp_inf = ["less", "lower", "inferior", "poorer"]
comp_ = get_nodes(parse, "RBR") + get_nodes(parse, "JJR") + catch_words(tok, cp_words)
comp = []
# Then, we only keep the comparative words followed by a "than"
# And we also reorder the words at the same time, adding the threshold words in the common list
k = 0 #determines if a comp word has already been found (used when a "than" is found)
cp = "" #current comp word
for t in tok:
if t in comp_:
if k == 0:
k=1
cp = t
if k == 1:
cp = t
elif t in th:
if k == 1: #this case happens if a threshold word is found after a comp word but before a potential than
#in that case, we cannot reasonably consider the comp word as it would create nested comparisons
k = 0
cp = ""
comp.append(t)
elif t == "than":
if k == 0:
raise Exception("Error 0 : than alone") #in case a "than" is found but without a comp word before
elif k == 1:
k = 0
comp.append(cp)
cp = ""
## Comparisons
# Now that we have all the comparative words, we try to cut the sentence in clauses
# Each clause must contain only one comparison (often there is just one clause)
comparisons = []
n_comp = len(comp)
clauses, cuts = cut_in_clause(tok, comp, cut_words)
if n_comp>0:
if len(clauses) == n_comp:
b = True
for i in range(n_comp):
if comp[i] not in clauses[i]:
b = False
if not b:
raise Exception("Error 1 : problem with clauses")
# Else, everything is okay and we will now treat each clause separately
else:
for i in range(n_comp):
clause = clauses[i]
word = comp[i]
# We parse the clause. That way, we only consider the words of the clause and nothing else
# And of course, the result can differ from the parsing of the whole sentence
clause_sent = " ".join(clause)
clause_parse = next(parser.parse(clause))
clause_dep = list(dep_parser.parse(clause))[0]
clause_ner = ner_tagger.tag(clause)
# Then, we execute the functions find_areas and find_time for the clause
areas = find_areas(clause_sent)
times = find_time(clause_ner, clause_parse,clause_dep)
than_time = times[2]
to_time = times[1]
in_time = times[0]
than_area = areas[2]
in_area = areas[0]
# Here, we initialize the different variables that describe a comparison
comp_type = None #what is the comparator (a threshold, another country/year, or something else)
sens = 'sup' #is the comparison a "more than" or a "less than"
V1 = {} #elements of Value1 (the first value of the comparison, before "than")
V2 = {} #elements of Value2 (the second value of the comparison, after "than")
V = {} #some elements are not part of the comparison and belongs to both values
# Example : "Countries with more population than Germany in 2010" -> we compare everything at the year 2010
# Now, we differentiate the treatment between "list of countries" and "list of years"
# Countries list
if returned == 'Agr_Area':
# If the comparative word is "threshold-only"
if word in th_words:
if word.lower() in th_inf:
sens = "inf"
# Search of a threshold
threshold = get_threshold(clause,word,[])
if threshold == None:
raise Exception("Error 2 : No threshold found")
else:
comp_type = "Threshold"
V2["THRESHOLD"] = threshold
# Search of a time indicator (as we compare values, we cannot have a time series)
if ((in_time != None) and (in_time == to_time)):
V["TIME"] = in_time
# Search of a location indicator
# As the used wants a list of countries, he cannot specify a country in the query
# But he can give a region ("What countries in Asia ...")
region = True
r = []
for c in in_area:
if c[1] == 'country':
region = False
if not region:
raise Exception("Error 3 : Country was mentioned")
else:
for c in in_area:
r.append(c[0])
V["AREA"] = r
# Else, the comparative word must belong to a "than" structure
else:
if 'than' in clause:
if word.lower() in cp_inf:
sens = "inf"
idx = get_index(clause, "than") #position of the "than", useful to fill V1 & V2
# First, we look at the locations
# Here, it is possible to mention a country if it is the comparator
if len(than_area) == 1:
if than_area[0][1] == "country":
V2["AREA"] = than_area[0][0]
comp_type = "Country"
else:
raise Exception("Error 4 : Comparison with a region")
elif len(than_area)>1:
raise Exception("Error 5 : Too many area mentioned")
# It is also possible to mention a region, as before
region = True
r = []
for c in in_area:
if c[1] == 'country':
region = False
if not region:
raise Exception("Error 3 : Country mentioned")
else:
for c in in_area:
r.append(c[0])
V["AREA"] = r
# Then, the time indicators
# If two dates are found on both sides of "than", the first one go in V1 and the other in V2
has_than_time = False
if (len(than_time)==1):
if in_time != None:
if (get_index(clause,str(in_time)) < idx):
V1["TIME"] = in_time
V2["TIME"] = than_time[0]
has_than_time = True
if comp_type == None:
comp_type = "Two"
# Else, the year is general (goes in V)
if not has_than_time:
if len(than_time)==1:
V["TIME"] = than_time[0]
elif ((in_time != None) and (in_time == to_time)):
V["TIME"] = in_time
else: #in case no date is given, either we raise an error or ask the user, or take a default one (to see later)
#raise Exception("Error 6 : Must precise time period")
pass
# If we haven't found yet the type of comparison, we try to find a threshold
# If there is not, the comparison is of type "two" (two different values compared)
if comp_type == None:
thres = get_threshold(clause, 'than', than_time)
if thres != None:
comp_type = "Threshold"
V2["THRESHOLD"] = thres
if comp_type == None:
comp_type = "Two"
else:
raise Exception("Error 7 : comparison without 'than'")
# Years list
elif returned == 'Agr_Time':
# If threshold word
if word in th_words:
if word.lower() in th_inf:
sens = "inf"
threshold = get_threshold(clause,word,[])
if threshold == None:
raise Exception("Error 2 : No threshold found")
else:
comp_type = "Threshold"
V2["THRESHOLD"] = threshold
# As we have a list of years here, we can only have time indicators as a time period (more than one year)
if ((in_time != None) and (to_time != None) and (in_time != to_time)):
V["TIME"] = [in_time,to_time]
else:
V["TIME"] = None
# And conversely, the location indicators can only give one country (to be able to compare)
if (len(in_area) > 1 or (len(in_area) == 1 and in_area[0][1] == 'region')):
raise Exception("Error 5 : Too many area mentioned")
else:
if len(in_area) == 1:
V["AREA"] = in_area[0][0]
else:
V["AREA"] = None
# If than construction
else:
if 'than' in clause:
if word.lower() in cp_inf:
sens = "inf"
idx = get_index(clause, "than")
# Get countries
# We accept if two countries are given on both sides of "than" : goes in V1 & V2
# Else it goes in V and can only be one country
if len(than_area) == 1:
if than_area[0][1] == "country":
if (len(in_area) == 1 and in_area[0][1] == "country"):
V2["AREA"] = than_area[0][0]
V1["AREA"] = in_area[0][0]
comp_type = "Two"
elif (len(in_area) == 0):
V["AREA"] = than_area[0][0]
else:
raise Exception("Error 5 : Too many area mentioned")
else:
raise Exception("Error 4 : Comparison with a region")
elif len(than_area)>1:
raise Exception("Error 5 : Too many area mentioned")
elif (len(than_area) == 0):
if (len(in_area) > 1 or (len(in_area) == 1 and in_area[0][1] == 'region')):
raise Exception("Error 5 : Too many area mentioned")
else:
if len(in_area) == 1:
V["AREA"] = in_area[0][0]
else:
V["AREA"] = None
# Get times
#A specific year can be given by the user as the comparator (comp_type -> "Time")
if (len(than_time)==1):
V2["TIME"] = than_time[0]
comp_type = "Time"
elif(len(than_time)>1):
raise Exception("Error 8 : Too many times mentioned")
#Else, we accept only a time period
if ((in_time != None) and (to_time != None) and (in_time != to_time)):
V["TIME"] = [in_time,to_time]
else:
V["TIME"] = None
# If nothing, we do as before and look for a threshold
if comp_type == None:
thres = get_threshold(clause, 'than', than_time)
if thres != None:
comp_type = "Threshold"
V2["THRESHOLD"] = thres
if comp_type == None:
comp_type = "Two"
else:
raise Exception("Error 7 : comparison without 'than'")
# At the end, we gather everything for that clause and add this to the comparisons list
comparisons.append([comp_type,sens,V,V1,V2])
else:
raise Exception("Error 9 : number of words and clauses")
## Superlative words
# Aggregation words (or superlative words) are mostly found with their specific tag
# Nonetheless, some have to be specified
sp_words = ["top", "minimum", "maximum"]
sup = get_nodes(parse, "RBS") + get_nodes(parse, "JJS") + catch_words(tok, sp_words)
## Aggregations
aggreg = None
sens_sup = None #sense of the aggregation (max or min)
n_sup = 1 #number of items to display
sup_neg = ["least", "lowest", "worst", "minimum"]
#we also need to know the plural form of the words that could be linked to the aggregation
agg_plural = ["areas", "countries", "places", "states", "nations", "years"]
#Sense of the aggregation
if (sup != []):
for s in sup:
if s.lower() in sup_neg:
sens_sup = 'inf'
if sens_sup == None:
sens_sup = 'sup'
# For the number of items, we look at the context of the superlative words + the words linked to them
# These words usually form a context as a Nominal Phrase (NP)
# And in the context, we look for numerical values
sup_ = sup + agg_words
nps = get_subtrees(parse, "NP")
for s in sup_:
for np in nps:
if s in np.leaves():
for a in np.leaves():
try:
n_sup = int(a)
except:
pass
# If no number was found, we look at a potential plural form
# That would correspond to a default value of 10 items
if n_sup == 1:
for w in agg_words:
if w.lower() in agg_plural:
n_sup = 10
if (sup != []):
aggreg = [sens_sup,n_sup]
#Finally, we return all the information found
# 1) The list of comparison (one for each clause)
# 2) The sense and value of the aggregation (if any)
return(comparisons,aggreg)
| 20,961 | 0 | 69 |
89f87c332127a666f8104861a60a4daee9b3667f | 592 | py | Python | src/multinavigation/conf.py | adlh/django-multinavigation | 040b8f2d5857f3d1321f81c166576f46e86f56cb | [
"MIT"
] | 4 | 2016-08-01T11:08:05.000Z | 2020-09-22T20:47:27.000Z | src/multinavigation/conf.py | adlh/django-multinavigation | 040b8f2d5857f3d1321f81c166576f46e86f56cb | [
"MIT"
] | 4 | 2019-11-20T19:53:03.000Z | 2020-10-20T22:41:39.000Z | src/multinavigation/conf.py | adlh/django-multinavigation | 040b8f2d5857f3d1321f81c166576f46e86f56cb | [
"MIT"
] | null | null | null | from collections import namedtuple
Node = namedtuple('Node', 'url_name label parent context')
""" Represents a node or item in a navigation
url_name -- (string) The name of a named-urlpattern
label -- (string) The label to be used in the item
parent -- (string) the url_name of its parent or ''. Extra kwargs to be
met on the parent may be defined through:
'url_name|kw1:val1,kw2:val2'
context -- (dict, optional) Contains extra context for the items, to be
used on the templates (if needed) for customization purposes.
"""
| 42.285714 | 77 | 0.665541 | from collections import namedtuple
Node = namedtuple('Node', 'url_name label parent context')
""" Represents a node or item in a navigation
url_name -- (string) The name of a named-urlpattern
label -- (string) The label to be used in the item
parent -- (string) the url_name of its parent or ''. Extra kwargs to be
met on the parent may be defined through:
'url_name|kw1:val1,kw2:val2'
context -- (dict, optional) Contains extra context for the items, to be
used on the templates (if needed) for customization purposes.
"""
| 0 | 0 | 0 |
8ab2d19ea32cb1db292148c9e0dbd28cf5dac977 | 21,856 | py | Python | microsa/cellspat.py | VGeorgii/Mesa | 6ae075e141e12895e7b60b5d7fa955b1802516f4 | [
"MIT"
] | null | null | null | microsa/cellspat.py | VGeorgii/Mesa | 6ae075e141e12895e7b60b5d7fa955b1802516f4 | [
"MIT"
] | null | null | null | microsa/cellspat.py | VGeorgii/Mesa | 6ae075e141e12895e7b60b5d7fa955b1802516f4 | [
"MIT"
] | null | null | null | try:
import numpy as np
import pandas as pd
from skimage import morphology
from skimage.measure import regionprops
from skimage.measure import label
from scipy import ndimage
from skimage.graph import route_through_array
from scipy.ndimage import binary_closing, binary_hit_or_miss
from scipy.spatial import distance
except ImportError:
print ('Import error!')
| 27.049505 | 281 | 0.553624 | try:
import numpy as np
import pandas as pd
from skimage import morphology
from skimage.measure import regionprops
from skimage.measure import label
from scipy import ndimage
from skimage.graph import route_through_array
from scipy.ndimage import binary_closing, binary_hit_or_miss
from scipy.spatial import distance
except ImportError:
print ('Import error!')
def cells_cells_neigh (cells_coords_list, radius):
    """Find, for every cell, the indexes of its neighboring cells.

    A cell j is a neighbor of cell i when 0 < dist(i, j) < radius, i.e. a
    cell is never its own neighbor and coordinate duplicates at distance 0
    are excluded (matching the original behavior).

    Arguments:
    - cells_coords_list: sequence of (y, x) cell coordinates
    - radius: radius of the neighborhood outline

    Returns a dict:
    - 'cell_neigh_index': list (one entry per cell) of neighboring cell indexes
    - 'cell_neigh_num': list with the number of neighbors of each cell
    """
    coords = np.asarray(list(cells_coords_list), dtype=float)
    cell_neigh_index = []
    for i in range(len(coords)):
        # Compute each pairwise distance once; the original evaluated the
        # full distance expression twice (once per side of logical_and).
        dists = np.sqrt(np.sum((coords[i] - coords) ** 2, axis=1))
        mask = (dists > 0) & (dists < radius)
        cell_neigh_index.append(list(np.flatnonzero(mask)))
    cell_neigh_num = [len(neigh) for neigh in cell_neigh_index]
    return ({'cell_neigh_index': cell_neigh_index,
             'cell_neigh_num': cell_neigh_num})
def cell_cell_type_neigbor (cells_neigh_index, cell_type_list, cell_type):
    """Keep only the neighbors that are of a given cell type.

    Arguments:
    - cells_neigh_index: per-cell lists of neighboring cell indexes
    - cell_type_list: per-cell type labels
    - cell_type: the cell type of interest

    Returns a dict:
    - 'cell_cell_type': per-cell lists of neighbor indexes of that type
    - 'len_cell_cell_type': per-cell counts of those neighbors
    """
    cell_cell_type = [[idx for idx in neigh if cell_type_list[idx] == cell_type]
                      for neigh in cells_neigh_index]
    len_cell_cell_type = [len(matches) for matches in cell_cell_type]
    return ({'cell_cell_type': cell_cell_type,
             'len_cell_cell_type': len_cell_cell_type})
def cell_cell_neigh_features (cell_neigh_index, cell_feature_list):
    """Average a feature of interest over each cell's neighbors.

    Arguments:
    - cell_neigh_index: per-cell lists of neighboring cell indexes
    - cell_feature_list: per-cell feature values

    Returns a list with, for each cell, the mean feature value of its
    neighbors (nan when a cell has no neighbors, as np.mean of an empty
    array).
    """
    mean_result = []
    for neigh in cell_neigh_index:
        values = np.array([cell_feature_list[idx] for idx in neigh])
        mean_result.append(np.mean(values))
    return (mean_result)
def cell_cell_spatial (cells_coords_list, radius, cell_type = 'None', cell_type_list = 'None', cell_feature_list = 'None'):
    """Build a per-cell pd.DataFrame of cell-neighborhood statistics.

    Arguments:
    - cells_coords_list: list of (y, x) cell coordinates
    - radius: radius of the neighborhood outline
    - cell_type: the string 'None' (default: no type breakdown), the string
      'All' (one neighbor-count column per type present in cell_type_list),
      or a list of type labels to break down explicitly
    - cell_type_list: per-cell type labels (parallel to cells_coords_list)
    - cell_feature_list: list of per-cell feature columns, or the string
      'None' to skip feature averaging

    Returns a pd.DataFrame with one row per cell: the total neighbor count,
    optional per-type neighbor counts, and optional mean neighbor features.

    NOTE(review): the sentinel defaults are the *string* 'None', not the
    None object; callers must not pass None itself.
    """
    neighbors = cells_cells_neigh (cells_coords_list, radius)
    dicts_neigh_result = {}
    dicts_feature_result = {}
    if cell_type != 'None':
        if cell_type == 'All':
            # Count neighbors of every type observed in the sample.
            types = []
            type_index_list = []
            type_neighbors_list = []
            for i in set(cell_type_list):
                type_neigbors = cell_cell_type_neigbor (neighbors['cell_neigh_index'], cell_type_list, i)
                type_index_list.append(type_neigbors['cell_cell_type'])
                type_neighbors_list.append(type_neigbors['len_cell_cell_type'])
                types.append(i)
            dicts_typs = {}
            for i, j in zip(types, type_neighbors_list):
                dicts_typs[i] = j
            # NOTE(review): this branch names the total-count column
            # 'cells_number' while the other branches use 'cell_neighbors';
            # confirm whether that inconsistency is intentional.
            dicts_neighbors = {'cells_number' : neighbors['cell_neigh_num']}
            dicts_neigh_result = {**dicts_neighbors, **dicts_typs}
        else:
            # cell_type is an explicit list of type labels to break down.
            types = []
            type_index_list = []
            type_neighbors_list = []
            for i in cell_type:
                type_neighbors = cell_cell_type_neigbor (neighbors['cell_neigh_index'], cell_type_list, i)
                type_index_list.append(type_neighbors['cell_cell_type'])
                type_neighbors_list.append(type_neighbors['len_cell_cell_type'])
                types.append(i)
            dicts_typs = {}
            for i, j in zip(types, type_neighbors_list):
                dicts_typs[i] = j
            dicts_neighbors = {'cell_neighbors' : neighbors['cell_neigh_num']}
            dicts_neigh_result = {**dicts_neighbors, **dicts_typs}
    if cell_type == 'None':
        dicts_neigh_result = {'cell_neighbors' : neighbors['cell_neigh_num']}
    if cell_feature_list != 'None':
        if cell_type != 'None':
            if cell_type == 'All':
                # With 'All', features are averaged over ALL neighbors
                # (columns 'common_feature_<i>'), not broken down per type.
                feature_result = []
                var_names = []
                for i in np.arange (len(cell_feature_list)):
                    neighbors_feature = cell_cell_neigh_features (neighbors['cell_neigh_index'], cell_feature_list[i])
                    feature_result.append(neighbors_feature)
                    var_name = 'common_feature_' + '%s' % i
                    var_names.append(var_name)
                dicts_feature_result = {}
                for i, j in zip(var_names, feature_result):
                    dicts_feature_result[i] = j
            else:
                # Explicit type list: one mean-feature column per
                # (type, feature) pair, named '<type>_feature_<i>'.
                feature_result = []
                var_names = []
                for i in np.arange (len(cell_feature_list)):
                    for j in np.arange (len(type_index_list)):
                        neighbors_feature = cell_cell_neigh_features (type_index_list[j], cell_feature_list[i])
                        feature_result.append(neighbors_feature)
                        var_name = '%s' %types[j] + '_feature_' + '%s' % i
                        var_names.append(var_name)
                dicts_feature_result = {}
                for i, j in zip(var_names, feature_result):
                    dicts_feature_result[i] = j
        if cell_type == 'None':
            # No type breakdown: features averaged over all neighbors.
            feature_result = []
            var_names = []
            for i in np.arange (len(cell_feature_list)):
                neighbors_feature = cell_cell_neigh_features (neighbors['cell_neigh_index'], cell_feature_list[i])
                feature_result.append(neighbors_feature)
                var_name = 'common_feature_' + '%s' % i
                var_names.append(var_name)
            dicts_feature_result = {}
            for i, j in zip(var_names, feature_result):
                dicts_feature_result[i] = j
    if cell_feature_list == 'None':
        pass
    return (pd.DataFrame({**dicts_neigh_result, **dicts_feature_result}))
def cell_fibs_neigh (executed_fibs, cells_coords_list, radius):
    """Find, for every cell, the indexes of its neighboring fibers.

    A fiber is a neighbor of a cell when at least one of its skeleton
    pixels lies strictly within `radius` of the cell's coordinates.

    Arguments:
    - executed_fibs: dict with 'props_pruned' (regionprops of labeled fibers)
    - cells_coords_list: list of (y, x) cell coordinates
    - radius: radius of the neighborhood outline

    Returns a dict:
    - 'fibs_neigh_index': per-cell lists of neighboring fiber indexes
    - 'fibs_neigh_num': per-cell counts of neighboring fibers
    """
    fibers_coords = [np.array(props.coords) for props in executed_fibs['props_pruned']]
    cell_fibs_neigh_index = []
    for cell_xy in cells_coords_list:
        base = np.array(cell_xy)
        # Reduce each cell/fiber pair immediately with np.any instead of
        # materializing the full per-pixel boolean matrix for every pair
        # (the original kept all of them in memory before reducing).
        neigh = [j for j, coords in enumerate(fibers_coords)
                 if np.any(np.sqrt(np.sum((base - coords) ** 2, axis=1)) < radius)]
        cell_fibs_neigh_index.append(neigh)
    cell_fibs_neigh_num = [len(neigh) for neigh in cell_fibs_neigh_index]
    return ({'fibs_neigh_index': cell_fibs_neigh_index,
             'fibs_neigh_num': cell_fibs_neigh_num})
def cell_fibs_neigh_length (executed_fibs, cell_fibs_neigh_index):
    """Average the length (perimeter) of each cell's neighboring fibers.

    Arguments:
    - executed_fibs: dict with 'props_pruned' (regionprops of labeled fibers)
    - cell_fibs_neigh_index: per-cell lists of neighboring fiber indexes

    Returns a list with, for each cell, the mean perimeter of its
    neighboring fibers (nan for cells without neighboring fibers).
    """
    fibs_length = [props.perimeter for props in executed_fibs['props_pruned']]
    mean_result = []
    for neigh in cell_fibs_neigh_index:
        lengths = [fibs_length[j] for j in neigh]
        mean_result.append(np.mean(np.array(lengths)))
    return (mean_result)
def cell_fibs_neigh_angle (executed_fibs, cell_fibs_neigh_index):
    """Average the orientation angle (degrees) of neighboring fibers.

    A fiber's angle is arctan2(dy, dx) between its two extreme skeleton
    points, where the extremes are the first and last coordinates under
    np.lexsort of the coordinate columns. Cells without neighboring fibers
    yield nan (mean of an empty array).
    """
    props_pruned = executed_fibs['props_pruned']

    def _fiber_angle(props):
        # Fiber endpoints: first/last pixel in lexsort order.
        order = np.lexsort(np.array(props.coords).T[:])
        start = props.coords[order[0]]
        end = props.coords[order[-1]]
        return np.rad2deg(np.arctan2(end[0] - start[0], end[1] - start[1]))

    mean_result = []
    for neigh in cell_fibs_neigh_index:
        angles = [_fiber_angle(props_pruned[j]) for j in neigh]
        mean_result.append(np.mean(np.array(angles)))
    return (mean_result)
def cell_fibs_neigh_strightness (executed_fibs, cell_fibs_neigh_index):
    """Average the straightness of each cell's neighboring fibers.

    A fiber's straightness is the Euclidean distance between its two
    extreme skeleton points (first/last coordinates under np.lexsort)
    divided by its perimeter. Cells without neighboring fibers yield nan.
    """
    props_pruned = executed_fibs['props_pruned']

    def _fiber_straightness(props):
        order = np.lexsort(np.array(props.coords).T[:])
        endpoint_dist = distance.euclidean(props.coords[order[0]],
                                           props.coords[order[-1]])
        return endpoint_dist / props.perimeter

    mean_result = []
    for neigh in cell_fibs_neigh_index:
        values = [_fiber_straightness(props_pruned[j]) for j in neigh]
        mean_result.append(np.mean(np.array(values)))
    return (mean_result)
def cell_fibs_neigh_thikness (executed_fibs, cell_fibs_neigh_index):
    """Average the thickness of each cell's neighboring fibers.

    A fiber's thickness is the mean distance-transform value along its
    skeleton pixels: sum of (skeleton * distance) over the fiber's coords,
    divided by the fiber's pixel area.

    Arguments:
    - executed_fibs: dict with 'skeleton' and 'distance' images and
      'props_pruned' (regionprops of labeled fibers)
    - cell_fibs_neigh_index: per-cell lists of neighboring fiber indexes

    Returns a list with, for each cell, the mean thickness of its
    neighboring fibers (nan for cells without neighboring fibers).
    """
    # Per-pixel thickness image: distance-transform value on skeleton
    # pixels, zero elsewhere. The original built this row by row in a
    # Python loop; one vectorized product is equivalent.
    thikness_matrix = np.asarray(executed_fibs['skeleton']) * np.asarray(executed_fibs['distance'])
    labels_thikness = []
    for props in executed_fibs['props_pruned']:
        pixel_values = [thikness_matrix[y][x] for y, x in props.coords]
        labels_thikness.append(np.sum(pixel_values) / props.area)
    mean_result = []
    for neigh in cell_fibs_neigh_index:
        values = [labels_thikness[j] for j in neigh]
        mean_result.append(np.mean(np.array(values)))
    return (mean_result)
def cell_fibs_neigh_alignment(executed_fibs, cell_fibs_neigh_index):
    """Return, per cell, the mean pairwise alignment of its neighboring fibers.

    For each cell, every ordered pair (j, k) of distinct neighboring fibers
    contributes |cos(angle_j - angle_k)|, where a fiber's angle is the
    arctan2 orientation between its two extreme skeleton points (first and
    last coordinates under np.lexsort); the per-cell result is the mean
    over all such pairs.

    Arguments:
    - executed_fibs: dict with 'props_pruned' (regionprops of labeled fibers)
    - cell_fibs_neigh_index: per-cell lists of neighboring fiber indexes

    NOTE(review): the angle difference is converted to DEGREES before being
    passed to np.cos, which expects RADIANS - confirm whether this is the
    intended alignment metric.
    Cells with fewer than two neighboring fibers yield nan (mean of empty).
    """
    alignment_res = []
    for i in np.arange(len(cell_fibs_neigh_index)):
        fibs_cos = []
        alignment_res.append(fibs_cos)
        for j in cell_fibs_neigh_index[i]:
            fibs_alignment_mean = []
            fibs_cos.append(fibs_alignment_mean)
            for k in cell_fibs_neigh_index[i]:
                if j != k:
                    # |cos| of the (degree-valued) difference between the
                    # endpoint-to-endpoint orientations of fibers j and k.
                    alignment_list = np.cos(abs(np.rad2deg(np.arctan2(
                        executed_fibs['props_pruned'][j].coords[np.lexsort(np.array(executed_fibs['props_pruned'][j].coords).T[:])[-1]][0] -
                        executed_fibs['props_pruned'][j].coords[np.lexsort(np.array(executed_fibs['props_pruned'][j].coords).T[:])[0]][0],
                        executed_fibs['props_pruned'][j].coords[np.lexsort(np.array(executed_fibs['props_pruned'][j].coords).T[:])[-1]][1] -
                        executed_fibs['props_pruned'][j].coords[np.lexsort(np.array(executed_fibs['props_pruned'][j].coords).T[:])[0]][1]))
                        - np.rad2deg(np.arctan2(
                        executed_fibs['props_pruned'][k].coords[np.lexsort(np.array(executed_fibs['props_pruned'][k].coords).T[:])[-1]][0] -
                        executed_fibs['props_pruned'][k].coords[np.lexsort(np.array(executed_fibs['props_pruned'][k].coords).T[:])[0]][0],
                        executed_fibs['props_pruned'][k].coords[np.lexsort(np.array(executed_fibs['props_pruned'][k].coords).T[:])[-1]][1] -
                        executed_fibs['props_pruned'][k].coords[np.lexsort(np.array(executed_fibs['props_pruned'][k].coords).T[:])[0]][1]))))
                    fibs_alignment_mean.append(abs(alignment_list))
                else:
                    pass
    mean_fibs_alignment = []
    for i in alignment_res:
        # Mean over the nested per-pair lists; each inner list has
        # len(neighbors) - 1 entries, so np.mean can flatten them.
        mean_lin = np.mean(i)
        mean_fibs_alignment.append(mean_lin)
    return (mean_fibs_alignment)
def cell_fibs_spatial (executed_fibs, cells_coords_list, radius):
    """Build a per-cell pd.DataFrame of neighboring-fiber statistics.

    Arguments:
    - executed_fibs: dict describing the segmented fibers (as produced by
      the fiber executer step)
    - cells_coords_list: list of (y, x) cell coordinates
    - radius: radius of the neighborhood outline

    Returns a pd.DataFrame with, per cell: the neighboring-fiber count and
    the mean length, angle, straightness, thickness and alignment of those
    fibers.
    """
    neigh = cell_fibs_neigh(executed_fibs, cells_coords_list, radius)
    neigh_index = neigh['fibs_neigh_index']
    columns = {
        'fibs_neigh_num': neigh['fibs_neigh_num'],
        'length': cell_fibs_neigh_length(executed_fibs, neigh_index),
        'angle': cell_fibs_neigh_angle(executed_fibs, neigh_index),
        'strightness': cell_fibs_neigh_strightness(executed_fibs, neigh_index),
        'thickness': cell_fibs_neigh_thikness(executed_fibs, neigh_index),
        'alignment': cell_fibs_neigh_alignment(executed_fibs, neigh_index),
    }
    return (pd.DataFrame(columns))
| 21,110 | 0 | 279 |
3badc8f6a19fe4797842b0da596e36fea30ea526 | 4,563 | py | Python | tile.py | Cocodidou/projetFusee | 9ce85fb3463b1039e4a2e414c055b680ba909fb8 | [
"Unlicense"
] | null | null | null | tile.py | Cocodidou/projetFusee | 9ce85fb3463b1039e4a2e414c055b680ba909fb8 | [
"Unlicense"
] | null | null | null | tile.py | Cocodidou/projetFusee | 9ce85fb3463b1039e4a2e414c055b680ba909fb8 | [
"Unlicense"
] | null | null | null | # has complex polygon as tile shape, and lots of moving objects
import turtle
import engine
import math
WIDTH = 640
HEIGHT = 480
S = 25 # base unit size for ship
B = 50 # base unit size for tiles
GRIDCOLS = 1 + math.ceil(WIDTH / B) + 1
GRIDROWS = 1 + math.ceil(HEIGHT / B) + 1
SPEED = 3
HEADINGSTEP = 5
heading = 180
deltax = None # set based on heading and SPEED
deltay = None # set based on heading and SPEED
# add last, after tiles, and don't make it a static object
# and it stays on top, even if it's not moving - it gets
# re-rendered each time step
#
# complication: this means that tile objects in tile grid
# need to be recycled so they stay early in the object list,
# or add_obj needs to be extended to allow insertion at the
# head of the object list
# tile is a compound shape and can have multiple colors
if __name__ == '__main__':
engine.init_screen(WIDTH, HEIGHT)
engine.init_engine(delay=0) # no delay needed with so many objects!
engine.set_keyboard_handler(input_cb)
recalcdeltas()
makeshipshape()
maketileshape()
maketilegrid()
engine.add_obj(Me()) # needs to be after tile grid created
engine.engine()
| 26.224138 | 72 | 0.674775 | # has complex polygon as tile shape, and lots of moving objects
import turtle
import engine
import math
WIDTH = 640   # screen width, pixels
HEIGHT = 480  # screen height, pixels
S = 25 # base unit size for ship
B = 50 # base unit size for tiles
# grid is one tile wider/taller than the screen on each side, so tiles can
# scroll in from off-screen without leaving visible gaps
GRIDCOLS = 1 + math.ceil(WIDTH / B) + 1
GRIDROWS = 1 + math.ceil(HEIGHT / B) + 1
SPEED = 3        # scroll speed, pixels per time step
HEADINGSTEP = 5  # heading change per arrow-key press, degrees
heading = 180    # current scroll direction, degrees
deltax = None # set based on heading and SPEED
deltay = None # set based on heading and SPEED
class Tile(engine.GameObject):
    """One background tile of the scrolling 'infinite' grid."""
    def __init__(self, x, y):
        # color is already set for compound object, so 'blue' is ignored
        super().__init__(x, y, deltax, deltay, 'tile', 'blue')
    def heading(self):
        """Tiles always render at a fixed 90-degree heading."""
        return 90
    def move(self):
        """Scroll with the current global velocity (set by recalcdeltas)."""
        self.x += deltax
        self.y += deltay
    def isoob(self):
        """Never report out-of-bounds; wrap the tile to the far side instead."""
        # as part of an "infinite" tile grid, it never really
        # goes out of bounds - simply shift existing tile to
        # entering tile's position; this keeps engine from
        # deleting the object and messing up the drawing order
        leftedge = -WIDTH // 2
        rightedge = WIDTH // 2
        topedge = HEIGHT // 2
        bottomedge = -HEIGHT // 2
        # once fully past an edge, jump by the whole grid span so the tile
        # re-enters from the opposite side
        if self.x <= leftedge - B:
            self.x += B * GRIDCOLS
        elif self.x >= rightedge:
            self.x -= B * GRIDCOLS
        if self.y >= topedge + B:
            self.y -= B * GRIDROWS
        elif self.y <= bottomedge:
            self.y += B * GRIDROWS
        return False
class Me(engine.GameObject):
    """The player's ship: fixed at (10, 10) while the tile grid scrolls."""
    # add last, after tiles, and don't make it a static object
    # and it stays on top, even if it's not moving - it gets
    # re-rendered each time step
    #
    # complication: this means that tile objects in tile grid
    # need to be recycled so they stay early in the object list,
    # or add_obj needs to be extended to allow insertion at the
    # head of the object list
    def __init__(self):
        super().__init__(10, 10, 0, 0, 'spaceship', 'red')
    def heading(self):
        """Render heading offset by -90 degrees from the global heading
        (presumably compensating for the polygon's drawn orientation -
        see makeshipshape)."""
        return heading - 90
def makeshipshape():
    """Trace the ship polygon with the turtle and register it as 'spaceship'.

    The outline is symmetric: left wing with two turrets, a trapezoidal
    cockpit, then the mirrored right side. Statement order matters - each
    fd/bk/lt/rt continues from the previous pen position and heading.
    """
    turtle.home() # return to known location & orientation
    # cockpit is a trapezoid - figure out angles and length of side
    adj = (S / 3 - S / 8) / 2
    thetarad = math.atan2(S, adj)
    theta = math.degrees(thetarad)
    hyp = S / math.sin(thetarad)
    turtle.begin_poly()
    turtle.bk(S * 1/2 / 2) # origin = center of wing - move to back
    turtle.lt(90) # left wing
    turtle.fd(S / 2)
    turtle.rt(30) # left rear turret
    turtle.bk(S / 8)
    turtle.fd(S / 8)
    turtle.rt(60)
    turtle.fd(S * 1/2)
    turtle.rt(60) # left front turret
    turtle.fd(S / 8)
    turtle.bk(S / 8)
    turtle.rt(30)
    turtle.fd(S / 3) # join of wing and left side of cockpit
    turtle.lt(theta) # left side of cockpit
    turtle.fd(hyp)
    turtle.rt(theta)
    turtle.fd(S / 8) # front of cockpit
    turtle.rt(theta) # right side of cockpit
    turtle.fd(hyp)
    turtle.lt(theta) # join of right side of cockpit and wing
    turtle.fd(S / 3) # right wing
    turtle.rt(30) # right front turret
    turtle.bk(S / 8)
    turtle.fd(S / 8)
    turtle.rt(60)
    turtle.fd(S * 1/2)
    turtle.rt(60) # right rear turret
    turtle.fd(S / 8)
    turtle.bk(S / 8)
    turtle.rt(30)
    turtle.fd(S / 2)
    turtle.end_poly()
    poly = turtle.get_poly()
    turtle.register_shape('spaceship', poly)
# tile is a compound shape and can have multiple colors
def maketileshape():
    """Trace the tile polygon (a square with an inset circle) and register
    it as the compound shape 'tile', with separate fill colors per part."""
    turtle.home() # return to known location & orientation
    turtle.begin_poly() # square
    for i in range(4):
        turtle.fd(B)
        turtle.rt(90)
    turtle.end_poly()
    poly1 = turtle.get_poly()
    # don't put this inside begin_poly...end_poly or it draws an extra line
    turtle.goto( (B / 2, -B * 2/3 - B/6) )
    turtle.begin_poly() # circle, inside square
    turtle.circle(B / 3)
    turtle.end_poly()
    poly2 = turtle.get_poly()
    cs = turtle.Shape('compound')
    # args are poly, fill color, line color
    cs.addcomponent(poly1, 'gray', 'black')
    cs.addcomponent(poly2, 'blue', 'gray42')
    turtle.register_shape('tile', cs)
def maketilegrid():
    """Fill the playfield with a GRIDROWS x GRIDCOLS grid of background tiles."""
    for row_num in range(GRIDROWS):
        # Rows grow downward from the top edge; columns rightward from the left.
        y = HEIGHT // 2 - row_num * B
        for col_num in range(GRIDCOLS):
            engine.add_obj(Tile(col_num * B - WIDTH // 2, y))
def recalcdeltas():
    """Recompute the per-frame x/y velocity components from the current heading."""
    global deltax, deltay
    theta = math.radians(heading)
    deltax = SPEED * math.cos(theta)
    deltay = SPEED * math.sin(theta)
def input_cb(key):
    """Keyboard handler: Q quits; Left/Right rotate the ship by HEADINGSTEP."""
    global heading
    if key in ('q', 'Q'):
        engine.exit_engine()
    elif key == 'Left':
        heading = (heading + HEADINGSTEP) % 360
    elif key == 'Right':
        heading = (heading - HEADINGSTEP) % 360
    # Velocity always tracks the (possibly updated) heading.
    recalcdeltas()
if __name__ == '__main__':
    # Boot order matters: screen/engine first, then the shapes (which need a
    # live screen to trace), then the tile grid, and finally the player so it
    # is drawn on top of the tiles.
    engine.init_screen(WIDTH, HEIGHT)
    engine.init_engine(delay=0) # no delay needed with so many objects!
    engine.set_keyboard_handler(input_cb)
    recalcdeltas()
    makeshipshape()
    maketileshape()
    maketilegrid()
    engine.add_obj(Me()) # needs to be after tile grid created
    engine.engine()
| 3,085 | 16 | 299 |
d2bfbf45d94b686eb9237a693c5e11ec6c6ff936 | 225 | py | Python | setup.py | Pavantelugura/mlops_main | 611bcb21ef3eaa3d00e758fb2414e589d46f1396 | [
"MIT"
] | null | null | null | setup.py | Pavantelugura/mlops_main | 611bcb21ef3eaa3d00e758fb2414e589d46f1396 | [
"MIT"
] | null | null | null | setup.py | Pavantelugura/mlops_main | 611bcb21ef3eaa3d00e758fb2414e589d46f1396 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Minimal setuptools metadata for the `src` package (wafer-sensor predictor).
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='To predict the values of wafer sensor',
    author='Pavantelugura',
    license='MIT',
)
| 20.454545 | 56 | 0.675556 | from setuptools import find_packages, setup
# Minimal setuptools metadata for the `src` package (wafer-sensor predictor).
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='To predict the values of wafer sensor',
    author='Pavantelugura',
    license='MIT',
)
| 0 | 0 | 0 |
4563ae23f41e867d23fff79cfb460419275a4032 | 11,436 | py | Python | wf_psf/utils.py | tobias-liaudat/wf-psf | 0ff1a12d06c46bd8599061d227785393fb528d76 | [
"MIT"
] | 7 | 2022-03-10T10:49:01.000Z | 2022-03-17T16:06:12.000Z | wf_psf/utils.py | tobias-liaudat/wf-psf | 0ff1a12d06c46bd8599061d227785393fb528d76 | [
"MIT"
] | null | null | null | wf_psf/utils.py | tobias-liaudat/wf-psf | 0ff1a12d06c46bd8599061d227785393fb528d76 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
import PIL
import zernike as zk
try:
from cv2 import resize, INTER_AREA
except:
print('Problem importing opencv..')
def generate_SED_elems(SED, sim_psf_toolkit, n_bins=20):
    r"""Generate the SED elements needed for using the TF_poly_PSF.

    sim_psf_toolkit: An instance of the SimPSFToolkit class with the correct
    initialization values.
    """
    # Binned wavelengths and their normalized SED weights.
    wavelengths, sed_norm = sim_psf_toolkit.calc_SED_wave_values(SED, n_bins)
    # Per-wavelength feasible pupil size N.
    n_values = np.array([sim_psf_toolkit.feasible_N(wv) for wv in wavelengths])
    return n_values, wavelengths, sed_norm
def generate_packed_elems(SED, sim_psf_toolkit, n_bins=20):
    r"""Generate the packed values for using the TF_poly_PSF."""
    n_vals, wavelengths, sed_norm = generate_SED_elems(SED, sim_psf_toolkit, n_bins=n_bins)
    # Pack everything as float64 tensors, in (N, wavelength, SED-norm) order.
    packed = [
        tf.convert_to_tensor(n_vals, dtype=tf.float64),
        tf.convert_to_tensor(wavelengths, dtype=tf.float64),
        tf.convert_to_tensor(sed_norm, dtype=tf.float64),
    ]
    return packed
def calc_poly_position_mat(pos, x_lims, y_lims, d_max):
    r"""Calculate a matrix of 2D position polynomials.

    Positions are rescaled from [x_lims[0], x_lims[1]] x [y_lims[0], y_lims[1]]
    to [-1, 1] x [-1, 1], then every monomial x^(d-p) * y^p for
    d = 0..d_max, p = 0..d is evaluated at each position.

    Fix: removed the unused local ``row_idx`` computed in the loop.

    Parameters
    ----------
    pos: array-like (n_positions, 2)
        (x, y) positions.
    x_lims, y_lims: sequences of 2 floats
        Field limits used for the rescaling.
    d_max: int
        Maximum total polynomial degree.

    Returns
    -------
    tf.Tensor (n_monomials, n_positions), float32
    """
    # Scale positions to [-1, 1] in both axes
    scaled_pos_x = (pos[:, 0] - x_lims[0]) / (x_lims[1] - x_lims[0])
    scaled_pos_x = (scaled_pos_x - 0.5) * 2
    scaled_pos_y = (pos[:, 1] - y_lims[0]) / (y_lims[1] - y_lims[0])
    scaled_pos_y = (scaled_pos_y - 0.5) * 2
    poly_list = []
    for d in range(d_max + 1):
        for p in range(d + 1):
            poly_list.append(scaled_pos_x**(d - p) * scaled_pos_y**p)
    return tf.convert_to_tensor(poly_list, dtype=tf.float32)
def decimate_im(input_im, decim_f):
    r"""Decimate an image by an integer factor ``decim_f``.

    Uses PIL's ``resize`` with its default interpolator (PIL.Image.BICUBIC).
    """
    pil_im = PIL.Image.fromarray(input_im)
    new_size = (pil_im.width // decim_f, pil_im.height // decim_f)
    return np.array(pil_im.resize(new_size))
def downsample_im(input_im, output_dim):
    r"""Downsample image.

    Based on opencv function resize.
    [doc](https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#void%20resize(InputArray%20src,%20OutputArray%20dst,%20Size%20dsize,%20double%20fx,%20double%20fy,%20int%20interpolation))
    The input image is downsampled to the dimensions specified in `output_dim`.

    The downsampling method is based on the `INTER_AREA` method.
    See [tensorflow_doc](https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/resize-area)
    Each output pixel is computed by first transforming the pixel's footprint
    into the input tensor and then averaging the pixels that intersect the
    footprint. An input pixel's contribution to the average is weighted by the
    fraction of its area that intersects the footprint.
    This is the same as OpenCV's INTER_AREA.
    An explanation of the INTER_AREA method can be found in the next
    [link](https://medium.com/@wenrudong/what-is-opencvs-inter-area-actually-doing-282a626a09b3).
    This version should be consistent with the tensorflow one.

    Parameters
    ----------
    input_im: np.ndarray (dim_x, dim_y)
        input image
    output_dim: int
        Contains the dimension of the square output image.
    """
    # cv2 area-averages the input pixels covered by each output pixel.
    return resize(input_im, (output_dim, output_dim), interpolation=INTER_AREA)
def zernike_generator(n_zernikes, wfe_dim):
    r"""Generate the first `n_zernikes` Zernike maps on a wfe_dim x wfe_dim grid.

    Based on the zernike github repository
    (https://github.com/jacopoantonello/zernike). Values outside the unit
    circle are filled with NaNs.

    Parameters
    ----------
    n_zernikes: int
        Number of Zernike modes desired.
    wfe_dim: int
        Dimension of the Zernike map [wfe_dim x wfe_dim].

    Returns
    -------
    zernikes: list of np.ndarray
        List containing the Zernike modes.
    """
    # Smallest radial order n of the (n, m) convention whose total number of
    # coefficients, (n+1)(n+2)/2, is at least n_zernikes.
    radial_order = int(np.ceil((-3 + np.sqrt(1 + 8 * n_zernikes)) / 2))
    cart = zk.RZern(radial_order)
    # Evaluate the basis on a [-1, 1] x [-1, 1] Cartesian grid.
    axis = np.linspace(-1.0, 1.0, wfe_dim)
    xv, yv = np.meshgrid(axis, axis)
    cart.make_cart_grid(xv, yv)
    # Extract each mode by evaluating a one-hot coefficient vector.
    zernikes = []
    for idx in range(n_zernikes):
        coeffs = np.zeros(cart.nk)
        coeffs[idx] = 1.0
        zernikes.append(cart.eval_grid(coeffs, matrix=True))
    return zernikes
def add_noise(image, desired_SNR):
    """Add white Gaussian noise to an image to obtain a desired SNR.

    The noise sigma is chosen so that sum(image**2) / (sigma**2 * n_pixels)
    equals ``desired_SNR``. Generalized: uses ``image.size`` instead of
    ``shape[0] * shape[1]``, so arrays of any dimensionality work (identical
    result for the original 2D case).
    """
    sigma_noise = np.sqrt(np.sum(image**2) / (desired_SNR * image.size))
    return image + np.random.standard_normal(image.shape) * sigma_noise
class NoiseEstimator(object):
    """ Noise estimator.

    Estimates an image's noise sigma from background pixels selected by a
    boolean mask, using the robust MAD estimator.

    Parameters
    ----------
    img_dim: tuple of int
        Image size
    win_rad: int
        window radius in pixels
    """
    # NOTE(review): this excerpt contains no __init__/_init_window, yet
    # `estimate_noise` reads `self.window`; the initializer that builds the
    # boolean background mask appears to have been stripped — confirm
    # against the full source.

    @staticmethod
    def sigma_mad(x):
        r"""Compute an estimation of the standard deviation
        of a Gaussian distribution using the robust
        MAD (Median Absolute Deviation) estimator."""
        return 1.4826 * np.median(np.abs(x - np.median(x)))

    def estimate_noise(self, image):
        r"""Estimate the noise level of the image."""
        # Calculate noise std dev over the masked (background) pixels.
        return self.sigma_mad(image[self.window])
class ZernikeInterpolation(object):
    """ Interpolate zernikes

    This class helps to interpolate zernikes using only the closest K elements
    in a given dataset using a RBF interpolation.

    Parameters
    ----------
    tf_pos: Tensor (n_sources, 2)
        Positions
    tf_zks: Tensor (n_sources, n_zernikes)
        Zernike coefficients for each position
    k: int
        Number of elements to use for the interpolation.
        Default is 50
    order: int
        Order of the RBF interpolation.
        Default is 2, corresponds to thin plate interp (r^2*log(r))
    """
    # NOTE(review): no __init__ appears in this excerpt although the methods
    # read self.tf_pos / self.tf_zks / self.k / self.order — presumably
    # stripped; confirm against the full source.

    def interpolate_zk(self, single_pos):
        """ Interpolate a single position
        """
        # Compute distance; negated so top_k (largest-first) yields nearest.
        dist = tf.math.reduce_euclidean_norm(self.tf_pos - single_pos, axis=1) * -1.
        # Get top K elements
        result = tf.math.top_k(dist, k=self.k)
        # Gather useful elements from the array
        rec_pos = tf.gather(
            self.tf_pos, result.indices, validate_indices=None, axis=0, batch_dims=0,
        )
        rec_zks = tf.gather(
            self.tf_zks, result.indices, validate_indices=None, axis=0, batch_dims=0,
        )
        # Interpolate with an RBF spline fitted on the K neighbours only.
        interp_zk = tfa.image.interpolate_spline(
            train_points=tf.expand_dims(rec_pos, axis=0),
            train_values=tf.expand_dims(rec_zks, axis=0),
            query_points=tf.expand_dims(single_pos[tf.newaxis,:], axis=0),
            order=self.order,
            regularization_weight=0.0
        )
        # Remove extra dimension required by tfa's interpolate_spline
        interp_zk = tf.squeeze(interp_zk, axis=0)
        return interp_zk

    def interpolate_zks(self, interp_positions):
        """ Vectorize to interpolate to each position
        """
        interp_zks = tf.map_fn(
            self.interpolate_zk,
            interp_positions,
            parallel_iterations=10,
            fn_output_signature=tf.float32,
            swap_memory=True
        )
        return tf.squeeze(interp_zks, axis=1)
class IndependentZernikeInterpolation(object):
    """ Interpolate each Zernike polynomial independently

    The interpolation is done independently for each Zernike polynomial.

    Parameters
    ----------
    tf_pos: Tensor (n_sources, 2)
        Positions
    tf_zks: Tensor (n_sources, n_zernikes)
        Zernike coefficients for each position
    order: int
        Order of the RBF interpolation.
        Default is 2, corresponds to thin plate interp (r^2*log(r))
    """
    # NOTE(review): no __init__ appears in this excerpt although the methods
    # read self.tf_pos / self.tf_zks / self.order and interpolate_zks sets
    # self.target_pos — presumably stripped; confirm against the full source.

    def interp_one_zk(self, zk_prior):
        """ Interpolate each Zerkine polynomial independently
        """
        interp_zk = tfa.image.interpolate_spline(
            train_points=tf.expand_dims(self.tf_pos, axis=0),
            train_values=tf.expand_dims(zk_prior[:,tf.newaxis], axis=0),
            query_points=tf.expand_dims(self.target_pos, axis=0),
            order=self.order,
            regularization_weight=0.0
        )
        # Remove extra dimension required by tfa's interpolate_spline
        return tf.squeeze(interp_zk, axis=0)

    def interpolate_zks(self, target_pos):
        """ Vectorize to interpolate to each Zernike!

        Each zernike is computed indepently from the others.
        """
        # Stored so interp_one_zk (mapped below) can read the query points.
        self.target_pos = target_pos
        interp_zks = tf.map_fn(
            self.interp_one_zk,
            tf.transpose(self.tf_zks, perm=[1,0]),
            parallel_iterations=10,
            fn_output_signature=tf.float32,
            swap_memory=True
        )
        # Remove null dimension and transpose back to have batch at input
        return tf.transpose(tf.squeeze(interp_zks, axis=2), perm=[1,0])
| 32.862069 | 211 | 0.649091 | import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
import PIL
import zernike as zk
try:
from cv2 import resize, INTER_AREA
except:
print('Problem importing opencv..')
def scale_to_range(input_array, old_range, new_range):
    """Linearly map values from `old_range` onto `new_range` (scalar or array)."""
    old_lo, old_hi = old_range
    new_lo, new_hi = new_range
    # Normalize to [0, 1], then stretch/shift onto the target interval.
    unit = (input_array - old_lo) / (old_hi - old_lo)
    return unit * (new_hi - new_lo) + new_lo
def calc_wfe(zernike_basis, zks):
    """Wavefront error map: sum of the basis maps weighted by the coefficients."""
    weights = zks.reshape(-1, 1, 1)
    return np.sum(zernike_basis * weights, axis=0)
def calc_wfe_rms(zernike_basis, zks, pupil_mask):
    """RMS of the (mean-subtracted) wavefront error over the pupil mask."""
    # Inlined WFE computation: coefficient-weighted sum of the basis maps.
    wfe = np.sum(zernike_basis * zks.reshape(-1, 1, 1), axis=0)
    masked = wfe[pupil_mask]
    return np.sqrt(np.mean((masked - np.mean(masked)) ** 2))
def generate_SED_elems(SED, sim_psf_toolkit, n_bins=20):
    r"""Generate the SED elements needed for using the TF_poly_PSF.

    sim_psf_toolkit: An instance of the SimPSFToolkit class with the correct
    initialization values.
    """
    # Binned wavelengths and their normalized SED weights.
    wavelengths, sed_norm = sim_psf_toolkit.calc_SED_wave_values(SED, n_bins)
    # Per-wavelength feasible pupil size N.
    n_values = np.array([sim_psf_toolkit.feasible_N(wv) for wv in wavelengths])
    return n_values, wavelengths, sed_norm
def generate_packed_elems(SED, sim_psf_toolkit, n_bins=20):
    r"""Generate the packed values for using the TF_poly_PSF."""
    n_vals, wavelengths, sed_norm = generate_SED_elems(SED, sim_psf_toolkit, n_bins=n_bins)
    # Pack everything as float64 tensors, in (N, wavelength, SED-norm) order.
    packed = [
        tf.convert_to_tensor(n_vals, dtype=tf.float64),
        tf.convert_to_tensor(wavelengths, dtype=tf.float64),
        tf.convert_to_tensor(sed_norm, dtype=tf.float64),
    ]
    return packed
def calc_poly_position_mat(pos, x_lims, y_lims, d_max):
    r"""Calculate a matrix of 2D position polynomials.

    Positions are rescaled from [x_lims[0], x_lims[1]] x [y_lims[0], y_lims[1]]
    to [-1, 1] x [-1, 1], then every monomial x^(d-p) * y^p for
    d = 0..d_max, p = 0..d is evaluated at each position.

    Fix: removed the unused local ``row_idx`` computed in the loop.

    Parameters
    ----------
    pos: array-like (n_positions, 2)
        (x, y) positions.
    x_lims, y_lims: sequences of 2 floats
        Field limits used for the rescaling.
    d_max: int
        Maximum total polynomial degree.

    Returns
    -------
    tf.Tensor (n_monomials, n_positions), float32
    """
    # Scale positions to [-1, 1] in both axes
    scaled_pos_x = (pos[:, 0] - x_lims[0]) / (x_lims[1] - x_lims[0])
    scaled_pos_x = (scaled_pos_x - 0.5) * 2
    scaled_pos_y = (pos[:, 1] - y_lims[0]) / (y_lims[1] - y_lims[0])
    scaled_pos_y = (scaled_pos_y - 0.5) * 2
    poly_list = []
    for d in range(d_max + 1):
        for p in range(d + 1):
            poly_list.append(scaled_pos_x**(d - p) * scaled_pos_y**p)
    return tf.convert_to_tensor(poly_list, dtype=tf.float32)
def decimate_im(input_im, decim_f):
    r"""Decimate an image by an integer factor ``decim_f``.

    Uses PIL's ``resize`` with its default interpolator (PIL.Image.BICUBIC).
    """
    pil_im = PIL.Image.fromarray(input_im)
    new_size = (pil_im.width // decim_f, pil_im.height // decim_f)
    return np.array(pil_im.resize(new_size))
def downsample_im(input_im, output_dim):
    r"""Downsample image.

    Based on opencv function resize.
    [doc](https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#void%20resize(InputArray%20src,%20OutputArray%20dst,%20Size%20dsize,%20double%20fx,%20double%20fy,%20int%20interpolation))
    The input image is downsampled to the dimensions specified in `output_dim`.

    The downsampling method is based on the `INTER_AREA` method.
    See [tensorflow_doc](https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/resize-area)
    Each output pixel is computed by first transforming the pixel's footprint
    into the input tensor and then averaging the pixels that intersect the
    footprint. An input pixel's contribution to the average is weighted by the
    fraction of its area that intersects the footprint.
    This is the same as OpenCV's INTER_AREA.
    An explanation of the INTER_AREA method can be found in the next
    [link](https://medium.com/@wenrudong/what-is-opencvs-inter-area-actually-doing-282a626a09b3).
    This version should be consistent with the tensorflow one.

    Parameters
    ----------
    input_im: np.ndarray (dim_x, dim_y)
        input image
    output_dim: int
        Contains the dimension of the square output image.
    """
    # cv2 area-averages the input pixels covered by each output pixel.
    return resize(input_im, (output_dim, output_dim), interpolation=INTER_AREA)
def zernike_generator(n_zernikes, wfe_dim):
    r"""Generate the first `n_zernikes` Zernike maps on a wfe_dim x wfe_dim grid.

    Based on the zernike github repository
    (https://github.com/jacopoantonello/zernike). Values outside the unit
    circle are filled with NaNs.

    Parameters
    ----------
    n_zernikes: int
        Number of Zernike modes desired.
    wfe_dim: int
        Dimension of the Zernike map [wfe_dim x wfe_dim].

    Returns
    -------
    zernikes: list of np.ndarray
        List containing the Zernike modes.
    """
    # Smallest radial order n of the (n, m) convention whose total number of
    # coefficients, (n+1)(n+2)/2, is at least n_zernikes.
    radial_order = int(np.ceil((-3 + np.sqrt(1 + 8 * n_zernikes)) / 2))
    cart = zk.RZern(radial_order)
    # Evaluate the basis on a [-1, 1] x [-1, 1] Cartesian grid.
    axis = np.linspace(-1.0, 1.0, wfe_dim)
    xv, yv = np.meshgrid(axis, axis)
    cart.make_cart_grid(xv, yv)
    # Extract each mode by evaluating a one-hot coefficient vector.
    zernikes = []
    for idx in range(n_zernikes):
        coeffs = np.zeros(cart.nk)
        coeffs[idx] = 1.0
        zernikes.append(cart.eval_grid(coeffs, matrix=True))
    return zernikes
def add_noise(image, desired_SNR):
    """Add white Gaussian noise to an image to obtain a desired SNR.

    The noise sigma is chosen so that sum(image**2) / (sigma**2 * n_pixels)
    equals ``desired_SNR``. Generalized: uses ``image.size`` instead of
    ``shape[0] * shape[1]``, so arrays of any dimensionality work (identical
    result for the original 2D case).
    """
    sigma_noise = np.sqrt(np.sum(image**2) / (desired_SNR * image.size))
    return image + np.random.standard_normal(image.shape) * sigma_noise
class NoiseEstimator(object):
    """ Noise estimator.

    Estimates an image's noise sigma from the pixels outside a centered
    circular window, using the robust MAD estimator.

    Parameters
    ----------
    img_dim: tuple of int
        Image size
    win_rad: int
        window radius in pixels
    """

    def __init__(self, img_dim, win_rad):
        self.img_dim = img_dim
        self.win_rad = win_rad
        self.window = None
        self._init_window()

    def _init_window(self):
        # Background mask: True outside the centered disc of radius win_rad.
        # The geometric image center is used instead of a measured centroid,
        # and win_rad should be large enough to exclude all stellar flux.
        dx = np.arange(self.img_dim[0])[:, None] - self.img_dim[0] / 2
        dy = np.arange(self.img_dim[1])[None, :] - self.img_dim[1] / 2
        self.window = np.sqrt(dx ** 2 + dy ** 2) > self.win_rad

    @staticmethod
    def sigma_mad(x):
        r"""Robust std-dev estimate of a Gaussian distribution via the
        MAD (Median Absolute Deviation) estimator."""
        return 1.4826 * np.median(np.abs(x - np.median(x)))

    def estimate_noise(self, image):
        r"""Estimate the noise level from the background pixels of `image`."""
        return self.sigma_mad(image[self.window])
class ZernikeInterpolation(object):
    """ Interpolate zernikes

    This class helps to interpolate zernikes using only the closest K elements
    in a given dataset using a RBF interpolation.

    Parameters
    ----------
    tf_pos: Tensor (n_sources, 2)
        Positions
    tf_zks: Tensor (n_sources, n_zernikes)
        Zernike coefficients for each position
    k: int
        Number of elements to use for the interpolation.
        Default is 50
    order: int
        Order of the RBF interpolation.
        Default is 2, corresponds to thin plate interp (r^2*log(r))
    """

    def __init__(self, tf_pos, tf_zks, k=50, order=2):
        self.tf_pos = tf_pos
        self.tf_zks = tf_zks
        self.k = k
        self.order = order

    def interpolate_zk(self, single_pos):
        """ Interpolate a single position
        """
        # Compute distance; negated so top_k (largest-first) yields nearest.
        dist = tf.math.reduce_euclidean_norm(self.tf_pos - single_pos, axis=1) * -1.
        # Get top K elements
        result = tf.math.top_k(dist, k=self.k)
        # Gather useful elements from the array
        rec_pos = tf.gather(
            self.tf_pos, result.indices, validate_indices=None, axis=0, batch_dims=0,
        )
        rec_zks = tf.gather(
            self.tf_zks, result.indices, validate_indices=None, axis=0, batch_dims=0,
        )
        # Interpolate with an RBF spline fitted on the K neighbours only.
        interp_zk = tfa.image.interpolate_spline(
            train_points=tf.expand_dims(rec_pos, axis=0),
            train_values=tf.expand_dims(rec_zks, axis=0),
            query_points=tf.expand_dims(single_pos[tf.newaxis,:], axis=0),
            order=self.order,
            regularization_weight=0.0
        )
        # Remove extra dimension required by tfa's interpolate_spline
        interp_zk = tf.squeeze(interp_zk, axis=0)
        return interp_zk

    def interpolate_zks(self, interp_positions):
        """ Vectorize to interpolate to each position
        """
        interp_zks = tf.map_fn(
            self.interpolate_zk,
            interp_positions,
            parallel_iterations=10,
            fn_output_signature=tf.float32,
            swap_memory=True
        )
        return tf.squeeze(interp_zks, axis=1)
class IndependentZernikeInterpolation(object):
    """ Interpolate each Zernike polynomial independently

    The interpolation is done independently for each Zernike polynomial.

    Parameters
    ----------
    tf_pos: Tensor (n_sources, 2)
        Positions
    tf_zks: Tensor (n_sources, n_zernikes)
        Zernike coefficients for each position
    order: int
        Order of the RBF interpolation.
        Default is 2, corresponds to thin plate interp (r^2*log(r))
    """

    def __init__(self, tf_pos, tf_zks, order=2):
        self.tf_pos = tf_pos
        self.tf_zks = tf_zks
        self.order = order
        # Query positions; set by interpolate_zks before the mapped calls.
        self.target_pos = None

    def interp_one_zk(self, zk_prior):
        """ Interpolate each Zerkine polynomial independently
        """
        interp_zk = tfa.image.interpolate_spline(
            train_points=tf.expand_dims(self.tf_pos, axis=0),
            train_values=tf.expand_dims(zk_prior[:,tf.newaxis], axis=0),
            query_points=tf.expand_dims(self.target_pos, axis=0),
            order=self.order,
            regularization_weight=0.0
        )
        # Remove extra dimension required by tfa's interpolate_spline
        return tf.squeeze(interp_zk, axis=0)

    def interpolate_zks(self, target_pos):
        """ Vectorize to interpolate to each Zernike!

        Each zernike is computed indepently from the others.
        """
        self.target_pos = target_pos
        interp_zks = tf.map_fn(
            self.interp_one_zk,
            tf.transpose(self.tf_zks, perm=[1,0]),
            parallel_iterations=10,
            fn_output_signature=tf.float32,
            swap_memory=True
        )
        # Remove null dimension and transpose back to have batch at input
        return tf.transpose(tf.squeeze(interp_zks, axis=2), perm=[1,0])
| 1,633 | 0 | 185 |
7d0a3fd29bde06f254172fba68a5ca944366249f | 623 | py | Python | src/pyfel/main/constants.py | bellaz89/pyFEL | d96bc50096d32dac4ba957f0fc022bc377232680 | [
"MIT"
] | 1 | 2021-02-24T04:31:36.000Z | 2021-02-24T04:31:36.000Z | src/pyfel/main/constants.py | bellaz89/pyFEL | d96bc50096d32dac4ba957f0fc022bc377232680 | [
"MIT"
] | null | null | null | src/pyfel/main/constants.py | bellaz89/pyFEL | d96bc50096d32dac4ba957f0fc022bc377232680 | [
"MIT"
] | null | null | null | '''
Physical constants
'''
from scipy.constants import c, e, epsilon_0, physical_constants
C = c  # speed of light [m/s]
ELEM_CHARGE = e  # elementary charge [C]
VACUUM_IMPEDANCE = 1.0/(c*epsilon_0)  # free-space impedance Z0 = 1/(c*eps0) [ohm]
ELECTRON_MASS_EV = physical_constants['electron mass energy equivalent in MeV'][0]*1e6  # [eV]

if __name__ == "__main__":
    # Smoke test: print the constants and a couple of derived quantities.
    print("Speed of light (m/s):", C)
    print("Elementary charge (C):", ELEM_CHARGE)
    print("Speed of light x elementary charge (C*m/s):", C*ELEM_CHARGE)
    print("Vacuum impedance (ohm):", VACUUM_IMPEDANCE)
    print("Electron mass (eV):", ELECTRON_MASS_EV)
| 34.611111 | 86 | 0.605136 | '''
Physical constants
'''
from scipy.constants import c, e, epsilon_0, physical_constants
C = c  # speed of light [m/s]
ELEM_CHARGE = e  # elementary charge [C]
VACUUM_IMPEDANCE = 1.0/(c*epsilon_0)  # free-space impedance Z0 = 1/(c*eps0) [ohm]
ELECTRON_MASS_EV = physical_constants['electron mass energy equivalent in MeV'][0]*1e6  # [eV]

if __name__ == "__main__":
    # Smoke test: print the constants and a couple of derived quantities.
    print("Speed of light (m/s):", C)
    print("Elementary charge (C):", ELEM_CHARGE)
    print("Speed of light x elementary charge (C*m/s):", C*ELEM_CHARGE)
    print("Vacuum impedance (ohm):", VACUUM_IMPEDANCE)
    print("Electron mass (eV):", ELECTRON_MASS_EV)
| 0 | 0 | 0 |
de3439a6e6098b977a450500cb7f406a47950bae | 1,950 | py | Python | app/main_restplus.py | hazxone/face_recognition | e015288b49941b4c81600c76464673e946245a94 | [
"MIT"
] | null | null | null | app/main_restplus.py | hazxone/face_recognition | e015288b49941b4c81600c76464673e946245a94 | [
"MIT"
] | null | null | null | app/main_restplus.py | hazxone/face_recognition | e015288b49941b4c81600c76464673e946245a94 | [
"MIT"
] | null | null | null | '''
# GET /companies/
- list all companies
# POST /companies/
- Create new company
# DELETE /companies/{company_id}
- Delete a company by company_id
# GET /companies/{company_id}
- Get list of ic in company
# GET /companies/{company_id}/ic/
- Get list of ic in company
# POST /companies/{company_id}/ic/{ic_number}/
- Add new ic to company
# DELETE /companies/{company_id}/ic/{ic_number}/
- Delete ic in company_id
'''
from flask import Flask
from flask_restplus import Api, Resource, fields
from web_utils import *
app = Flask(__name__)
api = Api(app)

# flask-restplus marshalling models for the responses below.
company_model = api.model('List of Companies', {'company_id' : fields.Integer('Company ID')})

ic_list = api.model('IC List', {
    'ic': fields.String('IC number')
})

# A company id together with the ICs registered under it.
company_ic_model = api.model('List of ICs', {
    'company_id': fields.Integer(required=True, description='Company ID'),
    'ic_list': fields.Nested(ic_list, description='List of IC in the company')
})
@api.route('/companies')
'''
def post(self):
'''
@api.route('/companies/<int:company_id>')
'''
@api.route('/companies/<int:company_id>/ic')
class IC(Resource):
def get(self):
@api.route('/companies/<int:company_id>/ic/<int:ic_number')
class ListIC(Resource):
def get(self):
'''
if __name__ == '__main__':
app.run(debug=True)
| 27.464789 | 93 | 0.671795 | '''
# GET /companies/
- list all companies
# POST /companies/
- Create new company
# DELETE /companies/{company_id}
- Delete a company by company_id
# GET /companies/{company_id}
- Get list of ic in company
# GET /companies/{company_id}/ic/
- Get list of ic in company
# POST /companies/{company_id}/ic/{ic_number}/
- Add new ic to company
# DELETE /companies/{company_id}/ic/{ic_number}/
- Delete ic in company_id
'''
from flask import Flask
from flask_restplus import Api, Resource, fields
from web_utils import *
app = Flask(__name__)
api = Api(app)

# flask-restplus marshalling models for the responses below.
company_model = api.model('List of Companies', {'company_id' : fields.Integer('Company ID')})

ic_list = api.model('IC List', {
    'ic': fields.String('IC number')
})

# A company id together with the ICs registered under it.
company_ic_model = api.model('List of ICs', {
    'company_id': fields.Integer(required=True, description='Company ID'),
    'ic_list': fields.Nested(ic_list, description='List of IC in the company')
})
@api.route('/companies')
class Companies(Resource):
    @api.marshal_with(company_model, envelope='list_of_companies', mask=None)
    def get(self):
        """List all company ids (200)."""
        # get_list_companies() returns (ids, base_dir); ids are strings.
        c_list, _ = get_list_companies()
        return [{'company_id': int(c)} for c in c_list], 200
        # return c_list, 200
'''
def post(self):
'''
@api.route('/companies/<int:company_id>')
class ListCompanies(Resource):
    # def delete(self):
    @api.marshal_list_with(company_ic_model, mask=None)
    def get(self, company_id):
        """List the ICs stored for one company (200)."""
        c_list, base_url = get_list_companies()
        # NOTE(review): listdir runs *before* the existence check, so an
        # unknown company_id raises FileNotFoundError instead of a 404; and
        # when the check fails the method implicitly returns None.
        ic_list = os.listdir(os.path.join(base_url, str(company_id)))
        if str(company_id) in c_list:
            return {'company_id':company_id, 'ic_list':[{'ic': ic } for ic in ic_list]}, 200
'''
@api.route('/companies/<int:company_id>/ic')
class IC(Resource):
def get(self):
@api.route('/companies/<int:company_id>/ic/<int:ic_number')
class ListIC(Resource):
def get(self):
'''
if __name__ == '__main__':
app.run(debug=True)
| 379 | 224 | 44 |
f8e4d9b8b5905b68e4ef183cf0bab9664f046fce | 6,318 | py | Python | src/deepke/attribution_extraction/standard/tools/preprocess.py | hphphp123321/DeepKE | 94b39a20db0d848ccea81ea56fef4587ac31e2bc | [
"MIT"
] | 676 | 2018-08-07T03:01:32.000Z | 2021-07-31T02:08:06.000Z | src/deepke/attribution_extraction/standard/tools/preprocess.py | 807953261/DeepKE | f7efd3fc87d3bf88783a41efc3c09dca7a986013 | [
"MIT"
] | 66 | 2019-06-09T12:14:31.000Z | 2021-07-27T05:54:35.000Z | src/deepke/attribution_extraction/standard/tools/preprocess.py | 807953261/DeepKE | f7efd3fc87d3bf88783a41efc3c09dca7a986013 | [
"MIT"
] | 183 | 2018-09-07T06:57:13.000Z | 2021-08-01T08:50:15.000Z | import os
import logging
from collections import OrderedDict
from typing import List, Dict
from transformers import BertTokenizer
from .serializer import Serializer
from .vocab import Vocab
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from utils import save_pkl, load_csv
logger = logging.getLogger(__name__)
__all__ = [
"_handle_pos_limit",
"_add_pos_seq",
"_convert_tokens_into_index",
"_serialize_sentence",
"_lm_serialize",
"_add_attribute_data",
"_handle_attribute_data",
"preprocess"
]
| 39.735849 | 120 | 0.652105 | import os
import logging
from collections import OrderedDict
from typing import List, Dict
from transformers import BertTokenizer
from .serializer import Serializer
from .vocab import Vocab
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from utils import save_pkl, load_csv
logger = logging.getLogger(__name__)
__all__ = [
"_handle_pos_limit",
"_add_pos_seq",
"_convert_tokens_into_index",
"_serialize_sentence",
"_lm_serialize",
"_add_attribute_data",
"_handle_attribute_data",
"preprocess"
]
def _handle_pos_limit(pos: List[int], limit: int) -> List[int]:
for i,p in enumerate(pos):
if p > limit:
pos[i] = limit
if p < -limit:
pos[i] = -limit
return [p + limit + 1 for p in pos]
def _add_pos_seq(train_data: List[Dict], cfg):
    """Add relative-position features (and PCNN segment ids) to each sample.

    ``entity_pos`` / ``attribute_value_pos`` hold each token's clipped
    relative distance to the entity / attribute-value mention. For the
    (P)CNN model, ``entities_pos`` splits the sentence into three segments:
    before the first mention, between the mentions, and after the second.

    Fix: the original conditional expression produced the same (unsorted)
    pair in both branches, so the PCNN segmentation was wrong whenever the
    attribute value preceded the entity; the indices are now truly sorted.
    """
    for d in train_data:
        # Ascending pair of mention indices.
        if d['entity_index'] < d['attribute_value_index']:
            entities_idx = [d['entity_index'], d['attribute_value_index']]
        else:
            entities_idx = [d['attribute_value_index'], d['entity_index']]
        d['entity_pos'] = list(map(lambda i: i - d['entity_index'], list(range(d['seq_len']))))
        d['entity_pos'] = _handle_pos_limit(d['entity_pos'], int(cfg.pos_limit))
        d['attribute_value_pos'] = list(map(lambda i: i - d['attribute_value_index'], list(range(d['seq_len']))))
        d['attribute_value_pos'] = _handle_pos_limit(d['attribute_value_pos'], int(cfg.pos_limit))
        if cfg.model_name == 'cnn':
            if cfg.use_pcnn:
                # Segment ids: 1 = up to first mention, 2 = between, 3 = after.
                d['entities_pos'] = [1] * (entities_idx[0] + 1) + [2] * (entities_idx[1] - entities_idx[0] - 1) +\
                                    [3] * (d['seq_len'] - entities_idx[1])
def _convert_tokens_into_index(data: List[Dict], vocab):
unk_str = '[UNK]'
unk_idx = vocab.word2idx[unk_str]
for d in data:
d['token2idx'] = [vocab.word2idx.get(i, unk_idx) for i in d['tokens']]
d['seq_len'] = len(d['token2idx'])
def _serialize_sentence(data: List[Dict], serial, cfg):
for d in data:
sent = d['sentence'].strip()
snet = sent.replace(d['entity'] , ' entity ' , 1).replace(d['attribute_value'] , ' attribute_value ' , 1)
d['tokens'] = serial(sent, never_split=['entity','attribute_value'])
entity_index, attribute_value_index = d['entity_offset'] , d['attribute_value_offset']
d['entity_index'],d['attribute_value_index'] = int(entity_index) , int(attribute_value_index)
def _lm_serialize(data: List[Dict], cfg):
    """Tokenize samples with a pretrained BERT tokenizer.

    Each sample becomes '<sentence>[SEP]<entity>[SEP]<attribute_value>',
    encoded to wordpiece ids with special tokens; seq_len is recorded.
    """
    logger.info('use bert tokenizer...')
    tokenizer = BertTokenizer.from_pretrained(cfg.lm_file)
    for d in data:
        sent = d['sentence'].strip()
        sent += '[SEP]' + d['entity'] + '[SEP]' + d['attribute_value']
        d['token2idx'] = tokenizer.encode(sent, add_special_tokens=True)
        d['seq_len'] = len(d['token2idx'])
def _add_attribute_data(atts: Dict, data: List) -> None:
for d in data:
d['att2idx'] = atts[d['attribute']]['index']
def _handle_attribute_data(attribute_data: List[Dict]) -> Dict:
atts = OrderedDict()
attribute_data = sorted(attribute_data, key=lambda i: int(i['index']))
for d in attribute_data:
atts[d['attribute']] = {
'index': int(d['index'])
}
return atts
def preprocess(cfg):
    """End-to-end data preprocessing for attribution extraction.

    Loads train/valid/test/attribute CSVs from cfg.data_path, converts
    attributes to indices, tokenizes (BERT for 'lm', otherwise the custom
    Serializer + vocabulary), adds position features, and pickles the
    results (plus the vocab, for non-LM models) into cfg.out_path.
    """
    logger.info('===== start preprocess data =====')
    train_fp = os.path.join(cfg.cwd, cfg.data_path, 'train.csv')
    valid_fp = os.path.join(cfg.cwd, cfg.data_path, 'valid.csv')
    test_fp = os.path.join(cfg.cwd, cfg.data_path, 'test.csv')
    attribute_fp = os.path.join(cfg.cwd, cfg.data_path, 'attribute.csv')
    logger.info('load raw files...')
    train_data = load_csv(train_fp)
    valid_data = load_csv(valid_fp)
    test_data = load_csv(test_fp)
    attribute_data = load_csv(attribute_fp)
    logger.info('convert attribution into index...')
    atts = _handle_attribute_data(attribute_data)
    _add_attribute_data(atts,train_data)
    _add_attribute_data(atts,test_data)
    _add_attribute_data(atts,valid_data)
    logger.info('verify whether use pretrained language models...')
    if cfg.model_name == 'lm':
        # LM path: BERT wordpieces; no vocabulary is built.
        logger.info('use pretrained language models serialize sentence...')
        _lm_serialize(train_data, cfg)
        _lm_serialize(valid_data, cfg)
        _lm_serialize(test_data, cfg)
    else:
        logger.info('serialize sentence into tokens...')
        serializer = Serializer(do_chinese_split=cfg.chinese_split, do_lower_case=True)
        serial = serializer.serialize
        _serialize_sentence(train_data, serial, cfg)
        _serialize_sentence(valid_data, serial, cfg)
        _serialize_sentence(test_data, serial, cfg)
    logger.info('build vocabulary...')
    # NOTE: the vocabulary is built over all three splits (train+valid+test).
    vocab = Vocab('word')
    train_tokens = [d['tokens'] for d in train_data]
    valid_tokens = [d['tokens'] for d in valid_data]
    test_tokens = [d['tokens'] for d in test_data]
    sent_tokens = [*train_tokens, *valid_tokens, *test_tokens]
    for sent in sent_tokens:
        vocab.add_words(sent)
    vocab.trim(min_freq=cfg.min_freq)
    _convert_tokens_into_index(train_data, vocab)
    _convert_tokens_into_index(valid_data, vocab)
    _convert_tokens_into_index(test_data, vocab)
    logger.info('build position sequence...')
    _add_pos_seq(train_data, cfg)
    _add_pos_seq(valid_data, cfg)
    _add_pos_seq(test_data, cfg)
    logger.info('save data for backup...')
    os.makedirs(os.path.join(cfg.cwd, cfg.out_path), exist_ok=True)
    train_save_fp = os.path.join(cfg.cwd, cfg.out_path, 'train.pkl')
    valid_save_fp = os.path.join(cfg.cwd, cfg.out_path, 'valid.pkl')
    test_save_fp = os.path.join(cfg.cwd, cfg.out_path, 'test.pkl')
    save_pkl(train_data, train_save_fp)
    save_pkl(valid_data, valid_save_fp)
    save_pkl(test_data, test_save_fp)
    if cfg.model_name != 'lm':
        # Non-LM models need the vocab at train time; also dump a readable txt.
        vocab_save_fp = os.path.join(cfg.cwd, cfg.out_path, 'vocab.pkl')
        vocab_txt = os.path.join(cfg.cwd, cfg.out_path, 'vocab.txt')
        save_pkl(vocab, vocab_save_fp)
        logger.info('save vocab in txt file, for watching...')
        with open(vocab_txt, 'w', encoding='utf-8') as f:
            f.write(os.linesep.join(vocab.word2idx.keys()))
    logger.info('===== end preprocess data =====')
| 5,553 | 0 | 190 |
5edc2c6ddb5711b5e740c0c20e61715da9531459 | 2,055 | py | Python | analysis/src/python/data_analysis/utils/df_utils.py | eartser/hyperstyle-analyze | 58e2d361662e73e1e047919f57ab840055783b7a | [
"Apache-2.0"
] | 1 | 2022-03-15T09:46:06.000Z | 2022-03-15T09:46:06.000Z | analysis/src/python/data_analysis/utils/df_utils.py | eartser/hyperstyle-analyze | 58e2d361662e73e1e047919f57ab840055783b7a | [
"Apache-2.0"
] | 1 | 2022-02-14T13:53:38.000Z | 2022-02-14T13:53:38.000Z | analysis/src/python/data_analysis/utils/df_utils.py | eartser/hyperstyle-analyze | 58e2d361662e73e1e047919f57ab840055783b7a | [
"Apache-2.0"
] | 3 | 2022-02-13T16:49:53.000Z | 2022-02-17T13:53:07.000Z | from typing import Callable, Dict, List
import pandas as pd
from pandarallel import pandarallel
def _apply_to_row(row: pd.Series, column: str, func: Callable) -> pd.Series:
    """ Return a copy of `row` whose `column` value has been transformed by `func`. """
    updated = row.copy()
    updated[column] = func(updated[column])
    return updated
def apply(df: pd.DataFrame, column: str, func: Callable) -> pd.DataFrame:
    """ Apply `func` to the `column` value of every row in `df`, returning a new frame. """
    def _transform(row: pd.Series) -> pd.Series:
        return _apply_to_row(row, column, func)
    return df.apply(_transform, axis=1)
def parallel_apply(df: pd.DataFrame, column: str, func: Callable) -> pd.DataFrame:
    """ Apply `func` to the `column` value of every row in `df`, spread over 4 worker processes. """
    pandarallel.initialize(nb_workers=4, progress_bar=True)
    def _transform(row: pd.Series) -> pd.Series:
        return _apply_to_row(row, column, func)
    return df.parallel_apply(_transform, axis=1)
def rename_columns(df: pd.DataFrame, columns: Dict[str, str]) -> pd.DataFrame:
    """ Return a copy of `df` with columns renamed according to the `columns` mapping. """
    renamed = df.rename(columns=columns)
    return renamed
def drop_columns(df: pd.DataFrame, columns: List[str]) -> pd.DataFrame:
    """ Return a copy of `df` without the given `columns`. """
    # drop(columns=...) is equivalent to drop(labels=..., axis=1).
    return df.drop(columns=columns)
def merge_dfs(df_left: pd.DataFrame, df_right: pd.DataFrame, left_on: str, right_on: str, how='inner') -> pd.DataFrame:
    """ Merge two given dataframes on `left_on` = `right_on`, keeping the left-hand
    copy of any column name that appears in both frames. """
    merged = pd.merge(df_left, df_right, how=how, left_on=left_on, right_on=right_on, suffixes=('', '_extra'))
    duplicated = [name for name in merged.columns if name.endswith('_extra')]
    return merged.drop(columns=duplicated)
def read_df(path: str) -> pd.DataFrame:
    """ Load a dataframe from the .csv file at `path`. """
    loaded = pd.read_csv(path)
    return loaded
def write_df(df: pd.DataFrame, path: str):
    """ Save `df` to the .csv file at `path`, omitting the row index. """
    df.to_csv(path, index=False)
def append_df(df: pd.DataFrame, path: str):
    """ Append the rows of `df` (no header line) to the existing .csv file at `path`. """
    df.to_csv(path, index=False, mode='a', header=False)
| 32.109375 | 119 | 0.684672 | from typing import Callable, Dict, List
import pandas as pd
from pandarallel import pandarallel
def _apply_to_row(row: pd.Series, column: str, func: Callable) -> pd.Series:
""" Apply `func` to data in `column` of dataframe's `raw`. """
copy_row = row.copy()
copy_row[column] = func(copy_row[column])
return copy_row
def apply(df: pd.DataFrame, column: str, func: Callable) -> pd.DataFrame:
""" Apply `func` to data in `column` of dataframe `df`. """
return df.apply(lambda row: _apply_to_row(row, column, func), axis=1)
def parallel_apply(df: pd.DataFrame, column: str, func: Callable) -> pd.DataFrame:
""" Parallel apply `func` to data in `column` of dataframe `df`. """
pandarallel.initialize(nb_workers=4, progress_bar=True)
return df.parallel_apply(lambda raw: _apply_to_row(raw, column, func), axis=1)
def rename_columns(df: pd.DataFrame, columns: Dict[str, str]) -> pd.DataFrame:
""" Rename columns of given dataframe `df`. """
return df.rename(columns=columns)
def drop_columns(df: pd.DataFrame, columns: List[str]) -> pd.DataFrame:
""" Drop columns from given dataframe `df`. """
return df.drop(labels=columns, axis=1)
def merge_dfs(df_left: pd.DataFrame, df_right: pd.DataFrame, left_on: str, right_on: str, how='inner') -> pd.DataFrame:
""" Merge two given dataframes on `left_on` = `right_on`. Duplicated columns are removed. """
df_merged = pd.merge(df_left, df_right, how=how, left_on=left_on, right_on=right_on, suffixes=('', '_extra'))
df_merged.drop(df_merged.filter(regex='_extra$').columns.tolist(), axis=1, inplace=True)
return df_merged
def read_df(path: str) -> pd.DataFrame:
""" Read dataframe from given .csv file. """
return pd.read_csv(path)
def write_df(df: pd.DataFrame, path: str):
""" Write dataframe to given .csv file. """
df.to_csv(path, index=False)
def append_df(df: pd.DataFrame, path: str):
""" Append data to dataframe by given .csv file. """
df.to_csv(path, index=False, mode='a', header=False)
| 0 | 0 | 0 |
a7c44c90af19ed84eb25c5b73c35daeb76b01bf2 | 988 | py | Python | office_hours/utils.py | aiventimptner/farafmb.de | 691ea0330b4b0f6e9984d75169132d5d66bb7368 | [
"MIT"
] | 1 | 2017-04-06T09:12:45.000Z | 2017-04-06T09:12:45.000Z | office_hours/utils.py | aiventimptner/farafmb.de | 691ea0330b4b0f6e9984d75169132d5d66bb7368 | [
"MIT"
] | 2 | 2017-09-07T22:09:50.000Z | 2020-06-09T14:46:30.000Z | office_hours/utils.py | aiventimptner/farafmb.de | 691ea0330b4b0f6e9984d75169132d5d66bb7368 | [
"MIT"
] | null | null | null | import math
from datetime import time, timedelta, datetime, date
from typing import List
def time_to_seconds(time_: time) -> int:
    """Return total seconds from time object.

    Only the hour and minute fields are used; the seconds field of
    `time_` is ignored (same as the original implementation).
    """
    return (time_.hour * 60 + time_.minute) * 60
def seconds_to_time(seconds: int) -> time:
    """Return time object from total seconds."""
    total_minutes = int(seconds / 60)
    return time(int(total_minutes / 60), total_minutes % 60, seconds % 60)
def calc_max_step_size(obj_list: List[time]) -> int:
    """Return largest possible step size in seconds to include every time from list.

    The result is the greatest common divisor of the gaps between
    consecutive distinct times, i.e. the coarsest regular grid that still
    hits every given time.  Returns 0 when there are fewer than two
    distinct times (no gap exists).
    """
    unique_ordered_list = sorted(set(obj_list))
    # Convert each distinct time once instead of twice per pair.
    seconds = [time_to_seconds(t) for t in unique_ordered_list]
    diffs = [later - earlier for earlier, later in zip(seconds, seconds[1:])]
    # Fold gcd over the gaps; math.gcd(0, d) == d, so 0 is a neutral start.
    # This avoids math.gcd(*diffs), which requires Python 3.9+ for other
    # than exactly two arguments and raised TypeError on an empty list
    # (single distinct time) on older versions.
    step = 0
    for diff in diffs:
        step = math.gcd(step, diff)
    return step
| 29.939394 | 98 | 0.676113 | import math
from datetime import time, timedelta, datetime, date
from typing import List
def time_to_seconds(time_: time) -> int:
"""Return total seconds from time object."""
minutes = time_.hour * 60
seconds = (time_.minute + minutes) * 60
return seconds
def seconds_to_time(seconds: int) -> time:
"""Return time object from total seconds."""
seconds_off = seconds % 60
minutes = int(seconds / 60)
minutes_off = minutes % 60
hours = int(minutes / 60)
return time(hours, minutes_off, seconds_off)
def calc_max_step_size(obj_list: List[time]) -> int:
"""Return largest possible step size in seconds to include every time from list."""
unique_ordered_list = sorted(set(obj_list))
diffs = []
for i in range(len(unique_ordered_list)):
if i == 0:
continue
diff = time_to_seconds(unique_ordered_list[i]) - time_to_seconds(unique_ordered_list[i-1])
diffs.append(diff)
return math.gcd(*diffs)
| 0 | 0 | 0 |
0d74c33f8c31e07fb1098478dbf22098a1662735 | 1,044 | py | Python | VideoObjectDetection.py | KAN-RYU/OpenCV-tutorial | 76673e48a93a5cd452ec53110d9815ae339fc06d | [
"MIT"
] | 1 | 2019-01-23T06:57:02.000Z | 2019-01-23T06:57:02.000Z | VideoObjectDetection.py | KAN-RYU/OpenCV-tutorial | 76673e48a93a5cd452ec53110d9815ae339fc06d | [
"MIT"
] | null | null | null | VideoObjectDetection.py | KAN-RYU/OpenCV-tutorial | 76673e48a93a5cd452ec53110d9815ae339fc06d | [
"MIT"
] | null | null | null | import cv2
import numpy as np
cap = cv2.VideoCapture("images/Circle.mp4")
# 옵션 설명 http://layer0.authentise.com/segment-background-using-computer-vision.html
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=500, detectShadows=0)
while(1):
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
for index, centroid in enumerate(centroids):
if stats[index][0] == 0 and stats[index][1] == 0:
continue
if np.any(np.isnan(centroid)):
continue
x, y, width, height, area = stats[index]
centerX, centerY = int(centroid[0]), int(centroid[1])
if area > 100:
cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
cv2.imshow('mask',fgmask)
cv2.imshow('frame',frame)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows() | 23.2 | 89 | 0.621648 | import cv2
import numpy as np
# Background-subtraction demo: detect moving blobs in a video, draw each
# blob's centroid and bounding box, and display until ESC is pressed.
cap = cv2.VideoCapture("images/Circle.mp4")
# Option explanation (translated from Korean):
# http://layer0.authentise.com/segment-background-using-computer-vision.html
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=500, detectShadows=0)
while(1):
    # NOTE(review): `ret` is never checked; at end of video cap.read()
    # returns (False, None) and fgbg.apply(None) would fail — confirm intended.
    ret, frame = cap.read()
    # Foreground mask: non-zero where the pixel deviates from the background model.
    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
    for index, centroid in enumerate(centroids):
        # Skip components whose bounding box starts at the origin —
        # presumably filters the background/full-frame component; verify.
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        # Skip degenerate components with undefined centroids.
        if np.any(np.isnan(centroid)):
            continue
        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])
        # Ignore small blobs (noise); 100-pixel area threshold appears empirical.
        if area > 100:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
    cv2.imshow('mask',fgmask)
    cv2.imshow('frame',frame)
    # Wait ~30 ms per frame; exit on ESC (key code 27).
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
1a5ddb09f7621759c417ab1a115aaf6fc6222ace | 3,987 | py | Python | simple_ast.py | jjrscott/simple_ast | 1a561f2aacae40d677707aa07a98d8fd395262d3 | [
"MIT"
] | null | null | null | simple_ast.py | jjrscott/simple_ast | 1a561f2aacae40d677707aa07a98d8fd395262d3 | [
"MIT"
] | null | null | null | simple_ast.py | jjrscott/simple_ast | 1a561f2aacae40d677707aa07a98d8fd395262d3 | [
"MIT"
] | null | null | null |
# `$ python3 simple_ast.py --help` for more information
# MIT License
#
# Copyright (c) 2020 John Scott
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import json
import re
if __name__ == "__main__":
main()
| 33.788136 | 144 | 0.64911 |
# `$ python3 simple_ast.py --help` for more information
# MIT License
#
# Copyright (c) 2020 John Scott
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import json
import re
def main():
    """Parse CLI arguments, build an AST for each input file, emit JSON."""
    # RawDescriptionHelpFormatter preserves the hand-formatted epilog below.
    parser = argparse.ArgumentParser(description='Generate a simple abstract syntax tree from the given files', epilog="""
Parsing rules
    This parser uses three values:
    bounds  A dictionary of start and end tokens. If the program finds a start
            token it will push a new array on the stack and continue. When it
            finds the corresponding end token the program will pop the array off
            the stack and continue.
    extra   An array of tokens that don't push or pop when found (unless they're
            in the bounds).
    strip   An array of tokens that will be removed from the output.
    Example rules:
    {
      "bounds": { "(": ")" },
      "extra": [ "-", "+", "*", "/", "%" ],
      "strip": [ "\n", " " ]
    }
""", formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('input', nargs='+', help='Files to be parsed')
    parser.add_argument('--output', default='-', help='Location to save the AST')
    parser.add_argument('--rules', help='A JSON file containing the parsing rules')
    args = parser.parse_args()
    # Load user-supplied parsing rules, if any.
    rules = {}
    if args.rules:
        with open(args.rules, 'r') as f:
            rules = json.load(f)
    # Fill in defaults for any missing rule section.
    if 'bounds' not in rules:
        rules['bounds'] = {}
    if 'extra' not in rules:
        rules['extra'] = ['\n']
    if 'strip' not in rules:
        rules['strip'] = []
    # Write the normalized rules back so the file records the defaults used.
    if args.rules:
        with open(args.rules, "w") as file:
            file.write(json.dumps(rules, sort_keys=True, indent=2))
    # Parse every input file into {path: ast}.
    ast = {}
    for input_path in args.input:
        with open(input_path, 'r') as file:
            text = file.read()
            # Stripped tokens must still split the text, hence extra+strip.
            ast[input_path] = generate_ast(text, bounds=rules['bounds'], extra=rules['extra']+rules['strip'], strip=rules['strip'])
    # For a single input, emit its AST directly rather than a one-key map.
    if len(ast) == 1:
        ast = list(ast.values())[0]
    outputContent = json.dumps(ast, sort_keys=True, indent=2)
    # '-' (the default) means write to stdout.
    if args.output != '-':
        with open(args.output, "w") as file:
            file.write(outputContent)
    else:
        print(outputContent)
def generate_ast(text, bounds=None, extra=None, strip=None):
    """Tokenize `text` and nest the tokens into lists according to `bounds`.

    Args:
        text: the source text to parse.
        bounds: dict mapping start tokens to their matching end tokens; a
            start token opens a nested list which its end token closes.
        extra: tokens that split the text but do not open or close a level.
        strip: tokens removed from the output (they still split the text).

    Returns:
        A list of tokens and nested lists (the root of the AST).
    """
    # None sentinels instead of the original mutable default arguments.
    if bounds is None:
        bounds = {}
    if extra is None:
        extra = ['\n']
    if strip is None:
        strip = ['\n']
    # Reverse sorting puts longer tokens before their prefixes (e.g. '=='
    # before '='), so the regex alternation prefers the longest match.
    all_tokens = sorted(list(bounds.keys()) + list(bounds.values()) + extra, reverse=True)
    boundingTokenRegex = '|'.join('(' + re.escape(token) + ')' for token in all_tokens)
    tokens = re.compile(boundingTokenRegex).split(text)
    stack = [[]]
    for token in tokens:
        # re.split yields '' between adjacent separators and None for
        # alternation groups that did not participate in a match.
        if not token:
            continue
        if token in bounds:
            frame = []
            stack[-1].append(frame)
            stack.append(frame)
        if token not in strip:
            stack[-1].append(token)
        top = stack[-1]
        # Close the current frame when this token is the end token matching
        # the frame's opening token.  The `top` (non-empty) guard fixes an
        # IndexError in the original when an opening token was also listed
        # in `strip`, leaving the frame empty.
        if (len(stack) > 1 and top and isinstance(top[0], str)
                and top[0] in bounds and token == bounds[top[0]]):
            stack.pop()
    return stack[0]
if __name__ == "__main__":
main()
| 2,689 | 0 | 46 |
10e3ff1f48fd4d01652be7da91ddd14fa6bcfb56 | 193 | py | Python | src/bilder/facts/has_systemd.py | mitodl/ol-configuration-management | 39152a4637b219fbb00c8a6c65864532c7da295f | [
"BSD-3-Clause"
] | 25 | 2020-07-10T21:05:43.000Z | 2022-03-09T03:55:30.000Z | src/bilder/facts/has_systemd.py | mitodl/ol-infrastructure | f09912e39ff280575964a4df7c004fde58912636 | [
"BSD-3-Clause"
] | 423 | 2020-06-23T18:00:43.000Z | 2022-03-31T17:44:08.000Z | src/bilder/facts/has_systemd.py | mitodl/ol-infrastructure | f09912e39ff280575964a4df7c004fde58912636 | [
"BSD-3-Clause"
] | null | null | null | from pyinfra.api import FactBase
| 21.444444 | 50 | 0.663212 | from pyinfra.api import FactBase
class HasSystemd(FactBase):
command = "/bin/which systemd || echo 'false'"
def process(self, output):
return "false" not in ",".join(output)
| 52 | 84 | 23 |
e145186692e406b8ba298d64061018c2f57cfa07 | 1,475 | py | Python | devvyn/cache/decorator.py | devvyn/knowledge-mapper | 441d34db04c8ca8892dade2a64983635e39b728c | [
"MIT"
] | 1 | 2019-11-21T17:48:52.000Z | 2019-11-21T17:48:52.000Z | devvyn/cache/decorator.py | devvyn/usask-scrape-course-prerequisites | 441d34db04c8ca8892dade2a64983635e39b728c | [
"MIT"
] | 8 | 2019-10-07T05:31:42.000Z | 2019-11-29T01:31:02.000Z | devvyn/cache/decorator.py | devvyn/knowledge-mapper | 441d34db04c8ca8892dade2a64983635e39b728c | [
"MIT"
] | null | null | null | """
File-based cache for text retrieved from URL-based resources.
"""
import typing
from typeguard import typechecked
from devvyn.cache.web_cache import WebCache
StringFunction = typing.Callable[[str], str]
@typechecked
def cached(function: StringFunction) -> StringFunction:
"""
Wrap the decorated function in a cache handler.
Example:
```
import requests
@cached
def get_content(url: str) -> str:
return requests.get(url).text
content_fresh = get_content('https://example.com/') # save file to
cache after fetching
content_again = get_content('https://example.com/') # load file from
cache instead of fetching
```
:param function: the URL fetch function to wrap
:return: Wrapped function
"""
if not callable(function):
raise TypeError(
f'`function` is type {type(function)}, but it must be callable.')
cache = WebCache()
def wrapped(key: str) -> str:
"""
Attempt to get the value stored in `key`, and if the key doesn't
exist, store it with the value returned from `function` before
returning it.
:param key: URL specifying the document location, which also
identifies the page in the cache
:return: Page content
"""
try:
return cache.get(key)
except KeyError:
text = function(key)
cache.set(key, text)
return text
return wrapped
| 25 | 77 | 0.633898 | """
File-based cache for text retrieved from URL-based resources.
"""
import typing
from typeguard import typechecked
from devvyn.cache.web_cache import WebCache
StringFunction = typing.Callable[[str], str]
@typechecked
def cached(function: StringFunction) -> StringFunction:
"""
Wrap the decorated function in a cache handler.
Example:
```
import requests
@cached
def get_content(url: str) -> str:
return requests.get(url).text
content_fresh = get_content('https://example.com/') # save file to
cache after fetching
content_again = get_content('https://example.com/') # load file from
cache instead of fetching
```
:param function: the URL fetch function to wrap
:return: Wrapped function
"""
if not callable(function):
raise TypeError(
f'`function` is type {type(function)}, but it must be callable.')
cache = WebCache()
def wrapped(key: str) -> str:
"""
Attempt to get the value stored in `key`, and if the key doesn't
exist, store it with the value returned from `function` before
returning it.
:param key: URL specifying the document location, which also
identifies the page in the cache
:return: Page content
"""
try:
return cache.get(key)
except KeyError:
text = function(key)
cache.set(key, text)
return text
return wrapped
| 0 | 0 | 0 |
d224ae3942d7fe2cf77b988495ef6e28a1bf48dc | 13,858 | py | Python | tools/pywebsocket/src/mod_pywebsocket/util.py | shs96c/web-platform-tests | 61acad6dd9bb99d32340eb41f5146de64f542359 | [
"BSD-3-Clause"
] | 5,964 | 2016-09-27T03:46:29.000Z | 2022-03-31T16:25:27.000Z | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py | w4454962/miniblink49 | b294b6eacb3333659bf7b94d670d96edeeba14c0 | [
"Apache-2.0"
] | 459 | 2016-09-29T00:51:38.000Z | 2022-03-07T14:37:46.000Z | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py | w4454962/miniblink49 | b294b6eacb3333659bf7b94d670d96edeeba14c0 | [
"Apache-2.0"
] | 1,006 | 2016-09-27T05:17:27.000Z | 2022-03-30T02:46:51.000Z | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket utilities.
"""
import array
import errno
# Import hash classes from a module available and recommended for each Python
# version and re-export those symbol. Use sha and md5 module in Python 2.4, and
# hashlib module in Python 2.6.
try:
import hashlib
md5_hash = hashlib.md5
sha1_hash = hashlib.sha1
except ImportError:
import md5
import sha
md5_hash = md5.md5
sha1_hash = sha.sha
import StringIO
import logging
import os
import re
import socket
import traceback
import zlib
try:
from mod_pywebsocket import fast_masking
except ImportError:
pass
def get_stack_trace():
"""Get the current stack trace as string.
This is needed to support Python 2.3.
TODO: Remove this when we only support Python 2.4 and above.
Use traceback.format_exc instead.
"""
out = StringIO.StringIO()
traceback.print_exc(file=out)
return out.getvalue()
def prepend_message_to_exception(message, exc):
"""Prepend message to the exception."""
exc.args = (message + str(exc),)
return
def __translate_interp(interp, cygwin_path):
"""Translate interp program path for Win32 python to run cygwin program
(e.g. perl). Note that it doesn't support path that contains space,
which is typically true for Unix, where #!-script is written.
For Win32 python, cygwin_path is a directory of cygwin binaries.
Args:
interp: interp command line
cygwin_path: directory name of cygwin binary, or None
Returns:
translated interp command line.
"""
if not cygwin_path:
return interp
m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
if m:
cmd = os.path.join(cygwin_path, m.group(1))
return cmd + m.group(2)
return interp
def get_script_interp(script_path, cygwin_path=None):
"""Gets #!-interpreter command line from the script.
It also fixes command path. When Cygwin Python is used, e.g. in WebKit,
it could run "/usr/bin/perl -wT hello.pl".
When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix
"/usr/bin/perl" to "<cygwin_path>\perl.exe".
Args:
script_path: pathname of the script
cygwin_path: directory name of cygwin binary, or None
Returns:
#!-interpreter command line, or None if it is not #!-script.
"""
fp = open(script_path)
line = fp.readline()
fp.close()
m = re.match('^#!(.*)', line)
if m:
return __translate_interp(m.group(1), cygwin_path)
return None
def wrap_popen3_for_win(cygwin_path):
"""Wrap popen3 to support #!-script on Windows.
Args:
cygwin_path: path for cygwin binary if command path is needed to be
translated. None if no translation required.
"""
__orig_popen3 = os.popen3
os.popen3 = __wrap_popen3
class NoopMasker(object):
"""A masking object that has the same interface as RepeatedXorMasker but
just returns the string passed in without making any change.
"""
class RepeatedXorMasker(object):
"""A masking object that applies XOR on the string given to mask method
with the masking bytes given to the constructor repeatedly. This object
remembers the position in the masking bytes the last mask method call
ended and resumes from that point on the next mask method call.
"""
if 'fast_masking' in globals():
mask = _mask_using_swig
else:
mask = _mask_using_array
# By making wbits option negative, we can suppress CMF/FLG (2 octet) and
# ADLER32 (4 octet) fields of zlib so that we can use zlib module just as
# deflate library. DICTID won't be added as far as we don't set dictionary.
# LZ77 window of 32K will be used for both compression and decompression.
# For decompression, we can just use 32K to cover any windows size. For
# compression, we use 32K so receivers must use 32K.
#
# Compression level is Z_DEFAULT_COMPRESSION. We don't have to match level
# to decode.
#
# See zconf.h, deflate.cc, inflate.cc of zlib library, and zlibmodule.c of
# Python. See also RFC1950 (ZLIB 3.3).
# Compresses/decompresses given octets using the method introduced in RFC1979.
class _RFC1979Deflater(object):
"""A compressor class that applies DEFLATE to given byte sequence and
flushes using the algorithm described in the RFC1979 section 2.1.
"""
class _RFC1979Inflater(object):
"""A decompressor class for byte sequence compressed and flushed following
the algorithm described in the RFC1979 section 2.1.
"""
class DeflateSocket(object):
"""A wrapper class for socket object to intercept send and recv to perform
deflate compression and decompression transparently.
"""
# Size of the buffer passed to recv to receive compressed data.
_RECV_SIZE = 4096
def recv(self, size):
"""Receives data from the socket specified on the construction up
to the specified size. Once any data is available, returns it even
if it's smaller than the specified size.
"""
# TODO(tyoshino): Allow call with size=0. It should block until any
# decompressed data is available.
if size <= 0:
raise Exception('Non-positive size passed')
while True:
data = self._inflater.decompress(size)
if len(data) != 0:
return data
read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
if not read_data:
return ''
self._inflater.append(read_data)
# vi:sts=4 sw=4 et
| 33.232614 | 79 | 0.656011 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket utilities.
"""
import array
import errno
# Import hash classes from a module available and recommended for each Python
# version and re-export those symbol. Use sha and md5 module in Python 2.4, and
# hashlib module in Python 2.6.
try:
import hashlib
md5_hash = hashlib.md5
sha1_hash = hashlib.sha1
except ImportError:
import md5
import sha
md5_hash = md5.md5
sha1_hash = sha.sha
import StringIO
import logging
import os
import re
import socket
import traceback
import zlib
try:
from mod_pywebsocket import fast_masking
except ImportError:
pass
def get_stack_trace():
    """Return the current exception's stack trace as a string.

    Exists to support Python 2.3.
    TODO: Replace with traceback.format_exc once Python 2.4+ is the
    minimum supported version.
    """
    buf = StringIO.StringIO()
    traceback.print_exc(file=buf)
    return buf.getvalue()
def prepend_message_to_exception(message, exc):
    """Prefix the exception's textual representation with message, in place."""
    new_text = message + str(exc)
    exc.args = (new_text,)
    return
def __translate_interp(interp, cygwin_path):
    """Translate interp program path for Win32 python to run cygwin program
    (e.g. perl). Note that it doesn't support path that contains space,
    which is typically true for Unix, where #!-script is written.
    For Win32 python, cygwin_path is a directory of cygwin binaries.
    Args:
        interp: interp command line
        cygwin_path: directory name of cygwin binary, or None
    Returns:
        translated interp command line.
    """
    if not cygwin_path:
        return interp
    m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
    if m:
        cmd = os.path.join(cygwin_path, m.group(1))
        # m.group(2) is None when the interpreter takes no arguments
        # (e.g. '#!/usr/bin/perl'); the original 'cmd + m.group(2)'
        # raised TypeError in that case.
        return cmd + (m.group(2) or '')
    return interp
def get_script_interp(script_path, cygwin_path=None):
    """Return the #!-interpreter command line of a script, or None.

    Reads the first line of the file at script_path; when it is a shebang
    line, the interpreter command is returned after translating its path
    via __translate_interp (needed when Win32 Python runs cygwin
    programs, e.g. "/usr/bin/perl" -> "<cygwin_path>\\perl.exe").

    Args:
        script_path: pathname of the script
        cygwin_path: directory name of cygwin binary, or None
    """
    script = open(script_path)
    first_line = script.readline()
    script.close()
    shebang = re.match('^#!(.*)', first_line)
    if not shebang:
        return None
    return __translate_interp(shebang.group(1), cygwin_path)
def wrap_popen3_for_win(cygwin_path):
    """Wrap popen3 to support #!-script on Windows.
    Args:
        cygwin_path: path for cygwin binary if command path is needed to be
        translated. None if no translation required.
    """
    # Keep a reference to the real popen3 so the wrapper can delegate to it.
    __orig_popen3 = os.popen3
    def __wrap_popen3(cmd, mode='t', bufsize=-1):
        # If the command's program is a #!-script, prepend its interpreter
        # (path translated for cygwin) so Win32 Python can execute it.
        cmdline = cmd.split(' ')
        interp = get_script_interp(cmdline[0], cygwin_path)
        if interp:
            cmd = interp + ' ' + cmd
        return __orig_popen3(cmd, mode, bufsize)
    # Monkey-patch os.popen3 (Python 2 only) with the wrapper.
    os.popen3 = __wrap_popen3
def hexify(s):
    """Return s rendered as space-separated two-digit hex byte values."""
    return ' '.join(['%02x' % ord(ch) for ch in s])
def get_class_logger(o):
    """Return a logger named '<module>.<class>' for o's class."""
    cls = o.__class__
    return logging.getLogger('%s.%s' % (cls.__module__, cls.__name__))
class NoopMasker(object):
    """Masking object exposing the RepeatedXorMasker interface that hands
    the input back unchanged.
    """
    def __init__(self):
        pass
    def mask(self, s):
        """Return s without applying any mask."""
        return s
class RepeatedXorMasker(object):
    """A masking object that applies XOR on the string given to mask method
    with the masking bytes given to the constructor repeatedly. This object
    remembers the position in the masking bytes the last mask method call
    ended and resumes from that point on the next mask method call.
    """
    def __init__(self, masking_key):
        # Byte string cycled over the data being masked.
        self._masking_key = masking_key
        # Position within _masking_key where the next mask() call resumes.
        self._masking_key_index = 0
    def _mask_using_swig(self, s):
        # Delegate the XOR to the C extension; it does not mutate our
        # state, so advance the resume index here.
        masked_data = fast_masking.mask(
            s, self._masking_key, self._masking_key_index)
        self._masking_key_index = (
            (self._masking_key_index + len(s)) % len(self._masking_key))
        return masked_data
    def _mask_using_array(self, s):
        # Pure-Python fallback.  NOTE: Python 2 only — relies on
        # array.fromstring/tostring and xrange, and on map() returning a
        # list (indexed below).
        result = array.array('B')
        result.fromstring(s)
        # Use temporary local variables to eliminate the cost to access
        # attributes
        masking_key = map(ord, self._masking_key)
        masking_key_size = len(masking_key)
        masking_key_index = self._masking_key_index
        for i in xrange(len(result)):
            result[i] ^= masking_key[masking_key_index]
            masking_key_index = (masking_key_index + 1) % masking_key_size
        self._masking_key_index = masking_key_index
        return result.tostring()
    # Bind mask() to the SWIG-accelerated version when the optional
    # fast_masking extension imported successfully at module load time,
    # otherwise to the pure-Python fallback.
    if 'fast_masking' in globals():
        mask = _mask_using_swig
    else:
        mask = _mask_using_array
# By making wbits option negative, we can suppress CMF/FLG (2 octet) and
# ADLER32 (4 octet) fields of zlib so that we can use zlib module just as
# deflate library. DICTID won't be added as far as we don't set dictionary.
# LZ77 window of 32K will be used for both compression and decompression.
# For decompression, we can just use 32K to cover any windows size. For
# compression, we use 32K so receivers must use 32K.
#
# Compression level is Z_DEFAULT_COMPRESSION. We don't have to match level
# to decode.
#
# See zconf.h, deflate.cc, inflate.cc of zlib library, and zlibmodule.c of
# Python. See also RFC1950 (ZLIB 3.3).
class _Deflater(object):
    """Thin wrapper around a raw-deflate zlib compressor offering the
    flush strategies needed by the WebSocket deflate code paths.
    """
    def __init__(self, window_bits):
        self._logger = get_class_logger(self)
        # Negative wbits selects a raw deflate stream (no zlib header or
        # ADLER32 trailer) -- see the module-level comment above.
        self._compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)
    def _log(self, input_bytes, output_bytes):
        # Shared debug logging for all three compress variants.
        self._logger.debug('Compress input %r', input_bytes)
        self._logger.debug('Compress result %r', output_bytes)
    def compress(self, bytes):
        """Compress bytes without flushing the stream."""
        compressed = self._compress.compress(bytes)
        self._log(bytes, compressed)
        return compressed
    def compress_and_flush(self, bytes):
        """Compress bytes and emit a sync flush so output decodes now."""
        compressed = self._compress.compress(bytes) + self._compress.flush(
            zlib.Z_SYNC_FLUSH)
        self._log(bytes, compressed)
        return compressed
    def compress_and_finish(self, bytes):
        """Compress bytes and finalize the deflate stream."""
        compressed = self._compress.compress(bytes) + self._compress.flush(
            zlib.Z_FINISH)
        self._log(bytes, compressed)
        return compressed
# Incremental raw-DEFLATE decompressor. Input is fed with append() and
# pulled with decompress(); reset() starts a fresh zlib stream while
# keeping any not-yet-consumed input.
class _Inflater(object):
def __init__(self, window_bits):
self._logger = get_class_logger(self)
self._window_bits = window_bits
# Bytes appended but not yet handed to the zlib decompressor.
self._unconsumed = ''
self.reset()
# Returns up to 'size' decompressed bytes (size == -1 means "all that
# can be produced from the input appended so far"). May return fewer
# than 'size' bytes if the appended input is exhausted.
def decompress(self, size):
if not (size == -1 or size > 0):
raise Exception('size must be -1 or positive')
data = ''
while True:
if size == -1:
data += self._decompress.decompress(self._unconsumed)
# See Python bug http://bugs.python.org/issue12050 to
# understand why the same code cannot be used for updating
# self._unconsumed for here and else block.
self._unconsumed = ''
else:
data += self._decompress.decompress(
self._unconsumed, size - len(data))
self._unconsumed = self._decompress.unconsumed_tail
if self._decompress.unused_data:
# Encountered a last block (i.e. a block with BFINAL = 1) and
# found a new stream (unused_data). We cannot use the same
# zlib.Decompress object for the new stream. Create a new
# Decompress object to decompress the new one.
#
# It's fine to ignore unconsumed_tail if unused_data is not
# empty.
self._unconsumed = self._decompress.unused_data
self.reset()
if size >= 0 and len(data) == size:
# data is filled. Don't call decompress again.
break
else:
# Re-invoke Decompress.decompress to try to decompress all
# available bytes before invoking read which blocks until
# any new byte is available.
continue
else:
# Here, since unused_data is empty, even if unconsumed_tail is
# not empty, bytes of requested length are already in data. We
# don't have to "continue" here.
break
if data:
self._logger.debug('Decompressed %r', data)
return data
# Queues raw compressed bytes for a later decompress() call.
def append(self, data):
self._logger.debug('Appended %r', data)
self._unconsumed += data
# Starts a new raw-DEFLATE stream; pending _unconsumed input is kept.
def reset(self):
self._logger.debug('Reset')
self._decompress = zlib.decompressobj(-self._window_bits)
# Compresses/decompresses given octets using the method introduced in RFC1979.
class _RFC1979Deflater(object):
"""A compressor class that applies DEFLATE to given byte sequence and
flushes using the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self, window_bits, no_context_takeover):
# Created lazily in filter() so that no-context-takeover mode can
# start each message with a fresh LZ77 window.
self._deflater = None
if window_bits is None:
window_bits = zlib.MAX_WBITS
self._window_bits = window_bits
self._no_context_takeover = no_context_takeover
# Compresses 'bytes'. When end=True the 4-octet LEN/NLEN tail that
# Z_SYNC_FLUSH appends is stripped per RFC 1979 2.1. When bfinal=True
# the DEFLATE stream is terminated instead and a padding block added.
def filter(self, bytes, end=True, bfinal=False):
if self._deflater is None:
self._deflater = _Deflater(self._window_bits)
if bfinal:
result = self._deflater.compress_and_finish(bytes)
# Add a padding block with BFINAL = 0 and BTYPE = 0.
result = result + chr(0)
# The finished deflater cannot be reused; drop it.
self._deflater = None
return result
result = self._deflater.compress_and_flush(bytes)
if end:
# Strip last 4 octets which is LEN and NLEN field of a
# non-compressed block added for Z_SYNC_FLUSH.
result = result[:-4]
if self._no_context_takeover and end:
# Discard the compression context at each message boundary.
self._deflater = None
return result
class _RFC1979Inflater(object):
"""A decompressor class for byte sequence compressed and flushed following
the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self, window_bits=zlib.MAX_WBITS):
self._inflater = _Inflater(window_bits)
def filter(self, bytes):
# Restore stripped LEN and NLEN field of a non-compressed block added
# for Z_SYNC_FLUSH.
self._inflater.append(bytes + '\x00\x00\xff\xff')
# -1 means "decompress everything available in the appended input".
return self._inflater.decompress(-1)
class DeflateSocket(object):
"""A wrapper class for socket object to intercept send and recv to perform
deflate compression and decompression transparently.
"""
# Size of the buffer passed to recv to receive compressed data.
_RECV_SIZE = 4096
def __init__(self, socket):
self._socket = socket
self._logger = get_class_logger(self)
self._deflater = _Deflater(zlib.MAX_WBITS)
self._inflater = _Inflater(zlib.MAX_WBITS)
def recv(self, size):
"""Receives data from the socket specified on the construction up
to the specified size. Once any data is available, returns it even
if it's smaller than the specified size.
"""
# TODO(tyoshino): Allow call with size=0. It should block until any
# decompressed data is available.
if size <= 0:
raise Exception('Non-positive size passed')
while True:
# Drain already-buffered decompressed data first; only read from
# the socket when the inflater cannot produce anything.
data = self._inflater.decompress(size)
if len(data) != 0:
return data
read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
if not read_data:
# Peer closed the connection; propagate EOF as ''.
return ''
self._inflater.append(read_data)
def sendall(self, bytes):
self.send(bytes)
def send(self, bytes):
self._socket.sendall(self._deflater.compress_and_flush(bytes))
# Returns the uncompressed length, mirroring socket.send semantics.
return len(bytes)
# vi:sts=4 sw=4 et
| 6,112 | 6 | 659 |
280095da5d26572a0bea29e5ab3e55b2c31501d8 | 714 | py | Python | interview/leet/22_Generate_Parentheses.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2020-10-12T13:33:29.000Z | 2020-10-12T13:33:29.000Z | interview/leet/22_Generate_Parentheses.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | null | null | null | interview/leet/22_Generate_Parentheses.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2016-11-09T07:28:45.000Z | 2016-11-09T07:28:45.000Z | #!/usr/bin/env python
# Ad-hoc smoke test for Solution.generateParenthesis (the Solution class
# is defined elsewhere in this file).
sol = Solution()
print(sol.generateParenthesis(2))
print(sol.generateParenthesis(3))
ret = sol.generateParenthesis(4)
set1 = set(ret)
# Reference answer for n=4; the set difference below should print set().
set2 = set(["(((())))","((()()))","((())())","((()))()","(()(()))","(()()())","(()())()","(())(())","(())()()","()((()))","()(()())","()(())()","()()(())","()()()()"])
print(set2-set1)
# 14 distinct strings are expected for n=4 (the 4th Catalan number).
print(len(set(ret)))
print(sorted(ret))
| 31.043478 | 167 | 0.418768 | #!/usr/bin/env python
class Solution:
    def generateParenthesis(self, n):
        """Return every well-formed string of n pairs of parentheses.

        Recursively tracks how many '(' and ')' characters remain to be
        placed; a ')' may be placed only while strictly more ')' than '('
        remain (i.e. an unmatched '(' is already open).
        """
        def build(open_left, close_left):
            # No '(' left: the only valid completion is the remaining ')'s.
            if open_left == 0:
                return [')' * close_left]
            combos = ['(' + tail for tail in build(open_left - 1, close_left)]
            if close_left > open_left:
                combos.extend(
                    ')' + tail for tail in build(open_left, close_left - 1))
            return combos

        return build(n, n)
# Ad-hoc smoke test for Solution.generateParenthesis defined above.
sol = Solution()
print(sol.generateParenthesis(2))
print(sol.generateParenthesis(3))
ret = sol.generateParenthesis(4)
set1 = set(ret)
# Reference answer for n=4; the set difference below should print set().
set2 = set(["(((())))","((()()))","((())())","((()))()","(()(()))","(()()())","(()())()","(())(())","(())()()","()((()))","()(()())","()(())()","()()(())","()()()()"])
print(set2-set1)
# 14 distinct strings are expected for n=4 (the 4th Catalan number).
print(len(set(ret)))
print(sorted(ret))
| 289 | -6 | 49 |
360d9fbf49ff044a82696ba39ee373994cd089ed | 4,332 | py | Python | lib/surface/compute/instances/ops_agents/policies/describe.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/compute/instances/ops_agents/policies/describe.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/instances/ops_agents/policies/describe.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements command to describe an ops agents policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.compute.instances.ops_agents import exceptions as ops_agents_exceptions
from googlecloudsdk.api_lib.compute.instances.ops_agents.converters import guest_policy_to_ops_agents_policy_converter as to_ops_agents
from googlecloudsdk.api_lib.compute.instances.ops_agents.validators import guest_policy_validator
from googlecloudsdk.api_lib.compute.os_config import utils as osconfig_api_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.compute.instances.ops_agents.policies import parser_utils
from googlecloudsdk.command_lib.compute.os_config import utils as osconfig_command_utils
from googlecloudsdk.core import properties
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class Describe(base.DescribeCommand):
  """Describe a Google Cloud's operations suite agents (Ops Agents) policy.

  *{command}* describes a policy that facilitates agent management across
  Compute Engine instances based on user specified instance filters. This policy
  installs, specifies versioning, enables autoupgrade, and removes Ops Agents.

  The command returns the content of one policy. For instance:

    agent_rules:
    - enable_autoupgrade: true
      package_state: installed
      type: ops-agent
      version: latest
    assignment:
      group_labels:
      - app: myapp
        env: prod
      os_types:
      - short_name: ubuntu
        version: '18.04'
      zones:
      - us-central1-a
    create_time: '2021-02-02T02:10:25.344Z'
    description: A test policy to install agents
    etag: <ETAG>
    id: projects/<PROJECT_NUMBER>/guestPolicies/ops-agents-test-policy
    update_time: '2021-02-02T02:10:25.344Z'

  If no policies are found, it returns a ``NOT_FOUND'' error.
  """

  detailed_help = {
      'DESCRIPTION':
          '{description}',
      'EXAMPLES':
          """\
          To describe an Ops Agents policy named ``ops-agents-test-policy'' in
          the current project, run:

          $ {command} ops-agents-test-policy
          """,
  }

  @staticmethod
  def Args(parser):
    """See base class."""
    parser_utils.AddSharedArgs(parser)

  def Run(self, args):
    """See base class."""
    release_track = self.ReleaseTrack()
    project = properties.VALUES.core.project.GetOrFail()
    # Fully-qualified resource name of the guest policy to look up.
    policy_uri = osconfig_command_utils.GetGuestPolicyUriPath(
        'projects', project, args.POLICY_ID)

    api_client = osconfig_api_utils.GetClientInstance(
        release_track, api_version_override='v1beta')
    api_messages = osconfig_api_utils.GetClientMessages(
        release_track, api_version_override='v1beta')

    request = api_messages.OsconfigProjectsGuestPoliciesGetRequest(
        name=policy_uri)
    try:
      response = api_client.projects_guestPolicies.Get(request)
    except apitools_exceptions.HttpNotFoundError:
      raise ops_agents_exceptions.PolicyNotFoundError(policy_id=args.POLICY_ID)

    # A guest policy that exists but was not created by this surface is
    # reported as not found rather than exposed to the caller.
    if not guest_policy_validator.IsOpsAgentPolicy(response):
      raise ops_agents_exceptions.PolicyNotFoundError(policy_id=args.POLICY_ID)

    try:
      return to_ops_agents.ConvertGuestPolicyToOpsAgentPolicy(response)
    except calliope_exceptions.BadArgumentException:
      raise ops_agents_exceptions.PolicyMalformedError(policy_id=args.POLICY_ID)
| 38.336283 | 135 | 0.752078 | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements command to describe an ops agents policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.compute.instances.ops_agents import exceptions as ops_agents_exceptions
from googlecloudsdk.api_lib.compute.instances.ops_agents.converters import guest_policy_to_ops_agents_policy_converter as to_ops_agents
from googlecloudsdk.api_lib.compute.instances.ops_agents.validators import guest_policy_validator
from googlecloudsdk.api_lib.compute.os_config import utils as osconfig_api_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.compute.instances.ops_agents.policies import parser_utils
from googlecloudsdk.command_lib.compute.os_config import utils as osconfig_command_utils
from googlecloudsdk.core import properties
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class Describe(base.DescribeCommand):
  """Describe a Google Cloud's operations suite agents (Ops Agents) policy.

  *{command}* describes a policy that facilitates agent management across
  Compute Engine instances based on user specified instance filters. This policy
  installs, specifies versioning, enables autoupgrade, and removes Ops Agents.

  The command returns the content of one policy. For instance:

    agent_rules:
    - enable_autoupgrade: true
      package_state: installed
      type: ops-agent
      version: latest
    assignment:
      group_labels:
      - app: myapp
        env: prod
      os_types:
      - short_name: ubuntu
        version: '18.04'
      zones:
      - us-central1-a
    create_time: '2021-02-02T02:10:25.344Z'
    description: A test policy to install agents
    etag: <ETAG>
    id: projects/<PROJECT_NUMBER>/guestPolicies/ops-agents-test-policy
    update_time: '2021-02-02T02:10:25.344Z'

  If no policies are found, it returns a ``NOT_FOUND'' error.
  """

  detailed_help = {
      'DESCRIPTION':
          '{description}',
      'EXAMPLES':
          """\
          To describe an Ops Agents policy named ``ops-agents-test-policy'' in
          the current project, run:

          $ {command} ops-agents-test-policy
          """,
  }

  @staticmethod
  def Args(parser):
    """See base class."""
    parser_utils.AddSharedArgs(parser)

  def Run(self, args):
    """See base class."""
    release_track = self.ReleaseTrack()
    project = properties.VALUES.core.project.GetOrFail()
    # Fully-qualified resource name of the guest policy to look up.
    policy_uri = osconfig_command_utils.GetGuestPolicyUriPath(
        'projects', project, args.POLICY_ID)

    api_client = osconfig_api_utils.GetClientInstance(
        release_track, api_version_override='v1beta')
    api_messages = osconfig_api_utils.GetClientMessages(
        release_track, api_version_override='v1beta')

    request = api_messages.OsconfigProjectsGuestPoliciesGetRequest(
        name=policy_uri)
    try:
      response = api_client.projects_guestPolicies.Get(request)
    except apitools_exceptions.HttpNotFoundError:
      raise ops_agents_exceptions.PolicyNotFoundError(policy_id=args.POLICY_ID)

    # A guest policy that exists but was not created by this surface is
    # reported as not found rather than exposed to the caller.
    if not guest_policy_validator.IsOpsAgentPolicy(response):
      raise ops_agents_exceptions.PolicyNotFoundError(policy_id=args.POLICY_ID)

    try:
      return to_ops_agents.ConvertGuestPolicyToOpsAgentPolicy(response)
    except calliope_exceptions.BadArgumentException:
      raise ops_agents_exceptions.PolicyMalformedError(policy_id=args.POLICY_ID)
| 0 | 0 | 0 |
d9c95a352cb49a867bdb60a894a9a28d459071ff | 9,677 | py | Python | salt/modules/win_network.py | belvedere-trading/salt | 0ab0aa4d79237e9309dda6f685b34e55fda06258 | [
"Apache-2.0"
] | 2 | 2015-08-04T21:54:38.000Z | 2019-04-25T21:47:08.000Z | salt/modules/win_network.py | belvedere-trading/salt | 0ab0aa4d79237e9309dda6f685b34e55fda06258 | [
"Apache-2.0"
] | 1 | 2015-09-02T12:49:48.000Z | 2015-09-02T19:22:58.000Z | salt/modules/win_network.py | belvedere-trading/salt | 0ab0aa4d79237e9309dda6f685b34e55fda06258 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Module for gathering and managing network information
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
import hashlib
import datetime
import socket
import salt.utils.network
import salt.utils.validate.net
try:
import salt.utils.winapi
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
# Import 3rd party libraries
try:
import wmi # pylint: disable=W0611
except ImportError:
HAS_DEPENDENCIES = False
# Define the module's virtual name
__virtualname__ = 'network'
def __virtual__():
    '''
    Only works on Windows systems
    '''
    # Both the platform check and the optional wmi/winapi imports must
    # have succeeded for this module to load.
    if not salt.utils.is_windows():
        return False
    if not HAS_DEPENDENCIES:
        return False
    return __virtualname__
def ping(host):
    '''
    Performs a ping to a host

    CLI Example:

    .. code-block:: bash

        salt '*' network.ping archlinux.org
    '''
    # Windows ping: "-n 4" sends four echo requests.
    target = salt.utils.network.sanitize_host(host)
    return __salt__['cmd.run'](['ping', '-n', '4', target], python_shell=False)
def netstat():
    '''
    Return information on open ports and states

    CLI Example:

    .. code-block:: bash

        salt '*' network.netstat
    '''
    ret = []
    output = __salt__['cmd.run'](['netstat', '-nao'], python_shell=False)
    for line in output.splitlines():
        comps = line.split()
        if line.startswith(' TCP'):
            ret.append({
                'local-address': comps[1],
                'proto': comps[0],
                'remote-address': comps[2],
                'state': comps[3],
                'program': comps[4]})
        elif line.startswith(' UDP'):
            # UDP is connectionless: there is no state column, so the
            # PID shifts one field to the left.
            ret.append({
                'local-address': comps[1],
                'proto': comps[0],
                'remote-address': comps[2],
                'state': None,
                'program': comps[3]})
    return ret
def traceroute(host):
    '''
    Performs a traceroute to a 3rd party host

    Returns a list of hop dicts with keys ``count``, ``hostname``, ``ip``,
    ``ms1``, ``ms2`` and ``ms3``; fields that cannot be parsed are ``None``.

    CLI Example:

    .. code-block:: bash

        salt '*' network.traceroute archlinux.org
    '''
    ret = []
    cmd = ['tracert', salt.utils.network.sanitize_host(host)]
    lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    for line in lines:
        if ' ' not in line:
            continue
        if line.startswith('Trac'):
            continue
        if line.startswith('over'):
            continue
        comps = line.split()
        if not comps:
            # Whitespace-only lines pass the checks above but would raise
            # an IndexError when comps[0] is read below; skip them.
            continue
        complength = len(comps)
        # This method still needs to better catch rows of other lengths
        # For example if some of the ms returns are '*'
        if complength == 9:
            result = {
                'count': comps[0],
                'hostname': comps[7],
                'ip': comps[8],
                'ms1': comps[1],
                'ms2': comps[3],
                'ms3': comps[5]}
        elif complength == 8:
            # No hostname column; the IP is the last field.
            result = {
                'count': comps[0],
                'hostname': None,
                'ip': comps[7],
                'ms1': comps[1],
                'ms2': comps[3],
                'ms3': comps[5]}
        else:
            # Unrecognized row shape: keep the hop count only.
            result = {
                'count': comps[0],
                'hostname': None,
                'ip': None,
                'ms1': None,
                'ms2': None,
                'ms3': None}
        ret.append(result)
    return ret
def nslookup(host):
'''
Query DNS for information about a domain or ip address
CLI Example:
.. code-block:: bash
salt '*' network.nslookup archlinux.org
'''
ret = []
addresses = []
cmd = ['nslookup', salt.utils.network.sanitize_host(host)]
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
# Small state machine: once an 'Addresses:' header is seen, every
# following line is treated as another address of the final block.
for line in lines:
if addresses:
# We're in the last block listing addresses
addresses.append(line.strip())
continue
if line.startswith('Non-authoritative'):
continue
if 'Addresses' in line:
# First address appears on the same line as the header.
comps = line.split(":", 1)
addresses.append(comps[1].strip())
continue
if ":" in line:
# Generic 'Key: value' line (Server, Address, Name, ...).
comps = line.split(":", 1)
ret.append({comps[0].strip(): comps[1].strip()})
if addresses:
ret.append({'Addresses': addresses})
return ret
def dig(host):
    '''
    Performs a DNS lookup with dig

    Note: dig must be installed on the Windows minion

    CLI Example:

    .. code-block:: bash

        salt '*' network.dig archlinux.org
    '''
    # dig is not bundled with Windows; the raw command output is
    # returned unparsed.
    target = salt.utils.network.sanitize_host(host)
    return __salt__['cmd.run'](['dig', target], python_shell=False)
def interfaces_names():
    '''
    Return a list of all the interfaces names

    CLI Example:

    .. code-block:: bash

        salt '*' network.interfaces_names
    '''
    # COM must be initialized on this thread before WMI can be queried.
    with salt.utils.winapi.Com():
        wmi_conn = wmi.WMI()
        # Only adapters that are currently enabled are reported.
        return [iface.NetConnectionID
                for iface in wmi_conn.Win32_NetworkAdapter(NetEnabled=True)]
def interfaces():
'''
Return a dictionary of information about all the interfaces on the minion
CLI Example:
.. code-block:: bash
salt '*' network.interfaces
'''
# Delegates to the shared helper, which handles the Windows specifics.
return salt.utils.network.win_interfaces()
def hw_addr(iface):
'''
Return the hardware address (a.k.a. MAC address) for a given interface
CLI Example:
.. code-block:: bash
salt '*' network.hw_addr 'Wireless Connection #1'
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.hw_addr(iface)
# Alias hwaddr to preserve backward compat
hwaddr = hw_addr
def subnets():
'''
Returns a list of subnets to which the host belongs
CLI Example:
.. code-block:: bash
salt '*' network.subnets
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.subnets()
def in_subnet(cidr):
'''
Returns True if host is within specified subnet, otherwise False
CLI Example:
.. code-block:: bash
salt '*' network.in_subnet 10.0.0.0/16
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.in_subnet(cidr)
def ip_addrs(interface=None, include_loopback=False):
'''
Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
CLI Example:
.. code-block:: bash
salt '*' network.ip_addrs
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.ip_addrs(interface=interface,
include_loopback=include_loopback)
# Alias to preserve backward compat
ipaddrs = ip_addrs
def ip_addrs6(interface=None, include_loopback=False):
'''
Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
unless 'include_loopback=True' is indicated. If 'interface' is provided,
then only IP addresses from that interface will be returned.
CLI Example:
.. code-block:: bash
salt '*' network.ip_addrs6
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.ip_addrs6(interface=interface,
include_loopback=include_loopback)
# Alias to preserve backward compat
ipaddrs6 = ip_addrs6
def connect(host, port=None, **kwargs):
    '''
    Test connectivity to a host using a particular
    port from the minion.

    .. versionadded:: Boron

    CLI Example:

    .. code-block:: bash

        salt '*' network.connect archlinux.org 80

        salt '*' network.connect archlinux.org 80 timeout=3

        salt '*' network.connect archlinux.org 80 timeout=3 family=ipv4

        salt '*' network.connect google-public-dns-a.google.com port=53 proto=udp timeout=3
    '''
    ret = {'result': None,
           'comment': ''}

    if not host:
        ret['result'] = False
        ret['comment'] = 'Required argument, host, is missing.'
        return ret

    if not port:
        ret['result'] = False
        ret['comment'] = 'Required argument, port, is missing.'
        return ret

    proto = kwargs.get('proto', 'tcp')
    timeout = kwargs.get('timeout', 5)
    family = kwargs.get('family', None)

    if salt.utils.validate.net.ipv4_addr(host) or salt.utils.validate.net.ipv6_addr(host):
        address = host
    else:
        address = '{0}'.format(salt.utils.network.sanitize_host(host))

    # Seed _address so the failure message below can always be built;
    # previously a getaddrinfo failure left _address unbound and the
    # except block raised a NameError instead of reporting the error.
    _address = (address, port)
    try:
        if proto == 'udp':
            __proto = socket.SOL_UDP
        else:
            __proto = socket.SOL_TCP
            proto = 'tcp'

        if family:
            if family == 'ipv4':
                __family = socket.AF_INET
            elif family == 'ipv6':
                __family = socket.AF_INET6
            else:
                __family = 0
        else:
            __family = 0

        (family,
         socktype,
         _proto,
         garbage,
         _address) = socket.getaddrinfo(address, port, __family, 0, __proto)[0]

        skt = socket.socket(family, socktype, _proto)
        skt.settimeout(timeout)

        if proto == 'udp':
            # Send a short probe and wait for any reply; UDP has no
            # handshake, so this is best-effort. isoformat() replaces
            # strftime('%s'): '%s' is a glibc extension that is not
            # supported on Windows, which this module targets.
            md5h = hashlib.md5()
            md5h.update(datetime.datetime.now().isoformat().encode('ascii'))
            msg = md5h.hexdigest()
            # hexdigest is pure ASCII, so this encode is loss-free and
            # keeps sendto working on both str- and bytes-based sockets.
            skt.sendto(msg.encode('ascii'), _address)
            recv, svr = skt.recvfrom(255)
            skt.close()
        else:
            skt.connect(_address)
            skt.shutdown(2)
            # Release the descriptor; shutdown alone does not close it.
            skt.close()
    except Exception:
        ret['result'] = False
        ret['comment'] = 'Unable to connect to {0} ({1}) on {2} port {3}'\
            .format(host, _address[0], proto, port)
        return ret

    ret['result'] = True
    ret['comment'] = 'Successfully connected to {0} ({1}) on {2} port {3}'\
        .format(host, _address[0], proto, port)
    return ret
| 24.876607 | 91 | 0.563294 | # -*- coding: utf-8 -*-
'''
Module for gathering and managing network information
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
import hashlib
import datetime
import socket
import salt.utils.network
import salt.utils.validate.net
try:
import salt.utils.winapi
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
# Import 3rd party libraries
try:
import wmi # pylint: disable=W0611
except ImportError:
HAS_DEPENDENCIES = False
# Define the module's virtual name
__virtualname__ = 'network'
def __virtual__():
    '''
    Only works on Windows systems
    '''
    # Both the platform check and the optional wmi/winapi imports must
    # have succeeded for this module to load.
    if not salt.utils.is_windows():
        return False
    if not HAS_DEPENDENCIES:
        return False
    return __virtualname__
def ping(host):
    '''
    Performs a ping to a host

    CLI Example:

    .. code-block:: bash

        salt '*' network.ping archlinux.org
    '''
    # Windows ping: "-n 4" sends four echo requests.
    target = salt.utils.network.sanitize_host(host)
    return __salt__['cmd.run'](['ping', '-n', '4', target], python_shell=False)
def netstat():
    '''
    Return information on open ports and states

    CLI Example:

    .. code-block:: bash

        salt '*' network.netstat
    '''
    ret = []
    output = __salt__['cmd.run'](['netstat', '-nao'], python_shell=False)
    for line in output.splitlines():
        comps = line.split()
        if line.startswith(' TCP'):
            ret.append({
                'local-address': comps[1],
                'proto': comps[0],
                'remote-address': comps[2],
                'state': comps[3],
                'program': comps[4]})
        elif line.startswith(' UDP'):
            # UDP is connectionless: there is no state column, so the
            # PID shifts one field to the left.
            ret.append({
                'local-address': comps[1],
                'proto': comps[0],
                'remote-address': comps[2],
                'state': None,
                'program': comps[3]})
    return ret
def traceroute(host):
    '''
    Performs a traceroute to a 3rd party host

    Returns a list of hop dicts with keys ``count``, ``hostname``, ``ip``,
    ``ms1``, ``ms2`` and ``ms3``; fields that cannot be parsed are ``None``.

    CLI Example:

    .. code-block:: bash

        salt '*' network.traceroute archlinux.org
    '''
    ret = []
    cmd = ['tracert', salt.utils.network.sanitize_host(host)]
    lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    for line in lines:
        if ' ' not in line:
            continue
        if line.startswith('Trac'):
            continue
        if line.startswith('over'):
            continue
        comps = line.split()
        if not comps:
            # Whitespace-only lines pass the checks above but would raise
            # an IndexError when comps[0] is read below; skip them.
            continue
        complength = len(comps)
        # This method still needs to better catch rows of other lengths
        # For example if some of the ms returns are '*'
        if complength == 9:
            result = {
                'count': comps[0],
                'hostname': comps[7],
                'ip': comps[8],
                'ms1': comps[1],
                'ms2': comps[3],
                'ms3': comps[5]}
        elif complength == 8:
            # No hostname column; the IP is the last field.
            result = {
                'count': comps[0],
                'hostname': None,
                'ip': comps[7],
                'ms1': comps[1],
                'ms2': comps[3],
                'ms3': comps[5]}
        else:
            # Unrecognized row shape: keep the hop count only.
            result = {
                'count': comps[0],
                'hostname': None,
                'ip': None,
                'ms1': None,
                'ms2': None,
                'ms3': None}
        ret.append(result)
    return ret
def nslookup(host):
'''
Query DNS for information about a domain or ip address
CLI Example:
.. code-block:: bash
salt '*' network.nslookup archlinux.org
'''
ret = []
addresses = []
cmd = ['nslookup', salt.utils.network.sanitize_host(host)]
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
# Small state machine: once an 'Addresses:' header is seen, every
# following line is treated as another address of the final block.
for line in lines:
if addresses:
# We're in the last block listing addresses
addresses.append(line.strip())
continue
if line.startswith('Non-authoritative'):
continue
if 'Addresses' in line:
# First address appears on the same line as the header.
comps = line.split(":", 1)
addresses.append(comps[1].strip())
continue
if ":" in line:
# Generic 'Key: value' line (Server, Address, Name, ...).
comps = line.split(":", 1)
ret.append({comps[0].strip(): comps[1].strip()})
if addresses:
ret.append({'Addresses': addresses})
return ret
def dig(host):
    '''
    Performs a DNS lookup with dig

    Note: dig must be installed on the Windows minion

    CLI Example:

    .. code-block:: bash

        salt '*' network.dig archlinux.org
    '''
    # dig is not bundled with Windows; the raw command output is
    # returned unparsed.
    target = salt.utils.network.sanitize_host(host)
    return __salt__['cmd.run'](['dig', target], python_shell=False)
def interfaces_names():
    '''
    Return a list of all the interfaces names

    CLI Example:

    .. code-block:: bash

        salt '*' network.interfaces_names
    '''
    # COM must be initialized on this thread before WMI can be queried.
    with salt.utils.winapi.Com():
        wmi_conn = wmi.WMI()
        # Only adapters that are currently enabled are reported.
        return [iface.NetConnectionID
                for iface in wmi_conn.Win32_NetworkAdapter(NetEnabled=True)]
def interfaces():
'''
Return a dictionary of information about all the interfaces on the minion
CLI Example:
.. code-block:: bash
salt '*' network.interfaces
'''
# Delegates to the shared helper, which handles the Windows specifics.
return salt.utils.network.win_interfaces()
def hw_addr(iface):
'''
Return the hardware address (a.k.a. MAC address) for a given interface
CLI Example:
.. code-block:: bash
salt '*' network.hw_addr 'Wireless Connection #1'
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.hw_addr(iface)
# Alias hwaddr to preserve backward compat
hwaddr = hw_addr
def subnets():
'''
Returns a list of subnets to which the host belongs
CLI Example:
.. code-block:: bash
salt '*' network.subnets
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.subnets()
def in_subnet(cidr):
'''
Returns True if host is within specified subnet, otherwise False
CLI Example:
.. code-block:: bash
salt '*' network.in_subnet 10.0.0.0/16
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.in_subnet(cidr)
def ip_addrs(interface=None, include_loopback=False):
'''
Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
CLI Example:
.. code-block:: bash
salt '*' network.ip_addrs
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.ip_addrs(interface=interface,
include_loopback=include_loopback)
# Alias to preserve backward compat
ipaddrs = ip_addrs
def ip_addrs6(interface=None, include_loopback=False):
'''
Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
unless 'include_loopback=True' is indicated. If 'interface' is provided,
then only IP addresses from that interface will be returned.
CLI Example:
.. code-block:: bash
salt '*' network.ip_addrs6
'''
# Delegates to the shared cross-platform helper.
return salt.utils.network.ip_addrs6(interface=interface,
include_loopback=include_loopback)
# Alias to preserve backward compat
ipaddrs6 = ip_addrs6
def connect(host, port=None, **kwargs):
    '''
    Test connectivity to a host using a particular
    port from the minion.

    .. versionadded:: Boron

    CLI Example:

    .. code-block:: bash

        salt '*' network.connect archlinux.org 80

        salt '*' network.connect archlinux.org 80 timeout=3

        salt '*' network.connect archlinux.org 80 timeout=3 family=ipv4

        salt '*' network.connect google-public-dns-a.google.com port=53 proto=udp timeout=3
    '''
    ret = {'result': None,
           'comment': ''}

    if not host:
        ret['result'] = False
        ret['comment'] = 'Required argument, host, is missing.'
        return ret

    if not port:
        ret['result'] = False
        ret['comment'] = 'Required argument, port, is missing.'
        return ret

    proto = kwargs.get('proto', 'tcp')
    timeout = kwargs.get('timeout', 5)
    family = kwargs.get('family', None)

    if salt.utils.validate.net.ipv4_addr(host) or salt.utils.validate.net.ipv6_addr(host):
        address = host
    else:
        address = '{0}'.format(salt.utils.network.sanitize_host(host))

    # Seed _address so the failure message below can always be built;
    # previously a getaddrinfo failure left _address unbound and the
    # except block raised a NameError instead of reporting the error.
    _address = (address, port)
    try:
        if proto == 'udp':
            __proto = socket.SOL_UDP
        else:
            __proto = socket.SOL_TCP
            proto = 'tcp'

        if family:
            if family == 'ipv4':
                __family = socket.AF_INET
            elif family == 'ipv6':
                __family = socket.AF_INET6
            else:
                __family = 0
        else:
            __family = 0

        (family,
         socktype,
         _proto,
         garbage,
         _address) = socket.getaddrinfo(address, port, __family, 0, __proto)[0]

        skt = socket.socket(family, socktype, _proto)
        skt.settimeout(timeout)

        if proto == 'udp':
            # Send a short probe and wait for any reply; UDP has no
            # handshake, so this is best-effort. isoformat() replaces
            # strftime('%s'): '%s' is a glibc extension that is not
            # supported on Windows, which this module targets.
            md5h = hashlib.md5()
            md5h.update(datetime.datetime.now().isoformat().encode('ascii'))
            msg = md5h.hexdigest()
            # hexdigest is pure ASCII, so this encode is loss-free and
            # keeps sendto working on both str- and bytes-based sockets.
            skt.sendto(msg.encode('ascii'), _address)
            recv, svr = skt.recvfrom(255)
            skt.close()
        else:
            skt.connect(_address)
            skt.shutdown(2)
            # Release the descriptor; shutdown alone does not close it.
            skt.close()
    except Exception:
        ret['result'] = False
        ret['comment'] = 'Unable to connect to {0} ({1}) on {2} port {3}'\
            .format(host, _address[0], proto, port)
        return ret

    ret['result'] = True
    ret['comment'] = 'Successfully connected to {0} ({1}) on {2} port {3}'\
        .format(host, _address[0], proto, port)
    return ret
| 0 | 0 | 0 |
dc3a195bef8ce5e31f2367bff5171d939b46de19 | 260 | py | Python | tools/plot_knn.py | houwenbo87/DBSCAN | 3452d32186f2b59f2f1e515cebdf0ce15cb3e2f7 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2020-09-18T22:40:39.000Z | 2020-09-18T22:40:39.000Z | tools/plot_knn.py | conanhung/DBSCAN | 0bf4e6a83d61b83858f270dc5fbf78cd05ca3153 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | tools/plot_knn.py | conanhung/DBSCAN | 0bf4e6a83d61b83858f270dc5fbf78cd05ca3153 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2020-10-09T08:24:35.000Z | 2020-10-09T08:24:35.000Z | import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
if __name__ == '__main__':
main() | 20 | 74 | 0.665385 | import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Plot k-NN distances from a two-column CSV. Python 2 code (print
# statement below).
def main():
# Expects the CSV path as the first CLI argument; the two unnamed
# columns are read as (id, dist).
df = pd.read_csv(sys.argv[1], sep=',', header=None, names=['id', 'dist'])
print df.shape
df.plot(x='id', y='dist')
# Blocks until the plot window is closed.
plt.show()
if __name__ == '__main__':
main() | 120 | 0 | 23 |
f7411193c9e74da9972af45d8cd83b30e71b72c3 | 1,071 | py | Python | scripts/genSplitPDBlists.py | demattox/lec_gly_binding | 44a12445d3ed89029a21bed1c516a67bd86e0c68 | [
"MIT"
] | null | null | null | scripts/genSplitPDBlists.py | demattox/lec_gly_binding | 44a12445d3ed89029a21bed1c516a67bd86e0c68 | [
"MIT"
] | null | null | null | scripts/genSplitPDBlists.py | demattox/lec_gly_binding | 44a12445d3ed89029a21bed1c516a67bd86e0c68 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 16:23:55 2020
@author: dmattox
"""
import os
import dill
import lec_gly as LecGly
from bSiteResiFeatures import plipFile
os.chdir(LecGly.homeDir)
##########################
outDir = './data/structures/bsites/batchLists/'
if not os.path.exists(outDir):
os.makedirs(outDir)
##########################
maxBatchLstLength = 50 # generates 28 lists from 1365 PDB IDs, 27 lists of 50 and 1 of 15
##########################
with open(plipFile, "rb") as pickleFH:
allPLIP = dill.load(pickleFH)
def chunks(lst, n):  # Function borrowed from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    """Yield successive n-sized chunks from a list"""
    size = len(lst)
    for start in range(0, size, n):
        chunk = lst[start:start + n]
        yield chunk
splitLst = chunks(list(allPLIP.keys()), maxBatchLstLength)  # lazy generator of <= maxBatchLstLength-ID batches
# Write each batch of PDB IDs to its own numbered list file, one ID per line.
for i,lst in enumerate(splitLst):
    with open(outDir + 'pdbList_' + str(i) + '.txt', 'w') as outFH:
        for pdb in lst:
            outFH.write(pdb + '\n')
| 24.340909 | 136 | 0.61718 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 16:23:55 2020
@author: dmattox
"""
import os
import dill
import lec_gly as LecGly
from bSiteResiFeatures import plipFile
os.chdir(LecGly.homeDir)
##########################
outDir = './data/structures/bsites/batchLists/'
if not os.path.exists(outDir):
os.makedirs(outDir)
##########################
maxBatchLstLength = 50 # generates 28 lists from 1365 PDB IDs, 27 lists of 50 and 1 of 15
##########################
with open(plipFile, "rb") as pickleFH:
allPLIP = dill.load(pickleFH)
def chunks(lst, n):  # Function borrowed from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    """Yield successive n-sized chunks from a list"""
    for lower in range(0, len(lst), n):
        upper = lower + n
        yield lst[lower:upper]
splitLst = chunks(list(allPLIP.keys()), maxBatchLstLength)  # lazy generator of <= maxBatchLstLength-ID batches
# Write each batch of PDB IDs to its own numbered list file, one ID per line.
for i,lst in enumerate(splitLst):
    with open(outDir + 'pdbList_' + str(i) + '.txt', 'w') as outFH:
        for pdb in lst:
            outFH.write(pdb + '\n')
| 0 | 0 | 0 |
fad44c00159e65658ec515bf14d41b4a2585ac00 | 6,986 | py | Python | examples/generative/real_nvp.py | rickiepark/keras-io | 4100b155baf2934900a3262d67bb2a8b093a365b | [
"Apache-2.0"
] | 3 | 2021-03-23T13:12:25.000Z | 2022-02-25T17:25:43.000Z | examples/generative/real_nvp.py | rickiepark/keras-io | 4100b155baf2934900a3262d67bb2a8b093a365b | [
"Apache-2.0"
] | 1 | 2020-11-30T01:32:49.000Z | 2020-11-30T01:32:49.000Z | examples/generative/real_nvp.py | rickiepark/keras-io | 4100b155baf2934900a3262d67bb2a8b093a365b | [
"Apache-2.0"
] | 2 | 2020-11-19T17:52:50.000Z | 2020-11-19T17:52:57.000Z | """
Title: Density estimation using Real NVP
Authors: [Mandolini Giorgio Maria](https://www.linkedin.com/in/giorgio-maria-mandolini-a2a1b71b4/), [Sanna Daniele](https://www.linkedin.com/in/daniele-sanna-338629bb/), [Zannini Quirini Giorgio](https://www.linkedin.com/in/giorgio-zannini-quirini-16ab181a0/)
Date created: 2020/08/10
Last modified: 2020/08/10
Description: Estimating the density distribution of the "double moon" dataset.
"""
"""
## Introduction
The aim of this work is to map a simple distribution - which is easy to sample
and whose density is simple to estimate - to a more complex one learned from the data.
This kind of generative model is also known as "normalizing flow".
In order to do this, the model is trained via the maximum
likelihood principle, using the "change of variable" formula.
We will use an affine coupling function. We create it such that its inverse, as well as
the determinant of the Jacobian, are easy to obtain (more details in the referenced paper).
**Requirements:**
* Tensorflow 2.3
* Tensorflow probability 0.11.0
**Reference:**
[Density estimation using Real NVP](https://arxiv.org/pdf/1605.08803.pdf)
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from sklearn.datasets import make_moons
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
"""
## Load the data
"""
data = make_moons(3000, noise=0.05)[0].astype("float32")
norm = layers.experimental.preprocessing.Normalization()
norm.adapt(data)
normalized_data = norm(data)
"""
## Affine coupling layer
"""
# Creating a custom layer with keras API.
output_dim = 256
reg = 0.01
"""
## Real NVP
"""
"""
## Model training
"""
model = RealNVP(num_coupling_layers=6)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))
history = model.fit(
normalized_data, batch_size=256, epochs=300, verbose=2, validation_split=0.2
)
"""
## Performance evaluation
"""
plt.figure(figsize=(15, 10))
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.legend(["train", "validation"], loc="upper right")
plt.ylabel("loss")
plt.xlabel("epoch")
# From data to latent space.
z, _ = model(normalized_data)
# From latent space to data.
samples = model.distribution.sample(3000)
x, _ = model.predict(samples)
f, axes = plt.subplots(2, 2)
f.set_size_inches(20, 15)
axes[0, 0].scatter(normalized_data[:, 0], normalized_data[:, 1], color="r")
axes[0, 0].set(title="Inference data space X", xlabel="x", ylabel="y")
axes[0, 1].scatter(z[:, 0], z[:, 1], color="r")
axes[0, 1].set(title="Inference latent space Z", xlabel="x", ylabel="y")
axes[0, 1].set_xlim([-3.5, 4])
axes[0, 1].set_ylim([-4, 4])
axes[1, 0].scatter(samples[:, 0], samples[:, 1], color="g")
axes[1, 0].set(title="Generated latent space Z", xlabel="x", ylabel="y")
axes[1, 1].scatter(x[:, 0], x[:, 1], color="g")
axes[1, 1].set(title="Generated data space X", label="x", ylabel="y")
axes[1, 1].set_xlim([-2, 2])
axes[1, 1].set_ylim([-2, 2])
| 31.1875 | 259 | 0.670484 | """
Title: Density estimation using Real NVP
Authors: [Mandolini Giorgio Maria](https://www.linkedin.com/in/giorgio-maria-mandolini-a2a1b71b4/), [Sanna Daniele](https://www.linkedin.com/in/daniele-sanna-338629bb/), [Zannini Quirini Giorgio](https://www.linkedin.com/in/giorgio-zannini-quirini-16ab181a0/)
Date created: 2020/08/10
Last modified: 2020/08/10
Description: Estimating the density distribution of the "double moon" dataset.
"""
"""
## Introduction
The aim of this work is to map a simple distribution - which is easy to sample
and whose density is simple to estimate - to a more complex one learned from the data.
This kind of generative model is also known as "normalizing flow".
In order to do this, the model is trained via the maximum
likelihood principle, using the "change of variable" formula.
We will use an affine coupling function. We create it such that its inverse, as well as
the determinant of the Jacobian, are easy to obtain (more details in the referenced paper).
**Requirements:**
* Tensorflow 2.3
* Tensorflow probability 0.11.0
**Reference:**
[Density estimation using Real NVP](https://arxiv.org/pdf/1605.08803.pdf)
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from sklearn.datasets import make_moons
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
"""
## Load the data
"""
# Two interleaved half-moons, 3000 points; [0] keeps the coordinates and drops the labels.
data = make_moons(3000, noise=0.05)[0].astype("float32")
norm = layers.experimental.preprocessing.Normalization()
norm.adapt(data)  # learn per-feature mean/variance from the data
normalized_data = norm(data)  # standardized copy used for training
"""
## Affine coupling layer
"""
# Creating a custom layer with keras API.
output_dim = 256
reg = 0.01
def Coupling(input_shape):
    """Build one affine coupling block.

    Two parallel 4-hidden-layer MLPs read the same input: `t` (linear
    output) and `s` (tanh output).  Returns a Model mapping
    input -> [s, t].
    """
    inp = keras.layers.Input(shape=input_shape)

    def dense(units, activation, x):
        # Every layer uses the same L2 kernel regularization strength.
        return keras.layers.Dense(
            units, activation=activation, kernel_regularizer=regularizers.l2(reg)
        )(x)

    def hidden_stack(x):
        # Four ReLU layers of width `output_dim`.
        for _ in range(4):
            x = dense(output_dim, "relu", x)
        return x

    # Build the t-branch first, then the s-branch (same creation order as
    # the original, so layer naming/initialization order is unchanged).
    t_out = dense(input_shape, "linear", hidden_stack(inp))
    s_out = dense(input_shape, "tanh", hidden_stack(inp))
    return keras.Model(inputs=inp, outputs=[s_out, t_out])
"""
## Real NVP
"""
class RealNVP(keras.Model):
    """Real NVP flow: `num_coupling_layers` affine coupling blocks mapping
    2-D data to a standard-normal latent and back, trained by maximum
    likelihood via the change-of-variables formula."""
    def __init__(self, num_coupling_layers):
        super(RealNVP, self).__init__()
        self.num_coupling_layers = num_coupling_layers
        # Distribution of the latent space.
        self.distribution = tfp.distributions.MultivariateNormalDiag(
            loc=[0.0, 0.0], scale_diag=[1.0, 1.0]
        )
        # Alternating binary masks: each coupling layer keeps one coordinate
        # fixed and transforms the other.
        self.masks = np.array(
            [[0, 1], [1, 0]] * (num_coupling_layers // 2), dtype="float32"
        )
        self.loss_tracker = keras.metrics.Mean(name="loss")
        self.layers_list = [Coupling(2) for i in range(num_coupling_layers)]
    @property
    def metrics(self):
        """List of the model's metrics.
        We make sure the loss tracker is listed as part of `model.metrics`
        so that `fit()` and `evaluate()` are able to `reset()` the loss tracker
        at the start of each epoch and at the start of an `evaluate()` call.
        """
        return [self.loss_tracker]
    def call(self, x, training=True):
        """Apply the flow: direction=-1 (data -> latent) while training,
        direction=+1 (latent -> data) otherwise.  Returns (y, log-det term)."""
        log_det_inv = 0
        direction = 1
        if training:
            direction = -1
        # Traverse the coupling layers forward or in reverse depending on direction.
        for i in range(self.num_coupling_layers)[::direction]:
            x_masked = x * self.masks[i]
            reversed_mask = 1 - self.masks[i]
            s, t = self.layers_list[i](x_masked)
            s *= reversed_mask
            t *= reversed_mask
            # gate is 0 when direction=1 and -1 when direction=-1; it switches
            # between the forward and inverse affine forms and the log-det sign.
            gate = (direction - 1) / 2
            x = (
                reversed_mask
                * (x * tf.exp(direction * s) + direction * t * tf.exp(gate * s))
                + x_masked
            )
            log_det_inv += gate * tf.reduce_sum(s, [1])
        return x, log_det_inv
    # Log likelihood of the normal distribution plus the log determinant of the jacobian.
    def log_loss(self, x):
        y, logdet = self(x)
        log_likelihood = self.distribution.log_prob(y) + logdet
        return -tf.reduce_mean(log_likelihood)
    def train_step(self, data):
        # Custom train step: compute the NLL, backprop, track the mean loss.
        with tf.GradientTape() as tape:
            loss = self.log_loss(data)
        g = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(g, self.trainable_variables))
        self.loss_tracker.update_state(loss)
        return {"loss": self.loss_tracker.result()}
    def test_step(self, data):
        # Evaluation mirrors training without the gradient update.
        loss = self.log_loss(data)
        self.loss_tracker.update_state(loss)
        return {"loss": self.loss_tracker.result()}
"""
## Model training
"""
model = RealNVP(num_coupling_layers=6)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))
# Maximum-likelihood training; 20% of the points are held out for validation.
history = model.fit(
    normalized_data, batch_size=256, epochs=300, verbose=2, validation_split=0.2
)
"""
## Performance evaluation
"""
# Training curve: train vs. validation loss per epoch.
plt.figure(figsize=(15, 10))
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.legend(["train", "validation"], loc="upper right")
plt.ylabel("loss")
plt.xlabel("epoch")
# From data to latent space.
z, _ = model(normalized_data)
# From latent space to data.
samples = model.distribution.sample(3000)
x, _ = model.predict(samples)
# 2x2 panel: (data, its latent image) on top; (latent samples, their
# generated data image) below.
f, axes = plt.subplots(2, 2)
f.set_size_inches(20, 15)
axes[0, 0].scatter(normalized_data[:, 0], normalized_data[:, 1], color="r")
axes[0, 0].set(title="Inference data space X", xlabel="x", ylabel="y")
axes[0, 1].scatter(z[:, 0], z[:, 1], color="r")
axes[0, 1].set(title="Inference latent space Z", xlabel="x", ylabel="y")
axes[0, 1].set_xlim([-3.5, 4])
axes[0, 1].set_ylim([-4, 4])
axes[1, 0].scatter(samples[:, 0], samples[:, 1], color="g")
axes[1, 0].set(title="Generated latent space Z", xlabel="x", ylabel="y")
# NOTE(review): `label="x"` below is likely meant to be `xlabel="x"` — confirm.
axes[1, 1].scatter(x[:, 0], x[:, 1], color="g")
axes[1, 1].set(title="Generated data space X", label="x", ylabel="y")
axes[1, 1].set_xlim([-2, 2])
axes[1, 1].set_ylim([-2, 2])
0d4dcf4b3740e9236059bdb760a1648b29e6d370 | 1,533 | py | Python | src/tools/lidar_integration/lidar_integration/get_open_port.py | ruvus/auto | 25ae62d6e575cae40212356eed43ec3e76e9a13e | [
"Apache-2.0"
] | 19 | 2021-05-28T06:14:21.000Z | 2022-03-10T10:03:08.000Z | src/tools/lidar_integration/lidar_integration/get_open_port.py | ruvus/auto | 25ae62d6e575cae40212356eed43ec3e76e9a13e | [
"Apache-2.0"
] | 222 | 2021-10-29T22:00:27.000Z | 2022-03-29T20:56:34.000Z | src/tools/lidar_integration/lidar_integration/get_open_port.py | ruvus/auto | 25ae62d6e575cae40212356eed43ec3e76e9a13e | [
"Apache-2.0"
] | 14 | 2021-05-29T14:59:17.000Z | 2022-03-10T10:03:09.000Z | # Copyright 2018 the Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Co-developed by Tier IV, Inc. and Apex.AI, Inc.
import os
import socket
| 34.066667 | 93 | 0.69863 | # Copyright 2018 the Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Co-developed by Tier IV, Inc. and Apex.AI, Inc.
import os
import socket
def get_open_port_any():
    """Ask the OS for any currently-free TCP port and return its number.

    The socket is closed before returning, so the port is free again (but
    not reserved) by the time the caller uses it.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as listener:
        listener.bind(("", 0))          # port 0 -> kernel picks a free port
        listener.listen(1)
        return listener.getsockname()[1]
def get_open_port(range_start=3000):
    """Return a port for a test process to use.

    If ROS_DOMAIN_ID is set (unique per test, in 0-255), the port is simply
    range_start + ROS_DOMAIN_ID, which is deterministic and collision-free
    across concurrent tests.  Otherwise fall back to asking the OS for any
    free port via get_open_port_any().

    :param range_start: base port added to ROS_DOMAIN_ID; must lie in
        [1024, 7400] so the result avoids well-known and DDS/RTPS ports
    :raises ValueError: if range_start is outside [1024, 7400]
    """
    def bad_port(p):
        # Ports 1-1023 represent "Well-known ports"
        # Ports 7400 and up are ports that might possibly be used by DDS/RTPS implementations
        return (p < 1024) or (p > 7400)
    if bad_port(range_start):
        # Bug fix: the original raised a bare ValueError with no message.
        raise ValueError(
            'range_start must be in [1024, 7400], got {}'.format(range_start))
    # ROS_DOMAIN_ID in the context of a test is "guaranteed" to be unique (and in 0-255)
    if 'ROS_DOMAIN_ID' not in os.environ:
        port = get_open_port_any()
        if bad_port(port):
            print("Warning, port {} might collide with DDS or well-known ports".format(port))
        return port
    domain_id = int(os.environ['ROS_DOMAIN_ID'])
    return domain_id + range_start
| 821 | 0 | 46 |
e2290dfd5b24cbc2440d18ed10b34011008f0531 | 4,049 | py | Python | biqukan.py | Adsryen/python-spiders | 005513c78e82eaa3671d584c58a35b009aa7fd01 | [
"MIT"
] | 31 | 2022-01-02T11:35:36.000Z | 2022-03-26T16:56:08.000Z | biqukan.py | Adsryen/python-spiders | 005513c78e82eaa3671d584c58a35b009aa7fd01 | [
"MIT"
] | null | null | null | biqukan.py | Adsryen/python-spiders | 005513c78e82eaa3671d584c58a35b009aa7fd01 | [
"MIT"
] | 12 | 2022-01-07T04:04:57.000Z | 2022-03-29T08:22:24.000Z | #!/usr/bin/env python
# encoding: utf-8
'''
#-------------------------------------------------------------------
# CONFIDENTIAL --- CUSTOM STUDIOS
#-------------------------------------------------------------------
#
# @Project Name : 下载《笔趣看》网小说
#
# @File Name : biqukan.py
#
# @Programmer : autofelix
#
# @Start Date : 2022/01/10 13:14
#
# @Last Update : 2022/01/10 13:14
#
#-------------------------------------------------------------------
'''
from urllib import request
from bs4 import BeautifulSoup
import collections, re, os, sys
class biqukan:
    '''
    This is a main Class, the file contains all documents.
    One document contains paragraphs that have several sentences
    It loads the original file and converts the original file to new content
    Then the new content will be saved by this class
    '''

    def __init__(self):
        # Bug fix: this copy of the class never defined self.header, so every
        # request method raised AttributeError.  Mobile UA keeps pages simple.
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19'
        }

    def hello(self):
        '''
        This is a welcome speech
        :return: self
        '''
        print('*' * 50)
        print(' ' * 15 + '下载《笔趣看》网小说')
        print(' ' * 5 + '作者: autofelix Date: 2022-01-10 13:14')
        print(' ' * 5 + '主页: https://autofelix.blog.csdn.net')
        print('*' * 50)
        return self

    def get_download_url(self, target_url):
        '''
        Fetch the table-of-contents page and collect chapter download links.

        :param target_url: URL of the novel's chapter-list page
        :return: (novel file name, chapter counter, OrderedDict {title: url})
        '''
        charter = re.compile(u'[第弟](.+)章', re.IGNORECASE)
        target_req = request.Request(url=target_url, headers=self.header)
        target_response = request.urlopen(target_req)
        # Site pages are GBK encoded; undecodable bytes are dropped.
        target_html = target_response.read().decode('gbk', 'ignore')
        list_main_soup = BeautifulSoup(target_html, 'lxml')
        chapters = list_main_soup.find_all('div', class_='listmain')
        download_soup = BeautifulSoup(str(chapters), 'lxml')
        novel_name = str(download_soup.dl.dt).split("》")[0][5:]
        flag_name = "《" + novel_name + "》" + "正文卷"
        download_dict = collections.OrderedDict()
        begin_flag = False
        numbers = 1
        for child in download_soup.dl.children:
            if child != '\n':
                # Entries before the "正文卷" marker are previews; skip them.
                if child.string == u"%s" % flag_name:
                    begin_flag = True
                if begin_flag and child.a is not None:
                    download_url = "https://www.biqukan.com" + child.a.get('href')
                    download_name = child.string
                    names = str(download_name).split('章')
                    name = charter.findall(names[0] + '章')
                    if name:
                        download_dict['第' + str(numbers) + '章 ' + names[1]] = download_url
                        numbers += 1
        return novel_name + '.txt', numbers, download_dict

    def downloader(self, url):
        '''
        Download one chapter page and return its plain text.

        :param url: chapter page URL
        :return: chapter text with non-breaking spaces removed
        '''
        download_req = request.Request(url=url, headers=self.header)
        download_response = request.urlopen(download_req)
        download_html = download_response.read().decode('gbk', 'ignore')
        soup_texts = BeautifulSoup(download_html, 'lxml')
        texts = soup_texts.find_all(id='content', class_='showtxt')
        soup_text = BeautifulSoup(str(texts), 'lxml').div.text.replace('\xa0', '')
        return soup_text

    def writer(self, name, path, text):
        '''
        Append one chapter to the output file.

        :param name: chapter title, written as a heading
        :param path: output file path (opened in append mode)
        :param text: chapter body; spaces are dropped, a carriage return adds
                     a newline, and writing stops at the first 'h'
                     (presumably to drop a trailing site link — confirm)
        '''
        write_flag = True
        with open(path, 'a', encoding='utf-8') as f:
            f.write(name + '\n\n')
            for each in text:
                if each == 'h':
                    write_flag = False
                if write_flag == True and each != ' ':
                    f.write(each)
                if write_flag == True and each == '\r':
                    f.write('\n')
            f.write('\n\n')

    def run(self):
        '''
        program entry
        '''
        target_url = str(input("请输入小说目录下载地址:\n"))
        # Bug fix: the original did `d = self.downloader(target_url)` (which
        # returns a string) and then called the non-existent
        # d.get_download_url / d.Writer / d.Downloader on it.
        name, numbers, url_dict = self.get_download_url(target_url)
        if name in os.listdir():
            os.remove(name)
        index = 1
        print("《%s》下载中:" % name[:-4])
        for key, value in url_dict.items():
            self.writer(key, name, self.downloader(value))
            sys.stdout.write("已下载:%.3f%%" % float(index / numbers) + '\r')
            sys.stdout.flush()
            index += 1
        print("《%s》下载完成!" % name[:-4])
if __name__ == '__main__':
biqukan().hello().run()
| 29.992593 | 150 | 0.592245 | #!/usr/bin/env python
# encoding: utf-8
'''
#-------------------------------------------------------------------
# CONFIDENTIAL --- CUSTOM STUDIOS
#-------------------------------------------------------------------
#
# @Project Name : 下载《笔趣看》网小说
#
# @File Name : biqukan.py
#
# @Programmer : autofelix
#
# @Start Date : 2022/01/10 13:14
#
# @Last Update : 2022/01/10 13:14
#
#-------------------------------------------------------------------
'''
from urllib import request
from bs4 import BeautifulSoup
import collections, re, os, sys
class biqukan:
    '''
    This is a main Class, the file contains all documents.
    One document contains paragraphs that have several sentences
    It loads the original file and converts the original file to new content
    Then the new content will be saved by this class
    '''

    def __init__(self):
        # Mobile user agent so the site serves simple, easily parsed pages.
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19'
        }

    def hello(self):
        '''
        This is a welcome speech
        :return: self
        '''
        print('*' * 50)
        print(' ' * 15 + '下载《笔趣看》网小说')
        print(' ' * 5 + '作者: autofelix Date: 2022-01-10 13:14')
        print(' ' * 5 + '主页: https://autofelix.blog.csdn.net')
        print('*' * 50)
        return self

    def get_download_url(self, target_url):
        '''
        Fetch the table-of-contents page and collect chapter download links.

        :param target_url: URL of the novel's chapter-list page
        :return: (novel file name, chapter counter, OrderedDict {title: url})
        '''
        charter = re.compile(u'[第弟](.+)章', re.IGNORECASE)
        target_req = request.Request(url=target_url, headers=self.header)
        target_response = request.urlopen(target_req)
        # Site pages are GBK encoded; undecodable bytes are dropped.
        target_html = target_response.read().decode('gbk', 'ignore')
        list_main_soup = BeautifulSoup(target_html, 'lxml')
        chapters = list_main_soup.find_all('div', class_='listmain')
        download_soup = BeautifulSoup(str(chapters), 'lxml')
        novel_name = str(download_soup.dl.dt).split("》")[0][5:]
        flag_name = "《" + novel_name + "》" + "正文卷"
        # Note: the original computed a `numbers` estimate here and then
        # immediately overwrote it; the dead assignment has been removed.
        download_dict = collections.OrderedDict()
        begin_flag = False
        numbers = 1
        for child in download_soup.dl.children:
            if child != '\n':
                # Entries before the "正文卷" marker are previews; skip them.
                if child.string == u"%s" % flag_name:
                    begin_flag = True
                if begin_flag and child.a is not None:
                    download_url = "https://www.biqukan.com" + child.a.get('href')
                    download_name = child.string
                    names = str(download_name).split('章')
                    name = charter.findall(names[0] + '章')
                    if name:
                        download_dict['第' + str(numbers) + '章 ' + names[1]] = download_url
                        numbers += 1
        return novel_name + '.txt', numbers, download_dict

    def downloader(self, url):
        '''
        Download one chapter page and return its plain text.

        :param url: chapter page URL
        :return: chapter text with non-breaking spaces removed
        '''
        download_req = request.Request(url=url, headers=self.header)
        download_response = request.urlopen(download_req)
        download_html = download_response.read().decode('gbk', 'ignore')
        soup_texts = BeautifulSoup(download_html, 'lxml')
        texts = soup_texts.find_all(id='content', class_='showtxt')
        soup_text = BeautifulSoup(str(texts), 'lxml').div.text.replace('\xa0', '')
        return soup_text

    def writer(self, name, path, text):
        '''
        Append one chapter to the output file.

        :param name: chapter title, written as a heading
        :param path: output file path (opened in append mode)
        :param text: chapter body; spaces are dropped, a carriage return adds
                     a newline, and writing stops at the first 'h'
                     (presumably to drop a trailing site link — confirm)
        '''
        write_flag = True
        with open(path, 'a', encoding='utf-8') as f:
            f.write(name + '\n\n')
            for each in text:
                if each == 'h':
                    write_flag = False
                if write_flag == True and each != ' ':
                    f.write(each)
                if write_flag == True and each == '\r':
                    f.write('\n')
            f.write('\n\n')

    def run(self):
        '''
        program entry
        '''
        target_url = str(input("请输入小说目录下载地址:\n"))
        # Bug fix: the original did `d = self.downloader(target_url)` (which
        # returns a string) and then called the non-existent
        # d.get_download_url / d.Writer / d.Downloader on it.
        name, numbers, url_dict = self.get_download_url(target_url)
        if name in os.listdir():
            os.remove(name)
        index = 1
        print("《%s》下载中:" % name[:-4])
        for key, value in url_dict.items():
            self.writer(key, name, self.downloader(value))
            sys.stdout.write("已下载:%.3f%%" % float(index / numbers) + '\r')
            sys.stdout.flush()
            index += 1
        print("《%s》下载完成!" % name[:-4])
print("《%s》下载完成!" % name[:-4])
if __name__ == '__main__':
biqukan().hello().run()
| 171 | 0 | 23 |
a430d9c6cd89854194892bdfcc0be76bfaba9d11 | 3,178 | py | Python | Professional/94/spaceInvaders.py | Matthew1906/100DaysOfPython | 94ffff8f5535ce5d574f49c0d7971d64a4575aad | [
"MIT"
] | 1 | 2021-12-25T02:19:18.000Z | 2021-12-25T02:19:18.000Z | Professional/94/spaceInvaders.py | Matthew1906/100DaysOfPython | 94ffff8f5535ce5d574f49c0d7971d64a4575aad | [
"MIT"
] | null | null | null | Professional/94/spaceInvaders.py | Matthew1906/100DaysOfPython | 94ffff8f5535ce5d574f49c0d7971d64a4575aad | [
"MIT"
] | 1 | 2021-11-25T10:31:47.000Z | 2021-11-25T10:31:47.000Z | # Turtle Objects
from turtle import Screen, Turtle
# Game Objects
from defender import Defender
from invaders import Invaders
from scoreboard import ScoreBoard
# Utilities
from time import sleep
from PIL import Image, ImageTk
# Initialize Screen
main_screen = Screen()
main_screen.title("Space Invaders")
main_screen.setup(width=800, height=550, startx= 250, starty=10)
# Setup Background
background_canvas = main_screen.getcanvas()
## Resize Image
image = Image.open("./images/space-bg.gif")
background_image = image.resize((800,550), Image.ANTIALIAS)
background_image = ImageTk.PhotoImage(background_image)
## Put image into background
background_canvas.create_image(0,0, image=background_image)
# Register shapes -> you need to resize the image first using PIL
main_screen.register_shape('./images/defender.gif')
main_screen.register_shape('./images/invader.gif')
# Add event listener
main_screen.tracer(0)
main_screen.listen()
# Menu indicator
is_game = False
# Menu drawing
menu = Turtle()
menu.hideturtle()
menu.penup()
menu.color('white')
draw_menu()
main_screen.onkey(key='s', fun=space_invaders)
main_screen.onkey(key='q', fun=main_screen.bye)
main_screen.mainloop() | 27.634783 | 88 | 0.678729 | # Turtle Objects
from turtle import Screen, Turtle
# Game Objects
from defender import Defender
from invaders import Invaders
from scoreboard import ScoreBoard
# Utilities
from time import sleep
from PIL import Image, ImageTk
# Initialize Screen
main_screen = Screen()
main_screen.title("Space Invaders")
main_screen.setup(width=800, height=550, startx= 250, starty=10)
# Setup Background
background_canvas = main_screen.getcanvas()
## Resize Image
image = Image.open("./images/space-bg.gif")
background_image = image.resize((800,550), Image.ANTIALIAS)
background_image = ImageTk.PhotoImage(background_image)
## Put image into background
background_canvas.create_image(0,0, image=background_image)
# Register shapes -> you need to resize the image first using PIL
main_screen.register_shape('./images/defender.gif')
main_screen.register_shape('./images/invader.gif')
# Add event listener
main_screen.tracer(0)
main_screen.listen()
# Menu indicator
is_game = False
# Menu drawing
menu = Turtle()
menu.hideturtle()
menu.penup()
menu.color('white')
def draw_menu():
    """Draw the title menu (game title plus start/quit hints) with the
    module-level `menu` turtle."""
    entries = (
        (45, f"Space Invaders", ("Arial", 24, "bold")),
        (0, f"Press 's' to start game", ("Arial", 12, "bold")),
        (-35, f"Press 'q' to quit game", ("Arial", 12, "bold")),
    )
    for y_pos, text, font in entries:
        menu.goto((0, y_pos))
        menu.write(text, align="center", font=font)
def space_invaders():
    """Run one round of the game, then schedule a return to the menu.

    Uses module-level state: `menu` (title turtle), `main_screen`, and the
    `is_game` guard that prevents a second round from starting while one is
    already running.
    """
    # Make sure that the game doesn't run multiple times
    menu.clear()
    global is_game
    if is_game:
        return
    is_game=True
    # Config game indicators
    running = True
    delay = 0.05  # seconds per frame
    # Objects
    scoreboard = ScoreBoard()
    invaders = Invaders()
    defender = Defender()
    # Helper functions
    def clear_all():
        '''Reset all objects (except scoreboard)'''
        defender.reset_bullets()
        defender.reset()
        defender.hideturtle()
        invaders.reset()
        scoreboard.clear()
    def back_to_menu():
        '''Reset scoreboard and go back to menu'''
        scoreboard.reset()
        draw_menu()
    # Config listener
    main_screen.onkeypress(key='Left', fun=defender.left)
    main_screen.onkeypress(key='Right', fun=defender.right)
    main_screen.onkeypress(key='space', fun=defender.shoot)
    # start game
    while running:  # NOTE(review): `running` is never reset; the loop exits via break
        # Constant update of the main screen
        main_screen.update()
        # Slight delay to prevent glitch
        sleep(delay)
        # Object's movements
        defender.move_bullets()
        invaders.move_invaders()
        invaders.move_bullets()
        # Check for collision
        if invaders.check_collision(defender):
            scoreboard.update_lives()
            defender.lose_life()
        elif defender.successful_shot(invaders.invaders):
            scoreboard.update_score()
        # Win condition: score 560 (presumably all invaders cleared — confirm) or no lives left
        if scoreboard.score == 560 or scoreboard.lives == 0:
            break
    clear_all()
    scoreboard.final_result()
    # Show the final result for 3 seconds before redrawing the menu.
    main_screen.ontimer(fun=back_to_menu, t=3000)
    is_game=False
main_screen.onkey(key='s', fun=space_invaders)
main_screen.onkey(key='q', fun=main_screen.bye)
main_screen.mainloop() | 1,947 | 0 | 47 |
ce42292fe7c299a4af67a13cf9348d0399c5099c | 1,415 | py | Python | detect.py | milangeorge2000/face_attendance | 90a41412ef5d0fabaa21f6246ce7243ae36d59b4 | [
"Apache-2.0"
] | null | null | null | detect.py | milangeorge2000/face_attendance | 90a41412ef5d0fabaa21f6246ce7243ae36d59b4 | [
"Apache-2.0"
] | null | null | null | detect.py | milangeorge2000/face_attendance | 90a41412ef5d0fabaa21f6246ce7243ae36d59b4 | [
"Apache-2.0"
] | null | null | null | from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
from torchvision import datasets
from torch.utils.data import DataLoader
from PIL import Image
mtcnn = MTCNN(image_size=240, margin=0, min_face_size=20) # initializing mtcnn for face detection
resnet = InceptionResnetV1(pretrained='vggface2').eval() # initializing resnet for face img to embeding conversion
dataset=datasets.ImageFolder('train') # photos folder path
idx_to_class = {i:c for c,i in dataset.class_to_idx.items()} # accessing names of peoples from folder names
loader = DataLoader(dataset, collate_fn=collate_fn)
face_list = [] # list of cropped faces from photos folder
name_list = [] # list of names corrospoing to cropped photos
embedding_list = [] # list of embeding matrix after conversion from cropped faces to embedding matrix using resnet
for img, idx in loader:
face, prob = mtcnn(img, return_prob=True)
if face is not None and prob>0.90: # if face detected and porbability > 90%
emb = resnet(face.unsqueeze(0)) # passing cropped face into resnet model to get embedding matrix
embedding_list.append(emb.detach()) # resulten embedding matrix is stored in a list
name_list.append(idx_to_class[idx]) # names are stored in a list
data = [embedding_list, name_list]
torch.save(data, 'dataface2.pt') # saving data.pt file | 42.878788 | 115 | 0.742756 | from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
from torchvision import datasets
from torch.utils.data import DataLoader
from PIL import Image
mtcnn = MTCNN(image_size=240, margin=0, min_face_size=20)  # initializing mtcnn for face detection
resnet = InceptionResnetV1(pretrained='vggface2').eval()  # initializing resnet for face image -> embedding conversion
dataset=datasets.ImageFolder('train')  # photos folder path; one sub-folder per person
idx_to_class = {i:c for c,i in dataset.class_to_idx.items()}  # map class index back to person (folder) name
def collate_fn(x):
    """Return the first element of the batch list unchanged."""
    sample = x[0]
    return sample
loader = DataLoader(dataset, collate_fn=collate_fn)
face_list = []  # NOTE(review): declared but never populated below — confirm it can be removed
name_list = []  # list of names corresponding to cropped photos
embedding_list = []  # list of embedding matrices computed from cropped faces by resnet
for img, idx in loader:
    face, prob = mtcnn(img, return_prob=True)
    if face is not None and prob>0.90:  # if a face is detected with probability > 90%
        emb = resnet(face.unsqueeze(0))  # pass the cropped face through resnet to get its embedding
        embedding_list.append(emb.detach())  # store the resulting embedding matrix
        name_list.append(idx_to_class[idx])  # store the matching person's name
data = [embedding_list, name_list]
torch.save(data, 'dataface2.pt')  # persist embeddings + names for later recognition
491e78ac407bddd3554ce7a1b2051d55374e1d69 | 6,969 | py | Python | rss2kml_test.py | klokan/googlecrisismap | e31b68706f7a4921037b0e513703483f2ac3b830 | [
"Apache-2.0"
] | 40 | 2015-06-24T12:31:14.000Z | 2021-01-30T23:33:54.000Z | rss2kml_test.py | klokan/googlecrisismap | e31b68706f7a4921037b0e513703483f2ac3b830 | [
"Apache-2.0"
] | 16 | 2015-07-30T14:24:42.000Z | 2021-02-24T22:30:56.000Z | rss2kml_test.py | klokan/googlecrisismap | e31b68706f7a4921037b0e513703483f2ac3b830 | [
"Apache-2.0"
] | 27 | 2015-06-21T07:31:58.000Z | 2021-02-12T08:47:42.000Z | #!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
"""Tests for rss2kml.py."""
__author__ = 'arb@google.com (Anthony Baxter)'
import xml.etree.ElementTree as ElementTree
# Allow relative imports within the app. # pylint: disable=W0403
import mox
import rss2kml
import test_utils
from google.appengine.api import memcache
from google.appengine.api import urlfetch
class Rss2KmlTest(test_utils.BaseTest):
"""Tests for rss2kml.py."""
if __name__ == '__main__':
test_utils.main()
| 34.5 | 95 | 0.633807 | #!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
"""Tests for rss2kml.py."""
__author__ = 'arb@google.com (Anthony Baxter)'
import xml.etree.ElementTree as ElementTree
# Allow relative imports within the app. # pylint: disable=W0403
import mox
import rss2kml
import test_utils
from google.appengine.api import memcache
from google.appengine.api import urlfetch
def Deindent(kml):
  """Return kml with leading/trailing whitespace stripped from each line."""
  stripped_lines = [line.strip() for line in kml.split('\n')]
  return '\n'.join(stripped_lines)
class Rss2KmlTest(test_utils.BaseTest):
  """Tests for rss2kml.py."""
  # NOTE(review): assertEquals/assertNotEquals are deprecated unittest
  # aliases of assertEqual/assertNotEqual (removed in Python 3.12).

  def setUp(self):
    # Stub out network fetches and memcache so the tests are hermetic.
    self.mox = mox.Mox()
    self.mox.StubOutWithMock(urlfetch, 'fetch')
    self.mox.StubOutWithMock(memcache, 'get')
    self.mox.StubOutWithMock(memcache, 'set')

  def tearDown(self):
    self.mox.UnsetStubs()

  def testConversion(self):
    """End-to-end: fetch a GeoRSS feed (mocked) and convert it to KML."""
    handler = test_utils.SetupHandler(
        '/crisismap/rss2kml', rss2kml.Rss2Kml(),
        'ib=http%3A%2F%2Fwww.rfs.nsw.gov.au%2Ffile_system%2Fimages%2F'
        'State08%2F%24.png&url=http%3A%2F%2Ffeeds.rfs.nsw.gov.au%2F'
        'majorIncidents.xml&field=category&'
        's=Emergency_Warning:0:Emergency+Warning&'
        's=WatchAndAct:0:Watch+and+Act&'
        's=Advice:0:Advice&'
        's=:0:NotApplicable&'
        'p=11111111:44444444')
    last_mod = 'Wed, 26 Sep 2012 02:45:35 GMT'

    class DummyRSS(object):
      # Minimal stand-in for a urlfetch response object.
      headers = {'Last-modified': last_mod}
      content = """\
<rss xmlns:georss="http://www.georss.org/georss" version="2.0">
<channel>
<item>
<title>TITLE</title>
<description>DESCR</description>
<guid>GUID</guid>
<georss:point>12 24</georss:point>
<category>emergency warning</category>
</item>
<item>
<title>TITLE2</title>
<description>DESCR2</description>
<guid>GUID2</guid>
<georss:polygon>11 44 55 22</georss:polygon>
<category>Advice</category>
</item>
</channel>
</rss>"""

    memcache.get(mox.IgnoreArg())
    urlfetch.fetch('http://feeds.rfs.nsw.gov.au/majorIncidents.xml',
                   validate_certificate=False, deadline=30).AndReturn(DummyRSS)
    # TODO(arb): test_utils.SetupHandler() doesn't set self.request.query_string
    # This makes our cache key broken.
    cache_key = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    memcache.set('RSS2KML+' + cache_key, mox.IgnoreArg(), 120)
    memcache.set('RSS2KML+' + cache_key + 'last_mod', last_mod, 120)
    self.mox.ReplayAll()
    handler.get()
    self.mox.VerifyAll()
    # Expected output; comparison below is whitespace-insensitive (Deindent).
    expected = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://earth.google.com/kml/2.2">
<Document>
<Style id="style_advice">
<IconStyle>
<Icon>
<href>http://www.rfs.nsw.gov.au/file_system/images/State08/Advice.png</href>
</Icon>
</IconStyle>
<PolyStyle>
<color>44444444</color>
<colorMode>normal</colorMode>
<fill>1</fill>
<outline>1</outline>
</PolyStyle>
<LineStyle>
<color>11111111</color>
<colorMode>normal</colorMode>
</LineStyle>
</Style>
<Style id="style_emergency_warning">
<IconStyle>
<Icon>
<href>http://www.rfs.nsw.gov.au/file_system/images/State08/Emergency_Warning.png</href>
</Icon>
</IconStyle>
<PolyStyle>
<color>44444444</color>
<colorMode>normal</colorMode>
<fill>1</fill>
<outline>1</outline>
</PolyStyle>
<LineStyle>
<color>11111111</color>
<colorMode>normal</colorMode>
</LineStyle>
</Style>
<Placemark id="GUID">
<name>TITLE</name>
<description>DESCR</description>
<MultiGeometry>
<Point>
<coordinates>24,12,0</coordinates>
</Point>
</MultiGeometry>
<styleUrl>#style_emergency_warning</styleUrl>
</Placemark>
<Placemark id="GUID2">
<name>TITLE2</name>
<description>DESCR2</description>
<MultiGeometry>
<Polygon>
<outerBoundaryIs>
<LinearRing>
<coordinates>44,11,0
22,55,0</coordinates>
</LinearRing>
</outerBoundaryIs>
</Polygon>
</MultiGeometry>
<styleUrl>#style_advice</styleUrl>
</Placemark>
</Document>
</kml>
"""
    self.assertEquals(Deindent(expected), Deindent(handler.response.body))
    self.assertEquals(last_mod, handler.response.headers['Last-modified'])

  def testCreatePlacemarkPoint(self):
    """CreatePlacemark: a georss point becomes a KML <Point> placemark."""
    item_values = {'point': ['12 24'],
                   'title': ['a point'],
                   'description': ['described thing'],
                   'guid': ['GUID']}
    placemark_xml = ('<Placemark id="GUID"><name>a point</name>'
                     '<description>described thing</description>'
                     '<MultiGeometry><Point><coordinates>24,12,0</coordinates>'
                     '</Point></MultiGeometry>'
                     '<styleUrl>#style_icon_foo</styleUrl></Placemark>')
    instance = rss2kml.Rss2Kml()
    placemark = instance.CreatePlacemark(item_values, 'icon_foo')
    self.assertEquals(placemark_xml, ElementTree.tostring(placemark))

  def testCreatePlacemarkMultipolygon(self):
    """CreatePlacemark: multiple polygons go into one <MultiGeometry>."""
    item_values = {'polygon': ['1 2 3 4 1 2', '4 5 6 7 4 5'],
                   'title': ['2 polys'],
                   'description': ['described thing'],
                   'guid': ['GUID']}
    placemark_xml = ('<Placemark id="GUID"><name>2 polys</name>'
                     '<description>described thing</description>'
                     '<MultiGeometry><Polygon><outerBoundaryIs><LinearRing>'
                     '<coordinates>2,1,0\n4,3,0\n2,1,0</coordinates>'
                     '</LinearRing></outerBoundaryIs></Polygon>'
                     '<Polygon><outerBoundaryIs><LinearRing>'
                     '<coordinates>5,4,0\n7,6,0\n5,4,0</coordinates>'
                     '</LinearRing></outerBoundaryIs></Polygon>'
                     '</MultiGeometry><styleUrl>#style_icon_foo</styleUrl>'
                     '</Placemark>')
    instance = rss2kml.Rss2Kml()
    placemark = instance.CreatePlacemark(item_values, 'icon_foo')
    self.assertEquals(placemark_xml, ElementTree.tostring(placemark))

  def testIconSafety(self):
    """IconCache sanitizes names ('/' -> '_') and de-duplicates entries."""
    cache = rss2kml.IconCache()
    self.assertEquals('foo', cache.Add('foo'))
    self.assertEquals('foo', cache.Add('foo'))
    self.assertEquals(1, len(list(cache)))
    self.assertEquals('foo_bar', cache.Add('foo/bar'))
    self.assertNotEquals('foo_bar', cache.Add('foo#bar'))
    self.assertEquals(3, len(list(cache)))
# Allow running this test module directly.
if __name__ == '__main__':
  test_utils.main()
| 5,751 | 0 | 173 |
e8942f3a0705ae7acb5b7328d2889bc969c7f34c | 54 | py | Python | test/__init__.py | zachasme/pyresp | a363cef3435769ba86b86ac1d31576566ca3ddc9 | [
"MIT"
] | 3 | 2017-10-15T11:55:01.000Z | 2018-11-04T19:29:34.000Z | test/__init__.py | zachasme/pyresp | a363cef3435769ba86b86ac1d31576566ca3ddc9 | [
"MIT"
] | null | null | null | test/__init__.py | zachasme/pyresp | a363cef3435769ba86b86ac1d31576566ca3ddc9 | [
"MIT"
] | null | null | null | # run these tests with python -m pytest from src/ dir
| 27 | 53 | 0.740741 | # run these tests with python -m pytest from src/ dir
| 0 | 0 | 0 |
8551184ce4138e96c09b351cde834933ae754688 | 257 | py | Python | Exercicios/desafio18_mathangulo.py | lbarrosandre/Resolucao-Desafios-Python | d30897d9d5cb179579fd67a70f3d74af7a3293a1 | [
"MIT"
] | null | null | null | Exercicios/desafio18_mathangulo.py | lbarrosandre/Resolucao-Desafios-Python | d30897d9d5cb179579fd67a70f3d74af7a3293a1 | [
"MIT"
] | null | null | null | Exercicios/desafio18_mathangulo.py | lbarrosandre/Resolucao-Desafios-Python | d30897d9d5cb179579fd67a70f3d74af7a3293a1 | [
"MIT"
] | null | null | null | import math
angulo = float(input('Digite um angulo: '))
sen = math.sin(math.radians(angulo))
cos = math.cos(math.radians(angulo))
tan = math.tan(math.radians(angulo))
print('O seno é {:.2f}\nO Cosseno é {:.2f}\ne a tangente é {:.2f}'.format(sen, cos, tan))
| 36.714286 | 89 | 0.680934 | import math
# Read an angle in degrees and report its sine, cosine and tangent.
angulo = float(input('Digite um angulo: '))
radianos = math.radians(angulo)  # degrees -> radians, computed once
sen = math.sin(radianos)
cos = math.cos(radianos)
tan = math.tan(radianos)
print('O seno é {:.2f}\nO Cosseno é {:.2f}\ne a tangente é {:.2f}'.format(sen, cos, tan))
| 0 | 0 | 0 |
44eb49328209b2f289e9757814c4bdf802ad9c46 | 1,303 | py | Python | Content/Scripts/debugtest.py | neo-talen/UnrealEnginePython | 9a0878f7f3c1b34c0bc5c886ec14ad3356284b86 | [
"MIT"
] | 65 | 2017-02-23T12:18:02.000Z | 2021-12-23T14:23:52.000Z | Content/Scripts/debugtest.py | Artisan-H3/UnrealEnginePython | 9a0878f7f3c1b34c0bc5c886ec14ad3356284b86 | [
"MIT"
] | 8 | 2017-03-16T18:16:27.000Z | 2019-08-07T18:07:23.000Z | Content/Scripts/debugtest.py | Artisan-H3/UnrealEnginePython | 9a0878f7f3c1b34c0bc5c886ec14ad3356284b86 | [
"MIT"
] | 22 | 2017-05-04T20:53:23.000Z | 2022-03-09T15:49:54.000Z | #import debugtest
#import imp
#imp.reload(debugtest)
#Testing script for various writing setups
#import redirect_print
import unreal_engine as ue
import time
import sys
import upythread as ut
from threading import Thread
#imp.reload(redirect_print)
#the test function
#test simple fire and forget
#Test with callback
#progress callback example functions
#test basic progress bar | 18.352113 | 51 | 0.71604 | #import debugtest
#import imp
#imp.reload(debugtest)
#Testing script for various writing setups
#import redirect_print
import unreal_engine as ue
import time
import sys
import upythread as ut
from threading import Thread
#imp.reload(redirect_print)
def onfinished(args=""):
    # Completion callback with a payload: logs the raw args, then a framed summary.
    ue.log(args)
    ue.log('finished with: <' + str(args) + '>')
def onfinishedempty():
    # Zero-argument completion callback; just logs that work ended.
    ue.log('finished')
def testaction(args=""):
    # Background worker: logs start, sleeps 1s to simulate work, then
    # schedules both completion callbacks via ue.run_on_gt (presumably
    # "run on game thread" -- TODO confirm against the plugin API).
    ue.log('starting action with <' + str(args) + '>')
    #onfinished()
    #pretend you take time to finish
    time.sleep(1)
    ue.log('wait complete')
    ue.run_on_gt(onfinished, args)
    ue.run_on_gt(onfinishedempty)
#the test function
def test(params=None):
    # Run testaction on a plain threading.Thread; params, when truthy, is
    # forwarded as testaction's single positional argument.
    ue.log(type(params))
    ue.log('starting test')
    if not params:
        t = Thread(target=testaction)
    else:
        t = Thread(target=testaction, args=(params,))
    t.start()
def yolo():
    # Trivial demo task used by test2/test3.
    ue.log('yolo!')
def yolodone():
    # Completion callback paired with yolo in test3.
    ue.log('yolo done!')
#test simple fire and forget
def test2():
    # Fire-and-forget: run yolo on a background thread via upythread.
    ut.run_on_bt(yolo)
#Test with callback
def test3():
    # Background run of yolo with yolodone as the callback (per the
    # "Test with callback" note above -- TODO confirm ut.run_on_bt's
    # argument semantics against upythread).
    ut.run_on_bt(yolo, yolodone)
#progress callback example functions
def progresscallback(progress):
    # Logs the progress value reported by doLongTask.
    ue.log('at ' + str(progress))
def doLongTask():
    # Simulated long job: nine half-second steps, reporting progress
    # values 1..9 back via ue.run_on_gt after each step.
    ue.log('started my task')
    for x in range(1,10):
        time.sleep(0.5)
        ue.run_on_gt(progresscallback, x)
#test basic progress bar
def testp():
ut.run_on_bt(doLongTask) | 668 | 0 | 248 |
acd2e4f7bd002b978aa773228676c457d4179f4c | 732 | py | Python | test_find.py | UO-CIS-322/scrabble-helper | f18569709e6def2684dd7eada9200ff41c4c6b23 | [
"MIT"
] | null | null | null | test_find.py | UO-CIS-322/scrabble-helper | f18569709e6def2684dd7eada9200ff41c4c6b23 | [
"MIT"
] | null | null | null | test_find.py | UO-CIS-322/scrabble-helper | f18569709e6def2684dd7eada9200ff41c4c6b23 | [
"MIT"
] | null | null | null | """
Test suite for 'find.py' (Fall 2015 version)
"""
from find15F import matches
| 26.142857 | 65 | 0.617486 | """
Test suite for 'find.py' (Fall 2015 version)
"""
from find15F import matches
def test_find_no_wildcards():
    ## Matches without wildcards
    # With no blanks ('_') in the pattern, the word must equal the
    # pattern exactly; the tray contents are irrelevant.
    assert matches("abc", "abc", "abc")
    assert matches("abc", "abc", "xxx") # Uses nothing from tray
    assert not matches("abx", "abc", "abc") # x doesn't match c
    assert not matches("abcd", "abc", "abc") # too long
    assert not matches("abc", "abcd", "abc") # too short
def test_find_with_wildcards():
    # Matches with wildcards:
    # Add at least three good test cases here
    # Each '_' must be filled by a letter available in the tray.
    assert matches("abc", "__c", "xaxbx")
    assert not matches("abbc", "a__c", "xbx") # tray has only one 'b'
    assert not matches ("abc", "a_x", "bc")   # literal 'x' still must match
    assert not matches("abc", "a_c", "xyz")   # tray lacks 'b' for the blank
| 590 | 0 | 46 |
5804c660e67eedf09b0dec6e599d1cf644156a9d | 7,584 | py | Python | tensorflow/python/ops/clustering_ops_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/python/ops/clustering_ops_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/python/ops/clustering_ops_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for clustering_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import clustering_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
# All but one input point are close to (101, 1). With uniform random sampling,
# it is highly improbable for (-1, -1) to be selected.
@test_util.run_all_in_graph_and_eager_modes
@test_util.run_all_in_graph_and_eager_modes
@test_util.run_all_in_graph_and_eager_modes
@test_util.run_all_in_graph_and_eager_modes
# A simple test that can be verified by hand.
@test_util.run_all_in_graph_and_eager_modes
# A test with large inputs.
if __name__ == "__main__":
np.random.seed(0)
test.main()
| 35.605634 | 80 | 0.615506 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for clustering_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import clustering_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class KmeansPlusPlusInitializationTest(test.TestCase):
  """Checks kmeans++ seeding picks the lone distant outlier."""

  # All but one input point are close to (101, 1). With uniform random sampling,
  # it is highly improbable for (-1, -1) to be selected.
  def setUp(self):
    self._points = np.array([[100., 0.],
                             [101., 2.],
                             [102., 0.],
                             [100., 1.],
                             [100., 2.],
                             [101., 0.],
                             [101., 0.],
                             [101., 1.],
                             [102., 0.],
                             [-1., -1.]]).astype(np.float32)

  def runTestWithSeed(self, seed):
    with self.cached_session():
      sampled_points = clustering_ops.kmeans_plus_plus_initialization(
          self._points, 3, seed, (seed % 5) - 1)
      # Of 3 sampled centers, one must be the outlier; atol=1.0 tolerates
      # picking any of the clustered points near (101, 1).
      self.assertAllClose(
          sorted(self.evaluate(sampled_points).tolist()),
          [[-1., -1.], [101., 1.], [101., 1.]],
          atol=1.0)

  def testBasic(self):
    for seed in range(100):
      self.runTestWithSeed(seed)
@test_util.run_all_in_graph_and_eager_modes
class KMC2InitializationTest(test.TestCase):
  """k-MC2 chain initialization must follow the dominant distance mass."""

  def runTestWithSeed(self, seed):
    with self.cached_session():
      distances = np.zeros(1000).astype(np.float32)
      distances[6] = 10e7   # i.e. 1e8 -- dominates, so index 6 is chosen
      distances[4] = 10e3   # i.e. 1e4
      sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed)
      self.assertAllEqual(sampled_point, 6)
      # Zero out the dominant entry; the next-largest must now win.
      distances[6] = 0.0
      sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed)
      self.assertAllEqual(sampled_point, 4)

  def testBasic(self):
    for seed in range(100):
      self.runTestWithSeed(seed)
@test_util.run_all_in_graph_and_eager_modes
class KMC2InitializationLargeTest(test.TestCase):
  """k-MC2 chain initialization on a larger, highly skewed distance vector."""

  def setUp(self):
    # Only two candidates carry non-zero distance mass, so only their
    # indices can ever be sampled.
    self._distances = np.zeros(1001)
    self._distances[500] = 100.0
    self._distances[1000] = 50.0

  def testBasic(self):
    with self.cached_session():
      counts = {}
      seed = 0
      # Sample 50 times with distinct seeds and tally the chosen indices.
      for i in range(50):
        sample = self.evaluate(
            clustering_ops.kmc2_chain_initialization(self._distances, seed + i))
        counts[sample] = counts.get(sample, 0) + 1
      # assertEqual/assertIn: `assertEquals` and `assertTrue(x in y)` were
      # deprecated unittest idioms; aliases are removed in Python 3.12.
      self.assertEqual(len(counts), 2)
      self.assertIn(500, counts)
      self.assertIn(1000, counts)
      # Both candidates should be drawn a non-trivial number of times.
      self.assertGreaterEqual(counts[500], 5)
      self.assertGreaterEqual(counts[1000], 5)
@test_util.run_all_in_graph_and_eager_modes
class KMC2InitializationCornercaseTest(test.TestCase):
  """All-zero distances: sampling must fall back to index 0."""

  def setUp(self):
    self._distances = np.zeros(10)

  def runTestWithSeed(self, seed):
    with self.cached_session():
      sampled_point = clustering_ops.kmc2_chain_initialization(
          self._distances, seed)
      self.assertAllEqual(sampled_point, 0)

  def testBasic(self):
    for seed in range(100):
      self.runTestWithSeed(seed)
@test_util.run_all_in_graph_and_eager_modes
# A simple test that can be verified by hand.
class NearestCentersTest(test.TestCase):
  """nearest_neighbors on 4 points / 5 centers with hand-checked answers."""

  def setUp(self):
    self._points = np.array([[100., 0.],
                             [101., 2.],
                             [99., 2.],
                             [1., 1.]]).astype(np.float32)

    self._centers = np.array([[100., 0.],
                              [99., 1.],
                              [50., 50.],
                              [0., 0.],
                              [1., 1.]]).astype(np.float32)

  def testNearest1(self):
    # k=1: returned distances are squared Euclidean distances.
    with self.cached_session():
      [indices, distances] = clustering_ops.nearest_neighbors(self._points,
                                                              self._centers, 1)
      self.assertAllClose(indices, [[0], [0], [1], [4]])
      self.assertAllClose(distances, [[0.], [5.], [1.], [0.]])

  def testNearest2(self):
    # k=2: indices/distances ordered nearest-first per point.
    with self.cached_session():
      [indices, distances] = clustering_ops.nearest_neighbors(self._points,
                                                              self._centers, 2)
      self.assertAllClose(indices, [[0, 1], [0, 1], [1, 0], [4, 3]])
      self.assertAllClose(distances, [[0., 2.], [5., 5.], [1., 5.], [0., 2.]])
@test_util.run_all_in_graph_and_eager_modes
# A test with large inputs.
class NearestCentersLargeTest(test.TestCase):
  """Compares nearest_neighbors against a brute-force NumPy reference."""

  def setUp(self):
    num_points = 1000
    num_centers = 2000
    num_dim = 100
    max_k = 5
    # Construct a small number of random points and later tile them.
    points_per_tile = 10
    assert num_points % points_per_tile == 0
    points = np.random.standard_normal(
        [points_per_tile, num_dim]).astype(np.float32)
    # Construct random centers.
    self._centers = np.random.standard_normal(
        [num_centers, num_dim]).astype(np.float32)

    # Exhaustively compute expected nearest neighbors.
    def squared_distance(x, y):
      return np.linalg.norm(x - y, ord=2)**2

    # For each point, the max_k (squared-distance, center-index) pairs,
    # sorted nearest-first.
    nearest_neighbors = [
        sorted([(squared_distance(point, self._centers[j]), j)
                for j in range(num_centers)])[:max_k] for point in points
    ]
    expected_nearest_neighbor_indices = np.array(
        [[i for _, i in nn] for nn in nearest_neighbors])
    expected_nearest_neighbor_squared_distances = np.array(
        [[dist for dist, _ in nn] for nn in nearest_neighbors])
    # Tile points and expected results to reach requested size (num_points)
    (self._points, self._expected_nearest_neighbor_indices,
     self._expected_nearest_neighbor_squared_distances) = (
         np.tile(x, (int(num_points / points_per_tile), 1))
         for x in (points, expected_nearest_neighbor_indices,
                   expected_nearest_neighbor_squared_distances))

  def testNearest1(self):
    with self.cached_session():
      [indices, distances] = clustering_ops.nearest_neighbors(self._points,
                                                              self._centers, 1)
      self.assertAllClose(
          indices,
          self._expected_nearest_neighbor_indices[:, [0]])
      self.assertAllClose(
          distances,
          self._expected_nearest_neighbor_squared_distances[:, [0]])

  def testNearest5(self):
    with self.cached_session():
      [indices, distances] = clustering_ops.nearest_neighbors(self._points,
                                                              self._centers, 5)
      self.assertAllClose(
          indices,
          self._expected_nearest_neighbor_indices[:, 0:5])
      self.assertAllClose(
          distances,
          self._expected_nearest_neighbor_squared_distances[:, 0:5])
# Seed NumPy so the randomized fixtures above are reproducible, then run.
if __name__ == "__main__":
  np.random.seed(0)
  test.main()
| 5,344 | 160 | 531 |
915fde594c16947b4f2e02032719e1f7f32df1e2 | 2,636 | py | Python | src/models/CORAL_BART/args.py | behavioral-data/multiverse | 82b7265de0aa3e9d229ce9f3f86b8b48435ca365 | [
"MIT"
] | null | null | null | src/models/CORAL_BART/args.py | behavioral-data/multiverse | 82b7265de0aa3e9d229ce9f3f86b8b48435ca365 | [
"MIT"
] | null | null | null | src/models/CORAL_BART/args.py | behavioral-data/multiverse | 82b7265de0aa3e9d229ce9f3f86b8b48435ca365 | [
"MIT"
] | 1 | 2021-08-19T15:21:50.000Z | 2021-08-19T15:21:50.000Z | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
max_length: Optional[int] = field(
default=512, metadata={"help": "The maximum input sequence length"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
data_dir: Optional[str] = field(
default=None, metadata={"help": "The input directory for training data."}
)
big_file_path: Optional[str] = field(
default=None, metadata={"help": "Big file with all cells in it"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
plm_probability: float = field(
default=1 / 6,
metadata={
"help": "Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling."
},
)
max_span_length: int = field(
default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
eval_frac: float = field(
default= 0.1,
metadata={
"help": "Fraction of dataset reserved for evaluation"
},
)
| 35.621622 | 129 | 0.644917 | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    # Checkpoint path or model identifier; None trains from scratch.
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
        },
    )
    # Per the help text, set only when different from model_name_or_path.
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    max_length: Optional[int] = field(
        default=512, metadata={"help": "The maximum input sequence length"}
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: Optional[str] = field(
        default=None, metadata={"help": "The input directory for training data."}
    )
    big_file_path: Optional[str] = field(
        default=None, metadata={"help": "Big file with all cells in it"}
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    # Masked-language-modeling options (mlm / mlm_probability).
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    # Permutation-language-modeling options (plm_probability / max_span_length).
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": "Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling."
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    # -1 means "use the model's max input length" per the help text.
    block_size: int = field(
        default=-1,
        metadata={
            "help": "Optional input sequence length after tokenization."
            "The training dataset will be truncated in block of this size for training."
            "Default to the model max input length for single sentence inputs (take into account special tokens)."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    eval_frac: float = field(
        default= 0.1,
        metadata={
            "help": "Fraction of dataset reserved for evaluation"
        },
    )
| 0 | 0 | 0 |
c5881e9f7e8558acbb4c5c037b8d5e7f5fff6508 | 5,667 | py | Python | stko/calculators/planarity_calculators/planarity_calculators.py | stevenbennett96/stko | ee340af4fc549d5a2c3e9cba8360661335efe0fd | [
"MIT"
] | null | null | null | stko/calculators/planarity_calculators/planarity_calculators.py | stevenbennett96/stko | ee340af4fc549d5a2c3e9cba8360661335efe0fd | [
"MIT"
] | null | null | null | stko/calculators/planarity_calculators/planarity_calculators.py | stevenbennett96/stko | ee340af4fc549d5a2c3e9cba8360661335efe0fd | [
"MIT"
] | 2 | 2020-05-08T17:51:25.000Z | 2020-05-11T09:03:24.000Z | """
Planarity Calculators
=====================
#. :class:`.PlanarityCalculator`
Methods to calculate planarity measures of a molecule.
"""
import logging
import numpy as np
from ..calculators import Calculator
from ..results import PlanarityResults
logger = logging.getLogger(__name__)
class PlanarityCalculator(Calculator):
"""
Calculates measures of planarity of a molecule.
Measures based on plane deviation from Angew. paper [1]_ and a
ChemRxiv paper [2]_.
Plane deviation: sum of the shortest distance to the plane of best
fit of all deviation atoms (sum abs(d_i)).
Plane deviation span: d_max - d_min (SDP in [2]_)
Planarity parameter: defined as
sqrt((1/num_atoms) * (sum d_i ** 2)) (MPP in [2]_)
Examples
--------
.. code-block:: python
import stk
import stko
# Create a molecule whose torsions we want to know.
mol1 = stk.BuildingBlock('c1ccccc1')
# Create the calculator.
pc = stko.PlanarityCalculator()
# Extract the measures.
pc_results = pc.get_results(mol1)
plane_deviation = pc_results.get_plane_deviation()
plane_deviation_span = pc_results.get_plane_deviation_span()
planarity_parameter = pc_results.get_planarity_parameter()
References
----------
.. [1] https://onlinelibrary.wiley.com/doi/10.1002/anie.202106721
.. [2] https://chemrxiv.org/engage/chemrxiv/article-details/
60c73cbf9abda2e0c5f8b5c6
"""
def _shortest_distance_to_plane(self, plane, point):
"""
Calculate the perpendicular distance from a point and a plane.
"""
top = (
plane[0]*point[0] + plane[1]*point[1] +
plane[2]*point[2] - plane[3]
)
bottom = np.sqrt(plane[0]**2 + plane[1]**2 + plane[2]**2)
distance = top / bottom
return distance
def calculate(
self,
mol,
plane_atom_ids=None,
deviation_atom_ids=None,
):
"""
Perform calculation on `mol`.
Parameters
----------
mol : :class:`.Molecule`
The :class:`.Molecule` whose planarity is to be calculated.
plane_atom_ids : iterable of :class:`int`, optional
The atom ids to use to define the plane of best fit.
deviation_atom_ids : iterable of :class:`int`, optional
The atom ids to use to calculate planarity.
Yields
------
:class:`function`
The function to perform the calculation.
"""
if plane_atom_ids is None:
plane_atom_ids = list(range(len(list(mol.get_atoms()))))
else:
plane_atom_ids = list(plane_atom_ids)
if deviation_atom_ids is None:
deviation_atom_ids = list(
range(len(list(mol.get_atoms())))
)
else:
deviation_atom_ids = list(deviation_atom_ids)
atom_plane = self._get_plane_of_best_fit(mol, plane_atom_ids)
deviations = self._calculate_deviations(
mol=mol,
atom_plane=atom_plane,
deviation_atom_ids=deviation_atom_ids,
)
yield {
'plane_deviation': (
self._plane_deviation(deviations)
),
'plane_deviation_span': (
self._plane_deviation_span(deviations)
),
'planarity_parameter': (
self._planarity_parameter(deviations)
),
}
def get_results(
self,
mol,
plane_atom_ids=None,
deviation_atom_ids=None,
):
"""
Calculate the planarity of `mol`.
Parameters
----------
mol : :class:`.Molecule`
The :class:`.Molecule` whose planarity is to be calculated.
plane_atom_ids : iterable of :class:`int`, optional
The atom ids to use to define the plane of best fit.
deviation_atom_ids : iterable of :class:`int`, optional
The atom ids to use to calculate planarity.
Returns
-------
:class:`.PlanarityResults`
The planarity measures of the molecule.
"""
return PlanarityResults(self.calculate(
mol=mol,
plane_atom_ids=plane_atom_ids,
deviation_atom_ids=deviation_atom_ids,
))
| 27.643902 | 71 | 0.589201 | """
Planarity Calculators
=====================
#. :class:`.PlanarityCalculator`
Methods to calculate planarity measures of a molecule.
"""
import logging
import numpy as np
from ..calculators import Calculator
from ..results import PlanarityResults
logger = logging.getLogger(__name__)
class PlanarityCalculator(Calculator):
"""
Calculates measures of planarity of a molecule.
Measures based on plane deviation from Angew. paper [1]_ and a
ChemRxiv paper [2]_.
Plane deviation: sum of the shortest distance to the plane of best
fit of all deviation atoms (sum abs(d_i)).
Plane deviation span: d_max - d_min (SDP in [2]_)
Planarity parameter: defined as
sqrt((1/num_atoms) * (sum d_i ** 2)) (MPP in [2]_)
Examples
--------
.. code-block:: python
import stk
import stko
# Create a molecule whose torsions we want to know.
mol1 = stk.BuildingBlock('c1ccccc1')
# Create the calculator.
pc = stko.PlanarityCalculator()
# Extract the measures.
pc_results = pc.get_results(mol1)
plane_deviation = pc_results.get_plane_deviation()
plane_deviation_span = pc_results.get_plane_deviation_span()
planarity_parameter = pc_results.get_planarity_parameter()
References
----------
.. [1] https://onlinelibrary.wiley.com/doi/10.1002/anie.202106721
.. [2] https://chemrxiv.org/engage/chemrxiv/article-details/
60c73cbf9abda2e0c5f8b5c6
"""
def _get_plane_of_best_fit(self, mol, plane_atom_ids):
centroid = mol.get_centroid(atom_ids=plane_atom_ids)
normal = mol.get_plane_normal(atom_ids=plane_atom_ids)
# Plane of equation ax + by + cz = d.
atom_plane = np.append(normal, np.sum(normal*centroid))
return atom_plane
def _shortest_distance_to_plane(self, plane, point):
"""
Calculate the perpendicular distance from a point and a plane.
"""
top = (
plane[0]*point[0] + plane[1]*point[1] +
plane[2]*point[2] - plane[3]
)
bottom = np.sqrt(plane[0]**2 + plane[1]**2 + plane[2]**2)
distance = top / bottom
return distance
def _calculate_deviations(
self,
mol,
atom_plane,
deviation_atom_ids,
):
return [
self._shortest_distance_to_plane(
plane=atom_plane,
point=tuple(
mol.get_atomic_positions(atom_ids=i.get_id()),
)[0],
)
for i in mol.get_atoms()
if i.get_id() in deviation_atom_ids
]
def _plane_deviation(self, deviations):
deviations = [abs(i) for i in deviations]
return sum(deviations)
def _plane_deviation_span(self, deviations):
return max(deviations) - min(deviations)
def _planarity_parameter(self, deviations):
deviations = [abs(i) for i in deviations]
num_atoms = len(deviations)
inv_num_atoms = 1/num_atoms
sum_squared = sum(i**2 for i in deviations)
return np.sqrt(inv_num_atoms * sum_squared)
def calculate(
self,
mol,
plane_atom_ids=None,
deviation_atom_ids=None,
):
"""
Perform calculation on `mol`.
Parameters
----------
mol : :class:`.Molecule`
The :class:`.Molecule` whose planarity is to be calculated.
plane_atom_ids : iterable of :class:`int`, optional
The atom ids to use to define the plane of best fit.
deviation_atom_ids : iterable of :class:`int`, optional
The atom ids to use to calculate planarity.
Yields
------
:class:`function`
The function to perform the calculation.
"""
if plane_atom_ids is None:
plane_atom_ids = list(range(len(list(mol.get_atoms()))))
else:
plane_atom_ids = list(plane_atom_ids)
if deviation_atom_ids is None:
deviation_atom_ids = list(
range(len(list(mol.get_atoms())))
)
else:
deviation_atom_ids = list(deviation_atom_ids)
atom_plane = self._get_plane_of_best_fit(mol, plane_atom_ids)
deviations = self._calculate_deviations(
mol=mol,
atom_plane=atom_plane,
deviation_atom_ids=deviation_atom_ids,
)
yield {
'plane_deviation': (
self._plane_deviation(deviations)
),
'plane_deviation_span': (
self._plane_deviation_span(deviations)
),
'planarity_parameter': (
self._planarity_parameter(deviations)
),
}
def get_results(
    self,
    mol,
    plane_atom_ids=None,
    deviation_atom_ids=None,
):
    """
    Calculate the planarity of `mol`.

    Parameters
    ----------
    mol : :class:`.Molecule`
        The molecule whose planarity is to be calculated.
    plane_atom_ids : iterable of :class:`int`, optional
        Atom ids defining the plane of best fit; ``None`` means all atoms.
    deviation_atom_ids : iterable of :class:`int`, optional
        Atom ids measured against the plane; ``None`` means all atoms.

    Returns
    -------
    :class:`.PlanarityResults`
        The planarity measures of the molecule.
    """
    measures = self.calculate(
        mol=mol,
        plane_atom_ids=plane_atom_ids,
        deviation_atom_ids=deviation_atom_ids,
    )
    return PlanarityResults(measures)
| 1,123 | 0 | 135 |
2787b6e45103c2fcd2cec3395806de8aa78ec903 | 1,557 | py | Python | tests/test_jsm_models.py | aroth85/joint-snv-mix | a83cda57912c6f859fd8c42f6646575c0f8a753e | [
"CNRI-Python"
] | null | null | null | tests/test_jsm_models.py | aroth85/joint-snv-mix | a83cda57912c6f859fd8c42f6646575c0f8a753e | [
"CNRI-Python"
] | null | null | null | tests/test_jsm_models.py | aroth85/joint-snv-mix | a83cda57912c6f859fd8c42f6646575c0f8a753e | [
"CNRI-Python"
] | null | null | null | '''
Created on 2012-01-19
@author: innovation
'''
import unittest
import numpy as np
from tests.simualtors.joint_binomial import JointSnvMixSimulator
from joint_snv_mix.counter import JointBinaryCountData, JointBinaryQualityData
from joint_snv_mix.models.joint_snv_mix import JointSnvMixModel, JointSnvMixPriors, JointSnvMixParameters
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| 27.315789 | 105 | 0.540784 | '''
Created on 2012-01-19
@author: innovation
'''
import unittest
import numpy as np
from tests.simualtors.joint_binomial import JointSnvMixSimulator
from joint_snv_mix.counter import JointBinaryCountData, JointBinaryQualityData
from joint_snv_mix.models.joint_snv_mix import JointSnvMixModel, JointSnvMixPriors, JointSnvMixParameters
class Test(unittest.TestCase):
    """Smoke test: construct a JointSnvMix model and fit it on synthetic data."""

    def test_init(self):
        # Beta priors on the allele frequency for the three genotype states.
        mu = (
            {'alpha' : 100, 'beta' : 2},
            {'alpha' : 50, 'beta' : 50},
            {'alpha' : 2, 'beta' : 100}
        )
        priors = JointSnvMixPriors(mu_N=mu, mu_T=mu)
        params = JointSnvMixParameters()
        model = JointSnvMixModel(priors, params, model='jsm2')
        # Fixed: `print model.params` was Python-2-only statement syntax and
        # made the module a SyntaxError under Python 3.  The call form below
        # behaves the same on both interpreter lines.
        print(model.params)
        # Synthetic quality data: 100 positions, all-zero q and all-one r.
        q = [0] * 100
        r = [1] * 100
        data = [JointBinaryQualityData(q, r, q, r) for _ in range(100000)]
        model.fit(data, verbose=True)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| 1,063 | 9 | 49 |
8be6c4017638beded12311dae67279b0905c5cbb | 181 | py | Python | config/dev/settings.py | newsdev/nyt-screenshot-service | 9a53ae6b0f4c6830bf94f884a95980ecbe4710f6 | [
"Apache-2.0"
] | 2 | 2017-08-21T21:50:17.000Z | 2017-08-29T02:48:56.000Z | config/dev/settings.py | newsdev/nyt-screenshot-service | 9a53ae6b0f4c6830bf94f884a95980ecbe4710f6 | [
"Apache-2.0"
] | null | null | null | config/dev/settings.py | newsdev/nyt-screenshot-service | 9a53ae6b0f4c6830bf94f884a95980ecbe4710f6 | [
"Apache-2.0"
] | null | null | null | import os
# Development settings (config/dev): verbose errors are acceptable here.
DEBUG=True
# Template directory, resolved relative to this settings file.
TEMPLATE_PATH = '%s/templates/' % os.path.dirname(os.path.realpath(__file__))
# Staging S3 bucket that receives the rendered output.
STATIC_BUCKET = 'nytint-stg-newsapps'
REMOTE_STORAGE_PATH = 'apps/screenshotter' | 25.857143 | 77 | 0.779006 | import os
# Development settings (config/dev): verbose errors are acceptable here.
DEBUG=True
# Template directory, resolved relative to this settings file.
TEMPLATE_PATH = '%s/templates/' % os.path.dirname(os.path.realpath(__file__))
# Staging S3 bucket that receives the rendered output.
STATIC_BUCKET = 'nytint-stg-newsapps'
REMOTE_STORAGE_PATH = 'apps/screenshotter' | 0 | 0 | 0 |
75ca243bffd5efef72e3ed85dcc899fe108be91f | 10,157 | py | Python | src/paukenator/nlp/annotations.py | nkrot/paukenator | d7d188dd4bc283208c544f2b29530046768df7ee | [
"MIT"
] | null | null | null | src/paukenator/nlp/annotations.py | nkrot/paukenator | d7d188dd4bc283208c544f2b29530046768df7ee | [
"MIT"
] | null | null | null | src/paukenator/nlp/annotations.py | nkrot/paukenator | d7d188dd4bc283208c544f2b29530046768df7ee | [
"MIT"
] | null | null | null | from typing import List, Tuple, Optional
from .symbols import *
from .errors import NLPError
class AnnotationError(NLPError):
    '''Raised for any error related to Annotation and its subclasses.'''
class TextData(object):
'''Container for text data.
All Annotations reference it and use it as a source of text data.
'''
# TODO: Why do I need this class? why cant it be Text with additional fields
# hide this class inside Text and delegate some methods?
@classmethod
class Annotation(object):
'''
Annotation is a base class for all other types more specific types of
annotations. Ideally, one does not need to create an object of type
Annotation directly.
An annotation does not contain text data directly but by proxy of
a TextData object.
'''
def __len__(self):
'''Length in bytes (not in characters)'''
if self.end:
return self.end - self.start
return 0
def is_blank(self):
'''Test if the annotation is blank or not. A blank annotation has no
text or consists of whitespace characters only.'''
return len(self) == 0 or self.text.isspace()
def __lt__(self, other):
'''By offsets, from left to right'''
# TODO: allow comparing different types of Annotations?
# does it make sense?
assert type(self) is type(other), \
("Can compare annotations of the same type only but got:"
" {} and {}".format(type(self), type(other)))
res = self.end <= other.start
# print("Comparing: {} vs {} = {}".format(self.end, other.start, res))
return res
def __contains__(self, other):
'''Test of other annotation is within boundaries of the current annotation'''
return self.start <= other.start and self.end >= other.end
@property
# TODO: what should happen when source gets set?
@source.setter
@property
@property
@property
def reoffset(self, val: int):
'''Shift annotation position by given number of characters.'''
self.start += val
self.end += val
def annotations(self, target=None):
'''List *all* annotations or annotations of given type <target> that
occur within the boundaries of the current annotation.
Target can be a tuple of type names: (Token, WSWord)'''
if target:
anns = [ann for ann in self.source.annotations
if isinstance(ann, target)]
else:
anns = self.source.annotations
anns = [ann for ann in anns if ann in self]
return anns
def paragraphs(self):
'''Return a list of annotations of type Paragraph'''
ann_type = Paragraph
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def sentences(self):
'''Return a list of annotations of type Sentence'''
ann_type = Sentence
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def lines(self):
'''Return a list of annotations of type Line'''
ann_type = Line
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def wswords(self):
'''Return a list of annotations of type WSWord'''
ann_type = WSWord
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def tokens(self):
'''Return a list of annotations of type Token'''
ann_type = Token
self._must_be_annotated(ann_type)
return sorted(self.annotations(ann_type))
def next(self, scope: 'Annotation' = None):
'''Get the next adjacent annotation of the same type.
If <scope> is given (and is another annotation), also ensure that
the annotation found is contained in the annotation <scope>.
This is intended to be used for finding another annotation within
the same super annotation, for example, finding next word that
belongs to the same sentence.
Example:
>> word.next(word.line())
'''
scope = scope or self.root
anns = sorted(scope.annotations(target=self.__class__))
idx = anns.index(self)
ann = None
if idx is not None and 1+idx < len(anns):
ann = anns[1+idx]
return ann
def line(self) -> Optional['Line']:
'''Find annotation of type Line that contains current annotation'''
anns = [line for line in self.root.lines() if self in line]
assert len(anns) < 2, \
("ERROR: an annotation can be contained in one Line annotation"
" only but found {} Lines".format(len(anns)))
return anns[0] if anns else None
def _must_be_annotated(self, anntype):
'''Check if current object has been annotated for specific phenomena
(e.g. Paragraphs, Sentences, Tokens, etc) and thrown an error if not.
'''
anntypes = self.source.annotation_types
if anntype not in anntypes:
raise AnnotationError(
f"Annotations of type {anntype} not available in the current"
" object. Was appropriate annotator applied to the text?"
)
def tokenized(self):
'''Return the current annotation as a single tokenized line.'''
return " ".join(map(str, self.tokens()))
class Text(Annotation):
'''Annotation that represents the whole text'''
@classmethod
def load_from_file(cls, filepath: str, **kwargs):
'''Load plain text from given file <filepath> and annotate it for
- lines (annotation.Line)
- words (annotation.WSWord, separated by white spaces)
Return:
annotation.Text
'''
textdata = TextData.load_from_file(filepath)
# print("--- Text annotation ---")
text = cls()
text.source = textdata
text.language = kwargs.get('lang', 'deu')
# print(text.offsets)
# print(repr(text))
textdata.root = text
# print("--- Annotating Lines ---")
LineAnnotator().annotate(text)
# for ann in text.lines():
# print(repr(ann))
# print("--- Annotating WSWords ---")
WSWordAnnotator().annotate(text)
# for ann in text.wswords():
# print(repr(ann))
return text
@property
@source.setter
def source(self, obj: TextData):
'''Set source and create annotation of class Text'''
self._source = obj
self.start = 0
self.end = len(self.source.content)
def tokenized(self):
'''Serialize the current object to a string. The text is fully
tokenized, that is:
- one sentence per line;
- paragraphs are separated by an empty line;
- words are separated from punctuation marks
'''
lines = []
for par in self.paragraphs():
for sent in par.sentences():
words = sent.tokens()
# words = sent.wswords()
line = " ".join(map(str, words))
lines.append(line)
lines.append('') # paragraph separator
if lines:
lines.pop()
return "\n".join(lines)
class Line(Annotation):
    '''Annotation that holds one line of a Text.'''
    pass


class Sentence(Annotation):
    '''Annotation that holds a sentence.'''
    pass


class WSWord(Annotation):
    '''A substring of text between two space characters.

    It can be a word, a punctuation mark or a combination of them.
    Not to be confused with a Token.
    WSWord stands for WhiteSpace Word.
    '''
    pass


class Token(Annotation):
    '''A Token is either a lexeme (stripped off punctuation marks) or
    a punctuation mark.'''
    pass
# These are necessary in Text.load_from_file() and are imported here at
# the end of the file to fix circular dependencies.
from .line_annotator import LineAnnotator
from .wsword_annotator import WSWordAnnotator
| 31.940252 | 85 | 0.61081 | from typing import List, Tuple, Optional
from .symbols import *
from .errors import NLPError
class AnnotationError(NLPError):
    '''Raised for any error related to Annotation and its subclasses.'''
class TextData(object):
    '''Container for the raw text that all Annotations reference.

    Annotations never hold text themselves; they keep (start, end)
    offsets into ``content`` stored here.
    '''
    # TODO: Why do I need this class? why cant it be Text with additional
    # fields -- hide this class inside Text and delegate some methods?

    @classmethod
    def load_from_file(cls, filepath):
        '''Create a TextData holding the full content of `filepath`.

        The file is decoded as UTF-8 explicitly, so the result no longer
        depends on the platform default encoding.
        '''
        obj = cls()
        with open(filepath, encoding='utf-8') as fd:
            obj.filepath = filepath
            obj.content = fd.read()  # read() == read(-1): whole file
        return obj

    def __init__(self):
        # NOTE: the original annotated these attributes with classes defined
        # later in the module (Annotation, Text); attribute annotations on
        # `self.x` are evaluated on every call, so they are kept as comments
        # here instead.
        self.filepath = None          # str: path the text was loaded from
        self.content = None           # str: the text itself
        self.annotations = []         # list of Annotation (or move into Text?)
        self.annotation_types = []    # annotation classes registered so far
        self.root = None              # Text: top-level (whole-text) annotation
class Annotation(object):
    '''
    Base class for all concrete annotation types.

    Ideally, one does not need to create an object of type Annotation
    directly.  An annotation does not contain text data itself: it keeps
    (start, end) offsets and reads the text by proxy of a shared
    TextData object.
    '''
def __init__(self, span=(None, None)):
    # `span` is the (start, end) offset pair; both may be None for an
    # annotation whose position is not known yet.
    assert len(span) == 2, \
        "A span (offsets) must contain 2 elements but got: {}".format(span)
    self.id = None                 # optional annotation identifier
    self.start: int = span[0]      # start offset into the source text
    self.end: int = span[1]        # end offset (exclusive; see `text`)
    #self.level = None
    self.language = None           # e.g. 'deu'; assigned by Text.load_from_file
    self._source: TextData = None  # backing TextData (see `source` property)
def __repr__(self):
return "<{}: id={}, offsets={}, text='{}'>".format(
self.__class__.__name__, self.id, self.offsets, self.text)
def __str__(self):
    # The string form of an annotation is the text it covers.
    return self.text
def __len__(self):
'''Length in bytes (not in characters)'''
if self.end:
return self.end - self.start
return 0
def is_blank(self):
    '''True when the annotation covers no text or only whitespace.'''
    if len(self) == 0:
        return True
    return self.text.isspace()
def __lt__(self, other):
'''By offsets, from left to right'''
# TODO: allow comparing different types of Annotations?
# does it make sense?
assert type(self) is type(other), \
("Can compare annotations of the same type only but got:"
" {} and {}".format(type(self), type(other)))
res = self.end <= other.start
# print("Comparing: {} vs {} = {}".format(self.end, other.start, res))
return res
def __contains__(self, other):
'''Test of other annotation is within boundaries of the current annotation'''
return self.start <= other.start and self.end >= other.end
@property
def source(self):
    # The TextData this annotation points into; None until assigned.
    return self._source

# TODO: what should happen when source gets set?
@source.setter
def source(self, obj: TextData):
    assert isinstance(obj, TextData), "Unsupported type of object"
    self._source = obj

@property
def offsets(self) -> T_SPAN:
    # The (start, end) pair of this annotation.
    return (self.start, self.end)

@property
def text(self):
    # TODO: naming conflict: this name should be for the annotation Text?
    # Substring of the source content covered by this annotation, or
    # None when no source is attached yet.
    if self.source:
        return self.source.content[self.start:self.end]
    return None

@property
def root(self):
    # The top-level Text annotation of the source, or None without a source.
    if self.source:
        return self.source.root
    return None
def add_annotation(self, ann: 'Annotation'):
    '''Attach `ann` to this annotation's source and register its type.

    The new annotation's offsets are shifted so they become absolute
    (relative to the whole source text).
    '''
    ann.reoffset(self.start)
    ann.source = self.source
    ann.source.annotations.append(ann)
    ann_type = type(ann)
    if ann_type not in ann.source.annotation_types:
        ann.source.annotation_types.append(ann_type)
def reoffset(self, val: int):
    '''Shift this annotation's position by `val` characters.'''
    self.start = self.start + val
    self.end = self.end + val
def annotations(self, target=None):
    '''List the annotations falling inside this annotation's boundaries.

    With `target` (a type or tuple of types, e.g. ``(Token, WSWord)``)
    only annotations of those types are considered; otherwise every
    annotation registered on the source is.
    '''
    candidates = self.source.annotations
    if target:
        candidates = [a for a in candidates if isinstance(a, target)]
    return [a for a in candidates if a in self]
def paragraphs(self):
    '''Sorted Paragraph annotations inside this annotation.'''
    self._must_be_annotated(Paragraph)
    return sorted(self.annotations(Paragraph))

def sentences(self):
    '''Sorted Sentence annotations inside this annotation.'''
    self._must_be_annotated(Sentence)
    return sorted(self.annotations(Sentence))

def lines(self):
    '''Sorted Line annotations inside this annotation.'''
    self._must_be_annotated(Line)
    return sorted(self.annotations(Line))

def wswords(self):
    '''Sorted WSWord annotations inside this annotation.'''
    self._must_be_annotated(WSWord)
    return sorted(self.annotations(WSWord))

def tokens(self):
    '''Sorted Token annotations inside this annotation.'''
    self._must_be_annotated(Token)
    return sorted(self.annotations(Token))
def next(self, scope: 'Annotation' = None):
    '''Get the next adjacent annotation of the same type.

    If `scope` is given, the search is restricted to the annotations
    contained in `scope`; e.g. ``word.next(word.line())`` finds the next
    word on the same line.  Without a scope the whole text (``self.root``)
    is searched.  Returns None when this is the last annotation.
    '''
    scope = scope or self.root
    anns = sorted(scope.annotations(target=self.__class__))
    # list.index raises ValueError when the element is missing; it never
    # returns None, so the old `idx is not None` guard was dead code.
    idx = anns.index(self)
    if idx + 1 < len(anns):
        return anns[idx + 1]
    return None
def line(self) -> Optional['Line']:
    '''The Line annotation containing this annotation, or None.'''
    containing = [ln for ln in self.root.lines() if self in ln]
    assert len(containing) < 2, \
        ("ERROR: an annotation can be contained in one Line annotation"
         " only but found {} Lines".format(len(containing)))
    return containing[0] if containing else None
def _must_be_annotated(self, anntype):
'''Check if current object has been annotated for specific phenomena
(e.g. Paragraphs, Sentences, Tokens, etc) and thrown an error if not.
'''
anntypes = self.source.annotation_types
if anntype not in anntypes:
raise AnnotationError(
f"Annotations of type {anntype} not available in the current"
" object. Was appropriate annotator applied to the text?"
)
def tokenized(self):
    '''Return the current annotation as a single space-joined token line.'''
    return " ".join(str(token) for token in self.tokens())
class Text(Annotation):
    '''Annotation that represents the whole text (the root annotation).'''
@classmethod
def load_from_file(cls, filepath: str, **kwargs):
    '''
    Load plain text from given file <filepath> and annotate it for
    - lines (annotation.Line)
    - words (annotation.WSWord, separated by white spaces)
    Return:
    annotation.Text
    '''
    textdata = TextData.load_from_file(filepath)
    text = cls()
    # Assigning `source` also stretches this annotation over the whole
    # content (see the `source` setter below).
    text.source = textdata
    # Language defaults to German ('deu') unless lang=... is passed.
    text.language = kwargs.get('lang', 'deu')
    # TextData and its root Text annotation reference each other.
    textdata.root = text
    # Run the baseline annotators; both register their annotations on
    # `textdata` as a side effect.
    LineAnnotator().annotate(text)
    WSWordAnnotator().annotate(text)
    return text
def __init__(self, *args, **kwargs):
    # Nothing Text-specific to initialise; defer to Annotation.
    super().__init__(*args, **kwargs)
@property
def source(self):
    # Backing TextData; None until assigned.
    return self._source

@source.setter
def source(self, obj: TextData):
    '''Set source and stretch this annotation over the whole content.'''
    self._source = obj
    self.start = 0
    self.end = len(self.source.content)
def tokenized(self):
    '''Serialize the whole text as fully tokenized plain text.

    One sentence per line, paragraphs separated by an empty line, and
    words separated from punctuation marks.
    '''
    chunks = []
    for paragraph in self.paragraphs():
        chunks.extend(
            " ".join(map(str, sentence.tokens()))
            for sentence in paragraph.sentences()
        )
        chunks.append('')  # paragraph separator
    if chunks:
        chunks.pop()       # drop the trailing separator
    return "\n".join(chunks)
class Line(Annotation):
    '''Annotation that holds one line of a Text.'''
    pass


class Paragraph(Annotation):
    '''Annotation that holds one paragraph of a Text.'''
    pass


class Sentence(Annotation):
    '''Annotation that holds a sentence.'''
    pass


class WSWord(Annotation):
    '''A substring of text between two space characters.

    It can be a word, a punctuation mark or a combination of them.
    Not to be confused with a Token.
    WSWord stands for WhiteSpace Word.
    '''
    pass


class Token(Annotation):
    '''A Token is either a lexeme (stripped off punctuation marks) or
    a punctuation mark.'''
    pass
# These are necessary in Text.load_from_file() and are imported here at
# the end of the file to fix circular dependencies.
from .line_annotator import LineAnnotator
from .wsword_annotator import WSWordAnnotator
| 1,725 | 16 | 367 |
1c814a15b7a4ce4f2ff67e9f1daec7b4d0ffdeac | 10,799 | py | Python | Projects/VirtualCanvas/VirtualCanvas.py | Balaji-Ganesh/Furnishing-OpenCV-Basics | 54cd8fa09cc6f1298861b12ffb190432f412bd1f | [
"MIT"
] | null | null | null | Projects/VirtualCanvas/VirtualCanvas.py | Balaji-Ganesh/Furnishing-OpenCV-Basics | 54cd8fa09cc6f1298861b12ffb190432f412bd1f | [
"MIT"
] | null | null | null | Projects/VirtualCanvas/VirtualCanvas.py | Balaji-Ganesh/Furnishing-OpenCV-Basics | 54cd8fa09cc6f1298861b12ffb190432f412bd1f | [
"MIT"
] | null | null | null | # Import the required libraries
import cv2
import numpy as np
import Projects.VirtualCanvas.utils as utils
# Close any OpenCV windows left over from a previous run.
cv2.destroyAllWindows()

if __name__ == "__main__":
    # mode = int(input("Debug mode -- 1 or Normal Run ---0: "))
    drawOnCanvas(debug_mode=True)
"""
LOG:
Final HSV values as: HUE_min, SAT_min, VAL_min, HUE_max, SAT_max, VAL_max 103 45 0 120 255 255
Final HSV values as: HUE_min, SAT_min, VAL_min, HUE_max, SAT_max, VAL_max 0 80 148 255 255 255
Final HSV values as: HUE_min, SAT_min, VAL_min, HUE_max, SAT_max, VAL_max 0 89 178 255 238 255
for Dark blue, Orange, Yellow Sparx pens respectively..
"""
"""
Improvements:
Add the facility of saving the HSV values -- either via numpy or pkl
"""
"""
Backup code of def detect_and_draw_as_marker(self):
"""
"""
This function is made for testing purposes.
The part of this function's code is used in some other functions with some optimizations
:return:
"""
"""
while True:
# Required variables
count = 0
# Get camera feed..
image = self.get_camera_feed()
# convert to HSV.. so that we can filter out the image from our captured HSV values for our markers previously..
HSVimg = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2HSV)
# loop through all marker's HSV values
for marker_HSV in self.markers_HSV:
lower_boundary = np.array(marker_HSV[0])
upper_boundary = np.array(marker_HSV[1])
# Get the mask image that satisfies the lower and upper HSV values..
maskImg = cv2.inRange(src=HSVimg, lowerb=lower_boundary, upperb=upper_boundary)
'''Draw the contours for the mask image detected, marker point for the marker'''
# Get the bounding box corners (In the function call to self.draw_contours(), contours are drawn to original camera feed, if self.debug_mode is set to 1)
x, y, width, height = self.draw_contours(image, maskImg)
if self.debug_mode:
cv2.rectangle(img=image, pt1=(x, y), pt2=(x+width, y+height), color=(0, 0, 0), thickness=3)
# Select the marker point..
marker_point_center = (x+width//2, y)
# Draw the marker point..
# cv2.circle(img=image, center=marker_point_center, radius=5, color=(2, 255, 10), thickness=cv2.FILLED)
cv2.circle(img=image, center=marker_point_center, radius=5, color=self.marker_colors[count], thickness=cv2.FILLED)
count += 1
cv2.imshow("Virtual Canvas", image)
#print("Working....")
if cv2.waitKey(1) == 27:
break
"""
"""
0 26 255 255 255 255 # orange
101 35 0 255 255 255 # Blue
0 76 25 255 255 255 # yellow
"""
| 49.086364 | 184 | 0.624965 | # Import the required libraries
import cv2
import numpy as np
import Projects.VirtualCanvas.utils as utils
class VirtualCanvas:
    """Track coloured pen markers in the webcam feed and paint their paths."""

    def __init__(self, num_markers=2, debug_mode=False):
        # Mode in which the user would like to run the program..
        self.debug_mode = debug_mode  # False for normal_run, True for debug_mode
        # Get the camera access..
        self.capture = cv2.VideoCapture(0)
        # Adjust the camera capture properties..
        self.capture.set(cv2.CAP_PROP_BRIGHTNESS, 200)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
        # No. of markers the user would like to draw..
        self.num_markers = num_markers
        # Trace points drawn so far, each of format -> [(x, y), colorId]
        self.marker_path_points = []
        # Per-marker HSV ranges and draw colours, persisted via utils.
        self.markers_HSV, self.marker_colors = utils.load_data(num_markers=num_markers, debug_mode=debug_mode)
        if debug_mode: print("Data loaded Successfully.. as: markers_HSV = \n", self.markers_HSV, "\nmarker_colors = \n", self.marker_colors)
def get_camera_feed(self):
    """Read one frame from the webcam, cache it on self.frame and return it."""
    ok, self.frame = self.capture.read()
    return self.frame
def detect_and_draw_as_marker(self, image):
    """
    Per-frame step: detect every configured marker colour in `image`,
    draw its tip, and record the tip position (with its colour index)
    in self.marker_path_points.
    """
    # `count` doubles as the marker colour index into self.marker_colors.
    count = 0
    # convert to HSV.. so that we can filter out the image from our captured HSV values for our markers previously..
    HSVimg = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2HSV)
    # loop through all marker's HSV values
    for marker_HSV in self.markers_HSV:
        lower_boundary = np.array(marker_HSV[0])
        upper_boundary = np.array(marker_HSV[1])
        # Get the mask image that satisfies the lower and upper HSV values..
        maskImg = cv2.inRange(src=HSVimg, lowerb=lower_boundary, upperb=upper_boundary)
        '''Draw the contours for the mask image detected, marker point for the marker'''
        # Get the bounding box corners (In the function call to self.draw_contours(), contours are drawn to original camera feed, if self.debug_mode is set to 1)
        x, y, width, height = self.draw_contours(image, maskImg)
        if self.debug_mode:
            cv2.rectangle(img=image, pt1=(x, y), pt2=(x + width, y + height), color=(255, 0, 255), thickness=3)
        # Select the marker point: the top-centre of the bounding box (the pen tip).
        marker_point_center = (x + width // 2, y)
        # Draw the marker point..
        # cv2.circle(img=image, center=marker_point_center, radius=5, color=(2, 255, 10), thickness=cv2.FILLED)
        cv2.circle(img=image, center=marker_point_center, radius=5, color=list(self.marker_colors[count]), thickness=cv2.FILLED)
        # Append the trace point of marker..
        self.marker_path_points.append([marker_point_center, count])
        #print(count, end="\n")
        count += 1
def draw_contours(self, image, maskImg):
    """
    Find the contours of `maskImg` (the HSV-filtered marker mask) and,
    in debug mode, draw them onto the colour frame `image`.

    :param image: Original color image of camera feed.
    :param maskImg: Mask Image (Contains only the markers)
    :return: (x, y, width, height) bounding box of the detected marker.
        NOTE(review): the loop overwrites these for every qualifying
        contour, so the box of the LAST contour with area > 3000 is
        returned; (0, 0, 0, 0) when no contour qualifies.
    """
    # Required variables..
    x, y, width, height = 0, 0, 0, 0
    # Find contours..
    contours, hierarchy = cv2.findContours(image=maskImg, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)  # Playable Parameters..
    # Draw the contours..
    for contour in contours:
        # Calculate the area of the contour, so can remove unnecessary contours..
        area = cv2.contourArea(contour=contour)
        if area > 3000:  # Playable adjustment..!! Found Good as 3000 for current light condition.. change this if light condition changes..
            # Draw the contours to the image -- actual frame..
            if self.debug_mode:
                cv2.drawContours(image=image, contours=contour, contourIdx=-1, color=(255, 255, 0), thickness=4)
            # Find the perimeter of the markers detected...
            perimeter = cv2.arcLength(curve=contour, closed=True)
            # Approximating/Finding the corners of the image from the obtained corners..
            approx_corners = cv2.approxPolyDP(curve=contour, epsilon=0.02 * perimeter, closed=True)
            # Find the bounding box rectangle for the approximated corners..
            x, y, width, height = cv2.boundingRect(approx_corners)
    # Return the values with which a rectangle can be drawn..
    return x, y, width, height
def get_markers_center(self, image):
    """
    Duplicate of the detection part of detect_and_draw_as_marker();
    called by trace_marker_path().

    :return: (x, y) of the marker tip.
        NOTE(review): the loop runs over ALL marker HSV ranges but only
        the bounding box of the LAST iteration survives, so the centre
        returned belongs to the last configured marker only.
    """
    # Required variables..
    x, y, width, height = 0, 0, 0, 0
    # convert to HSV.. so that we can filter out the image from our captured HSV values for our markers previously..
    HSVimg = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2HSV)
    # loop through all marker's HSV values
    for marker_HSV in self.markers_HSV:
        lower_boundary = np.array(marker_HSV[0])
        upper_boundary = np.array(marker_HSV[1])
        # Get the mask image that satisfies the lower and upper HSV values..
        maskImg = cv2.inRange(src=HSVimg, lowerb=lower_boundary, upperb=upper_boundary)
        '''Draw the contours for the mask image detected, marker point for the marker'''
        # Get the bounding box corners (In the function call to self.draw_contours(), contours are drawn to original camera feed, if self.debug_mode is set to 1)
        x, y, width, height = self.draw_contours(image, maskImg)
        if self.debug_mode:
            cv2.rectangle(img=image, pt1=(x, y), pt2=(x + width, y + height), color=(0, 0, 0), thickness=3)
    # Select the marker point: top-centre of the (last) bounding box.
    # marker_point_center = [x+width//2, y]
    return x + width // 2, y
def trace_marker_path(self, image):
    """Re-draw every recorded marker point so the path appears painted."""
    for center, colour_id in self.marker_path_points:
        cv2.circle(img=image, center=center, radius=10,
                   color=self.marker_colors[colour_id], thickness=cv2.FILLED)
def drawOnCanvas(debug_mode=False):
    """Main loop: grab frames, detect markers and paint their trails."""
    # Number of markers user would like to choose..
    markers_count = 3
    # NOTE(review): markers_count is preset to 3, so this while-loop and
    # its input() prompt can never run -- looks like a debugging shortcut.
    while markers_count <= 1:
        markers_count = int(input("How many markers would you like to use? (>1): "))
    # Create object to class "Virtual Canvas"
    virtualCanvas = VirtualCanvas(num_markers=markers_count, debug_mode=debug_mode)
    while True:
        # Get the cam feed..
        image = virtualCanvas.get_camera_feed()
        # Get the marker drawing points and save it in the marker_path_points..as [center(x, y), marker_color(count)]
        virtualCanvas.detect_and_draw_as_marker(image)
        # Draw all the path points to resemble like drawing on canvas..
        virtualCanvas.trace_marker_path(image)
        # Display the final results..
        cv2.imshow("Virtual Canvas", image)
        # ESC (27) quits the loop.
        if cv2.waitKey(1) == 27:
            break
# Tear down any OpenCV windows once the draw loop exits.
cv2.destroyAllWindows()

if __name__ == "__main__":
    # mode = int(input("Debug mode -- 1 or Normal Run ---0: "))
    drawOnCanvas(debug_mode=True)
"""
LOG:
Final HSV values as: HUE_min, SAT_min, VAL_min, HUE_max, SAT_max, VAL_max 103 45 0 120 255 255
Final HSV values as: HUE_min, SAT_min, VAL_min, HUE_max, SAT_max, VAL_max 0 80 148 255 255 255
Final HSV values as: HUE_min, SAT_min, VAL_min, HUE_max, SAT_max, VAL_max 0 89 178 255 238 255
for Dark blue, Orange, Yellow Sparx pens respectively..
"""
"""
Improvements:
Add the facility of saving the HSV values -- either via numpy or pkl
"""
"""
Backup code of def detect_and_draw_as_marker(self):
"""
"""
This function is made for testing purposes.
The part of this function's code is used in some other functions with some optimizations
:return:
"""
"""
while True:
# Required variables
count = 0
# Get camera feed..
image = self.get_camera_feed()
# convert to HSV.. so that we can filter out the image from our captured HSV values for our markers previously..
HSVimg = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2HSV)
# loop through all marker's HSV values
for marker_HSV in self.markers_HSV:
lower_boundary = np.array(marker_HSV[0])
upper_boundary = np.array(marker_HSV[1])
# Get the mask image that satisfies the lower and upper HSV values..
maskImg = cv2.inRange(src=HSVimg, lowerb=lower_boundary, upperb=upper_boundary)
'''Draw the contours for the mask image detected, marker point for the marker'''
# Get the bounding box corners (In the function call to self.draw_contours(), contours are drawn to original camera feed, if self.debug_mode is set to 1)
x, y, width, height = self.draw_contours(image, maskImg)
if self.debug_mode:
cv2.rectangle(img=image, pt1=(x, y), pt2=(x+width, y+height), color=(0, 0, 0), thickness=3)
# Select the marker point..
marker_point_center = (x+width//2, y)
# Draw the marker point..
# cv2.circle(img=image, center=marker_point_center, radius=5, color=(2, 255, 10), thickness=cv2.FILLED)
cv2.circle(img=image, center=marker_point_center, radius=5, color=self.marker_colors[count], thickness=cv2.FILLED)
count += 1
cv2.imshow("Virtual Canvas", image)
#print("Working....")
if cv2.waitKey(1) == 27:
break
"""
"""
0 26 255 255 255 255 # orange
101 35 0 255 255 255 # Blue
0 76 25 255 255 255 # yellow
"""
| 1,777 | 6,034 | 50 |
e24171f9c3ab6010088a1c42f912952e42a5477f | 5,506 | py | Python | datasets/Part 6 - Reinforcement Learning/Section 32 - Upper Confidence Bound (UCB)/upper_confidence_bound.py | kevinLCG/machinelearning-az | 54e3090275a3fc419aad17caadc6a47a71dcd3d4 | [
"MIT"
] | null | null | null | datasets/Part 6 - Reinforcement Learning/Section 32 - Upper Confidence Bound (UCB)/upper_confidence_bound.py | kevinLCG/machinelearning-az | 54e3090275a3fc419aad17caadc6a47a71dcd3d4 | [
"MIT"
] | null | null | null | datasets/Part 6 - Reinforcement Learning/Section 32 - Upper Confidence Bound (UCB)/upper_confidence_bound.py | kevinLCG/machinelearning-az | 54e3090275a3fc419aad17caadc6a47a71dcd3d4 | [
"MIT"
] | null | null | null | #!/home/kevinml/anaconda3/bin/python3.7
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 7 12:41:01 2019
@author: juangabriel and kevin Meza
"""
# Upper Confidence Bound (UCB)
# =======================================================================================================
# STEPS
#
# NOTES: The reward may be different on every round.
#        The LARGER "n" is, the SMALLER the width of the confidence interval.
#
# 1.- On every round "n", two numbers are tracked for each ad "i":
#       N(n) = number of times ad "i" has been selected up to round "n".
#       R(n) = sum of the rewards earned by ad "i" up to round "n".
# 2.- From those two numbers, compute:
#       - The average reward of ad "i" up to round "n":
#           r(n) = R(n) / N(n)
#
#       - The confidence interval at round "n":
#           ( r(n)-Δ(n) , r(n)+Δ(n) );  where:
#           Δ(n) = sqrt( 3*log(n) / 2*N(n) )
#
# 3.- Select the ad "i" with the highest upper confidence bound (UCB).
#
# Initially every distribution is assumed to have the same mean and the same confidence
# interval; as observations accumulate, the mean reward and the confidence interval of each
# one become better defined.  Remember: the larger "n" is, the narrower the interval gets.
#
# At first every machine (every distribution) is sampled; after some iterations, the machine
# (the distribution) with the highest upper confidence bound (UCB) is the one that keeps
# being played (sampled), until the algorithm converges.
#
# =======================================================================================================
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
################################################
###            IMPORT THE DATASET            ###
################################################
# One row per round in which the ads were shown; one column per ad.
# The 1's and 0's record whether the user clicked on that ad.
# NOTE(review): the CSV is resolved relative to the working directory -- run from the file's folder.
dataset = pd.read_csv("Ads_CTR_Optimisation.csv")
################################################
#     Implementation of the UCB algorithm      #
################################################
import math
N = 10000 # number of observations (rounds)
d = 10 # number of ads
# How many times each ad has been sampled; one counter per ad, initialised to 0.
number_of_selections = [0] * d
sums_of_rewards = [0] * d # cumulative reward collected by each ad
ads_selected = [] # ad chosen on each round
total_reward = 0 # total reward
for n in range(0, N):
    max_upper_bound = 0 # highest UCB seen so far in this round
    ad = 0 # index of the ad currently holding that highest bound
    # For every ad, compute the average reward and the upper bound of its confidence
    # interval for the current round, updating the round's best UCB when beaten.
    for i in range(0, d):
        if(number_of_selections[i] > 0):
            # Average reward of ad i so far.
            average_reward = sums_of_rewards[i] / number_of_selections[i]
            # Δn, the half-width of the confidence interval; n+1 avoids log(0) on the first round.
            delta_i = math.sqrt(3 / 2 * math.log(n + 1) /
                                number_of_selections[i])
            # Upper bound of the confidence interval.
            upper_bound = average_reward + delta_i
        else:
            # While an ad has never been selected, give it a huge upper bound
            # (the literal 1e400 overflows to float infinity) so no already-sampled
            # ad can beat it.  This makes round 1 pick the first ad, round 2 the
            # second, and so on, guaranteeing every ad is sampled at least once.
            upper_bound = 1e400
        # Keep the ad whose upper confidence bound is the largest so far.
        if upper_bound > max_upper_bound:
            max_upper_bound = upper_bound
            ad = i
    # Record the ad chosen this round (the one with the highest UCB).
    ads_selected.append(ad)
    # One more selection for the chosen ad.
    number_of_selections[ad] = number_of_selections[ad] + 1
    # Reward obtained by showing that ad on this round.
    reward = dataset.values[n, ad]
    # Add this round's reward to the chosen ad's running total.
    sums_of_rewards[ad] = sums_of_rewards[ad] + reward
    # And to the overall total.
    total_reward = total_reward + reward
# Every round always selects the ad with the highest UCB.
################################################
#          VISUALISATION OF RESULTS            #
################################################
# Histogram of how often each ad was selected (labels are user-facing strings, kept verbatim).
plt.hist(ads_selected)
plt.title("Histograma de anuncios")
plt.xlabel("ID del Anuncio")
plt.ylabel("Frecuencia de visualización del anuncio")
plt.show()
| 47.878261 | 144 | 0.628405 | #!/home/kevinml/anaconda3/bin/python3.7
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 7 12:41:01 2019
@author: juangabriel and kevin Meza
"""
# Upper Confidence Bound (UCB)
# =======================================================================================================
# PASOS
#
# NOTAS: Se considera que cada recompensa puede ser diferente en cada ronda.
# Entre MAYOR es la "n", MENOR es la amplitud del intervalo de confianza.
#
# 1.- En cada ronda "n" se consideran 2 numeros para cada anuncio "i":
# N(n) = Numero de veces que el anuncio "i" se selecciona en la ronda "n".
# R(n) = La suma de recompensas del anuncio "i" hasta la ronda "n".
# 2.- A partir de estos 2 numeros, se calcula:
# - La recompensa media del anuncio "i" hasta la ronda "n".
# r(n)= R(n)/ N(n)
#
# - El intervalo de confianza de la ronda "n".
# ( r(n)-Δ(n) , r(n)+Δ(n) ); Donde:
# Δ(n) = sqrt( 3*log(n) / 2*N(n) )
#
# 3.- Se selecciona el anuncio "i" con mayor limite superior del intervalo de confianza (UCB)
#
# En un inicio, se parte del supuesto de que las medias y los intervalos de confianza de cada una de las
# distribuciones son iguales y con al paso del tiempo al juntar observaciones, se va definiendo el valor
# medio de recompensa de cada una, al igua que los intervalos de confianza. Recordando que ntre mayor sea
# la "n", mrnor sera la amplitud del intervalo de confianza.
#
# Primero se comienza a tirar en todas las maquinas (muestreando asi todas las distribuciones) y despues
# de ciertas iteraciones, se comienza a tirar (muestrear) la maquina (la distribucion) con el mayor limite
# superior del intervalo de confianza (UCB), hasta que el algoritmo converja.
#
# =======================================================================================================
# Importar las librerías
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
################################################
### IMPORTAR EL DATA SET ###
################################################
# El dataset tiene en las filas las ronas en las que se presentan los anuncios y en las columnas estan los 20 anuncios.
# los 1's y 0's representan si el usuario dio "click" en el anuncio.
dataset = pd.read_csv("Ads_CTR_Optimisation.csv")
################################################
# Implementacion del Algoritmo de UCB #
################################################
import math
N = 10000 # no. de observaciones
d = 10 # no. de anuncios
# aqui se guardara el numero de veces que se muestreo cada anuncio. Vectro inicializado con 0's de tamaño igual al no. de observaciones.
number_of_selections = [0] * d
sums_of_rewards = [0] * d # aqui se guardara la recompenza de cada anuncio
ads_selected = [] # vector con el numero de anuncio elegido en cda ronda
total_reward = 0 # recompenza total
for n in range(0, N):
max_upper_bound = 0 # Contiene el UCB de la ronda
ad = 0 # Contiene el numero del anuncio con el mayor intervalo de confianza
# En la ronda actual, para cada anuncio, se obtiene la "recompensa media" y el limite superior del intervalo de confianza
# y se actualiza el UCB si es necesario
for i in range(0, d):
if(number_of_selections[i] > 0):
# Se obtiene la recompensa media
average_reward = sums_of_rewards[i] / number_of_selections[i]
# Se obtiene Δn, para sacar el intervalo de confianza; sumamos 1 para no dividir entre cero
delta_i = math.sqrt(3 / 2 * math.log(n + 1) /
number_of_selections[i])
# Se obtiene el limite superior del intervalo de confianza
upper_bound = average_reward + delta_i
else:
# Para las primeras rondas cuando no se ha seleccionado el anuncio, se le asigna como como "upper confidence bound" el numero 10^400
# Asi ningun anuncio sera mejor que otro en la primera ronda.
# En la primera ronda se eligira el primer anuncio, en la siguiente ronda el segundo, despues el tercero y asi sucesivamente,
# esto con la intencion de que al menos todos sean muestreados 1 vez, por eso el numero "10^400".
upper_bound = 1e400
# Si el limite superior del intervalo de confianza del actual anuncio supera al UCB, este pasa a ser el nuevo UCB
if upper_bound > max_upper_bound:
max_upper_bound = upper_bound
ad = i
# se añade a la lista correspondiente el anuncio "elegido", es decir, con el UCB hasta esa ronda
ads_selected.append(ad)
# se le suma 1 al vector que contiene cuantas veces se ha elegido el anuncio
number_of_selections[ad] = number_of_selections[ad] + 1
# Se guarda la recompensa de seleccionar ese anuncio
reward = dataset.values[n, ad]
# A la recompenza previa del anuncio "elegido", se le suma la recompenza conseguida en esta ronda
sums_of_rewards[ad] = sums_of_rewards[ad] + reward
# Se suma la recompenza de esta ronda a la recompenza total
total_reward = total_reward + reward
# En cada ronda, siempre se va seleccionar el anuncio con el UCB
################################################
# VISUALIZACION DE RESULTADOS #
################################################
# Histograma de resultados
plt.hist(ads_selected)
plt.title("Histograma de anuncios")
plt.xlabel("ID del Anuncio")
plt.ylabel("Frecuencia de visualización del anuncio")
plt.show()
| 0 | 0 | 0 |
e4715ff122d45df33ee2821da9d2fb00569dea8c | 1,771 | py | Python | src/tensorflow_time_series_dataset/loaders/csv_data_loader.py | MArpogaus/tensorflow_timeseries_dataset | f5202d2845e2583b21b30d467e301517f5c5f5f1 | [
"Apache-2.0"
] | 1 | 2022-01-27T10:33:11.000Z | 2022-01-27T10:33:11.000Z | src/tensorflow_time_series_dataset/loaders/csv_data_loader.py | MArpogaus/tensorflow_timeseries_dataset | f5202d2845e2583b21b30d467e301517f5c5f5f1 | [
"Apache-2.0"
] | null | null | null | src/tensorflow_time_series_dataset/loaders/csv_data_loader.py | MArpogaus/tensorflow_timeseries_dataset | f5202d2845e2583b21b30d467e301517f5c5f5f1 | [
"Apache-2.0"
] | null | null | null | # -*- time-stamp-pattern: "changed[\s]+:[\s]+%%$"; -*-
# AUTHOR INFORMATION ##########################################################
# file : csv_data_loader.py
# author : Marcel Arpogaus <marcel dot arpogaus at gmail dot com>
#
# created : 2022-01-07 09:02:38 (Marcel Arpogaus)
# changed : 2022-01-07 09:02:38 (Marcel Arpogaus)
# DESCRIPTION #################################################################
# ...
# LICENSE #####################################################################
# Copyright 2022 Marcel Arpogaus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pandas as pd
| 34.72549 | 79 | 0.586674 | # -*- time-stamp-pattern: "changed[\s]+:[\s]+%%$"; -*-
# AUTHOR INFORMATION ##########################################################
# file : csv_data_loader.py
# author : Marcel Arpogaus <marcel dot arpogaus at gmail dot com>
#
# created : 2022-01-07 09:02:38 (Marcel Arpogaus)
# changed : 2022-01-07 09:02:38 (Marcel Arpogaus)
# DESCRIPTION #################################################################
# ...
# LICENSE #####################################################################
# Copyright 2022 Marcel Arpogaus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pandas as pd
def _read_csv_file(file_path, date_time_col="date_time", **kwds):
    """Load a CSV file as a DataFrame indexed by its parsed datetime column.

    :param file_path: path to the CSV file.
    :param date_time_col: name of the timestamp column; it is parsed as
        datetime and used as the index (default ``"date_time"``).
    :param kwds: extra keyword arguments forwarded to ``pandas.read_csv``.
    :returns: the loaded ``pandas.DataFrame``.
    :raises ValueError: if the loaded data contains any NaN values.
    """
    # NOTE(review): ``infer_datetime_format`` is deprecated (a no-op) from
    # pandas 2.0 onward -- kept for backward compatibility with pandas 1.x.
    load_data = pd.read_csv(
        file_path,
        parse_dates=[date_time_col],
        infer_datetime_format=True,
        index_col=[date_time_col],
        **kwds
    )
    # Reject incomplete data early instead of propagating NaNs to callers.
    if load_data.isnull().any().sum() != 0:
        raise ValueError("Data contains NaNs")
    return load_data
class CSVDataLoader:
    """Callable loader: captures a CSV path plus read options, reads on call.

    Every invocation re-reads the file through :func:`_read_csv_file`, so
    callers always receive freshly loaded data.
    """

    def __init__(self, file_path, **kwds):
        # Remember the target file and any extra ``pandas.read_csv`` options.
        self.file_path = file_path
        self.kwds = kwds

    def __call__(self):
        # Delegate the actual parsing to the module-level reader.
        reader_options = self.kwds
        return _read_csv_file(self.file_path, **reader_options)
| 495 | -1 | 99 |
d0e9ce6b89c57a2e405fd744fba0b2586b5192d8 | 711 | py | Python | networks/FFNetworks.py | ebucheli/FreeSound_GPAT | 6737732276cb664e682fdc631d429ffcd5875990 | [
"MIT"
] | null | null | null | networks/FFNetworks.py | ebucheli/FreeSound_GPAT | 6737732276cb664e682fdc631d429ffcd5875990 | [
"MIT"
] | null | null | null | networks/FFNetworks.py | ebucheli/FreeSound_GPAT | 6737732276cb664e682fdc631d429ffcd5875990 | [
"MIT"
] | null | null | null | from tensorflow.keras.layers import Dropout, Activation, Flatten
from tensorflow.keras.layers import Dense, Input, BatchNormalization
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.activations import relu, softmax
| 28.44 | 68 | 0.722925 | from tensorflow.keras.layers import Dropout, Activation, Flatten
from tensorflow.keras.layers import Dense, Input, BatchNormalization
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.activations import relu, softmax
def DNN_3HL(input_shape,n_classes):
    """Build a feed-forward classifier with three identical hidden layers.

    Architecture: Flatten -> [Dense(128, relu) + Dropout(0.5)] x 3
    -> Dense(n_classes, softmax).

    :param input_shape: shape of a single input sample (without batch axis).
    :param n_classes: number of output classes.
    :returns: an uncompiled Keras ``Model``.
    """
    inputs = Input(input_shape)
    net = Flatten()(inputs)
    # Three hidden blocks: 128 ReLU units each, followed by 50% dropout.
    for _ in range(3):
        net = Dense(128, activation=relu)(net)
        net = Dropout(0.5)(net)
    # Softmax head over the target classes.
    net = Dense(n_classes, activation=softmax)(net)
    return Model(inputs=inputs, outputs=net)
| 377 | 0 | 23 |
225e4495fa60f2a9bb662b836515d9302e1d4a78 | 6,300 | py | Python | adj_mat_func.py | LcRss/DeepLabV2-Pascal-Part-Argmax | 3911ba41f387684c42414682fd8abcc8d028a28d | [
"Apache-2.0"
] | null | null | null | adj_mat_func.py | LcRss/DeepLabV2-Pascal-Part-Argmax | 3911ba41f387684c42414682fd8abcc8d028a28d | [
"Apache-2.0"
] | null | null | null | adj_mat_func.py | LcRss/DeepLabV2-Pascal-Part-Argmax | 3911ba41f387684c42414682fd8abcc8d028a28d | [
"Apache-2.0"
] | null | null | null | import sys
import cv2
import numpy as np
import tensorflow as tf
| 33.157895 | 108 | 0.437143 | import sys
import cv2
import numpy as np
import tensorflow as tf
class adj_mat_func(object):
    """Adjacency statistics between part labels in batches of label maps.

    For every image in a batch, two labels are counted as adjacent when the
    minimum Euclidean distance between their OpenCV contours is <= 1 pixel.
    Counts are accumulated into a 108x108 matrix indexed by
    (label_i, label_j) with label_i < label_j; label 0 (background) and
    255 (void) are excluded.
    """

    def __init__(self, batch_size):
        """Store how many images of each incoming batch are processed.

        :param batch_size: number of images read from every batch tensor.
        """
        super().__init__()
        self.batch_size = batch_size

    def adj_mat(self, y_true, y_pred):
        """Wrap :meth:`np_adj_func` as a TensorFlow op via ``tf.py_func``.

        Takes tensors as arguments and returns a float32 tensor holding the
        adjacency counts of ``y_true``.
        """
        return tf.py_func(self.np_adj_func, [y_true, y_pred], tf.float32)

    def _batch_adjacency(self, batch):
        """Accumulate label-adjacency counts over one batch of label images.

        For each image: collect the labels present (dropping the first --
        assumed background 0 -- and void 255), extract the contour points of
        every label with OpenCV, and for each label pair increment cell
        (label_i, label_j) when the two contours come within 1 pixel.

        :param batch: indexable batch of 2D label images.
        :returns: 108x108 count matrix as ``np.float32``.
        """
        adj = np.zeros(shape=(108, 108))
        for b in range(self.batch_size):
            img = batch[b]
            labels = np.unique(img)
            # NOTE(review): assumes label 0 (background) is always present;
            # otherwise the smallest real label gets dropped here.
            labels = labels[1:]
            if 255 in labels:
                labels = labels[:-1]  # np.unique sorts, so void 255 is last
            # Contour points per label, each as an (n_points, 2) float array.
            # NOTE(review): cv2.findContours returning 3 values is the
            # OpenCV 3.x API; OpenCV 4.x returns 2 -- confirm target version.
            contour_points = []
            for value in labels:
                mask = cv2.inRange(img, int(value), int(value))
                _, contours, _ = cv2.findContours(image=mask, mode=cv2.RETR_TREE,
                                                  method=cv2.CHAIN_APPROX_SIMPLE)
                pts = np.zeros(shape=(1, 2))
                for contour in contours:
                    # Contours come as (n, 1, 2) integer arrays of (x, y) points.
                    pts = np.concatenate(
                        (pts, contour.reshape(-1, 2).astype(np.float64)), axis=0)
                contour_points.append(pts[1:])  # drop the zero-row placeholder
            # Minimum contour-to-contour distance for every label pair.
            for i in range(len(labels)):
                pts_i = contour_points[i]
                for j in range(i + 1, len(labels)):
                    pts_j = contour_points[j]
                    min_dist = sys.maxsize
                    for point in pts_i:
                        # Distances from one point of contour i to all points of contour j.
                        dist = np.sqrt(np.sum((point - pts_j) ** 2, axis=1))
                        closest = np.min(dist)
                        if closest < min_dist:
                            min_dist = closest
                    if min_dist <= 1:  # touching (or 1 px apart) counts as adjacent
                        adj[labels[i]][labels[j]] = 1 + adj[labels[i]][labels[j]]
        return adj.astype(np.float32)

    def np_adj_func(self, y_true, y_pred):
        """Adjacency counts of the ground-truth batch.

        ``y_pred`` is accepted (the ``tf.py_func`` wrapper passes it) but is
        intentionally unused here.
        """
        return self._batch_adjacency(y_true)

    def np_adj_func_2in1(self, y_true, y_pred):
        """Adjacency counts for both the ground-truth and the predicted batch.

        :returns: ``(adj_true, adj_pred)`` pair of float32 108x108 matrices.

        Bug fix: the original chained assignment
        ``adj_mat_true = [classes[i]][classes[j]] = ...`` indexed a
        one-element list literal (IndexError, or rebinding the accumulator to
        a scalar); counts are now accumulated via the shared helper.
        """
        return self._batch_adjacency(y_true), self._batch_adjacency(y_pred)
| 6,098 | 6 | 130 |
96d95d628d56102ae37b9dbbcffde08391ae9f3b | 40 | py | Python | better_id3/commands/__init__.py | AymericBebert/Better-ID3 | d30480738bbef3d64a2c88386fbce9fef1fa35bc | [
"MIT"
] | null | null | null | better_id3/commands/__init__.py | AymericBebert/Better-ID3 | d30480738bbef3d64a2c88386fbce9fef1fa35bc | [
"MIT"
] | null | null | null | better_id3/commands/__init__.py | AymericBebert/Better-ID3 | d30480738bbef3d64a2c88386fbce9fef1fa35bc | [
"MIT"
] | null | null | null | from .clean_command import CleanCommand
| 20 | 39 | 0.875 | from .clean_command import CleanCommand
| 0 | 0 | 0 |
e85000f3143b4e7e3b062d4789bdb1aba85cfb9b | 1,413 | py | Python | tests/test_parser.py | hindman/optopus | daaba31c6b1bd4f56e442326e36f7b3ea0b74b15 | [
"MIT"
] | 2 | 2021-05-04T23:44:42.000Z | 2021-07-25T20:45:33.000Z | tests/test_parser.py | hindman/optopus | daaba31c6b1bd4f56e442326e36f7b3ea0b74b15 | [
"MIT"
] | null | null | null | tests/test_parser.py | hindman/optopus | daaba31c6b1bd4f56e442326e36f7b3ea0b74b15 | [
"MIT"
] | null | null | null | import pytest
from collections import OrderedDict
from optopus import (
Parser,
Result,
)
| 22.428571 | 74 | 0.518754 | import pytest
from collections import OrderedDict
from optopus import (
Parser,
Result,
)
def test_parse_noconfig_flag(tr):
    """A configuration-free Parser handles flags, options, and positionals."""
    parser = Parser()
    scenarios = [
        # Simple use case.
        (
            'rgx path -i -v',
            {'i': True, 'v': True, 'positionals': ['rgx', 'path']},
        ),
        # More elaborate inputs.
        (
            'AA BB CC -27 -f F --go G1 G2 --blort-maker_2 -- DD EE',
            {
                'f': 'F',
                'go': ['G1', 'G2'],
                'blort_maker_2': True,
                'positionals': ['AA', 'BB', 'CC', '-27', 'DD', 'EE'],
            },
        ),
        # No arguments.
        ('', {}),
        # Just positionals.
        ('A B C', {'positionals': ['A', 'B', 'C']}),
    ]
    for arg_text, expected in scenarios:
        parsed = parser.parse(arg_text.split())
        assert dict(parsed) == expected
def test_result(tr):
    """Result behaves like a read-only ordered mapping with a friendly repr."""
    data = OrderedDict()
    data['f'] = True
    data['go'] = True
    data['positionals'] = ['A', 'B']
    res = Result(data)
    # Iterating a Result yields the underlying (key, value) pairs in order.
    assert tuple(res) == tuple(data.items())
    # Key access and membership tests.
    assert 'go' in res
    assert res['f'] is True
    # str()/repr() render the fields; len() matches the mapping.
    expected_text = "Result(f=True, go=True, positionals=['A', 'B'])"
    assert str(res) == expected_text
    assert repr(res) == expected_text
    assert len(res) == len(data)
| 1,267 | 0 | 46 |