| Column | Type | Values |
|---|---|---|
| hexsha | stringlengths | 40 - 40 |
| size | int64 | 4 - 1.02M |
| ext | stringclasses | 8 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 - 209 |
| max_stars_repo_name | stringlengths | 5 - 121 |
| max_stars_repo_head_hexsha | stringlengths | 40 - 40 |
| max_stars_repo_licenses | listlengths | 1 - 10 |
| max_stars_count | int64 | 1 - 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 - 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 - 24 ⌀ |
| max_issues_repo_path | stringlengths | 4 - 209 |
| max_issues_repo_name | stringlengths | 5 - 121 |
| max_issues_repo_head_hexsha | stringlengths | 40 - 40 |
| max_issues_repo_licenses | listlengths | 1 - 10 |
| max_issues_count | int64 | 1 - 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 - 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 - 24 ⌀ |
| max_forks_repo_path | stringlengths | 4 - 209 |
| max_forks_repo_name | stringlengths | 5 - 121 |
| max_forks_repo_head_hexsha | stringlengths | 40 - 40 |
| max_forks_repo_licenses | listlengths | 1 - 10 |
| max_forks_count | int64 | 1 - 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 - 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 - 24 ⌀ |
| content | stringlengths | 4 - 1.02M |
| avg_line_length | float64 | 1.07 - 66.1k |
| max_line_length | int64 | 4 - 266k |
| alphanum_fraction | float64 | 0.01 - 1 |

⌀ marks columns that contain null values. Each row below lists its metadata fields in this column order, followed by the `content` (the source file) and the three trailing statistics.
ec32c5d19d9ecee103848ad41723ca7b1e920e88 | 3,466 | py | Python | tests/chainerx_tests/dtype_utils.py | hikjik/chainer | 324a1bc1ea3edd63d225e4a87ed0a36af7fd712f | ["MIT"] | 1 | 2019-03-09T07:39:07.000Z | 2019-03-09T07:39:07.000Z | tests/chainerx_tests/dtype_utils.py | hitsgub/chainer | 20d4d70f5cdacc1f24f243443f5bebc2055c8f8e | ["MIT"] | null | null | null | tests/chainerx_tests/dtype_utils.py | hitsgub/chainer | 20d4d70f5cdacc1f24f243443f5bebc2055c8f8e | ["MIT"] | null | null | null |
import itertools
import numpy
import chainerx
def _permutate_dtype_mapping(dtype_mapping_list):
    # Permutes the in_dtypes of each dtype mapping entry.
d = {}
for in_dtypes, out_dtype in dtype_mapping_list:
for in_dtypes_ in itertools.permutations(in_dtypes):
d[in_dtypes_] = out_dtype
return sorted(d.items())
result_dtypes_two_arrays = _permutate_dtype_mapping([
# Bools.
(('bool_', 'bool_'), 'bool_'),
# Floats.
(('float16', 'float16'), 'float16'),
(('float32', 'float32'), 'float32'),
(('float64', 'float64'), 'float64'),
(('float32', 'float16'), 'float32'),
(('float64', 'float16'), 'float64'),
(('float64', 'float32'), 'float64'),
# Signed ints.
(('int8', 'int8'), 'int8'),
(('int8', 'int16'), 'int16'),
(('int8', 'int32'), 'int32'),
(('int8', 'int64'), 'int64'),
(('int16', 'int16'), 'int16'),
(('int32', 'int32'), 'int32'),
(('int64', 'int64'), 'int64'),
(('int16', 'int32'), 'int32'),
(('int16', 'int64'), 'int64'),
(('int32', 'int64'), 'int64'),
# Unsigned ints.
(('uint8', 'uint8'), 'uint8'),
# Signed int and unsigned int.
(('uint8', 'int8'), 'int16'),
(('uint8', 'int16'), 'int16'),
(('uint8', 'int32'), 'int32'),
# Int and float.
(('int8', 'float16'), 'float16'),
(('uint8', 'float16'), 'float16'),
(('int16', 'float32'), 'float32'),
(('int32', 'float32'), 'float32'),
(('int64', 'float32'), 'float32'),
# Bool and other.
(('bool_', 'uint8'), 'uint8'),
(('bool_', 'int8'), 'int8'),
(('bool_', 'int16'), 'int16'),
(('bool_', 'float16'), 'float16'),
(('bool_', 'float64'), 'float64'),
])
result_dtypes_three_arrays = _permutate_dtype_mapping([
# Signed ints.
(('int32', 'int32', 'int32'), 'int32'),
(('int8', 'int8', 'int32'), 'int32'),
(('int8', 'int16', 'int32'), 'int32'),
(('int8', 'int32', 'int32'), 'int32'),
(('int8', 'int64', 'int32'), 'int64'),
# Unsigned ints.
(('uint8', 'uint8', 'uint8'), 'uint8'),
(('uint8', 'uint8', 'int8'), 'int16'),
(('uint8', 'int8', 'int8'), 'int16'),
(('uint8', 'int8', 'int16'), 'int16'),
(('uint8', 'uint8', 'int16'), 'int16'),
# Float and signed int.
(('float16', 'int8', 'int8'), 'float16'),
(('float16', 'int32', 'int64'), 'float16'),
(('float16', 'float32', 'int64'), 'float32'),
# Float and unsigned int.
(('float16', 'int8', 'uint8'), 'float16'),
(('float16', 'int32', 'uint8'), 'float16'),
(('float16', 'float32', 'uint8'), 'float32'),
# Bool and other.
(('bool_', 'uint8', 'uint8'), 'uint8'),
(('bool_', 'bool_', 'uint8'), 'uint8'),
(('bool_', 'int8', 'uint8'), 'int16'),
(('bool_', 'bool_', 'int32'), 'int32'),
(('bool_', 'float16', 'float32'), 'float32'),
(('bool_', 'bool_', 'float64'), 'float64'),
])
def cast_if_numpy_array(xp, array, chx_expected_dtype):
"""Casts NumPy result array to match the dtype of ChainerX's corresponding
result.
This function receives result arrays for both NumPy and ChainerX and only
converts dtype of the NumPy array.
"""
if xp is chainerx:
assert isinstance(array, chainerx.ndarray)
return array
if xp is numpy:
assert isinstance(array, (numpy.ndarray, numpy.generic))
        # Dtype conversion to allow comparing the correctness of the values.
return array.astype(chx_expected_dtype, copy=False)
assert False
| 32.392523 | 78 | 0.549913 |
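The `_permutate_dtype_mapping` helper above makes the two-array and three-array promotion tables order-insensitive by expanding every permutation of the input dtypes. A minimal standalone sketch of that expansion, using only the standard library (no chainerx or numpy needed):

```python
# Standalone sketch of the expansion performed by _permutate_dtype_mapping:
# every permutation of the input dtypes maps to the same result dtype.
import itertools


def permute_dtype_mapping(dtype_mapping_list):
    expanded = {}
    for in_dtypes, out_dtype in dtype_mapping_list:
        for perm in itertools.permutations(in_dtypes):
            expanded[perm] = out_dtype
    return sorted(expanded.items())


pairs = permute_dtype_mapping([(('int8', 'float16'), 'float16')])
assert (('int8', 'float16'), 'float16') in pairs   # original order
assert (('float16', 'int8'), 'float16') in pairs   # permuted order
```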
966691a28d2d70bca9dbe151910724fe86b94c4c | 5,381 | py | Python | schedules/migrations/0004_auto_20160816_2356.py | janga1997/video_village | 58cba131c97dd3a033935e0675ba62daff7ca64a | ["MIT"] | 1 | 2017-03-10T22:44:35.000Z | 2017-03-10T22:44:35.000Z | schedules/migrations/0004_auto_20160816_2356.py | janga1997/video_village | 58cba131c97dd3a033935e0675ba62daff7ca64a | ["MIT"] | 14 | 2016-07-08T13:52:46.000Z | 2017-02-13T20:57:18.000Z | schedules/migrations/0004_auto_20160816_2356.py | janga1997/video_village | 58cba131c97dd3a033935e0675ba62daff7ca64a | ["MIT"] | 8 | 2016-07-11T16:23:20.000Z | 2018-10-13T06:07:58.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-16 23:56
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('videos', '0006_videoscore'),
('schedules', '0003_auto_20160808_2110'),
]
operations = [
migrations.CreateModel(
name='Playlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=255)),
('notes', models.TextField(blank=True, default='')),
],
),
migrations.CreateModel(
name='Show',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('notes', models.TextField(default='')),
],
),
migrations.CreateModel(
name='VideoSegment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('offset_in_playlist', models.PositiveIntegerField(help_text='The number of seconds from the start of the playlist to begin playing a video')),
('offset_in_video', models.PositiveIntegerField(default=0, help_text='The number of seconds from the start of the video to begin playing')),
('duration', models.PositiveIntegerField(help_text='the length of the video to play (in seconds)')),
('playlist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedules.Playlist')),
('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='videos.Video')),
],
options={
'ordering': ('offset_in_playlist',),
},
),
migrations.CreateModel(
name='WindowShow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('offset_in_show', models.PositiveIntegerField(default=0, help_text='The number of seconds from the start of the show to delay playing this playlist in this window')),
('repeats', models.PositiveIntegerField(default=1, help_text='The number of times to play the playlist (e.g., 1 means play it once)')),
('playlist', models.ForeignKey(help_text='The playlist to display in this window', on_delete=django.db.models.deletion.CASCADE, to='schedules.Playlist')),
('show', models.ForeignKey(help_text='The show this is a part of', on_delete=django.db.models.deletion.CASCADE, to='schedules.Show')),
],
options={
'ordering': ('show', 'offset_in_show'),
},
),
migrations.AlterUniqueTogether(
name='schedule',
unique_together=set([]),
),
migrations.RemoveField(
model_name='schedule',
name='window',
),
migrations.DeleteModel(
name='Showtime',
),
migrations.AlterModelOptions(
name='scheduleitem',
options={'ordering': ('date', 'time')},
),
migrations.AlterModelOptions(
name='window',
options={'ordering': ('building',)},
),
migrations.AddField(
model_name='scheduleitem',
name='date',
field=models.DateField(default=django.utils.timezone.now, help_text='The date of a show'),
),
migrations.AddField(
model_name='scheduleitem',
name='time',
field=models.TimeField(default=datetime.time(0, 0), help_text='The time a show starts'),
),
migrations.AddField(
model_name='window',
name='description',
field=models.CharField(default='?', max_length=64),
),
migrations.AlterUniqueTogether(
name='scheduleitem',
unique_together=set([]),
),
migrations.AddField(
model_name='windowshow',
name='window',
field=models.ForeignKey(help_text='The window to be used', on_delete=django.db.models.deletion.CASCADE, to='schedules.Window'),
),
migrations.RemoveField(
model_name='scheduleitem',
name='play_order',
),
migrations.RemoveField(
model_name='scheduleitem',
name='schedule',
),
migrations.RemoveField(
model_name='scheduleitem',
name='video',
),
migrations.RemoveField(
model_name='scheduleitem',
name='video_duration_seconds',
),
migrations.RemoveField(
model_name='scheduleitem',
name='video_start_seconds',
),
migrations.AddField(
model_name='scheduleitem',
name='show',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='schedules.Show'),
),
migrations.DeleteModel(
name='Schedule',
),
]
| 40.458647 | 183 | 0.57703 |
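The migration above creates `Playlist`, `Show`, `VideoSegment` and `WindowShow` and reshapes `scheduleitem` and `window`. For orientation, a hypothetical sketch of the resulting model classes is shown below; the field options are read directly off the `CreateModel` operations, but the repository's actual `schedules/models.py` may differ.

```python
# Hypothetical post-migration models, reconstructed from the CreateModel
# operations above (not copied from the repository).
from django.db import models


class Playlist(models.Model):
    title = models.CharField(blank=True, max_length=255)
    notes = models.TextField(blank=True, default='')


class Show(models.Model):
    notes = models.TextField(default='')


class VideoSegment(models.Model):
    playlist = models.ForeignKey('schedules.Playlist', on_delete=models.CASCADE)
    video = models.ForeignKey('videos.Video', on_delete=models.CASCADE)
    offset_in_playlist = models.PositiveIntegerField(
        help_text='The number of seconds from the start of the playlist '
                  'to begin playing a video')
    offset_in_video = models.PositiveIntegerField(
        default=0,
        help_text='The number of seconds from the start of the video to begin playing')
    duration = models.PositiveIntegerField(
        help_text='the length of the video to play (in seconds)')

    class Meta:
        ordering = ('offset_in_playlist',)
```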
c14a957fa70acf0000b78a217c407185e84a3db8 | 9,673 | py | Python | datasimulator/graph.py | AustralianBioCommons/data-simulator | 0c33620a18c62cc098ec83e2118c4e8b76eb6f26 | ["Apache-2.0"] | 3 | 2019-02-19T04:48:49.000Z | 2020-08-26T20:39:35.000Z | datasimulator/graph.py | AustralianBioCommons/data-simulator | 0c33620a18c62cc098ec83e2118c4e8b76eb6f26 | ["Apache-2.0"] | 25 | 2019-02-19T22:37:30.000Z | 2022-01-31T18:08:53.000Z | datasimulator/graph.py | AustralianBioCommons/data-simulator | 0c33620a18c62cc098ec83e2118c4e8b76eb6f26 | ["Apache-2.0"] | 4 | 2019-10-11T21:25:27.000Z | 2021-11-19T03:28:56.000Z |
import json
from os.path import join
from .node import Node, logger
from .errors import UserError, DictionaryError
from .generator import generate_list_numbers
from .utils import generate_list_numbers_from_file
EXCLUDED_NODE = ["program", "root", "data_release"]
class Graph(object):
"""
Graph representation class
"""
def __init__(self, dictionary, program="DEV", project="test"):
"""
Graph constructor
Args:
dictionary(DataDictionary): a dictionary instance
program(str): program name
project(str): project name
Outputs:
None
"""
self.dictionary = dictionary
self.root = None
self.program = program
self.project = project
self.nodes = []
def prelimary_dictionary_check(self):
"""
        Raise an exception if the dictionary has not been initialized yet.
"""
if self.dictionary is None:
raise UserError("Dictionary is not initialized!!!")
return True
def _get_list_of_node_names(self):
"""
return a list of node names
"""
return [k for k in self.dictionary.schema if k not in EXCLUDED_NODE]
def generate_nodes_from_dictionary(self, consent_codes=False):
"""
generate nodes from dictionary
Args:
consent_codes(bool): whether to include generation of random consent codes
"""
# logger.info('Start simulating data')
for node_name in self._get_list_of_node_names():
node = Node(
node_name,
self.dictionary.schema[node_name],
self.project,
consent_codes,
)
if node_name == "project":
self.root = node
self.nodes.append(node)
def get_node_with_name(self, node_name):
"""
get node object given name
Args:
node_name(str): node name
Outputs:
Node: node object
"""
for node in self.nodes:
if node.name == node_name:
return node
return None
def _add_required_link_to_node(
self, node, link_node_name, link_name, multiplicity=None, skip=True
):
"""
assign required links to a node
Args:
node(Node): node object
link_node_name(str): link node name
multiplicity(str): link type (one_to_one, one_to_many, ..etc.)
skip(bool): skip raising an exception to terminate
Outputs:
None or raise exception
"""
# skip all the links to Project node
if link_node_name in EXCLUDED_NODE:
return
node_parent = self.get_node_with_name(link_node_name)
if not node_parent:
msg = "Node {} have a link to node {} which does not exist".format(
node.name, link_node_name
)
            if skip:
                logger.error(msg)
                # Skip this link rather than appending a dangling None parent.
                return
            else:
                raise DictionaryError(message=msg)
node.required_links.append(
{"node": node_parent, "multiplicity": multiplicity, "name": link_name}
)
def graph_validation(self, required_only=False):
"""
Call to all node validation to validate
"""
self.prelimary_dictionary_check()
required_link_pass = self.graph_required_link_validation()
return (
all(
[
node.node_validation(required_only=required_only)[0]
for node in self.nodes
]
)
and required_link_pass
)
def graph_required_link_validation(self):
"""
Validate node links
"""
pass_validation = True
for node in self.nodes:
# validate required links
if not node.required_links and node.name != "project":
logger.error(
"Node {} does not have any required link".format(node.name)
)
pass_validation = False
return pass_validation
def construct_graph_edges(self):
"""
        Construct edges between nodes. Ignore optional links.
"""
# Link nodes together to create graph
for node in self.nodes:
if node == self.root:
continue
if not node.links:
logger.error(
"ERROR: {} should have at least one link to other node".format(
node.name
)
)
try:
node_links = node.links
if not isinstance(node_links, list):
node_links = [node_links]
# expect node_links contains list of links
for link in node_links:
if isinstance(link, dict):
if not link.get("required"):
continue
if "target_type" in link:
self._add_required_link_to_node(
node,
link["target_type"],
link.get("name"),
link.get("multiplicity"),
)
if "sub_group" in link or "subgroup" in link:
sub_links = link.get("sub_group") or link.get("subgroup")
if not isinstance(sub_links, list):
sub_links = [sub_links]
# just pick one of sub-group links
for sub_link in sub_links:
if "target_type" in sub_link:
self._add_required_link_to_node(
node,
sub_link["target_type"],
sub_link.get("name"),
sub_link.get("multiplicity"),
)
break
except TypeError as e:
raise DictionaryError(
"Node {} have non-list links. Detail {}".format(
node.name, e.message
)
)
def generate_submission_order_path_to_node(self, node):
"""
Generate submission order so that the current node can be submitted
Args:
node(Node): current node object
Outputs:
list: list of submission order
"""
submission_order = [node]
index = 0
while index < len(submission_order):
cur_node = submission_order[index]
for linked_node_dict in cur_node.required_links:
if linked_node_dict["node"] not in submission_order:
submission_order.append(linked_node_dict["node"])
index += 1
submission_order.reverse()
return submission_order
def generate_submission_order(self):
"""
Generate submission order for the graph
"""
submission_order = []
for node in self.nodes:
if node not in submission_order:
for item in self.generate_submission_order_path_to_node(node):
if item not in submission_order:
submission_order.append(item)
return submission_order
def simulate_graph_data(
self,
path,
n_samples=1,
node_num_instances_file=None,
random=True,
required_only=True,
skip=True,
):
"""
Simulate data for the whole graph.
Args:
random(bool): whether randomly link to parent nodes
required_only(bool): only simulate required properties
skip(bool): skip raising an exception to terminate
Outputs:
None
"""
submission_order = self.generate_submission_order()
with open(join(path, "DataImportOrder.txt"), "w") as outfile:
for node in submission_order:
outfile.write(node.name + "\n")
if node_num_instances_file is None:
n_samples_list = generate_list_numbers(
len(submission_order), nmax=n_samples, random=random
)
else:
try:
n_samples_list = generate_list_numbers_from_file(
node_num_instances_file, submission_order, n_samples
)
except UserError as e:
raise e
for idx, node in enumerate(submission_order):
# raise exception if not skip and not pass validation
_, is_submitable = node.node_validation()
if is_submitable:
# simulate data
logger.info("Simulating data for node {}".format(node.name))
node.simulate_data(
n_samples=n_samples_list[idx],
random=random,
required_only=required_only,
)
else:
if not skip:
raise DictionaryError("Can not simulate node {}".format(node.name))
if skip:
logger.error("Can not simulate node {}".format(node.name))
with open(join(path, node.name + ".json"), "w") as outfile:
json.dump(node.simulated_dataset, outfile, indent=4, sort_keys=True)
| 32.789831 | 87 | 0.519177 |
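`generate_submission_order_path_to_node` above does a breadth-first walk over each node's `required_links` and then reverses the result, so every parent appears before the node that links to it. A self-contained sketch of the same idea, using plain dicts as stand-ins for the real `Node` objects:

```python
# Dict-based stand-ins for Node objects; only "name" and "required_links"
# matter for the ordering logic sketched here.
def submission_order_for(node):
    order = [node]
    index = 0
    while index < len(order):
        for link in order[index]["required_links"]:
            if link["node"] not in order:
                order.append(link["node"])
        index += 1
    order.reverse()                      # parents first, target node last
    return [n["name"] for n in order]


project = {"name": "project", "required_links": []}
subject = {"name": "subject", "required_links": [{"node": project}]}
sample = {"name": "sample", "required_links": [{"node": subject}]}

print(submission_order_for(sample))     # ['project', 'subject', 'sample']
```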
fa25e9130f6b6f3734916defd96fa03864c58a22 | 32,965 | py | Python | google/ads/googleads/v7/googleads-py/tests/unit/gapic/googleads.v7/services/test_campaign_simulation_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | ["Apache-2.0"] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v7/googleads-py/tests/unit/gapic/googleads.v7/services/test_campaign_simulation_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | ["Apache-2.0"] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v7/googleads-py/tests/unit/gapic/googleads.v7/services/test_campaign_simulation_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | ["Apache-2.0"] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v7.common.types import simulation
from google.ads.googleads.v7.enums.types import simulation_modification_method
from google.ads.googleads.v7.enums.types import simulation_type
from google.ads.googleads.v7.resources.types import campaign_simulation
from google.ads.googleads.v7.services.services.campaign_simulation_service import CampaignSimulationServiceClient
from google.ads.googleads.v7.services.services.campaign_simulation_service import transports
from google.ads.googleads.v7.services.types import campaign_simulation_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert CampaignSimulationServiceClient._get_default_mtls_endpoint(None) is None
assert CampaignSimulationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert CampaignSimulationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert CampaignSimulationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert CampaignSimulationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert CampaignSimulationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_campaign_simulation_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = CampaignSimulationServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_campaign_simulation_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = CampaignSimulationServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = CampaignSimulationServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_campaign_simulation_service_client_get_transport_class():
transport = CampaignSimulationServiceClient.get_transport_class()
assert transport == transports.CampaignSimulationServiceGrpcTransport
transport = CampaignSimulationServiceClient.get_transport_class("grpc")
assert transport == transports.CampaignSimulationServiceGrpcTransport
@mock.patch.object(CampaignSimulationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CampaignSimulationServiceClient))
def test_campaign_simulation_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.CampaignSimulationServiceClient.get_transport_class') as gtc:
transport = transports.CampaignSimulationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = CampaignSimulationServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.CampaignSimulationServiceClient.get_transport_class') as gtc:
client = CampaignSimulationServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.transports.CampaignSimulationServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CampaignSimulationServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.transports.CampaignSimulationServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CampaignSimulationServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.transports.CampaignSimulationServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CampaignSimulationServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = CampaignSimulationServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = CampaignSimulationServiceClient()
@mock.patch.object(CampaignSimulationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CampaignSimulationServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_campaign_simulation_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.transports.CampaignSimulationServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = CampaignSimulationServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.transports.CampaignSimulationServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = CampaignSimulationServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.transports.CampaignSimulationServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = CampaignSimulationServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_campaign_simulation_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.transports.CampaignSimulationServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CampaignSimulationServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_campaign_simulation(transport: str = 'grpc', request_type=campaign_simulation_service.GetCampaignSimulationRequest):
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_campaign_simulation),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = campaign_simulation.CampaignSimulation(
resource_name='resource_name_value',
campaign_id=1132,
type_=simulation_type.SimulationTypeEnum.SimulationType.UNKNOWN,
modification_method=simulation_modification_method.SimulationModificationMethodEnum.SimulationModificationMethod.UNKNOWN,
start_date='start_date_value',
end_date='end_date_value',
cpc_bid_point_list=simulation.CpcBidSimulationPointList(points=[simulation.CpcBidSimulationPoint(required_budget_amount_micros=3098)]),
)
response = client.get_campaign_simulation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == campaign_simulation_service.GetCampaignSimulationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, campaign_simulation.CampaignSimulation)
assert response.resource_name == 'resource_name_value'
assert response.campaign_id == 1132
assert response.type_ == simulation_type.SimulationTypeEnum.SimulationType.UNKNOWN
assert response.modification_method == simulation_modification_method.SimulationModificationMethodEnum.SimulationModificationMethod.UNKNOWN
assert response.start_date == 'start_date_value'
assert response.end_date == 'end_date_value'
def test_get_campaign_simulation_from_dict():
test_get_campaign_simulation(request_type=dict)
def test_get_campaign_simulation_field_headers():
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = campaign_simulation_service.GetCampaignSimulationRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_campaign_simulation),
'__call__') as call:
call.return_value = campaign_simulation.CampaignSimulation()
client.get_campaign_simulation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_campaign_simulation_flattened():
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_campaign_simulation),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = campaign_simulation.CampaignSimulation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_campaign_simulation(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_campaign_simulation_flattened_error():
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_campaign_simulation(
campaign_simulation_service.GetCampaignSimulationRequest(),
resource_name='resource_name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.CampaignSimulationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.CampaignSimulationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = CampaignSimulationServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.CampaignSimulationServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.CampaignSimulationServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.CampaignSimulationServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_campaign_simulation_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.transports.CampaignSimulationServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.CampaignSimulationServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_campaign_simulation',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_campaign_simulation_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v7.services.services.campaign_simulation_service.transports.CampaignSimulationServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CampaignSimulationServiceTransport()
adc.assert_called_once()
def test_campaign_simulation_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
CampaignSimulationServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_campaign_simulation_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.CampaignSimulationServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_campaign_simulation_service_host_no_port():
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_campaign_simulation_service_host_with_port():
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_campaign_simulation_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.CampaignSimulationServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize("transport_class", [transports.CampaignSimulationServiceGrpcTransport])
def test_campaign_simulation_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.CampaignSimulationServiceGrpcTransport,])
def test_campaign_simulation_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_campaign_simulation_path():
customer_id = "squid"
campaign_id = "clam"
type = "whelk"
modification_method = "octopus"
start_date = "oyster"
end_date = "nudibranch"
expected = "customers/{customer_id}/campaignSimulations/{campaign_id}~{type}~{modification_method}~{start_date}~{end_date}".format(customer_id=customer_id, campaign_id=campaign_id, type=type, modification_method=modification_method, start_date=start_date, end_date=end_date, )
actual = CampaignSimulationServiceClient.campaign_simulation_path(customer_id, campaign_id, type, modification_method, start_date, end_date)
assert expected == actual
def test_parse_campaign_simulation_path():
expected = {
"customer_id": "cuttlefish",
"campaign_id": "mussel",
"type": "winkle",
"modification_method": "nautilus",
"start_date": "scallop",
"end_date": "abalone",
}
path = CampaignSimulationServiceClient.campaign_simulation_path(**expected)
# Check that the path construction is reversible.
actual = CampaignSimulationServiceClient.parse_campaign_simulation_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = CampaignSimulationServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = CampaignSimulationServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = CampaignSimulationServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder, )
actual = CampaignSimulationServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = CampaignSimulationServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = CampaignSimulationServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization, )
actual = CampaignSimulationServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = CampaignSimulationServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = CampaignSimulationServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project, )
actual = CampaignSimulationServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = CampaignSimulationServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = CampaignSimulationServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = CampaignSimulationServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = CampaignSimulationServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = CampaignSimulationServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.CampaignSimulationServiceTransport, '_prep_wrapped_messages') as prep:
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.CampaignSimulationServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = CampaignSimulationServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_grpc_transport_close():
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
with mock.patch.object(type(client.transport._grpc_channel), 'close') as chan_close:
with client as _:
chan_close.assert_not_called()
chan_close.assert_called_once()
def test_grpc_client_ctx():
client = CampaignSimulationServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client as _:
pass
close.assert_called()
| 44.911444 | 280 | 0.719551 |
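The tests above exercise the generated `CampaignSimulationServiceClient` against mocked transports. As a rough, hedged sketch of how the same client could be driven in application code (the credentials file path, IDs and dates are placeholders; a real integration also needs valid Google Ads API access and is typically routed through the higher-level `GoogleAdsClient` wrapper):

```python
# Hedged usage sketch; only methods that appear in the tests above are used.
# "service_account.json" and the ID/date values are placeholders.
from google.ads.googleads.v7.services.services.campaign_simulation_service import (
    CampaignSimulationServiceClient,
)

client = CampaignSimulationServiceClient.from_service_account_file(
    "service_account.json")

# Build the resource name the same way test_campaign_simulation_path does.
resource_name = CampaignSimulationServiceClient.campaign_simulation_path(
    "1234567890",    # customer_id (placeholder)
    "111",           # campaign_id (placeholder)
    "TARGET_CPA",    # type (placeholder)
    "UNIFORM",       # modification_method (placeholder)
    "2021-01-01",    # start_date (placeholder)
    "2021-01-31",    # end_date (placeholder)
)

# Flattened-argument call, as in test_get_campaign_simulation_flattened.
simulation = client.get_campaign_simulation(resource_name=resource_name)
print(simulation.start_date, simulation.end_date)
```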
1208e913c0849f3e5c625c7af7447cf9365e9879 | 4,536 | py | Python | greentest/test__event.py | bkad/gevent | 185b71cc472db413515059ab4a197207cdaf1f6c | ["MIT"] | 2 | 2015-12-19T01:34:43.000Z | 2018-02-02T12:32:01.000Z | greentest/test__event.py | alex/gevent | 454a77ca561868854760b2d9cbfa3bf3bbd2e062 | ["MIT"] | null | null | null | greentest/test__event.py | alex/gevent | 454a77ca561868854760b2d9cbfa3bf3bbd2e062 | ["MIT"] | 2 | 2019-11-24T12:11:50.000Z | 2020-12-26T19:00:20.000Z |
import greentest
import gevent
import sys
from gevent.event import Event, AsyncResult
DELAY = 0.01
class TestEventWait(greentest.GenericWaitTestCase):
def wait(self, timeout):
Event().wait(timeout=timeout)
class TestWaitEvent(greentest.GenericWaitTestCase):
def wait(self, timeout):
gevent.wait([Event()], timeout=timeout)
class TestAsyncResultWait(greentest.GenericWaitTestCase):
def wait(self, timeout):
AsyncResult().wait(timeout=timeout)
class TestWaitAsyncResult(greentest.GenericWaitTestCase):
def wait(self, timeout):
gevent.wait([AsyncResult()], timeout=timeout)
class TestAsyncResultGet(greentest.GenericGetTestCase):
def wait(self, timeout):
AsyncResult().get(timeout=timeout)
class TestAsyncResult(greentest.TestCase):
def test_set_exc(self):
log = []
e = AsyncResult()
def waiter():
try:
result = e.get()
log.append(('received', result))
except Exception:
ex = sys.exc_info()[1]
                log.append(('caught', ex))
gevent.spawn(waiter)
obj = Exception()
e.set_exception(obj)
gevent.sleep(0)
        assert log == [('caught', obj)], log
def test_set(self):
event1 = AsyncResult()
event2 = AsyncResult()
g = gevent.spawn_later(DELAY / 2.0, event1.set, 'hello event1')
t = gevent.Timeout.start_new(0, ValueError('interrupted'))
try:
try:
result = event1.get()
except ValueError:
X = object()
result = gevent.with_timeout(DELAY, event2.get, timeout_value=X)
assert result is X, 'Nobody sent anything to event2 yet it received %r' % (result, )
finally:
t.cancel()
g.kill()
class TestAsyncResultAsLinkTarget(greentest.TestCase):
error_fatal = False
def test_set(self):
g = gevent.spawn(lambda: 1)
s1, s2, s3 = AsyncResult(), AsyncResult(), AsyncResult()
g.link(s1)
g.link_value(s2)
g.link_exception(s3)
assert s1.get() == 1
assert s2.get() == 1
assert gevent.with_timeout(DELAY, s3.get, timeout_value=X) is X
def test_set_exception(self):
def func():
raise greentest.ExpectedException('TestAsyncResultAsLinkTarget.test_set_exception')
g = gevent.spawn(func)
s1, s2, s3 = AsyncResult(), AsyncResult(), AsyncResult()
g.link(s1)
g.link_value(s2)
g.link_exception(s3)
self.assertRaises(greentest.ExpectedException, s1.get)
assert gevent.with_timeout(DELAY, s2.get, timeout_value=X) is X
self.assertRaises(greentest.ExpectedException, s3.get)
class TestEvent_SetThenClear(greentest.TestCase):
N = 1
def test(self):
e = Event()
waiters = [gevent.spawn(e.wait) for i in range(self.N)]
gevent.sleep(0.001)
e.set()
e.clear()
for t in waiters:
t.join()
class TestEvent_SetThenClear100(TestEvent_SetThenClear):
N = 100
class TestEvent_SetThenClear1000(TestEvent_SetThenClear):
N = 1000
class TestWait(greentest.TestCase):
N = 5
count = None
timeout = 1
period = 0.01
def _sender(self, events, asyncs):
while events or asyncs:
gevent.sleep(self.period)
if events:
events.pop().set()
gevent.sleep(self.period)
if asyncs:
asyncs.pop().set()
def test(self):
        events = [Event() for _ in range(self.N)]
        asyncs = [AsyncResult() for _ in range(self.N)]
max_len = len(events) + len(asyncs)
sender = gevent.spawn(self._sender, events, asyncs)
results = gevent.wait(events + asyncs, count=self.count, timeout=self.timeout)
if self.timeout is None:
expected_len = max_len
else:
expected_len = min(max_len, self.timeout / self.period)
if self.count is None:
assert sender.ready()
else:
expected_len = min(self.count, expected_len)
assert not sender.ready()
sender.kill()
assert expected_len == len(results), (expected_len, results)
class TestWait_notimeout(TestWait):
timeout = None
class TestWait_count1(TestWait):
count = 1
class TestWait_count2(TestWait):
count = 2
X = object()
if __name__ == '__main__':
greentest.main()
| 26.219653 | 100 | 0.604277 |
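The gevent test cases above center on `Event` and `AsyncResult`. A minimal sketch of the two primitives (assumes gevent is installed):

```python
# Event wakes its waiters; AsyncResult additionally hands them a value
# (or an exception via set_exception), as exercised in TestAsyncResult above.
import gevent
from gevent.event import Event, AsyncResult

evt = Event()
result = AsyncResult()


def waiter():
    evt.wait()                                   # blocks until evt.set()
    print('event fired, got', result.get())      # blocks until a value is set


g = gevent.spawn(waiter)
gevent.sleep(0)      # yield so the waiter greenlet starts and blocks
evt.set()
result.set('hello')
g.join()
```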
eca8d1ecf02340ce4c8b1b61a89dddfe0a37364c | 8,645 | py | Python | tppflush.py | hlixed/TPPFLUSH | e162edca158228a32e12b0912a034da0229e1af3 | ["MIT"] | 13 | 2017-06-13T09:49:53.000Z | 2021-08-01T17:26:23.000Z | tppflush.py | hlixed/TPPFLUSH | e162edca158228a32e12b0912a034da0229e1af3 | ["MIT"] | 5 | 2017-06-13T21:24:32.000Z | 2017-11-15T02:47:15.000Z | tppflush.py | hlixed/TPPFLUSH | e162edca158228a32e12b0912a034da0229e1af3 | ["MIT"] | null | null | null |
VERSION = 1.22
import sys
if sys.version_info < (3, 6):
raise ImportError("You are using python {}.{}. Python 3.6 or greater is required to use TPPFLUSH.\nYou can download the latest version of python from http://www.python.org.\n".format(sys.version_info[0],sys.version_info[1]))
import socket #imports module allowing connection to IRC
from itertools import chain
from enum import IntFlag, Flag, auto #Python 3.6 is required for this import
class HIDButtons(IntFlag):
A = auto()
B = auto()
SELECT = auto()
START = auto()
DPADRIGHT = auto()
DPADLEFT = auto()
DPADUP = auto()
DPADDOWN = auto()
R = auto()
L = auto()
X = auto()
Y = auto()
class CPAD_Commands(Flag):
CPADUP = auto()
CPADDOWN = auto()
CPADLEFT = auto()
CPADRIGHT = auto()
CPADNEUTRAL = auto() #sets cpad to 0,0
class CSTICK_Commands(Flag): #N3DS c-stick
CSTICKUP = auto()
CSTICKDOWN = auto()
CSTICKLEFT = auto()
CSTICKRIGHT = auto()
CSTICKNEUTRAL = auto() #sets cstick to 0,0
class N3DS_Buttons(IntFlag):
ZL = 2
ZR = 4
class Special_Buttons(IntFlag):
HOME = auto()
POWER = auto()
POWER_LONG = auto()
def bytearray_not(arr):
return bytearray([255-i for i in arr])
class LumaInputServer():
def __init__(self, server, port=4950):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.connect((server, port))
self.CPAD_BOUND = 0x5d0
self.CPP_BOUND = 0x7f #what does this stand for? Circle Pad Pro?
self.SQRT_ONEHALF = 0.707106781186547524401
self.TOUCHSCREEN_SIZES = [320,240]
self.current_pressed_buttons = HIDButtons.A ^ HIDButtons.A #no buttons
self.current_special_buttons = Special_Buttons.HOME ^ Special_Buttons.HOME
self.circle_pad_coords = [0,0] #0,0 is the center
self.touch_pressed = False
self.current_touch_coords = [0,0]
self.cstick_coords = [0,0] #n3ds c-stick, not the circle pad
self.zlzr_state = N3DS_Buttons.ZL ^ N3DS_Buttons.ZL #n3ds zl and zr
#button-pressing functions
#these do nothing until self.send() is called.
def press(self, btn):
"""Press the given button. This function accepts any value from any of the enums defined here and will call the appropriate pressing function. Ideally, this function should be the only one you need to press a button.
To control the circle pad, use self.circle_pad_set() instead.
To control the touch screen, use self.touch() instead.
To control the N3DS c-stick, use self.n3ds_cstick_set() instead.
        Example usage: press(Special_Buttons.HOME)
                       press(N3DS_Buttons.ZL)
                       press(HIDButtons.A)
"""
if btn in HIDButtons:
self.hid_press(btn)
elif btn in N3DS_Buttons:
self.n3ds_zlzr_press(btn)
elif btn in Special_Buttons:
self.special_press(btn)
else:
raise ValueError("Invalid button!")
def unpress(self, btn):
"""Unpress the given button. This is the opposite of self.press(), and will do nothing if a button is not already pressed. Ideally, this function should be the only one you need to unpress a button.
To control the circle pad, use self.circle_pad_set() or self.circle_pad_neutral() instead.
To unpress the touch screen, use self.clear_touch() instead.
        To control the N3DS c-stick, use self.n3ds_cstick_set() or self.n3ds_cstick_neutral() instead.
Special buttons can be unpressed individually with this function, but clear_special() exists to clear them all.
        Example usage: unpress(Special_Buttons.HOME)
                       unpress(N3DS_Buttons.ZL)
                       unpress(HIDButtons.A)
"""
if btn in HIDButtons:
self.hid_unpress(btn)
elif btn in N3DS_Buttons:
self.n3ds_zlzr_unpress(btn)
elif btn in Special_Buttons:
self.special_unpress(btn)
else:
raise ValueError("Invalid button!")
def clear_everything(self):
"""Function to reset the 3DS to no-inputs. All buttons are unpressed, the c-pad and c-stick are returned to neutral, and any touch pad inputs are cleared."""
for btn in chain(HIDButtons,N3DS_Buttons,Special_Buttons):
self.unpress(btn)
self.clear_touch()
self.circle_pad_neutral()
self.n3ds_cstick_neutral()
def hid_press(self, button):
if button not in self.current_pressed_buttons:
self.current_pressed_buttons |= button
def hid_unpress(self, button):
if button in self.current_pressed_buttons:
self.current_pressed_buttons ^= button
def hid_toggle(self, button):
self.current_pressed_buttons ^= button
def n3ds_zlzr_press(self, button):
if button not in self.zlzr_state:
self.n3ds_zlzr_toggle(button)
def n3ds_zlzr_unpress(self, button):
if button in self.zlzr_state:
self.n3ds_zlzr_toggle(button)
def n3ds_zlzr_toggle(self, button):
self.zlzr_state ^= button
def touch(self,x,y):
if x >= self.TOUCHSCREEN_SIZES[0] or y >= self.TOUCHSCREEN_SIZES[1] or x < 0 or y < 0:
raise ValueError
self.touch_pressed = True
self.current_touch_coords = [int(x),int(y)]
def special_press(self, button):
if button not in self.current_special_buttons:
self.current_special_buttons |= button
def special_unpress(self, button):
if button in self.current_special_buttons:
self.current_special_buttons ^= button
def clear_special(self, button): #just in case
self.current_special_buttons ^= self.current_special_buttons
def clear_touch(self):
self.touch_pressed = False
def circle_pad_set(self, button, multiplier=1):
if button == CPAD_Commands.CPADUP:
self.circle_pad_coords[1] = int(32767*multiplier)
if button == CPAD_Commands.CPADDOWN:
self.circle_pad_coords[1] = int(-32767*multiplier)
if button == CPAD_Commands.CPADLEFT:
self.circle_pad_coords[0] = int(-32767*multiplier)
if button == CPAD_Commands.CPADRIGHT:
self.circle_pad_coords[0] = int(32767*multiplier)
if button == CPAD_Commands.CPADNEUTRAL: #resets cpad
self.circle_pad_coords = [0,0]
def circle_pad_neutral(self):
self.circle_pad_set(CPAD_Commands.CPADNEUTRAL)
def n3ds_cstick_set(self, button, multiplier=1):
if button == CSTICK_Commands.CSTICKUP:
self.cstick_coords[1] = 32767*multiplier
if button == CSTICK_Commands.CSTICKDOWN:
self.cstick_coords[1] = -32767*multiplier
if button == CSTICK_Commands.CSTICKLEFT:
self.cstick_coords[0] = -32767*multiplier
if button == CSTICK_Commands.CSTICKRIGHT:
self.cstick_coords[0] = 32767*multiplier
if button == CSTICK_Commands.CSTICKNEUTRAL:
self.cstick_coords = [0,0]
def n3ds_cstick_neutral(self):
self.n3ds_cstick_set(CSTICK_Commands.CSTICKNEUTRAL)
def send(self, print_sent=True):
hid_buttons = self.current_pressed_buttons.to_bytes(4,byteorder='little')
hid_state = bytearray_not(hid_buttons)
circle_state = bytearray.fromhex("00088000")
if self.circle_pad_coords[0] != 0 or self.circle_pad_coords[1] != 0: # "0x5d0 is the upper/lower bound of circle pad input", says stary2001
x,y = self.circle_pad_coords
x = ((x * self.CPAD_BOUND) // 32768) + 2048
y = ((y * self.CPAD_BOUND) // 32768) + 2048
circle_state = (x | (y << 12)).to_bytes(4,byteorder='little')
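            # Worked example (an illustrative note, not in the original source):
            # assuming CPAD_BOUND is 0x5d0 (1488), as the comment above suggests,
            # a full-right press (x = 32767, y = 0) packs as
            # x = (32767 * 1488) // 32768 + 2048 = 1487 + 2048 = 3535, while a
            # neutral pad keeps the default "00088000" state, which decodes to the
            # 2048/2048 centre.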
touch_state = bytearray.fromhex("20000000")
if(self.touch_pressed):
x,y = self.current_touch_coords
x = (x * 4096) // self.TOUCHSCREEN_SIZES[0]
y = (y * 4096) // self.TOUCHSCREEN_SIZES[1]
touch_state = (x | (y << 12) | (0x01 << 24)).to_bytes(4,byteorder='little')
n3ds_exclusives_state = bytearray.fromhex("81008080")
if self.cstick_coords[0] != 0 or self.cstick_coords[1] != 0 or self.zlzr_state != 0:
x = self.cstick_coords[0] / 32768.0
y = self.cstick_coords[1] / 32768.0
#TuxSH note: We have to rotate the c-stick position 45deg. Thanks, Nintendo.
rotated_x = int(((x+y) * self.SQRT_ONEHALF * self.CPP_BOUND) + 0x80)
rotated_y = int(((y-x) * self.SQRT_ONEHALF * self.CPP_BOUND) + 0x80)
#rotated_x and rotated_y are between 0 and 0xff now
n3ds_exclusives_state = ((rotated_y&0xff) << 24 | (rotated_x&0xff) << 16 | (self.zlzr_state&0xff) << 8 | 0x81).to_bytes(4,byteorder='little')
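            # Illustrative note (not in the original source): because of the 45deg
            # rotation, a pure "up" input (x = 0, y = 1.0) ends up on the diagonal,
            # with rotated_x and rotated_y both roughly 0.707 * CPP_BOUND + 0x80
            # (CPP_BOUND is defined elsewhere in this class).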
special_buttons = self.current_special_buttons.to_bytes(4,byteorder='little')
toSend = bytearray(20) #create empty byte array
toSend[0:4] = hid_state
toSend[4:8] = touch_state
toSend[8:12] = circle_state
toSend[12:16] = n3ds_exclusives_state
toSend[16:20] = special_buttons
self.socket.send(toSend)
if print_sent:
print(toSend)
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print("To run as an executable: python3 lumainput.py <3ds ip>")
quit()
server = sys.argv[1]
server = LumaInputServer(server)
#example commands
server.hid_press(HIDButtons.X) # hold x
server.circle_pad_set(CPAD_Commands.CPADUP)
server.touch(319,239) #touch the bottom-right of the screen
#send inputs to 3DS
server.send()
| 33.901961 | 228 | 0.730943 |
642b27a3e880bea0b09640ccd9d5ad9c0ce51763 | 7,010 | py | Python | src/main/nluas/language/word_checker.py | ErickKramer/ecg_framework_code | 65c2966a37ffe829b81d610dd2b9902898101cc2 | ["Apache-2.0"] | null | null | null | src/main/nluas/language/word_checker.py | ErickKramer/ecg_framework_code | 65c2966a37ffe829b81d610dd2b9902898101cc2 | ["Apache-2.0"] | null | null | null | src/main/nluas/language/word_checker.py | ErickKramer/ecg_framework_code | 65c2966a37ffe829b81d610dd2b9902898101cc2 | ["Apache-2.0"] | null | null | null |
"""
Combines spell checking with swapping words for synonyms that are known tokens in the grammar.
------
See LICENSE.txt for licensing information.
------
"""
import enchant
import string
import os
import re
import nltk
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from nluas.language.spell_checker import Color
from itertools import chain
# mapping between NLTK POS tags and celex morphology types
TAG_DICT = {'JJ':'Positive', 'JJR':'Comparative', 'JJS':'Superlative',
'NN':'Singular', 'NNS':'Plural', 'NNP':'Singular', 'NNPS':'Plural',
'RB':'Positive', 'RBR':'Comparative', 'RBS':'Superlative',
'VB':'Infinitive', 'VBD':'FirstPersonPastTenseSingular',
'VBG':'ParticiplePresentTense', 'VBN':'ParticiplePastTense',
'VBP':'FirstPersonPresentTenseSingular', 'VBZ':'PresentTenseSingularThirdPerson'}
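# Illustrative example (not in the original source): an 'NNS' (plural noun) tag maps
# to 'Plural', which WordChecker below uses as the tense key when it looks up a
# surface form in lemma_to_word.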
class WordChecker(object):
def __init__(self, prefs_path, lexicon):
self.general = enchant.Dict("en_US")
self.morph_files = []
self.token_files = []
self.read_prefs(prefs_path)
self.tokens_in_grammar = set()
self.tokens_info = dict()
self.read_tokens()
self.lemma_to_word = dict()
self.word_to_lemma = dict()
self.read_morphs()
self.lexicon = enchant.pypwl.PyPWL()
self.load_lexicon(lexicon)
def read_prefs(self, prefs_path):
"""
Reads a prefs file and gets the morphology files and token files.
"""
prefs_folder = "/".join(prefs_path.split("/")[0:-1])
reading_morphs, reading_tokens = False, False
with open(prefs_path) as f:
for line in f:
line = line.strip()
if "MORPHOLOGY_PATH ::==" in line:
reading_morphs = True
elif "TOKEN_PATH ::==" in line:
reading_tokens = True
elif ";" in line:
reading_morphs, reading_tokens = False, False
elif reading_morphs == True:
self.morph_files.append(os.path.join(prefs_folder, line))
elif reading_tokens == True:
self.token_files.append(os.path.join(prefs_folder, line))
if reading_morphs and reading_tokens:
                    raise ValueError("Invalid prefs file")
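    # A rough sketch of the prefs layout read_prefs() expects (file names here are
    # hypothetical; only the section markers and the ";" terminator come from the
    # parsing logic above):
    #
    #   MORPHOLOGY_PATH ::==
    #   morphs/example.morphs
    #   ;
    #   TOKEN_PATH ::==
    #   tokens/example.tokens
    #   ;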
def read_tokens(self):
for token_file in self.token_files:
with open(token_file) as f:
for line in f:
token = line.split('::')[0].strip()
info = line.split('::')[1:]
self.tokens_in_grammar.add(token)
self.tokens_info[token] = info
def read_morphs(self):
for morph_file in self.morph_files:
with open(morph_file) as f:
for line in f:
morph = line.split()
word = morph[0]
for lemma, tense in zip(morph[1::2], morph[2::2]):
self.word_to_lemma[word] = lemma
tense_key = ''.join(sorted(re.split('/|,', tense)))
if lemma in self.lemma_to_word:
self.lemma_to_word[lemma][tense_key] = word
else:
self.lemma_to_word[lemma] = {tense_key : word}
def load_lexicon(self, lexicon):
for word in lexicon:
self.lexicon.add(word)
def check(self, sentence):
'''
        * Tokenize and POS-tag the sentence
        * Check each tagged word in the sentence
        * Return a dictionary containing the checked, modified and failed words
'''
tagged_words = nltk.pos_tag(nltk.word_tokenize(sentence))
checked, modified, failed = [], [], []
for i in range(len(tagged_words)):
checked_word, is_modified = self.check_word(i, tagged_words)
if is_modified is None:
failed.append(True)
else:
failed.append(False)
checked.append(checked_word)
modified.append(bool(is_modified))
return {'checked': checked, 'modified': modified, 'failed': failed}
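    # Hedged usage sketch (checker, the sentence and the outputs are illustrative
    # only): assuming every token is already in the loaded lexicon,
    #   checker.check("move the box")
    # might return
    #   {'checked': ['move', 'the', 'box'],
    #    'modified': [False, False, False],
    #    'failed': [False, False, False]}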
def check_word(self, i, tagged_words):
word, pos_tag = tagged_words[i]
if self.lexicon.check(word) or word in string.punctuation:
return word, False
if i+1 < len(tagged_words) and self.lexicon.check("{}_{}".format(word, tagged_words[i+1][0])):
return word, False
if i-1 >= 0 and self.lexicon.check("{}_{}".format(tagged_words[i-1][0], word)):
return word, False
if self.general.check(word):
synonym = self.get_synonym(word, pos_tag)
if synonym:
return synonym, True
try:
int(word)
return word, False
        except ValueError:
pass
lexicon_suggestions = self.lexicon.suggest(word)
if len(lexicon_suggestions) > 0:
return lexicon_suggestions[0], True
general_suggestions = self.general.suggest(word)
if len(general_suggestions) > 0:
for suggestion in general_suggestions:
synonym = self.get_synonym(suggestion, pos_tag)
if synonym:
return synonym, True
if self.general.check(word):
synonym = self.get_synonym(word, None)
if synonym:
return synonym, True
return word, None
def get_synonym(self, word, pos_tag):
if pos_tag:
tense = TAG_DICT[pos_tag] if pos_tag in TAG_DICT else 'NoMorphology'
pos = self.penn_to_wn(pos_tag)
if pos is None:
return None
wnl = WordNetLemmatizer()
# # https://stackoverflow.com/questions/19258652/how-to-get-synonyms-from-nltk-wordnet-python
lemma = wnl.lemmatize(word, pos=pos)
        else:
            lemma = word
            tense = 'NoMorphology'  # mirror the fallback used above so the tense lookup below is always defined
synonym_synsets = wordnet.synsets(lemma)
synonyms = set(chain.from_iterable([s.lemma_names() for s in synonym_synsets]))
valid = []
for synonym in synonyms:
if synonym in self.tokens_in_grammar:
if tense in self.lemma_to_word[synonym]:
if self.lexicon.check(self.lemma_to_word[synonym][tense]):
valid.append(self.lemma_to_word[synonym][tense])
return valid[0] if len(valid) > 0 else None
# Source: https://stackoverflow.com/questions/27591621/nltk-convert-tokenized-sentence-to-synset-format
def penn_to_wn(self, tag):
if not tag:
return None
elif tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('R'):
return wordnet.ADV
elif tag.startswith('V'):
return wordnet.VERB
return None
def join_checked(self, checked):
corrected = ""
for word in checked:
if word in string.punctuation:
corrected += word
else:
corrected += " " + word
return corrected.strip()
def print_modified(self, checked, modified):
corrected = ""
index = 0
while index < len(checked):
if checked[index] in string.punctuation:
corrected += checked[index]
else:
if modified[index]:
corrected += " " + Color.RED + checked[index] + Color.END
else:
corrected += " " + checked[index]
index += 1
return corrected.strip()
def get_failed(self, table):
checked, failed = table['checked'], table['failed']
failures = []
for word, is_fail in zip(checked, failed):
if is_fail:
failures.append(word)
return failures
| 32.155963 | 105 | 0.636519 |
e91b01be9c97ca794ee2a0adfedbdd01944d94dd | 1,369 | py | Python | wst/version.py | martingkelly/ws | 541fc015db7c9c7042aed064b00d3de4dd7ca856 | ["BSD-3-Clause"] | null | null | null | wst/version.py | martingkelly/ws | 541fc015db7c9c7042aed064b00d3de4dd7ca856 | ["BSD-3-Clause"] | null | null | null | wst/version.py | martingkelly/ws | 541fc015db7c9c7042aed064b00d3de4dd7ca856 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/python3
#
# Module version. Though this could go in wst/__init__.py, it's nicer to keep
# all "bump version" commits contained in one file so they don't muck up the
# rest of the code.
#
# Copyright (c) 2018-2019 Xevo Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
_VERSION = '1.0.17'
def version():
return _VERSION
| 40.264706 | 79 | 0.763331 |
e2fc498a3fa3a82f6d96a41549f7b9d73f356da1 | 608 | py | Python | codeswiftr/home/migrations/0007_applanding_reviews.py | bogdan-veliscu/dev-portfolio-website | 43eb323c67f3fd691388e79039e32479c1bc0974 | ["Apache-2.0"] | null | null | null | codeswiftr/home/migrations/0007_applanding_reviews.py | bogdan-veliscu/dev-portfolio-website | 43eb323c67f3fd691388e79039e32479c1bc0974 | ["Apache-2.0"] | 4 | 2021-03-30T13:40:00.000Z | 2021-09-22T19:12:56.000Z | codeswiftr/home/migrations/0007_applanding_reviews.py | bogdan-veliscu/dev-portfolio-website | 43eb323c67f3fd691388e79039e32479c1bc0974 | ["Apache-2.0"] | null | null | null |
# Generated by Django 3.0.5 on 2020-06-25 03:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('testimonials', '0001_initial'),
('home', '0006_applanding_about_info'),
]
operations = [
migrations.AddField(
model_name='applanding',
name='reviews',
field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='testimonials.TestimonialsSection', verbose_name='reviews'),
preserve_default=False,
),
]
| 27.636364 | 158 | 0.652961 |
decc91308aa37a5094a37e597c92f90ae3534513 | 61 | py | Python | settings/__init__.py | ihoru/todoist_bot | a83648786a1a6f7d6ef50248e1f931b166013216 | ["MIT"] | 37 | 2017-07-21T10:45:03.000Z | 2022-02-02T20:13:40.000Z | settings/__init__.py | mateuszasdf/todoist_bot | e56c2d729f8cbb31642e3774708208c67295483a | ["MIT"] | 4 | 2018-01-10T23:30:00.000Z | 2020-01-30T19:43:29.000Z | settings/__init__.py | mateuszasdf/todoist_bot | e56c2d729f8cbb31642e3774708208c67295483a | ["MIT"] | 6 | 2017-12-12T15:32:54.000Z | 2022-03-24T14:21:43.000Z |
from .global_settings import *
from .local_settings import *
| 20.333333 | 30 | 0.803279 |
aab9d3d9b028cd7c952ab8da0f7da661474aeaa2 | 26,099 | py | Python | test/test_functional_tensor.py | StadlerMaximilian/vision | 5dae7a69c18ed3f59c2b6b37a6899d2ef7c08c97 | ["BSD-3-Clause"] | null | null | null | test/test_functional_tensor.py | StadlerMaximilian/vision | 5dae7a69c18ed3f59c2b6b37a6899d2ef7c08c97 | ["BSD-3-Clause"] | null | null | null | test/test_functional_tensor.py | StadlerMaximilian/vision | 5dae7a69c18ed3f59c2b6b37a6899d2ef7c08c97 | ["BSD-3-Clause"] | null | null | null |
import unittest
import random
import colorsys
import math
from PIL import Image
from PIL.Image import NEAREST, BILINEAR, BICUBIC
import numpy as np
import torch
import torchvision.transforms as transforms
import torchvision.transforms.functional_tensor as F_t
import torchvision.transforms.functional_pil as F_pil
import torchvision.transforms.functional as F
class Tester(unittest.TestCase):
def _create_data(self, height=3, width=3, channels=3):
tensor = torch.randint(0, 255, (channels, height, width), dtype=torch.uint8)
pil_img = Image.fromarray(tensor.permute(1, 2, 0).contiguous().numpy())
return tensor, pil_img
def compareTensorToPIL(self, tensor, pil_image, msg=None):
pil_tensor = torch.as_tensor(np.array(pil_image).transpose((2, 0, 1)))
if msg is None:
msg = "tensor:\n{} \ndid not equal PIL tensor:\n{}".format(tensor, pil_tensor)
self.assertTrue(tensor.equal(pil_tensor), msg)
def approxEqualTensorToPIL(self, tensor, pil_image, tol=1e-5, msg=None):
pil_tensor = torch.as_tensor(np.array(pil_image).transpose((2, 0, 1))).to(tensor)
mae = torch.abs(tensor - pil_tensor).mean().item()
self.assertTrue(
mae < tol,
msg="{}: mae={}, tol={}: \n{}\nvs\n{}".format(msg, mae, tol, tensor[0, :10, :10], pil_tensor[0, :10, :10])
)
def test_vflip(self):
script_vflip = torch.jit.script(F_t.vflip)
img_tensor = torch.randn(3, 16, 16)
img_tensor_clone = img_tensor.clone()
vflipped_img = F_t.vflip(img_tensor)
vflipped_img_again = F_t.vflip(vflipped_img)
self.assertEqual(vflipped_img.shape, img_tensor.shape)
self.assertTrue(torch.equal(img_tensor, vflipped_img_again))
self.assertTrue(torch.equal(img_tensor, img_tensor_clone))
# scriptable function test
vflipped_img_script = script_vflip(img_tensor)
self.assertTrue(torch.equal(vflipped_img, vflipped_img_script))
def test_hflip(self):
script_hflip = torch.jit.script(F_t.hflip)
img_tensor = torch.randn(3, 16, 16)
img_tensor_clone = img_tensor.clone()
hflipped_img = F_t.hflip(img_tensor)
hflipped_img_again = F_t.hflip(hflipped_img)
self.assertEqual(hflipped_img.shape, img_tensor.shape)
self.assertTrue(torch.equal(img_tensor, hflipped_img_again))
self.assertTrue(torch.equal(img_tensor, img_tensor_clone))
# scriptable function test
hflipped_img_script = script_hflip(img_tensor)
self.assertTrue(torch.equal(hflipped_img, hflipped_img_script))
def test_crop(self):
script_crop = torch.jit.script(F_t.crop)
img_tensor, pil_img = self._create_data(16, 18)
test_configs = [
(1, 2, 4, 5), # crop inside top-left corner
(2, 12, 3, 4), # crop inside top-right corner
(8, 3, 5, 6), # crop inside bottom-left corner
(8, 11, 4, 3), # crop inside bottom-right corner
]
for top, left, height, width in test_configs:
pil_img_cropped = F.crop(pil_img, top, left, height, width)
img_tensor_cropped = F.crop(img_tensor, top, left, height, width)
self.compareTensorToPIL(img_tensor_cropped, pil_img_cropped)
img_tensor_cropped = script_crop(img_tensor, top, left, height, width)
self.compareTensorToPIL(img_tensor_cropped, pil_img_cropped)
def test_hsv2rgb(self):
shape = (3, 100, 150)
for _ in range(20):
img = torch.rand(*shape, dtype=torch.float)
ft_img = F_t._hsv2rgb(img).permute(1, 2, 0).flatten(0, 1)
h, s, v, = img.unbind(0)
h = h.flatten().numpy()
s = s.flatten().numpy()
v = v.flatten().numpy()
rgb = []
for h1, s1, v1 in zip(h, s, v):
rgb.append(colorsys.hsv_to_rgb(h1, s1, v1))
colorsys_img = torch.tensor(rgb, dtype=torch.float32)
max_diff = (ft_img - colorsys_img).abs().max()
self.assertLess(max_diff, 1e-5)
def test_rgb2hsv(self):
shape = (3, 150, 100)
for _ in range(20):
img = torch.rand(*shape, dtype=torch.float)
ft_hsv_img = F_t._rgb2hsv(img).permute(1, 2, 0).flatten(0, 1)
r, g, b, = img.unbind(0)
r = r.flatten().numpy()
g = g.flatten().numpy()
b = b.flatten().numpy()
hsv = []
for r1, g1, b1 in zip(r, g, b):
hsv.append(colorsys.rgb_to_hsv(r1, g1, b1))
colorsys_img = torch.tensor(hsv, dtype=torch.float32)
ft_hsv_img_h, ft_hsv_img_sv = torch.split(ft_hsv_img, [1, 2], dim=1)
colorsys_img_h, colorsys_img_sv = torch.split(colorsys_img, [1, 2], dim=1)
max_diff_h = ((colorsys_img_h * 2 * math.pi).sin() - (ft_hsv_img_h * 2 * math.pi).sin()).abs().max()
max_diff_sv = (colorsys_img_sv - ft_hsv_img_sv).abs().max()
max_diff = max(max_diff_h, max_diff_sv)
self.assertLess(max_diff, 1e-5)
def test_adjustments(self):
script_adjust_brightness = torch.jit.script(F_t.adjust_brightness)
script_adjust_contrast = torch.jit.script(F_t.adjust_contrast)
script_adjust_saturation = torch.jit.script(F_t.adjust_saturation)
fns = ((F.adjust_brightness, F_t.adjust_brightness, script_adjust_brightness),
(F.adjust_contrast, F_t.adjust_contrast, script_adjust_contrast),
(F.adjust_saturation, F_t.adjust_saturation, script_adjust_saturation))
for _ in range(20):
channels = 3
dims = torch.randint(1, 50, (2,))
shape = (channels, dims[0], dims[1])
if torch.randint(0, 2, (1,)) == 0:
img = torch.rand(*shape, dtype=torch.float)
else:
img = torch.randint(0, 256, shape, dtype=torch.uint8)
factor = 3 * torch.rand(1)
img_clone = img.clone()
for f, ft, sft in fns:
ft_img = ft(img, factor)
sft_img = sft(img, factor)
if not img.dtype.is_floating_point:
ft_img = ft_img.to(torch.float) / 255
sft_img = sft_img.to(torch.float) / 255
img_pil = transforms.ToPILImage()(img)
f_img_pil = f(img_pil, factor)
f_img = transforms.ToTensor()(f_img_pil)
# F uses uint8 and F_t uses float, so there is a small
# difference in values caused by (at most 5) truncations.
max_diff = (ft_img - f_img).abs().max()
max_diff_scripted = (sft_img - f_img).abs().max()
self.assertLess(max_diff, 5 / 255 + 1e-5)
self.assertLess(max_diff_scripted, 5 / 255 + 1e-5)
self.assertTrue(torch.equal(img, img_clone))
# test for class interface
f = transforms.ColorJitter(brightness=factor.item())
scripted_fn = torch.jit.script(f)
scripted_fn(img)
f = transforms.ColorJitter(contrast=factor.item())
scripted_fn = torch.jit.script(f)
scripted_fn(img)
f = transforms.ColorJitter(saturation=factor.item())
scripted_fn = torch.jit.script(f)
scripted_fn(img)
f = transforms.ColorJitter(brightness=1)
scripted_fn = torch.jit.script(f)
scripted_fn(img)
def test_rgb_to_grayscale(self):
script_rgb_to_grayscale = torch.jit.script(F_t.rgb_to_grayscale)
img_tensor = torch.randint(0, 255, (3, 16, 16), dtype=torch.uint8)
img_tensor_clone = img_tensor.clone()
grayscale_tensor = F_t.rgb_to_grayscale(img_tensor).to(int)
grayscale_pil_img = torch.tensor(np.array(F.to_grayscale(F.to_pil_image(img_tensor)))).to(int)
max_diff = (grayscale_tensor - grayscale_pil_img).abs().max()
self.assertLess(max_diff, 1.0001)
self.assertTrue(torch.equal(img_tensor, img_tensor_clone))
# scriptable function test
grayscale_script = script_rgb_to_grayscale(img_tensor).to(int)
self.assertTrue(torch.equal(grayscale_script, grayscale_tensor))
def test_center_crop(self):
script_center_crop = torch.jit.script(F.center_crop)
img_tensor, pil_img = self._create_data(32, 34)
cropped_pil_image = F.center_crop(pil_img, [10, 11])
cropped_tensor = F.center_crop(img_tensor, [10, 11])
self.compareTensorToPIL(cropped_tensor, cropped_pil_image)
cropped_tensor = script_center_crop(img_tensor, [10, 11])
self.compareTensorToPIL(cropped_tensor, cropped_pil_image)
def test_five_crop(self):
script_five_crop = torch.jit.script(F.five_crop)
img_tensor, pil_img = self._create_data(32, 34)
cropped_pil_images = F.five_crop(pil_img, [10, 11])
cropped_tensors = F.five_crop(img_tensor, [10, 11])
for i in range(5):
self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])
cropped_tensors = script_five_crop(img_tensor, [10, 11])
for i in range(5):
self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])
def test_ten_crop(self):
script_ten_crop = torch.jit.script(F.ten_crop)
img_tensor, pil_img = self._create_data(32, 34)
cropped_pil_images = F.ten_crop(pil_img, [10, 11])
cropped_tensors = F.ten_crop(img_tensor, [10, 11])
for i in range(10):
self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])
cropped_tensors = script_ten_crop(img_tensor, [10, 11])
for i in range(10):
self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])
def test_pad(self):
script_fn = torch.jit.script(F_t.pad)
tensor, pil_img = self._create_data(7, 8)
for dt in [None, torch.float32, torch.float64]:
if dt is not None:
# This is a trivial cast to float of uint8 data to test all cases
tensor = tensor.to(dt)
for pad in [2, [3, ], [0, 3], (3, 3), [4, 2, 4, 3]]:
configs = [
{"padding_mode": "constant", "fill": 0},
{"padding_mode": "constant", "fill": 10},
{"padding_mode": "constant", "fill": 20},
{"padding_mode": "edge"},
{"padding_mode": "reflect"},
{"padding_mode": "symmetric"},
]
for kwargs in configs:
pad_tensor = F_t.pad(tensor, pad, **kwargs)
pad_pil_img = F_pil.pad(pil_img, pad, **kwargs)
pad_tensor_8b = pad_tensor
# we need to cast to uint8 to compare with PIL image
if pad_tensor_8b.dtype != torch.uint8:
pad_tensor_8b = pad_tensor_8b.to(torch.uint8)
self.compareTensorToPIL(pad_tensor_8b, pad_pil_img, msg="{}, {}".format(pad, kwargs))
if isinstance(pad, int):
script_pad = [pad, ]
else:
script_pad = pad
pad_tensor_script = script_fn(tensor, script_pad, **kwargs)
self.assertTrue(pad_tensor.equal(pad_tensor_script), msg="{}, {}".format(pad, kwargs))
with self.assertRaises(ValueError, msg="Padding can not be negative for symmetric padding_mode"):
F_t.pad(tensor, (-2, -3), padding_mode="symmetric")
def test_adjust_gamma(self):
script_fn = torch.jit.script(F_t.adjust_gamma)
tensor, pil_img = self._create_data(26, 36)
for dt in [torch.float64, torch.float32, None]:
if dt is not None:
tensor = F.convert_image_dtype(tensor, dt)
gammas = [0.8, 1.0, 1.2]
gains = [0.7, 1.0, 1.3]
for gamma, gain in zip(gammas, gains):
adjusted_tensor = F_t.adjust_gamma(tensor, gamma, gain)
adjusted_pil = F_pil.adjust_gamma(pil_img, gamma, gain)
scripted_result = script_fn(tensor, gamma, gain)
self.assertEqual(adjusted_tensor.dtype, scripted_result.dtype)
self.assertEqual(adjusted_tensor.size()[1:], adjusted_pil.size[::-1])
rbg_tensor = adjusted_tensor
if adjusted_tensor.dtype != torch.uint8:
rbg_tensor = F.convert_image_dtype(adjusted_tensor, torch.uint8)
self.compareTensorToPIL(rbg_tensor, adjusted_pil)
self.assertTrue(adjusted_tensor.equal(scripted_result))
def test_resize(self):
script_fn = torch.jit.script(F_t.resize)
tensor, pil_img = self._create_data(26, 36)
for dt in [None, torch.float32, torch.float64]:
if dt is not None:
# This is a trivial cast to float of uint8 data to test all cases
tensor = tensor.to(dt)
for size in [32, 26, [32, ], [32, 32], (32, 32), [26, 35]]:
for interpolation in [BILINEAR, BICUBIC, NEAREST]:
resized_tensor = F_t.resize(tensor, size=size, interpolation=interpolation)
resized_pil_img = F_pil.resize(pil_img, size=size, interpolation=interpolation)
self.assertEqual(
resized_tensor.size()[1:], resized_pil_img.size[::-1], msg="{}, {}".format(size, interpolation)
)
if interpolation != NEAREST:
# We can not check values if mode = NEAREST, as results are different
# E.g. resized_tensor = [[a, a, b, c, d, d, e, ...]]
# E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
resized_tensor_f = resized_tensor
# we need to cast to uint8 to compare with PIL image
if resized_tensor_f.dtype == torch.uint8:
resized_tensor_f = resized_tensor_f.to(torch.float)
# Pay attention to high tolerance for MAE
self.approxEqualTensorToPIL(
resized_tensor_f, resized_pil_img, tol=8.0, msg="{}, {}".format(size, interpolation)
)
if isinstance(size, int):
script_size = [size, ]
else:
script_size = size
resize_result = script_fn(tensor, size=script_size, interpolation=interpolation)
self.assertTrue(resized_tensor.equal(resize_result), msg="{}, {}".format(size, interpolation))
def test_resized_crop(self):
# test values of F.resized_crop in several cases:
# 1) resize to the same size, crop to the same size => should be identity
tensor, _ = self._create_data(26, 36)
for i in [0, 2, 3]:
out_tensor = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 36], interpolation=i)
self.assertTrue(tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))
# 2) resize by half and crop a TL corner
tensor, _ = self._create_data(26, 36)
out_tensor = F.resized_crop(tensor, top=0, left=0, height=20, width=30, size=[10, 15], interpolation=0)
expected_out_tensor = tensor[:, :20:2, :30:2]
self.assertTrue(
expected_out_tensor.equal(out_tensor),
msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10])
)
def test_affine(self):
# Tests on square and rectangular images
scripted_affine = torch.jit.script(F.affine)
for tensor, pil_img in [self._create_data(26, 26), self._create_data(32, 26)]:
# 1) identity map
out_tensor = F.affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0)
self.assertTrue(
tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])
)
out_tensor = scripted_affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0)
self.assertTrue(
tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])
)
if pil_img.size[0] == pil_img.size[1]:
# 2) Test rotation
test_configs = [
(90, torch.rot90(tensor, k=1, dims=(-1, -2))),
(45, None),
(30, None),
(-30, None),
(-45, None),
(-90, torch.rot90(tensor, k=-1, dims=(-1, -2))),
(180, torch.rot90(tensor, k=2, dims=(-1, -2))),
]
for a, true_tensor in test_configs:
for fn in [F.affine, scripted_affine]:
out_tensor = fn(tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0)
if true_tensor is not None:
self.assertTrue(
true_tensor.equal(out_tensor),
msg="{}\n{} vs \n{}".format(a, out_tensor[0, :5, :5], true_tensor[0, :5, :5])
)
else:
true_tensor = out_tensor
out_pil_img = F.affine(
pil_img, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0
)
out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
num_diff_pixels = (true_tensor != out_pil_tensor).sum().item() / 3.0
ratio_diff_pixels = num_diff_pixels / true_tensor.shape[-1] / true_tensor.shape[-2]
# Tolerance : less than 6% of different pixels
self.assertLess(
ratio_diff_pixels,
0.06,
msg="{}\n{} vs \n{}".format(
ratio_diff_pixels, true_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
)
)
else:
test_configs = [
90, 45, 15, -30, -60, -120
]
for a in test_configs:
for fn in [F.affine, scripted_affine]:
out_tensor = fn(tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0)
out_pil_img = F.affine(
pil_img, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0
)
out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
# Tolerance : less than 3% of different pixels
self.assertLess(
ratio_diff_pixels,
0.03,
msg="{}: {}\n{} vs \n{}".format(
a, ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
)
)
# 3) Test translation
test_configs = [
[10, 12], (-12, -13)
]
for t in test_configs:
for fn in [F.affine, scripted_affine]:
out_tensor = fn(tensor, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], resample=0)
out_pil_img = F.affine(pil_img, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], resample=0)
self.compareTensorToPIL(out_tensor, out_pil_img)
            # 4) Test rotation + translation + scale + shear
test_configs = [
(45, [5, 6], 1.0, [0.0, 0.0]),
(33, (5, -4), 1.0, [0.0, 0.0]),
(45, [-5, 4], 1.2, [0.0, 0.0]),
(33, (-4, -8), 2.0, [0.0, 0.0]),
(85, (10, -10), 0.7, [0.0, 0.0]),
(0, [0, 0], 1.0, [35.0, ]),
(-25, [0, 0], 1.2, [0.0, 15.0]),
(-45, [-10, 0], 0.7, [2.0, 5.0]),
(-45, [-10, -10], 1.2, [4.0, 5.0]),
(-90, [0, 0], 1.0, [0.0, 0.0]),
]
for r in [0, ]:
for a, t, s, sh in test_configs:
out_pil_img = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh, resample=r)
out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
for fn in [F.affine, scripted_affine]:
out_tensor = fn(tensor, angle=a, translate=t, scale=s, shear=sh, resample=r)
num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
# Tolerance : less than 5% of different pixels
self.assertLess(
ratio_diff_pixels,
0.05,
msg="{}: {}\n{} vs \n{}".format(
(r, a, t, s, sh), ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
)
)
def test_rotate(self):
# Tests on square image
scripted_rotate = torch.jit.script(F.rotate)
for tensor, pil_img in [self._create_data(26, 26), self._create_data(32, 26)]:
img_size = pil_img.size
centers = [
None,
(int(img_size[0] * 0.3), int(img_size[0] * 0.4)),
[int(img_size[0] * 0.5), int(img_size[0] * 0.6)]
]
for r in [0, ]:
for a in range(-180, 180, 17):
for e in [True, False]:
for c in centers:
out_pil_img = F.rotate(pil_img, angle=a, resample=r, expand=e, center=c)
out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
for fn in [F.rotate, scripted_rotate]:
out_tensor = fn(tensor, angle=a, resample=r, expand=e, center=c)
self.assertEqual(
out_tensor.shape,
out_pil_tensor.shape,
msg="{}: {} vs {}".format(
(img_size, r, a, e, c), out_tensor.shape, out_pil_tensor.shape
)
)
num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
# Tolerance : less than 2% of different pixels
self.assertLess(
ratio_diff_pixels,
0.02,
msg="{}: {}\n{} vs \n{}".format(
(img_size, r, a, e, c),
ratio_diff_pixels,
out_tensor[0, :7, :7],
out_pil_tensor[0, :7, :7]
)
)
def test_perspective(self):
from torchvision.transforms import RandomPerspective
for tensor, pil_img in [self._create_data(26, 34), self._create_data(26, 26)]:
            scripted_transform = torch.jit.script(F.perspective)
test_configs = [
[[[0, 0], [33, 0], [33, 25], [0, 25]], [[3, 2], [32, 3], [30, 24], [2, 25]]],
[[[3, 2], [32, 3], [30, 24], [2, 25]], [[0, 0], [33, 0], [33, 25], [0, 25]]],
[[[3, 2], [32, 3], [30, 24], [2, 25]], [[5, 5], [30, 3], [33, 19], [4, 25]]],
]
n = 10
test_configs += [
RandomPerspective.get_params(pil_img.size[0], pil_img.size[1], i / n) for i in range(n)
]
for r in [0, ]:
for spoints, epoints in test_configs:
out_pil_img = F.perspective(pil_img, startpoints=spoints, endpoints=epoints, interpolation=r)
out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
                for fn in [F.perspective, scripted_transform]:
out_tensor = fn(tensor, startpoints=spoints, endpoints=epoints, interpolation=r)
num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
# Tolerance : less than 5% of different pixels
self.assertLess(
ratio_diff_pixels,
0.05,
msg="{}: {}\n{} vs \n{}".format(
(r, spoints, epoints),
ratio_diff_pixels,
out_tensor[0, :7, :7],
out_pil_tensor[0, :7, :7]
)
)
if __name__ == '__main__':
unittest.main()
| 45.86819 | 119 | 0.523123 |
88999df5a26e49f3577699f660527ea3613b70bf | 5,367 | py | Python | tools/pubsub2inbox/helpers/grant-gsuite-role.py | ruchirjain86/professional-services | 739ac0f5ffc8237f750804fa9f0f14d4d918a0fa | ["Apache-2.0"] | 3 | 2019-03-22T12:55:55.000Z | 2020-09-23T11:41:07.000Z | tools/pubsub2inbox/helpers/grant-gsuite-role.py | ruchirjain86/professional-services | 739ac0f5ffc8237f750804fa9f0f14d4d918a0fa | ["Apache-2.0"] | 7 | 2021-08-18T19:14:12.000Z | 2022-03-27T07:20:02.000Z | tools/pubsub2inbox/helpers/grant-gsuite-role.py | ruchirjain86/professional-services | 739ac0f5ffc8237f750804fa9f0f14d4d918a0fa | ["Apache-2.0"] | 2 | 2019-03-22T12:55:59.000Z | 2019-10-24T20:21:55.000Z |
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import argparse
import pickle
from googleapiclient import discovery, errors
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = [
'https://www.googleapis.com/auth/iam',
'https://www.googleapis.com/auth/admin.directory.rolemanagement'
]
parser = argparse.ArgumentParser(
description='Tool to grant G Suite admin roles to a service account.')
parser.add_argument('--role',
default='Groups Administrator',
help='Role to grant.')
parser.add_argument('--project',
default='',
help='Set the project where the service account is.')
parser.add_argument('--credentials',
default='credentials.json',
help='An OAuth2 client secrets file in JSON.')
parser.add_argument(
'customer_id',
help='Customer ID from admin.google.com > Account > Account settings')
parser.add_argument('service_account', help='Service account email.')
args = parser.parse_args()
if not os.path.exists(args.credentials):
print(
'You\'ll need to create an OAuth2 client ID and secret for this application.',
file=sys.stderr)
print(
'In Cloud Console, go to APIs & Services > Credentials > Create Credential, ',
file=sys.stderr)
print('Then click on Create credentials and select OAuth client ID.\n',
file=sys.stderr)
print(
'Select Desktop app and type application name (eg. "Workspace Role Granter")',
file=sys.stderr)
print(
        'Click Ok, find the application in the list, and click the download button.\n',
file=sys.stderr)
print(
'Place the file in current directory named as credentials.json\nor specify path with --credentials argument.',
file=sys.stderr)
sys.exit(1)
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
credentials = None
PICKLE_FILE = 'grant-gsuite-role.pickle'
if os.path.exists(PICKLE_FILE):
with open(PICKLE_FILE, 'rb') as token:
credentials = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not credentials or not credentials.valid:
if credentials and credentials.expired and credentials.refresh_token:
credentials.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
args.credentials, SCOPES)
credentials = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(PICKLE_FILE, 'wb') as token:
pickle.dump(credentials, token)
if not args.project:
    p = re.compile(r'(@)(.+?)(\.iam\.gserviceaccount\.com)')
m = p.search(args.service_account)
if m:
args.project = m.group(2)
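# For example, a (hypothetical) service account such as
# "robot@my-project.iam.gserviceaccount.com" yields "my-project" as the project here.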
admin_service = discovery.build('admin',
'directory_v1',
credentials=credentials)
request = admin_service.roles().list(customer=args.customer_id)
response = request.execute()
roleId = None
while request:
for role in response['items']:
if role['roleName'] == args.role or role['roleDescription'] == args.role:
roleId = role['roleId']
break
request = admin_service.roles().list_next(request, response)
if request:
response = request.execute()
if not roleId:
print('Unable to find role "%s"!' % (args.role), file=sys.stderr)
sys.exit(1)
iam_service = discovery.build('iam', 'v1', credentials=credentials)
request = iam_service.projects().serviceAccounts().get(
name="projects/%s/serviceAccounts/%s" %
(args.project, args.service_account))
response = request.execute()
if 'uniqueId' not in response:
print('Unable to find service account "%s"!' % (args.service_account),
file=sys.stderr)
sys.exit(1)
uniqueId = response['uniqueId']
try:
admin_service.roleAssignments().insert(customer=args.customer_id,
body={
'assignedTo': uniqueId,
'roleId': roleId,
'scopeType': 'CUSTOMER'
}).execute()
print('Service account "%s" (%s) has been granted role %s!' %
(args.service_account, uniqueId, roleId),
file=sys.stderr)
except errors.HttpError as exc:
if 'HttpError 500' in str(exc):
print(
'Received error 500, which probably means service account already has permissions.',
file=sys.stderr)
else:
raise exc
| 38.891304 | 118 | 0.647289 |
6a70540db41d25943aacfbef2e7e1c5391cb006b | 9,113 | py | Python | karbor/tests/unit/protection/test_resource_flow.py | Hybrid-Cloud/hybrid-smaug | a50b4fefb1677134aa8206724342fdff6e3b058b | ["Apache-2.0"] | null | null | null | karbor/tests/unit/protection/test_resource_flow.py | Hybrid-Cloud/hybrid-smaug | a50b4fefb1677134aa8206724342fdff6e3b058b | ["Apache-2.0"] | null | null | null | karbor/tests/unit/protection/test_resource_flow.py | Hybrid-Cloud/hybrid-smaug | a50b4fefb1677134aa8206724342fdff6e3b058b | ["Apache-2.0"] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import partial
import mock
from karbor.common import constants
from karbor.resource import Resource
from karbor.services.protection.flows.workflow import TaskFlowEngine
from karbor.services.protection import graph
from karbor.services.protection import resource_flow
from karbor.services.protection import restore_heat
from karbor.tests import base
from karbor.tests.unit.protection import fakes
from oslo_config import cfg
CONF = cfg.CONF
(
parent_type,
child_type,
grandchild_type,
) = fakes.FakeProtectionPlugin.SUPPORTED_RESOURCES
parent = Resource(id='A1', name='parent', type=parent_type)
child = Resource(id='B1', name='child', type=child_type)
grandchild = Resource(id='C1', name='grandchild', type=grandchild_type)
class ResourceFlowTest(base.TestCase):
def setUp(self):
super(ResourceFlowTest, self).setUp()
self.resource_graph = {
parent: [child],
child: [grandchild],
grandchild: [],
}
self.provider = fakes.FakeProvider()
self.test_graph = graph.build_graph([parent],
self.resource_graph.__getitem__)
self.taskflow_engine = TaskFlowEngine()
def _walk_operation(self, protection, operation_type,
checkpoint='checkpoint', parameters={}, context=None,
**kwargs):
plugin_map = {
parent_type: protection,
child_type: protection,
grandchild_type: protection,
}
flow = resource_flow.build_resource_flow(operation_type,
context,
self.taskflow_engine,
plugin_map,
self.test_graph,
parameters)
store = {
'checkpoint': checkpoint
}
store.update(kwargs)
engine = self.taskflow_engine.get_engine(flow,
engine='parallel',
store=store)
self.taskflow_engine.run_engine(engine)
@mock.patch('karbor.tests.unit.protection.fakes.FakeProtectionPlugin')
def test_resource_no_impl(self, mock_protection):
for operation in constants.OPERATION_TYPES:
kwargs = {}
if operation == constants.OPERATION_RESTORE:
kwargs['heat_template'] = restore_heat.HeatTemplate()
kwargs['restore'] = None
self._walk_operation(mock_protection, operation, **kwargs)
@mock.patch('karbor.tests.unit.protection.fakes.FakeProtectionPlugin')
def test_resource_flow_callbacks(self, mock_protection):
for operation in constants.OPERATION_TYPES:
mock_operation = fakes.MockOperation()
get_operation_attr = 'get_{}_operation'.format(operation)
getattr(
mock_protection,
get_operation_attr
).return_value = mock_operation
kwargs = {}
if operation == constants.OPERATION_RESTORE:
kwargs['heat_template'] = restore_heat.HeatTemplate()
kwargs['restore'] = None
self._walk_operation(mock_protection, operation, **kwargs)
self.assertEqual(mock_operation.on_prepare_begin.call_count,
len(self.resource_graph))
self.assertEqual(mock_operation.on_prepare_finish.call_count,
len(self.resource_graph))
self.assertEqual(mock_operation.on_main.call_count,
len(self.resource_graph))
self.assertEqual(mock_operation.on_complete.call_count,
len(self.resource_graph))
@mock.patch('karbor.tests.unit.protection.fakes.FakeProtectionPlugin')
def test_resource_flow_parameters(self, mock_protection):
resource_a1_id = "{}#{}".format(parent_type, 'A1')
resource_b1_id = "{}#{}".format(child_type, 'B1')
parameters = {
resource_a1_id: {'option1': 'value1'},
resource_b1_id: {'option2': 'value2', 'option3': 'value3'},
parent_type: {'option4': 'value4'},
child_type: {'option3': 'value5'}
}
for operation in constants.OPERATION_TYPES:
mock_operation = fakes.MockOperation()
get_operation_attr = 'get_{}_operation'.format(operation)
getattr(
mock_protection,
get_operation_attr
).return_value = mock_operation
kwargs = {
'checkpoint': 'A',
'context': 'B',
}
if operation == constants.OPERATION_RESTORE:
kwargs['heat_template'] = restore_heat.HeatTemplate()
kwargs['restore'] = None
self._walk_operation(mock_protection, operation,
parameters=parameters, **kwargs)
for resource in self.resource_graph:
                resource_params = dict(parameters.get(resource.type, {}))  # copy so the shared parameters dict is not mutated
resource_id = '{}#{}'.format(resource.type, resource.id)
resource_params.update(parameters.get(resource_id, {}))
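                # For example, for child B1 this merges the type-level
                # {'option3': 'value5'} with the id-level
                # {'option2': 'value2', 'option3': 'value3'}, so the id-level
                # 'option3' value wins.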
mock_operation.on_prepare_begin.assert_any_call(
resource=resource,
parameters=resource_params,
**kwargs)
mock_operation.on_prepare_finish.assert_any_call(
resource=resource,
parameters=resource_params,
**kwargs)
mock_operation.on_main.assert_any_call(
resource=resource,
parameters=resource_params,
**kwargs)
mock_operation.on_complete.assert_any_call(
resource=resource,
parameters=resource_params,
**kwargs)
@mock.patch('karbor.tests.unit.protection.fakes.FakeProtectionPlugin')
def test_resource_flow_order(self, mock_protection):
def test_order(order_list, hook_type, resource, *args, **kwargs):
order_list.append((hook_type, resource.id))
operation = constants.OPERATION_PROTECT
mock_operation = fakes.MockOperation()
get_operation_attr = 'get_{}_operation'.format(operation)
getattr(
mock_protection,
get_operation_attr
).return_value = mock_operation
order_list = []
mock_operation.on_prepare_begin = partial(test_order, order_list,
'pre_begin')
mock_operation.on_prepare_finish = partial(test_order, order_list,
'pre_finish')
mock_operation.on_main = partial(test_order, order_list, 'main')
mock_operation.on_complete = partial(test_order, order_list,
'complete')
self._walk_operation(mock_protection, operation)
self.assertLess(order_list.index(('pre_begin', parent.id)),
order_list.index(('pre_begin', child.id)))
self.assertLess(order_list.index(('pre_begin', child.id)),
order_list.index(('pre_begin', grandchild.id)))
self.assertGreater(order_list.index(('pre_finish', parent.id)),
order_list.index(('pre_finish', child.id)))
self.assertGreater(order_list.index(('pre_finish', child.id)),
order_list.index(('pre_finish', grandchild.id)))
self.assertGreater(order_list.index(('complete', parent.id)),
order_list.index(('complete', child.id)))
self.assertGreater(order_list.index(('complete', child.id)),
order_list.index(('complete', grandchild.id)))
for resource_id in (parent.id, child.id, grandchild.id):
self.assertLess(order_list.index(('pre_begin', resource_id)),
order_list.index(('pre_finish', resource_id)))
self.assertLess(order_list.index(('pre_finish', resource_id)),
order_list.index(('main', resource_id)))
self.assertLess(order_list.index(('main', resource_id)),
order_list.index(('complete', resource_id)))
| 43.189573 | 78 | 0.587293 |
354965bde44164268dd7b09a596453eab29201d4 | 252 | py | Python | discounts/src/database/migration/config.py | dalmarcogd/mobstore | 0b542b9267771a1f4522990d592028dc30ee246f | ["Apache-2.0"] | null | null | null | discounts/src/database/migration/config.py | dalmarcogd/mobstore | 0b542b9267771a1f4522990d592028dc30ee246f | ["Apache-2.0"] | null | null | null | discounts/src/database/migration/config.py | dalmarcogd/mobstore | 0b542b9267771a1f4522990d592028dc30ee246f | ["Apache-2.0"] | null | null | null |
from alembic.config import Config
from src.settings import BASE_DIR, DATABASE_URI
alembic_cfg = Config()
alembic_cfg.set_main_option("script_location", f"{BASE_DIR}/src/database/migration")
alembic_cfg.set_main_option("sqlalchemy.url", DATABASE_URI)
| 31.5 | 84 | 0.825397 |
3f4b0717a7b75f4c6c4f7feebe36531561085149 | 25,153 | py | Python | silx/io/test/test_spech5.py | PiRK/silx | db6c1d2bdccfc6ec0811f2068dfbe9edefc38f20 | ["CC0-1.0"] | null | null | null | silx/io/test/test_spech5.py | PiRK/silx | db6c1d2bdccfc6ec0811f2068dfbe9edefc38f20 | ["CC0-1.0"] | 1 | 2019-05-16T14:18:23.000Z | 2019-05-16T14:18:23.000Z | silx/io/test/test_spech5.py | PiRK/silx | db6c1d2bdccfc6ec0811f2068dfbe9edefc38f20 | ["CC0-1.0"] | 1 | 2022-01-24T16:19:27.000Z | 2022-01-24T16:19:27.000Z |
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""Tests for spech5"""
import gc
from numpy import array_equal
import os
import sys
import tempfile
import unittest
import datetime
from functools import partial
from ..spech5 import (SpecH5, SpecH5Group,
SpecH5Dataset, spec_date_to_iso8601)
try:
import h5py
except ImportError:
h5py = None
__authors__ = ["P. Knobel"]
__license__ = "MIT"
__date__ = "12/01/2017"
sftext = """#F /tmp/sf.dat
#E 1455180875
#D Thu Feb 11 09:54:35 2016
#C imaging User = opid17
#O0 Pslit HGap MRTSlit UP MRTSlit DOWN
#O1 Sslit1 VOff Sslit1 HOff Sslit1 VGap
#o0 pshg mrtu mrtd
#o2 ss1vo ss1ho ss1vg
#J0 Seconds IA ion.mono Current
#J1 xbpmc2 idgap1 Inorm
#S 1 ascan ss1vo -4.55687 -0.556875 40 0.2
#D Thu Feb 11 09:55:20 2016
#T 0.2 (Seconds)
#P0 180.005 -0.66875 0.87125
#P1 14.74255 16.197579 12.238283
#N 4
#L MRTSlit UP second column 3rd_col
-1.23 5.89 8
8.478100E+01 5 1.56
3.14 2.73 -3.14
1.2 2.3 3.4
#S 25 ascan c3th 1.33245 1.52245 40 0.15
#D Sat 2015/03/14 03:53:50
#P0 80.005 -1.66875 1.87125
#P1 4.74255 6.197579 2.238283
#N 5
#L column0 column1 col2 col3
0.0 0.1 0.2 0.3
1.0 1.1 1.2 1.3
2.0 2.1 2.2 2.3
3.0 3.1 3.2 3.3
#S 1 aaaaaa
#D Thu Feb 11 10:00:32 2016
#@MCADEV 1
#@MCA %16C
#@CHANN 3 0 2 1
#@CALIB 1 2 3
#@CTIME 123.4 234.5 345.6
#N 3
#L uno duo
1 2
@A 0 1 2
@A 10 9 8
@A 1 1 1.1
3 4
@A 3.1 4 5
@A 7 6 5
@A 1 1 1
5 6
@A 6 7.7 8
@A 4 3 2
@A 1 1 1
"""
class TestSpecDate(unittest.TestCase):
"""
Test of the spec_date_to_iso8601 function.
"""
# TODO : time zone tests
# TODO : error cases
@classmethod
def setUpClass(cls):
import locale
# FYI : not threadsafe
cls.locale_saved = locale.setlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, 'C')
@classmethod
def tearDownClass(cls):
import locale
# FYI : not threadsafe
locale.setlocale(locale.LC_TIME, cls.locale_saved)
def setUp(self):
# covering all week days
self.n_days = range(1, 10)
# covering all months
self.n_months = range(1, 13)
self.n_years = [1999, 2016, 2020]
self.n_seconds = [0, 5, 26, 59]
self.n_minutes = [0, 9, 42, 59]
self.n_hours = [0, 2, 17, 23]
self.formats = ['%a %b %d %H:%M:%S %Y', '%a %Y/%m/%d %H:%M:%S']
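        # e.g. 'Thu Feb 11 09:55:20 2016' (first format) and
        # 'Sat 2015/03/14 03:53:50' (second format) both appear in sftext above and
        # are expected to convert to ISO 8601 strings such as '2016-02-11T09:55:20'.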
self.check_date_formats = partial(self.__check_date_formats,
year=self.n_years[0],
month=self.n_months[0],
day=self.n_days[0],
hour=self.n_hours[0],
minute=self.n_minutes[0],
second=self.n_seconds[0],
msg=None)
def __check_date_formats(self,
year,
month,
day,
hour,
minute,
second,
msg=None):
dt = datetime.datetime(year, month, day, hour, minute, second)
expected_date = dt.isoformat()
for i_fmt, fmt in enumerate(self.formats):
spec_date = dt.strftime(fmt)
iso_date = spec_date_to_iso8601(spec_date)
self.assertEqual(iso_date,
expected_date,
msg='Testing {0}. format={1}. '
'Expected "{2}", got "{3} ({4})" (dt={5}).'
''.format(msg,
i_fmt,
expected_date,
iso_date,
spec_date,
dt))
def testYearsNominal(self):
for year in self.n_years:
self.check_date_formats(year=year, msg='year')
def testMonthsNominal(self):
for month in self.n_months:
self.check_date_formats(month=month, msg='month')
def testDaysNominal(self):
for day in self.n_days:
self.check_date_formats(day=day, msg='day')
def testHoursNominal(self):
for hour in self.n_hours:
self.check_date_formats(hour=hour, msg='hour')
def testMinutesNominal(self):
for minute in self.n_minutes:
self.check_date_formats(minute=minute, msg='minute')
def testSecondsNominal(self):
for second in self.n_seconds:
self.check_date_formats(second=second, msg='second')
class TestSpecH5(unittest.TestCase):
@classmethod
def setUpClass(cls):
fd, cls.fname = tempfile.mkstemp()
if sys.version < '3.0':
os.write(fd, sftext)
else:
os.write(fd, bytes(sftext, 'ascii'))
os.close(fd)
@classmethod
def tearDownClass(cls):
os.unlink(cls.fname)
def setUp(self):
self.sfh5 = SpecH5(self.fname)
def tearDown(self):
# fix Win32 permission error when deleting temp file
del self.sfh5
gc.collect()
def testContainsFile(self):
self.assertIn("/1.2/measurement", self.sfh5)
self.assertIn("/25.1", self.sfh5)
self.assertIn("25.1", self.sfh5)
self.assertNotIn("25.2", self.sfh5)
# measurement is a child of a scan, full path would be required to
# access from root level
self.assertNotIn("measurement", self.sfh5)
# Groups may or may not have a trailing /
self.assertIn("/1.2/measurement/mca_1/", self.sfh5)
self.assertIn("/1.2/measurement/mca_1", self.sfh5)
# Datasets can't have a trailing /
self.assertNotIn("/1.2/measurement/mca_0/info/calibration/ ", self.sfh5)
# No mca_8
self.assertNotIn("/1.2/measurement/mca_8/info/calibration", self.sfh5)
# Link
self.assertIn("/1.2/measurement/mca_0/info/calibration", self.sfh5)
def testContainsGroup(self):
self.assertIn("measurement", self.sfh5["/1.2/"])
self.assertIn("measurement", self.sfh5["/1.2"])
self.assertIn("25.1", self.sfh5["/"])
self.assertNotIn("25.2", self.sfh5["/"])
self.assertIn("instrument/positioners/Sslit1 HOff", self.sfh5["/1.1"])
# illegal trailing "/" after dataset name
self.assertNotIn("instrument/positioners/Sslit1 HOff/",
self.sfh5["/1.1"])
# full path to element in group (OK)
self.assertIn("/1.1/instrument/positioners/Sslit1 HOff",
self.sfh5["/1.1/instrument"])
# full path to element outside group (illegal)
self.assertNotIn("/1.1/instrument/positioners/Sslit1 HOff",
self.sfh5["/1.1/measurement"])
def testDataColumn(self):
self.assertAlmostEqual(sum(self.sfh5["/1.2/measurement/duo"]),
12.0)
self.assertAlmostEqual(
sum(self.sfh5["1.1"]["measurement"]["MRTSlit UP"]),
87.891, places=4)
def testDate(self):
# start time is in Iso8601 format
self.assertEqual(self.sfh5["/1.1/start_time"],
b"2016-02-11T09:55:20")
self.assertEqual(self.sfh5["25.1/start_time"],
b"2015-03-14T03:53:50")
def testDatasetInstanceAttr(self):
"""The SpecH5Dataset objects must implement some dummy attributes
to improve compatibility with widgets dealing with h5py datasets."""
self.assertIsNone(self.sfh5["/1.1/start_time"].compression)
self.assertIsNone(self.sfh5["1.1"]["measurement"]["MRTSlit UP"].chunks)
# error message must be explicit
with self.assertRaisesRegexp(
AttributeError,
"SpecH5Dataset has no attribute tOTo"):
dummy = self.sfh5["/1.1/start_time"].tOTo
def testGet(self):
"""Test :meth:`SpecH5Group.get`"""
# default value of param *default* is None
self.assertIsNone(self.sfh5.get("toto"))
self.assertEqual(self.sfh5["25.1"].get("toto", default=-3),
-3)
self.assertEqual(self.sfh5.get("/1.1/start_time", default=-3),
b"2016-02-11T09:55:20")
def testGetClass(self):
"""Test :meth:`SpecH5Group.get`"""
if h5py is None:
self.skipTest("h5py is not available")
self.assertIs(self.sfh5["1.1"].get("start_time", getclass=True),
h5py.Dataset)
self.assertIs(self.sfh5["1.1"].get("instrument", getclass=True),
h5py.Group)
# spech5 does not define external link, so there is no way
# a group can *get* a SpecH5 class
def testGetItemGroup(self):
group = self.sfh5["25.1"]["instrument"]
self.assertEqual(group["positioners"].keys(),
["Pslit HGap", "MRTSlit UP", "MRTSlit DOWN",
"Sslit1 VOff", "Sslit1 HOff", "Sslit1 VGap"])
with self.assertRaises(KeyError):
group["Holy Grail"]
def testGetitemSpecH5(self):
self.assertEqual(self.sfh5["/1.2/instrument/positioners"],
self.sfh5["1.2"]["instrument"]["positioners"])
@unittest.skipIf(h5py is None, "test requires h5py (not installed)")
def testH5pyClass(self):
"""Test :attr:`h5py_class` returns the corresponding h5py class
(h5py.File, h5py.Group, h5py.Dataset)"""
a_file = self.sfh5
self.assertIs(a_file.h5py_class,
h5py.File)
a_group = self.sfh5["/1.2/measurement"]
self.assertIs(a_group.h5py_class,
h5py.Group)
a_dataset = self.sfh5["/1.1/instrument/positioners/Sslit1 HOff"]
self.assertIs(a_dataset.h5py_class,
h5py.Dataset)
def testHeader(self):
file_header = self.sfh5["/1.2/instrument/specfile/file_header"]
scan_header = self.sfh5["/1.2/instrument/specfile/scan_header"]
# convert ndarray(dtype=numpy.string_) to str
if sys.version < '3.0':
file_header = str(file_header[()])
scan_header = str(scan_header[()])
else:
file_header = str(file_header.astype(str))
scan_header = str(scan_header.astype(str))
# File header has 10 lines
self.assertEqual(len(file_header.split("\n")), 10)
# 1.2 has 9 scan & mca header lines
self.assertEqual(len(scan_header.split("\n")), 9)
# line 4 of file header
self.assertEqual(
file_header.split("\n")[3],
"#C imaging User = opid17")
# line 4 of scan header
scan_header = self.sfh5["25.1/instrument/specfile/scan_header"]
if sys.version < '3.0':
scan_header = str(scan_header[()])
else:
scan_header = str(scan_header[()].astype(str))
self.assertEqual(
scan_header.split("\n")[3],
"#P1 4.74255 6.197579 2.238283")
def testLinks(self):
self.assertTrue(
array_equal(self.sfh5["/1.2/measurement/mca_0/data"],
self.sfh5["/1.2/instrument/mca_0/data"])
)
self.assertTrue(
array_equal(self.sfh5["/1.2/measurement/mca_0/info/data"],
self.sfh5["/1.2/instrument/mca_0/data"])
)
self.assertTrue(
array_equal(self.sfh5["/1.2/measurement/mca_0/info/channels"],
self.sfh5["/1.2/instrument/mca_0/channels"])
)
self.assertEqual(self.sfh5["/1.2/measurement/mca_0/info/"].keys(),
self.sfh5["/1.2/instrument/mca_0/"].keys())
self.assertEqual(self.sfh5["/1.2/measurement/mca_0/info/preset_time"],
self.sfh5["/1.2/instrument/mca_0/preset_time"])
self.assertEqual(self.sfh5["/1.2/measurement/mca_0/info/live_time"],
self.sfh5["/1.2/instrument/mca_0/live_time"])
self.assertEqual(self.sfh5["/1.2/measurement/mca_0/info/elapsed_time"],
self.sfh5["/1.2/instrument/mca_0/elapsed_time"])
def testListScanIndices(self):
self.assertEqual(self.sfh5.keys(),
["1.1", "25.1", "1.2"])
self.assertEqual(self.sfh5["1.2"].attrs,
{"NX_class": "NXentry", })
def testMcaAbsent(self):
def access_absent_mca():
"""This must raise a KeyError, because scan 1.1 has no MCA"""
return self.sfh5["/1.1/measurement/mca_0/"]
self.assertRaises(KeyError, access_absent_mca)
def testMcaCalib(self):
mca0_calib = self.sfh5["/1.2/measurement/mca_0/info/calibration"]
mca1_calib = self.sfh5["/1.2/measurement/mca_1/info/calibration"]
self.assertEqual(mca0_calib.tolist(),
[1, 2, 3])
# calibration is unique in this scan and applies to all analysers
self.assertEqual(mca0_calib.tolist(),
mca1_calib.tolist())
def testMcaChannels(self):
mca0_chann = self.sfh5["/1.2/measurement/mca_0/info/channels"]
mca1_chann = self.sfh5["/1.2/measurement/mca_1/info/channels"]
self.assertEqual(mca0_chann.tolist(),
[0, 1, 2])
self.assertEqual(mca0_chann.tolist(),
mca1_chann.tolist())
def testMcaCtime(self):
"""Tests for #@CTIME mca header"""
datasets = ["preset_time", "live_time", "elapsed_time"]
for ds in datasets:
self.assertNotIn("/1.1/instrument/mca_0/" + ds, self.sfh5)
self.assertIn("/1.2/instrument/mca_0/" + ds, self.sfh5)
mca0_preset_time = self.sfh5["/1.2/instrument/mca_0/preset_time"]
mca1_preset_time = self.sfh5["/1.2/instrument/mca_1/preset_time"]
self.assertLess(mca0_preset_time - 123.4,
10**-5)
        # ctime is unique in this scan and applies to all analysers
self.assertEqual(mca0_preset_time,
mca1_preset_time)
mca0_live_time = self.sfh5["/1.2/instrument/mca_0/live_time"]
mca1_live_time = self.sfh5["/1.2/instrument/mca_1/live_time"]
self.assertLess(mca0_live_time - 234.5,
10**-5)
self.assertEqual(mca0_live_time,
mca1_live_time)
mca0_elapsed_time = self.sfh5["/1.2/instrument/mca_0/elapsed_time"]
mca1_elapsed_time = self.sfh5["/1.2/instrument/mca_1/elapsed_time"]
self.assertLess(mca0_elapsed_time - 345.6,
10**-5)
self.assertEqual(mca0_elapsed_time,
mca1_elapsed_time)
def testMcaData(self):
# sum 1st MCA in scan 1.2 over rows
mca_0_data = self.sfh5["/1.2/measurement/mca_0/data"]
for summed_row, expected in zip(mca_0_data.sum(axis=1).tolist(),
[3.0, 12.1, 21.7]):
self.assertAlmostEqual(summed_row, expected, places=4)
        # sum 3rd MCA in scan 1.2 along both axes
mca_2_data = self.sfh5["1.2"]["measurement"]["mca_2"]["data"]
self.assertAlmostEqual(sum(sum(mca_2_data)), 9.1, places=5)
# attrs
self.assertEqual(mca_0_data.attrs, {"interpretation": "spectrum"})
def testMotorPosition(self):
positioners_group = self.sfh5["/1.1/instrument/positioners"]
        # MRTSlit DOWN position is defined in #P0 scan header line
self.assertAlmostEqual(float(positioners_group["MRTSlit DOWN"]),
0.87125)
# MRTSlit UP position is defined in first data column
for a, b in zip(positioners_group["MRTSlit UP"].tolist(),
[-1.23, 8.478100E+01, 3.14, 1.2]):
self.assertAlmostEqual(float(a), b, places=4)
def testNumberMcaAnalysers(self):
"""Scan 1.2 has 2 data columns + 3 mca spectra per data line."""
self.assertEqual(len(self.sfh5["1.2"]["measurement"]), 5)
def testTitle(self):
self.assertEqual(self.sfh5["/25.1/title"],
b"25 ascan c3th 1.33245 1.52245 40 0.15")
# visit and visititems ignore links
def testVisit(self):
name_list = []
self.sfh5.visit(name_list.append)
self.assertIn('/1.2/instrument/positioners/Pslit HGap', name_list)
self.assertIn("/1.2/instrument/specfile/scan_header", name_list)
self.assertEqual(len(name_list), 78)
def testVisitItems(self):
dataset_name_list = []
def func(name, obj):
if isinstance(obj, SpecH5Dataset):
dataset_name_list.append(name)
self.sfh5.visititems(func)
self.assertIn('/1.2/instrument/positioners/Pslit HGap', dataset_name_list)
self.assertEqual(len(dataset_name_list), 57)
def testNotSpecH5(self):
fd, fname = tempfile.mkstemp()
os.write(fd, b"Not a spec file!")
os.close(fd)
self.assertRaises(IOError, SpecH5, fname)
os.unlink(fname)
sftext_multi_mca_headers = """
#S 1 aaaaaa
#@MCA %16C
#@CHANN 3 0 2 1
#@CALIB 1 2 3
#@CTIME 123.4 234.5 345.6
#@MCA %16C
#@CHANN 3 1 3 1
#@CALIB 5.5 6.6 7.7
#@CTIME 10 11 12
#N 3
#L uno duo
1 2
@A 0 1 2
@A 10 9 8
3 4
@A 3.1 4 5
@A 7 6 5
5 6
@A 6 7.7 8
@A 4 3 2
"""
class TestSpecH5MultiMca(unittest.TestCase):
@classmethod
def setUpClass(cls):
fd, cls.fname = tempfile.mkstemp(text=False)
if sys.version < '3.0':
os.write(fd, sftext_multi_mca_headers)
else:
os.write(fd, bytes(sftext_multi_mca_headers, 'ascii'))
os.close(fd)
@classmethod
def tearDownClass(cls):
os.unlink(cls.fname)
def setUp(self):
self.sfh5 = SpecH5(self.fname)
def tearDown(self):
# fix Win32 permission error when deleting temp file
del self.sfh5
gc.collect()
def testMcaCalib(self):
mca0_calib = self.sfh5["/1.1/measurement/mca_0/info/calibration"]
mca1_calib = self.sfh5["/1.1/measurement/mca_1/info/calibration"]
self.assertEqual(mca0_calib.tolist(),
[1, 2, 3])
self.assertAlmostEqual(sum(mca1_calib.tolist()),
sum([5.5, 6.6, 7.7]),
places=5)
def testMcaChannels(self):
mca0_chann = self.sfh5["/1.1/measurement/mca_0/info/channels"]
mca1_chann = self.sfh5["/1.1/measurement/mca_1/info/channels"]
self.assertEqual(mca0_chann.tolist(),
[0., 1., 2.])
        # in this scan each analyser has its own @CHANN header
self.assertEqual(mca1_chann.tolist(),
[1., 2., 3.])
def testMcaCtime(self):
"""Tests for #@CTIME mca header"""
mca0_preset_time = self.sfh5["/1.1/instrument/mca_0/preset_time"]
mca1_preset_time = self.sfh5["/1.1/instrument/mca_1/preset_time"]
self.assertLess(mca0_preset_time - 123.4,
10**-5)
self.assertLess(mca1_preset_time - 10,
10**-5)
mca0_live_time = self.sfh5["/1.1/instrument/mca_0/live_time"]
mca1_live_time = self.sfh5["/1.1/instrument/mca_1/live_time"]
self.assertLess(mca0_live_time - 234.5,
10**-5)
self.assertLess(mca1_live_time - 11,
10**-5)
mca0_elapsed_time = self.sfh5["/1.1/instrument/mca_0/elapsed_time"]
mca1_elapsed_time = self.sfh5["/1.1/instrument/mca_1/elapsed_time"]
self.assertLess(mca0_elapsed_time - 345.6,
10**-5)
self.assertLess(mca1_elapsed_time - 12,
10**-5)
sftext_no_cols = r"""#F C:/DATA\test.mca
#D Thu Jul 7 08:40:19 2016
#S 1 31oct98.dat 22.1 If4
#D Thu Jul 7 08:40:19 2016
#C no data cols, one mca analyser, single spectrum
#@MCA %16C
#@CHANN 151 29 29 1
#@CALIB 0 2 0
@A 789 784 788 814 847 862 880 904 925 955 987 1015 1031 1070 1111 1139 \
1203 1236 1290 1392 1492 1558 1688 1813 1977 2119 2346 2699 3121 3542 4102 4970 \
6071 7611 10426 16188 28266 40348 50539 55555 56162 54162 47102 35718 24588 17034 12994 11444 \
11808 13461 15687 18885 23827 31578 41999 49556 58084 59415 59456 55698 44525 28219 17680 12881 \
9518 7415 6155 5246 4646 3978 3612 3299 3020 2761 2670 2472 2500 2310 2286 2106 \
1989 1890 1782 1655 1421 1293 1135 990 879 757 672 618 532 488 445 424 \
414 373 351 325 307 284 270 247 228 213 199 187 183 176 164 156 \
153 140 142 130 118 118 103 101 97 86 90 86 87 81 75 82 \
80 76 77 75 76 77 62 69 74 60 65 68 65 58 63 64 \
63 59 60 56 57 60 55
#S 2 31oct98.dat 22.1 If4
#D Thu Jul 7 08:40:19 2016
#C no data cols, one mca analyser, multiple spectra
#@MCA %16C
#@CHANN 3 0 2 1
#@CALIB 1 2 3
#@CTIME 123.4 234.5 345.6
@A 0 1 2
@A 10 9 8
@A 1 1 1.1
@A 3.1 4 5
@A 7 6 5
@A 1 1 1
@A 6 7.7 8
@A 4 3 2
@A 1 1 1
#S 3 31oct98.dat 22.1 If4
#D Thu Jul 7 08:40:19 2016
#C no data cols, 3 mca analysers, multiple spectra
#@MCADEV 1
#@MCA %16C
#@CHANN 3 0 2 1
#@CALIB 1 2 3
#@CTIME 123.4 234.5 345.6
#@MCADEV 2
#@MCA %16C
#@CHANN 3 0 2 1
#@CALIB 1 2 3
#@CTIME 123.4 234.5 345.6
#@MCADEV 3
#@MCA %16C
#@CHANN 3 0 2 1
#@CALIB 1 2 3
#@CTIME 123.4 234.5 345.6
@A 0 1 2
@A 10 9 8
@A 1 1 1.1
@A 3.1 4 5
@A 7 6 5
@A 1 1 1
@A 6 7.7 8
@A 4 3 2
@A 1 1 1
"""
class TestSpecH5NoDataCols(unittest.TestCase):
"""Test reading SPEC files with only MCA data"""
@classmethod
def setUpClass(cls):
fd, cls.fname = tempfile.mkstemp()
if sys.version < '3.0':
os.write(fd, sftext_no_cols)
else:
os.write(fd, bytes(sftext_no_cols, 'ascii'))
os.close(fd)
@classmethod
def tearDownClass(cls):
os.unlink(cls.fname)
def setUp(self):
self.sfh5 = SpecH5(self.fname)
def tearDown(self):
# fix Win32 permission error when deleting temp file
del self.sfh5
gc.collect()
def testScan1(self):
# 1.1: single analyser, single spectrum, 151 channels
self.assertIn("mca_0",
self.sfh5["1.1/instrument/"])
self.assertEqual(self.sfh5["1.1/instrument/mca_0/data"].shape,
(1, 151))
self.assertNotIn("mca_1",
self.sfh5["1.1/instrument/"])
def testScan2(self):
# 2.1: single analyser, 9 spectra, 3 channels
self.assertIn("mca_0",
self.sfh5["2.1/instrument/"])
self.assertEqual(self.sfh5["2.1/instrument/mca_0/data"].shape,
(9, 3))
self.assertNotIn("mca_1",
self.sfh5["2.1/instrument/"])
def testScan3(self):
# 3.1: 3 analysers, 3 spectra/analyser, 3 channels
for i in range(3):
self.assertIn("mca_%d" % i,
self.sfh5["3.1/instrument/"])
self.assertEqual(
self.sfh5["3.1/instrument/mca_%d/data" % i].shape,
(3, 3))
self.assertNotIn("mca_3",
self.sfh5["3.1/instrument/"])
def suite():
test_suite = unittest.TestSuite()
test_suite.addTest(
unittest.defaultTestLoader.loadTestsFromTestCase(TestSpecH5))
test_suite.addTest(
unittest.defaultTestLoader.loadTestsFromTestCase(TestSpecDate))
test_suite.addTest(
unittest.defaultTestLoader.loadTestsFromTestCase(TestSpecH5MultiMca))
test_suite.addTest(
unittest.defaultTestLoader.loadTestsFromTestCase(TestSpecH5NoDataCols))
return test_suite
if __name__ == '__main__':
unittest.main(defaultTest="suite")
| 34.789765
| 97
| 0.580885
|
daa85232c721e599e0c6aeef4c365e0e007ff16d
| 848
|
py
|
Python
|
models/models.py
|
cx921003/UPG-GAN
|
b2d2f2f2a36edb4b92495056d207ca10c3b8c621
|
[
"BSD-3-Clause"
] | 7
|
2019-06-10T13:46:34.000Z
|
2019-11-27T12:44:43.000Z
|
models/models.py
|
cx921003/UPG-GAN
|
b2d2f2f2a36edb4b92495056d207ca10c3b8c621
|
[
"BSD-3-Clause"
] | 1
|
2020-01-15T13:30:55.000Z
|
2021-02-23T18:48:25.000Z
|
models/models.py
|
xuchen-ethz/UPG-GAN
|
b2d2f2f2a36edb4b92495056d207ca10c3b8c621
|
[
"BSD-3-Clause"
] | null | null | null |
def create_model(opt):
model = None
print(opt.model)
if opt.model == 'ae_cycle_gan_all':
assert (opt.dataset_mode in ['unaligned','unaligned_with_label'])
from .ae_cycle_gan_model import AECycleGANModel
model = AECycleGANModel()
elif opt.model == 'vae_cycle_gan':
assert (opt.dataset_mode in ['unaligned','unaligned_with_label'])
from .vae_cycle_gan_model import VAECycleGANModel
model = VAECycleGANModel()
elif opt.model == 'ultimate':
assert (opt.dataset_mode in ['unaligned','unaligned_with_label'])
from .ultimate_model import VAECycleGANModelAll
model = VAECycleGANModelAll()
else:
raise ValueError("Model [%s] not recognized." % opt.model)
model.initialize(opt)
print("model [%s] was created" % (model.name()))
return model
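# Hypothetical usage sketch, added for illustration only (not part of the
# original file). The option fields shown are assumptions: real runs also need
# whatever additional fields the chosen model's initialize() expects.
def _example_create_model():
    """Build a minimal argparse-style options object and ask the factory
    for a model instance."""
    from argparse import Namespace
    opt = Namespace(model='vae_cycle_gan', dataset_mode='unaligned')
    return create_model(opt)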
| 36.869565
| 73
| 0.669811
|
a2de2103a640aecf9d6257c29cb2330e52afd9e9
| 2,170
|
py
|
Python
|
lisa/extern/sPickle/sPickle.py
|
mjirik/lisa
|
06c5cb8f375f51302341e768512f02236774c8a3
|
[
"BSD-3-Clause"
] | 22
|
2015-01-26T12:58:54.000Z
|
2021-04-15T17:48:13.000Z
|
lisa/extern/sPickle/sPickle.py
|
mjirik/lisa
|
06c5cb8f375f51302341e768512f02236774c8a3
|
[
"BSD-3-Clause"
] | 31
|
2015-01-23T14:46:13.000Z
|
2018-05-18T14:47:18.000Z
|
lisa/extern/sPickle/sPickle.py
|
mjirik/lisa
|
06c5cb8f375f51302341e768512f02236774c8a3
|
[
"BSD-3-Clause"
] | 13
|
2015-06-30T08:54:27.000Z
|
2020-09-11T16:08:19.000Z
|
"""Streaming pickle implementation for efficiently serializing and
de-serializing an iterable (e.g., list)
Created on 2010-06-19 by Philip Guo
http://code.google.com/p/streaming-pickle/
Modified by Brian Thorne 2013 to add base64 encoding to support
python3 bytearray and the like.
"""
import base64
from pickle import dumps, loads
import unittest
import tempfile
def s_dump(iterable_to_pickle, file_obj):
"""dump contents of an iterable iterable_to_pickle to file_obj, a file
opened in write mode"""
for elt in iterable_to_pickle:
s_dump_elt(elt, file_obj)
def s_dump_elt(elt_to_pickle, file_obj):
"""dumps one element to file_obj, a file opened in write mode"""
pickled_elt = dumps(elt_to_pickle)
encoded = base64.b64encode(pickled_elt)
file_obj.write(encoded)
    # the record separator is a blank line
    # (the base64-encoded pickle cannot contain its own newlines)
file_obj.write(b'\n\n')
def s_load(file_obj):
"""load contents from file_obj, returning a generator that yields one
element at a time"""
cur_elt = []
for line in file_obj:
if line == b'\n':
encoded_elt = b''.join(cur_elt)
try:
pickled_elt = base64.b64decode(encoded_elt)
elt = loads(pickled_elt)
except EOFError:
print("EOF found while unpickling data")
print(pickled_elt)
                # end the generator cleanly; raising StopIteration inside a
                # generator becomes a RuntimeError in Python 3.7+ (PEP 479)
                return
cur_elt = []
yield elt
else:
cur_elt.append(line)
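# Hypothetical round-trip sketch, added for illustration only (not part of the
# original module); it simply exercises s_dump/s_load through an in-memory
# buffer instead of a real file.
def _roundtrip_example():
    """Stream a few objects out and back in; returns the recovered list."""
    import io
    buf = io.BytesIO()
    s_dump([1, "two", {"three": 3}], buf)
    buf.seek(0)  # rewind before reading the records back
    return list(s_load(buf))  # -> [1, 'two', {'three': 3}]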
class TestStreamingPickle(unittest.TestCase):
def setUp(self):
pass
def testSimpleList(self):
data = [1, 2, 3, 4, None, b'test', '\n', '\x00', 3, b'\n\n\n\n', 5, 7, 9, 11, "hello", bytearray([2, 4, 4])]
with tempfile.TemporaryFile() as f:
s_dump(data, f)
# reset the temporary file
f.seek(0)
i = 0
for i, element in enumerate(s_load(f)):
self.assertEqual(data[i], element)
# print(i, element)
self.assertEqual(i, len(data)-1)
if __name__ == "__main__":
unittest.main()
| 29.726027
| 116
| 0.620276
|
adc967abff0718d3e01829f07a99ada02bd37766
| 2,166
|
py
|
Python
|
mangecko/utilities/library_scanner.py
|
jjsmall009/manga-volume-tracker
|
306157f95233d1f1c2adaa2e1a6f89048532c899
|
[
"BSD-3-Clause"
] | null | null | null |
mangecko/utilities/library_scanner.py
|
jjsmall009/manga-volume-tracker
|
306157f95233d1f1c2adaa2e1a6f89048532c899
|
[
"BSD-3-Clause"
] | null | null | null |
mangecko/utilities/library_scanner.py
|
jjsmall009/manga-volume-tracker
|
306157f95233d1f1c2adaa2e1a6f89048532c899
|
[
"BSD-3-Clause"
] | null | null | null |
# JJ Small
# Manga Volume Tracker
# library_scanner.py - Code to scan a directory and get a list of manga title/volume count
from pathlib import Path
class LibraryScanner():
"""
    This is a class because it makes it easier to track which folders in the directory are valid manga series and which are not.
"""
FILE_EXT: tuple = (".cbz", ".cbr", ".zip", ".pdf")
def __init__(self, path: Path) -> None:
self.path = path
self.valid_folders: dict[str, int] = {}
self.invalid_folders: list[str] = []
def scan_directory(self) -> None:
"""
Scans the specified folder and finds each manga title and how many volumes there are.
Notes:
            Ignores standalone files and folders that contain no volume files, i.e. only folders with matching files are counted.
"""
manga_folders: list[Path] = [folder for folder in self.path.iterdir() if folder.is_dir()]
for manga in manga_folders:
            num_volumes = 0  # number of files in this folder whose extension is in FILE_EXT
for vol in manga.iterdir():
if vol.suffix in self.FILE_EXT:
num_volumes += 1
if num_volumes > 0:
self.valid_folders[manga.name.split(" [", 1)[0]] = num_volumes
else:
self.invalid_folders.append(manga.name)
def print_valid(self) -> None:
"""Helper function to print out the valid folders"""
print("\n==================================================================")
print("Valid folders - These are the manga series found in this directory")
for title, volumes in self.valid_folders.items():
print(f"\t{title} - {volumes} volumes")
print(f"There are {len(self.valid_folders)} valid series")
def print_invalid(self) -> None:
"""Helper function to print out the invalid foldesr"""
print("\n=======================================================")
print("Invalid folders - Folders that don't meet the criteria")
for folder in self.invalid_folders:
print(f"\t{folder}")
print(f"There are {len(self.invalid_folders)} invalid series")
| 38.678571
| 100
| 0.574331
|
e3d6cb7cb73ee3dc70bbb33bb67da14b2b2ab63d
| 2,926
|
py
|
Python
|
tests/integration/controllers/test_notify_gateway.py
|
uk-gov-mirror/ONSdigital.ras-frontstage
|
4bf0f779f37d2a9ce7efe9f11c9b55a7a398faae
|
[
"MIT"
] | null | null | null |
tests/integration/controllers/test_notify_gateway.py
|
uk-gov-mirror/ONSdigital.ras-frontstage
|
4bf0f779f37d2a9ce7efe9f11c9b55a7a398faae
|
[
"MIT"
] | null | null | null |
tests/integration/controllers/test_notify_gateway.py
|
uk-gov-mirror/ONSdigital.ras-frontstage
|
4bf0f779f37d2a9ce7efe9f11c9b55a7a398faae
|
[
"MIT"
] | null | null | null |
import unittest
import unittest.mock
from config import TestingConfig
from frontstage import app
from frontstage.controllers.notify_controller import NotifyGateway
from frontstage.exceptions.exceptions import RasNotifyError
class TestNotifyController(unittest.TestCase):
"""Tests that the notify controller is working as expected"""
def setUp(self):
app.testing = True
app_config = TestingConfig()
app.config.from_object(app_config)
self.app = app.test_client()
self.app_config = self.app.application.config
self.email_form = {"email_address": "test@email.com"}
def test_request_to_notify_with_pubsub_no_personalisation(self):
"""Tests what is sent to pubsub when no personalisation is added"""
publisher = unittest.mock.MagicMock()
publisher.topic_path.return_value = 'projects/test-project-id/topics/ras-rm-notify-test'
# Given a mocked notify gateway
notify = NotifyGateway(self.app_config)
notify.publisher = publisher
result = notify.request_to_notify('test@email.com')
data = b'{"notify": {"email_address": "test@email.com", ' \
b'"template_id": "request_password_change_id"}}'
publisher.publish.assert_called()
publisher.publish.assert_called_with('projects/test-project-id/topics/ras-rm-notify-test', data=data)
self.assertIsNone(result)
def test_a_successful_send_with_personalisation(self):
"""Tests what is sent to pubsub when personalisation is added"""
publisher = unittest.mock.MagicMock()
publisher.topic_path.return_value = 'projects/test-project-id/topics/ras-rm-notify-test'
# Given a mocked notify gateway
notify = NotifyGateway(self.app_config)
notify.publisher = publisher
personalisation = {"first_name": "testy", "last_name": "surname"}
result = notify.request_to_notify('test@email.com', personalisation)
data = b'{"notify": {"email_address": "test@email.com", "template_id": "request_password_change_id",' \
b' "personalisation": {"first_name": "testy", "last_name": "surname"}}}'
publisher.publish.assert_called()
publisher.publish.assert_called_with('projects/test-project-id/topics/ras-rm-notify-test', data=data)
self.assertIsNone(result)
def test_request_to_notify_with_pubsub_timeout_error(self):
"""Tests if the future.result() raises a TimeoutError then the function raises a RasNotifyError"""
future = unittest.mock.MagicMock()
future.result.side_effect = TimeoutError("bad")
publisher = unittest.mock.MagicMock()
publisher.publish.return_value = future
# Given a mocked notify gateway
notify = NotifyGateway(self.app_config)
notify.publisher = publisher
with self.assertRaises(RasNotifyError):
notify.request_to_notify('test@email.com')
| 47.193548
| 111
| 0.698906
|
a9972d4ed4edcd150e11582abb591f4b1bcc9f2a
| 1,725
|
py
|
Python
|
src/probnum/randprocs/kernels/_exponentiated_quadratic.py
|
treid5/probnum
|
fabb51243d0952fbd35e542aeb5c2dc9a449ec81
|
[
"MIT"
] | 1
|
2021-04-16T14:45:26.000Z
|
2021-04-16T14:45:26.000Z
|
src/probnum/randprocs/kernels/_exponentiated_quadratic.py
|
simeoncarstens/probnum
|
b69587b07e2fffbdcd4c850acc98bb3de97a6e0b
|
[
"MIT"
] | 42
|
2021-03-08T07:20:40.000Z
|
2022-03-28T05:04:48.000Z
|
src/probnum/randprocs/kernels/_exponentiated_quadratic.py
|
JonathanWenger/probnum
|
1c5499883672cfa029c12045848ea04491c69e08
|
[
"MIT"
] | null | null | null |
"""Exponentiated quadratic kernel."""
from typing import Optional
import numpy as np
import probnum.utils as _utils
from probnum.typing import IntArgType, ScalarArgType
from ._kernel import IsotropicMixin, Kernel
class ExpQuad(Kernel, IsotropicMixin):
r"""Exponentiated quadratic / RBF kernel.
Covariance function defined by
.. math ::
k(x_0, x_1) = \exp \left( -\frac{\lVert x_0 - x_1 \rVert_2^2}{2 l^2} \right).
This kernel is also known as the squared
exponential or radial basis function kernel.
Parameters
----------
input_dim :
Input dimension of the kernel.
lengthscale
Lengthscale :math:`l` of the kernel. Describes the input scale on which the
process varies.
See Also
--------
RatQuad : Rational quadratic kernel.
Matern : Matern kernel.
Examples
--------
>>> import numpy as np
>>> from probnum.randprocs.kernels import ExpQuad
>>> K = ExpQuad(input_dim=1, lengthscale=0.1)
>>> xs = np.linspace(0, 1, 3)[:, None]
>>> K.matrix(xs)
array([[1.00000000e+00, 3.72665317e-06, 1.92874985e-22],
[3.72665317e-06, 1.00000000e+00, 3.72665317e-06],
[1.92874985e-22, 3.72665317e-06, 1.00000000e+00]])
"""
def __init__(self, input_dim: IntArgType, lengthscale: ScalarArgType = 1.0):
self.lengthscale = _utils.as_numpy_scalar(lengthscale)
super().__init__(input_dim=input_dim)
def _evaluate(self, x0: np.ndarray, x1: Optional[np.ndarray] = None) -> np.ndarray:
if x1 is None:
return np.ones_like(x0[..., 0])
return np.exp(
-self._squared_euclidean_distances(x0, x1) / (2.0 * self.lengthscale ** 2)
)
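# Hypothetical sanity check, added for illustration only (not part of the
# original module): evaluate the closed-form expression
# k(x_0, x_1) = exp(-||x_0 - x_1||^2 / (2 l^2)) directly with NumPy; for
# l = 0.1 and inputs 0, 0.5, 1 it reproduces the doctest values above.
def _formula_check():
    lengthscale = 0.1
    xs = np.linspace(0, 1, 3)
    sq_dists = (xs[:, None] - xs[None, :]) ** 2  # pairwise squared distances
    return np.exp(-sq_dists / (2.0 * lengthscale ** 2))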
| 28.75
| 87
| 0.635362
|
a0468e17b0b29a01b067dd8000c79f7c8f4ee95d
| 1,632
|
py
|
Python
|
accounts/views.py
|
victorbrittoferreira/product_hunt
|
e304359a903da3c84bf342832e92666c514ec42a
|
[
"BSD-3-Clause"
] | null | null | null |
accounts/views.py
|
victorbrittoferreira/product_hunt
|
e304359a903da3c84bf342832e92666c514ec42a
|
[
"BSD-3-Clause"
] | null | null | null |
accounts/views.py
|
victorbrittoferreira/product_hunt
|
e304359a903da3c84bf342832e92666c514ec42a
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import auth
# Create your views here.
# SIGN UP
def signup(request):
if request.method == 'POST':
        # the user POSTed the form and wants to create an account now
if request.POST['password1'] == request.POST['password2']:
try:
user = User.objects.get(username=request.POST['username'])
return render(request, 'accounts/signup.html',
                    {'error': 'That username already exists'})
except User.DoesNotExist:
user = User.objects.create_user(request.POST['username'],
password=request.POST['password1'])
auth.login(request,user)
return redirect('home')
else:
return render(request, 'accounts/signup.html',
{'error':'Passwords must be equal'})
else:
        # GET - display the signup form
return render(request, 'accounts/signup.html')
#LOG IN
def login(request):
if request.method == 'POST':
user = auth.authenticate(username=request.POST['username'],
password=request.POST['password'])
if user is not None:
auth.login(request, user)
return redirect('home')
else:
return render(request, 'accounts/login.html',{'error':'username or password is incorrect'})
else:
return render(request, 'accounts/login.html')
#LOG OUT
def logout(request):
if request.method == 'POST':
auth.logout(request)
return redirect('home')
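# Hypothetical wiring sketch, added for illustration only (not part of the
# original file): how these views might be mapped in the app's urls.py. The
# route paths and names below are assumptions, not taken from the project.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('signup/', views.signup, name='signup'),
#     path('login/', views.login, name='login'),
#     path('logout/', views.logout, name='logout'),
# ]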
| 29.142857
| 103
| 0.584559
|
155e2252c8ee2c292f4c1bd41ce629d9571b2790
| 36
|
py
|
Python
|
python/matplotlib-tests/run_test.py
|
jaimergp/conda-recipes
|
b663b2edeb3381b57601af92db4b8074c6ab6681
|
[
"BSD-3-Clause"
] | 302
|
2015-01-04T18:21:56.000Z
|
2021-11-16T12:14:37.000Z
|
python/matplotlib-tests/run_test.py
|
jaimergp/conda-recipes
|
b663b2edeb3381b57601af92db4b8074c6ab6681
|
[
"BSD-3-Clause"
] | 393
|
2015-01-03T14:35:48.000Z
|
2019-12-09T15:09:07.000Z
|
python/matplotlib-tests/run_test.py
|
jaimergp/conda-recipes
|
b663b2edeb3381b57601af92db4b8074c6ab6681
|
[
"BSD-3-Clause"
] | 325
|
2015-01-04T17:26:39.000Z
|
2021-11-04T16:25:54.000Z
|
import matplotlib
matplotlib.test()
| 12
| 17
| 0.833333
|
3c857692d80b07d91c5d3422acf3be5dc3a3e37c
| 1,200
|
py
|
Python
|
bspider/agent/controller/data_source.py
|
littlebai3618/bspider
|
ff4d003cd0825247db4efe62db95f9245c0a303c
|
[
"BSD-3-Clause"
] | 3
|
2020-06-19T03:52:29.000Z
|
2021-05-21T05:50:46.000Z
|
bspider/agent/controller/data_source.py
|
littlebai3618/bspider
|
ff4d003cd0825247db4efe62db95f9245c0a303c
|
[
"BSD-3-Clause"
] | 2
|
2021-03-31T19:39:03.000Z
|
2021-05-12T02:10:26.000Z
|
bspider/agent/controller/data_source.py
|
littlebai3618/bspider
|
ff4d003cd0825247db4efe62db95f9245c0a303c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
封装对cache中data_source信息的操作为api
"""
from flask import Blueprint
from bspider.core.api import auth
from .validators.data_source_form import AddForm, UpdateForm
from bspider.agent.service.data_source import DataSourceService
data_source = Blueprint('data_source_bp', __name__)
data_source_service = DataSourceService()
@data_source.route('/data_source', methods=['POST'])
@auth.login_required
def add_data_source():
form = AddForm()
return data_source_service.add_data_source(**form.to_dict())
@data_source.route('/data_source/<string:name>', methods=['PATCH'])
@auth.login_required
def update_data_source(name):
form = UpdateForm()
return data_source_service.update_data_source(name, form.to_dict())
@data_source.route('/data_source/<string:name>', methods=['DELETE'])
@auth.login_required
def delete_data_source(name):
return data_source_service.delete_data_source(name)
@data_source.route('/data_source', methods=['GET'])
@auth.login_required
def get_data_sources():
return data_source_service.get_data_sources()
@data_source.route('/data_source/<string:name>', methods=['GET'])
def get_data_source(name):
return data_source_service.get_data_source(name)
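# Hypothetical sketch, added for illustration only (not part of the original
# module): register this blueprint on a bare Flask app. The real agent wires
# blueprints up elsewhere; this only shows the registration step.
def _example_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(data_source)
    return app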
| 29.268293
| 71
| 0.780833
|
b0cb706376a538650edd7453f6bf6fb4e04d0fa0
| 789
|
py
|
Python
|
deps/mozjs/src/gdb/lib-for-tests/catcher.py
|
ktrzeciaknubisa/jxcore-binary-packaging
|
5759df084be10a259a4a4f1b38c214c6084a7c0f
|
[
"Apache-2.0"
] | 2,494
|
2015-02-11T04:34:13.000Z
|
2022-03-31T14:21:47.000Z
|
deps/mozjs/src/gdb/lib-for-tests/catcher.py
|
ktrzeciaknubisa/jxcore-binary-packaging
|
5759df084be10a259a4a4f1b38c214c6084a7c0f
|
[
"Apache-2.0"
] | 685
|
2015-02-11T17:14:26.000Z
|
2021-04-13T09:58:39.000Z
|
deps/mozjs/src/gdb/lib-for-tests/catcher.py
|
ktrzeciaknubisa/jxcore-binary-packaging
|
5759df084be10a259a4a4f1b38c214c6084a7c0f
|
[
"Apache-2.0"
] | 442
|
2015-02-12T13:45:46.000Z
|
2022-03-21T05:28:05.000Z
|
# Apparently, there's simply no way to ask GDB to exit with a non-zero
# status when the script run with the --eval-command option fails. Thus, if
# we have --eval-command run prolog.py directly, syntax errors there will
# lead GDB to exit with no indication anything went wrong.
#
# To avert that, we use this very small launcher script to run prolog.py
# and catch errors.
#
# Remember, errors in this file will cause spurious passes, so keep this as
# simple as possible!
import os
import sys
import traceback
try:
# testlibdir is set on the GDB command line, via:
# --eval-command python testlibdir=...
execfile(os.path.join(testlibdir, 'prolog.py'))
except Exception as err:
sys.stderr.write('Error running GDB prologue:\n')
traceback.print_exc()
sys.exit(1)
| 34.304348
| 75
| 0.731305
|
c179828e87d67c62a7992e482ca59c45b45a1279
| 1,621
|
py
|
Python
|
mitmproxy/contentviews/graphql.py
|
fedosgad/mitmproxy
|
7eacc41f3b1079e000cf6b6c19c0f337d6e01177
|
[
"MIT"
] | null | null | null |
mitmproxy/contentviews/graphql.py
|
fedosgad/mitmproxy
|
7eacc41f3b1079e000cf6b6c19c0f337d6e01177
|
[
"MIT"
] | null | null | null |
mitmproxy/contentviews/graphql.py
|
fedosgad/mitmproxy
|
7eacc41f3b1079e000cf6b6c19c0f337d6e01177
|
[
"MIT"
] | null | null | null |
import json
from typing import Any, Optional
from mitmproxy.contentviews import base
from mitmproxy.contentviews.json import parse_json, PARSE_ERROR
def format_graphql(data):
query = data["query"]
header_data = data.copy()
header_data["query"] = "..."
return """{header}
---
{query}
""".format(
header=json.dumps(header_data, indent=2), query=query
)
def format_query_list(data: list[Any]):
num_queries = len(data) - 1
result = ""
for i, op in enumerate(data):
result += f"--- {i}/{num_queries}\n"
result += format_graphql(op)
return result
def is_graphql_query(data):
return isinstance(data, dict) and "query" in data and "\n" in data["query"]
def is_graphql_batch_query(data):
return isinstance(data, list) and isinstance(data[0], dict) and "query" in data[0]
class ViewGraphQL(base.View):
name = "GraphQL"
def __call__(self, data, **metadata):
data = parse_json(data)
if data is not PARSE_ERROR:
if is_graphql_query(data):
return "GraphQL", base.format_text(format_graphql(data))
elif is_graphql_batch_query(data):
return "GraphQL", base.format_text(format_query_list(data))
def render_priority(
self, data: bytes, *, content_type: Optional[str] = None, **metadata
) -> float:
if content_type != "application/json" or not data:
return 0
data = parse_json(data)
if data is not PARSE_ERROR:
if is_graphql_query(data) or is_graphql_batch_query(data):
return 2
return 0
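# Hypothetical sketch, added for illustration only (not part of the original
# module): what format_graphql() produces for a typical request body. The
# payload below is made up.
def _example_format():
    body = {
        "operationName": "GetUser",
        "variables": {"id": 1},
        "query": "query GetUser($id: Int!) {\n  user(id: $id) { name }\n}",
    }
    # header JSON with the query elided, then '---', then the raw query text
    return format_graphql(body)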
| 26.57377
| 86
| 0.63541
|
be4f438aab8ffde6c41977406d72a72672a60551
| 28,135
|
py
|
Python
|
mosfit/main.py
|
klukosiute/MOSFiT
|
4bc6c74f4b592e4f023aad69fd17fe95078d518d
|
[
"MIT"
] | null | null | null |
mosfit/main.py
|
klukosiute/MOSFiT
|
4bc6c74f4b592e4f023aad69fd17fe95078d518d
|
[
"MIT"
] | null | null | null |
mosfit/main.py
|
klukosiute/MOSFiT
|
4bc6c74f4b592e4f023aad69fd17fe95078d518d
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""The main function."""
import argparse
import codecs
import locale
import os
import shutil
import sys
import time
from operator import attrgetter
from unicodedata import normalize
import numpy as np
from astropy.time import Time as astrotime
from mosfit import __author__, __contributors__, __version__
from mosfit.fitter import Fitter
from mosfit.printer import Printer
from mosfit.utils import get_mosfit_hash, is_master, open_atomic, speak
from six import string_types
class SortingHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
"""Sort argparse arguments by argument name."""
def add_arguments(self, actions):
"""Add sorting action based on `option_strings`."""
actions = sorted(actions, key=attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions)
def get_parser(only=None, printer=None):
"""Retrieve MOSFiT's `argparse.ArgumentParser` object."""
prt = Printer() if printer is None else printer
parser = argparse.ArgumentParser(
prog='mosfit',
description='Fit astrophysical transients.',
formatter_class=SortingHelpFormatter,
add_help=only is None)
parser.add_argument(
'--language',
dest='language',
type=str,
const='select',
default='en',
nargs='?',
help=("Language for output text."))
if only == 'language':
return parser
parser.add_argument(
'--events',
'-e',
dest='events',
default=[],
nargs='+',
help=prt.text('parser_events'))
parser.add_argument(
'--models',
'-m',
dest='models',
default=[],
nargs='?',
help=prt.text('parser_models'))
parser.add_argument(
'--parameter-paths',
'-P',
dest='parameter_paths',
default=['parameters.json'],
nargs='+',
help=prt.text('parser_parameter_paths'))
parser.add_argument(
'--walker-paths',
'-w',
dest='walker_paths',
nargs='+',
help=prt.text('parser_walker_paths'))
parser.add_argument(
'--max-time',
dest='max_time',
type=float,
default=1000.,
help=prt.text('parser_max_time'))
parser.add_argument(
'--limiting-magnitude',
'-l',
dest='limiting_magnitude',
default=None,
nargs='+',
help=prt.text('parser_limiting_magnitude'))
parser.add_argument(
'--prefer-fluxes',
dest='prefer_fluxes',
default=False,
action='store_true',
help=prt.text('parser_prefer_fluxes'))
parser.add_argument(
'--time-list',
'--extra-times',
dest='time_list',
default=[],
nargs='+',
help=prt.text('parser_time_list'))
parser.add_argument(
'--extra-dates',
dest='date_list',
default=[],
nargs='+',
help=prt.text('parser_time_list'))
parser.add_argument(
'--extra-mjds',
dest='mjd_list',
default=[],
nargs='+',
help=prt.text('parser_time_list'))
parser.add_argument(
'--extra-jds',
dest='jd_list',
default=[],
nargs='+',
help=prt.text('parser_time_list'))
parser.add_argument(
'--extra-phases',
dest='phase_list',
default=[],
nargs='+',
help=prt.text('parser_time_list'))
parser.add_argument(
'--band-list',
'--extra-bands',
dest='band_list',
default=[],
nargs='+',
help=prt.text('parser_band_list'))
parser.add_argument(
'--band-systems',
'--extra-systems',
dest='band_systems',
default=[],
nargs='+',
help=prt.text('parser_band_systems'))
parser.add_argument(
'--band-instruments',
'--extra-instruments',
dest='band_instruments',
default=[],
nargs='+',
help=prt.text('parser_band_instruments'))
parser.add_argument(
'--band-bandsets',
'--extra-bandsets',
dest='band_bandsets',
default=[],
nargs='+',
help=prt.text('parser_band_bandsets'))
parser.add_argument(
'--band-sampling-points',
dest='band_sampling_points',
type=int,
default=17,
help=prt.text('parser_band_sampling_points'))
parser.add_argument(
'--exclude-bands',
dest='exclude_bands',
default=[],
nargs='+',
help=prt.text('parser_exclude_bands'))
parser.add_argument(
'--exclude-instruments',
dest='exclude_instruments',
default=[],
nargs='+',
help=prt.text('parser_exclude_instruments'))
parser.add_argument(
'--exclude-systems',
dest='exclude_systems',
default=[],
nargs='+',
help=prt.text('parser_exclude_systems'))
parser.add_argument(
'--exclude-sources',
dest='exclude_sources',
default=[],
nargs='+',
help=prt.text('parser_exclude_sources'))
parser.add_argument(
'--exclude-kinds',
dest='exclude_kinds',
default=[],
nargs='+',
help=prt.text('parser_exclude_kinds'))
parser.add_argument(
'--fix-parameters',
'-F',
dest='user_fixed_parameters',
default=[],
nargs='+',
help=prt.text('parser_user_fixed_parameters'))
parser.add_argument(
'--release-parameters',
'-r',
dest='user_released_parameters',
default=[],
nargs='+',
help=prt.text('parser_user_released_parameters'))
parser.add_argument(
'--iterations',
'-i',
dest='iterations',
type=int,
const=0,
default=-1,
nargs='?',
help=prt.text('parser_iterations'))
parser.add_argument(
'--generative',
'-G',
dest='generative',
default=False,
action='store_true',
help=prt.text('parser_generative'))
parser.add_argument(
'--smooth-times',
'--plot-points',
'-S',
dest='smooth_times',
type=int,
const=0,
default=21,
nargs='?',
action='store',
help=prt.text('parser_smooth_times'))
parser.add_argument(
'--extrapolate-time',
'-E',
dest='extrapolate_time',
type=float,
default=0.0,
nargs='*',
help=prt.text('parser_extrapolate_time'))
parser.add_argument(
'--limit-fitting-mjds',
'-L',
dest='limit_fitting_mjds',
type=float,
default=False,
nargs=2,
help=prt.text('parser_limit_fitting_mjds'))
parser.add_argument(
'--output-path',
'-o',
dest='output_path',
default='',
help=prt.text('parser_output_path'))
parser.add_argument(
'--suffix',
'-s',
dest='suffix',
default='',
help=prt.text('parser_suffix'))
parser.add_argument(
'--num-walkers',
'-N',
dest='num_walkers',
type=int,
default=None,
help=prt.text('parser_num_walkers'))
parser.add_argument(
'--num-temps',
'-T',
dest='num_temps',
type=int,
help=prt.text('parser_num_temps'))
parser.add_argument(
'--no-fracking',
dest='fracking',
default=True,
action='store_false',
help=prt.text('parser_fracking'))
parser.add_argument(
'--no-write',
dest='write',
default=True,
action='store_false',
help=prt.text('parser_write'))
parser.add_argument(
'--quiet',
dest='quiet',
default=False,
action='store_true',
help=prt.text('parser_quiet'))
parser.add_argument(
'--cuda',
dest='cuda',
default=False,
action='store_true',
help=prt.text('parser_cuda'))
parser.add_argument(
'--no-copy-at-launch',
dest='copy',
default=True,
action='store_false',
help=prt.text('parser_copy'))
parser.add_argument(
'--force-copy-at-launch',
dest='force_copy',
default=False,
action='store_true',
help=prt.text('parser_force_copy'))
parser.add_argument(
'--offline',
dest='offline',
default=False,
action='store_true',
help=prt.text('parser_offline'))
parser.add_argument(
'--prefer-cache',
dest='prefer_cache',
default=False,
action='store_true',
help=prt.text('parser_prefer_cache'))
parser.add_argument(
'--frack-step',
'-f',
dest='frack_step',
type=int,
help=prt.text('parser_frack_step'))
parser.add_argument(
'--burn',
'-b',
dest='burn',
type=int,
help=prt.text('parser_burn'))
parser.add_argument(
'--post-burn',
'-p',
dest='post_burn',
type=int,
help=prt.text('parser_post_burn'))
parser.add_argument(
'--upload',
'-u',
dest='upload',
default=False,
action='store_true',
help=prt.text('parser_upload'))
parser.add_argument(
'--run-until-converged',
'-R',
dest='run_until_converged',
type=float,
default=False,
const=True,
nargs='?',
help=prt.text('parser_run_until_converged'))
parser.add_argument(
'--run-until-uncorrelated',
'-U',
dest='run_until_uncorrelated',
type=int,
default=None,
const=5,
nargs='?',
help=prt.text('parser_run_until_uncorrelated'))
parser.add_argument(
'--maximum-walltime',
'-W',
dest='maximum_walltime',
type=float,
default=False,
help=prt.text('parser_maximum_walltime'))
parser.add_argument(
'--maximum-memory',
'-M',
dest='maximum_memory',
type=float,
help=prt.text('parser_maximum_memory'))
parser.add_argument(
'--seed',
dest='seed',
type=int,
help=prt.text('parser_seed'))
parser.add_argument(
'--draw-above-likelihood',
'-d',
dest='draw_above_likelihood',
type=float,
const=True,
nargs='?',
help=prt.text('parser_draw_above_likelihood'))
parser.add_argument(
'--gibbs',
'-g',
dest='gibbs',
action='store_const',
const=True,
help=prt.text('parser_gibbs'))
parser.add_argument(
'--save-full-chain',
'-c',
dest='save_full_chain',
action='store_const',
const=True,
help=prt.text('parser_save_full_chain'))
parser.add_argument(
'--print-trees',
dest='print_trees',
default=False,
action='store_true',
help=prt.text('parser_print_trees'))
parser.add_argument(
'--set-upload-token',
dest='set_upload_token',
const=True,
default=False,
nargs='?',
help=prt.text('parser_set_upload_token'))
parser.add_argument(
'--ignore-upload-quality',
dest='check_upload_quality',
default=True,
action='store_false',
help=prt.text('parser_check_upload_quality'))
parser.add_argument(
'--test',
dest='test',
default=False,
action='store_true',
help=prt.text('parser_test'))
parser.add_argument(
'--variance-for-each',
dest='variance_for_each',
default=[],
nargs='+',
help=prt.text('parser_variance_for_each'))
parser.add_argument(
'--speak',
dest='speak',
const='en',
default=False,
nargs='?',
help=prt.text('parser_speak'))
parser.add_argument(
'--version',
dest='version',
default=False,
action='store_true',
help=prt.text('parser_version'))
parser.add_argument(
'--extra-outputs',
'-x',
dest='extra_outputs',
default=None,
nargs='*',
help=prt.text('parser_extra_outputs'))
parser.add_argument(
'--catalogs',
'-C',
dest='catalogs',
default=[],
nargs='+',
help=prt.text('parser_catalogs'))
parser.add_argument(
'--open-in-browser',
'-O',
dest='open_in_browser',
default=False,
action='store_true',
help=prt.text('parser_open_in_browser'))
parser.add_argument(
'--exit-on-prompt',
dest='exit_on_prompt',
default=False,
action='store_true',
help=prt.text('parser_exit_on_prompt'))
parser.add_argument(
'--download-recommended-data',
dest='download_recommended_data',
default=False,
action='store_true',
help=prt.text('parser_download_recommended_data'))
parser.add_argument(
'--local-data-only',
dest='local_data_only',
default=False,
action='store_true',
help=prt.text('parser_local_data_only'))
parser.add_argument(
'--method',
'-D',
dest='method',
type=str,
const='select',
default='ensembler',
nargs='?',
help=prt.text('parser_method'))
return parser
def main():
"""Run MOSFiT."""
prt = Printer(
wrap_length=100, quiet=False, language='en', exit_on_prompt=False)
parser = get_parser(only='language')
args, remaining = parser.parse_known_args()
if args.language == 'en':
loc = locale.getlocale()
if loc[0]:
args.language = loc[0].split('_')[0]
if args.language != 'en':
try:
from googletrans.constants import LANGUAGES
except Exception:
raise RuntimeError(
'`--language` requires `googletrans` package, '
'install with `pip install googletrans`.')
if args.language == 'select' or args.language not in LANGUAGES:
languages = list(
sorted([LANGUAGES[x].title().replace('_', ' ') +
' (' + x + ')' for x in LANGUAGES]))
sel = prt.prompt(
'Select a language:', kind='select', options=languages,
message=False)
args.language = sel.split('(')[-1].strip(')')
prt = Printer(language=args.language)
language = args.language
parser = get_parser(printer=prt)
args = parser.parse_args()
args.language = language
prt = Printer(
wrap_length=100, quiet=args.quiet, language=args.language,
exit_on_prompt=args.exit_on_prompt)
if args.version:
print('MOSFiT v{}'.format(__version__))
return
dir_path = os.path.dirname(os.path.realpath(__file__))
if args.speak:
speak('Mosfit', args.speak)
args.start_time = time.time()
if args.limiting_magnitude == []:
args.limiting_magnitude = 20.0
args.return_fits = False
if (isinstance(args.extrapolate_time, list) and
len(args.extrapolate_time) == 0):
args.extrapolate_time = 100.0
if len(args.band_list) and args.smooth_times == -1:
prt.message('enabling_s')
args.smooth_times = 0
args.method = 'nester' if args.method.lower() in [
'nest', 'nested', 'nested_sampler', 'nester'] else 'ensembler'
if is_master():
if args.method == 'nester':
unused_args = [
[args.burn, '-b'],
[args.post_burn, '-p'],
[args.frack_step, '-f'],
[args.num_temps, '-T'],
[args.run_until_uncorrelated, '-U'],
[args.draw_above_likelihood, '-d'],
[args.gibbs, '-g'],
[args.save_full_chain, '-c'],
[args.maximum_memory, '-M']
]
for ua in unused_args:
if ua[0] is not None:
prt.message('argument_not_used',
reps=[ua[1], '-D nester'], warning=True)
if args.method == 'nester':
if args.run_until_converged and args.iterations >= 0:
raise ValueError(prt.text('R_i_mutually_exclusive'))
if args.walker_paths is not None:
raise ValueError(prt.text('w_nester_mutually_exclusive'))
if args.generative:
if args.iterations > 0:
prt.message('generative_supercedes', warning=True)
args.iterations = 0
no_events = False
if args.iterations == -1:
if len(args.events) == 0:
no_events = True
args.iterations = 0
else:
args.iterations = 5000
if len(args.time_list):
if any([any([y in x]) for y in ['-', '/'] for x in args.time_list]):
try:
args.time_list = [astrotime(
x.replace('/', '-')).mjd for x in args.time_list]
except ValueError:
if len(args.time_list) == 1 and isinstance(
args.time_list[0], string_types):
args.time_list = args.time_list[0].split()
args.time_list = [float(x) for x in args.time_list]
args.time_unit = 'phase'
else:
if any(['+' in x for x in args.time_list]):
args.time_unit = 'phase'
args.time_list = [float(x) for x in args.time_list]
if len(args.date_list):
if no_events:
prt.message('no_dates_gen', warning=True)
else:
args.time_list += [str(astrotime(x.replace('/', '-')).mjd)
for x in args.date_list]
args.time_unit = 'mjd'
if len(args.mjd_list):
if no_events:
prt.message('no_dates_gen', warning=True)
else:
args.time_list += [float(x) for x in args.mjd_list]
args.time_unit = 'mjd'
if len(args.jd_list):
if no_events:
prt.message('no_dates_gen', warning=True)
else:
args.time_list += [str(astrotime(
float(x), format='jd').mjd) for x in args.jd_list]
args.time_unit = 'mjd'
if len(args.phase_list):
if no_events:
prt.message('no_dates_gen', warning=True)
else:
args.time_list += [float(x) for x in args.phase_list]
args.time_unit = 'phase'
if len(args.time_list):
if min(args.time_list) > 2400000:
prt.message('assuming_jd')
args.time_list = [x - 2400000.5 for x in args.time_list]
args.time_unit = 'mjd'
elif min(args.time_list) > 50000:
prt.message('assuming_mjd')
args.time_unit = 'mjd'
if args.burn is None and args.post_burn is None:
args.burn = int(np.floor(args.iterations / 2))
if args.frack_step == 0:
args.fracking = False
if (args.run_until_uncorrelated is not None and
args.run_until_converged):
raise ValueError(
'`-R` and `-U` options are incompatible, please use one or the '
'other.')
if args.run_until_uncorrelated is not None:
args.convergence_type = 'acor'
args.convergence_criteria = args.run_until_uncorrelated
elif args.run_until_converged:
if args.method == 'ensembler':
args.convergence_type = 'psrf'
args.convergence_criteria = (
1.1 if args.run_until_converged is True else
args.run_until_converged)
else:
args.convergence_type = 'dlogz'
if args.method == 'nester':
args.convergence_criteria = (
0.02 if args.run_until_converged is True else
args.run_until_converged)
if is_master():
# Get hash of ourselves
mosfit_hash = get_mosfit_hash()
# Print our amazing ASCII logo.
if not args.quiet:
with codecs.open(os.path.join(dir_path, 'logo.txt'),
'r', 'utf-8') as f:
logo = f.read()
firstline = logo.split('\n')[0]
# if isinstance(firstline, bytes):
# firstline = firstline.decode('utf-8')
width = len(normalize('NFC', firstline))
prt.prt(logo, colorify=True)
prt.message(
'byline', reps=[
__version__, mosfit_hash, __author__, __contributors__],
center=True, colorify=True, width=width, wrapped=False)
# Get/set upload token
upload_token = ''
get_token_from_user = False
if args.set_upload_token:
if args.set_upload_token is not True:
upload_token = args.set_upload_token
get_token_from_user = True
upload_token_path = os.path.join(dir_path, 'cache', 'dropbox.token')
# Perform a few checks on upload before running (to keep size
# manageable)
if args.upload and not args.test and args.smooth_times > 100:
response = prt.prompt('ul_warning_smooth')
if response:
args.upload = False
else:
sys.exit()
if (args.upload and not args.test and
args.num_walkers is not None and args.num_walkers < 100):
response = prt.prompt('ul_warning_few_walkers')
if response:
args.upload = False
else:
sys.exit()
if (args.upload and not args.test and args.num_walkers and
args.num_walkers * args.num_temps > 500):
response = prt.prompt('ul_warning_too_many_walkers')
if response:
args.upload = False
else:
sys.exit()
if args.upload:
if not os.path.isfile(upload_token_path):
get_token_from_user = True
else:
with open(upload_token_path, 'r') as f:
upload_token = f.read().splitlines()
if len(upload_token) != 1:
get_token_from_user = True
elif len(upload_token[0]) != 64:
get_token_from_user = True
else:
upload_token = upload_token[0]
if get_token_from_user:
if args.test:
upload_token = ('1234567890abcdefghijklmnopqrstuvwxyz'
'1234567890abcdefghijklmnopqr')
while len(upload_token) != 64:
prt.message('no_ul_token', ['https://sne.space/mosfit/'],
wrapped=True)
upload_token = prt.prompt('paste_token', kind='string')
if len(upload_token) != 64:
prt.prt(
'Error: Token must be exactly 64 characters in '
'length.', wrapped=True)
continue
break
with open_atomic(upload_token_path, 'w') as f:
f.write(upload_token)
if args.upload:
prt.prt(
"Upload flag set, will upload results after completion.",
wrapped=True)
prt.prt("Dropbox token: " + upload_token, wrapped=True)
args.upload_token = upload_token
if no_events:
prt.message('iterations_0', wrapped=True)
# Create the user directory structure, if it doesn't already exist.
if args.copy:
prt.message('copying')
fc = False
if args.force_copy:
fc = prt.prompt('force_copy')
if not os.path.exists('jupyter'):
os.mkdir(os.path.join('jupyter'))
if not os.path.isfile(os.path.join('jupyter',
'mosfit.ipynb')) or fc:
shutil.copy(
os.path.join(dir_path, 'jupyter', 'mosfit.ipynb'),
os.path.join(os.getcwd(), 'jupyter', 'mosfit.ipynb'))
if not os.path.exists('modules'):
os.mkdir(os.path.join('modules'))
module_dirs = next(os.walk(os.path.join(dir_path, 'modules')))[1]
for mdir in module_dirs:
if mdir.startswith('__'):
continue
full_mdir = os.path.join(dir_path, 'modules', mdir)
copy_path = os.path.join(full_mdir, '.copy')
to_copy = []
if os.path.isfile(copy_path):
to_copy = list(filter(None, open(
copy_path, 'r').read().split()))
mdir_path = os.path.join('modules', mdir)
if not os.path.exists(mdir_path):
os.mkdir(mdir_path)
for tc in to_copy:
tc_path = os.path.join(full_mdir, tc)
if os.path.isfile(tc_path):
shutil.copy(tc_path, os.path.join(mdir_path, tc))
elif os.path.isdir(tc_path) and not os.path.exists(
os.path.join(mdir_path, tc)):
os.mkdir(os.path.join(mdir_path, tc))
readme_path = os.path.join(mdir_path, 'README')
if not os.path.exists(readme_path):
txt = prt.message('readme-modules', [
os.path.join(dir_path, 'modules', 'mdir'),
os.path.join(dir_path, 'modules')], prt=False)
open(readme_path, 'w').write(txt)
if not os.path.exists('models'):
os.mkdir(os.path.join('models'))
model_dirs = next(os.walk(os.path.join(dir_path, 'models')))[1]
for mdir in model_dirs:
if mdir.startswith('__'):
continue
mdir_path = os.path.join('models', mdir)
if not os.path.exists(mdir_path):
os.mkdir(mdir_path)
model_files = next(
os.walk(os.path.join(dir_path, 'models', mdir)))[2]
readme_path = os.path.join(mdir_path, 'README')
if not os.path.exists(readme_path):
txt = prt.message('readme-models', [
os.path.join(dir_path, 'models', mdir),
os.path.join(dir_path, 'models')], prt=False)
with open(readme_path, 'w') as f:
f.write(txt)
for mfil in model_files:
if 'parameters.json' not in mfil:
continue
fil_path = os.path.join(mdir_path, mfil)
if os.path.isfile(fil_path) and not fc:
continue
shutil.copy(
os.path.join(dir_path, 'models', mdir, mfil),
os.path.join(fil_path))
# Set some default values that we checked above.
if args.frack_step == 0:
args.fracking = False
elif args.frack_step is None:
args.frack_step = 50
if args.burn is None and args.post_burn is None:
args.burn = int(np.floor(args.iterations / 2))
if args.draw_above_likelihood is None:
args.draw_above_likelihood = False
if args.maximum_memory is None:
args.maximum_memory = np.inf
if args.gibbs is None:
args.gibbs = False
if args.save_full_chain is None:
args.save_full_chain = False
if args.num_temps is None:
args.num_temps = 1
if args.walker_paths is None:
args.walker_paths = []
# Then, fit the listed events with the listed models.
fitargs = vars(args)
Fitter(**fitargs).fit_events(**fitargs)
if __name__ == "__main__":
main()
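# Hypothetical usage sketch, added for illustration only (not part of the
# original module). The event and model names below are placeholders; they
# only show how the flags defined above combine on the command line:
#
#   mosfit -e SomeTransient -m some_model -i 5000        # fit one event, 5000 iterations
#   mosfit -m some_model -i 0 -S 100                     # generative run, no event data
#   mosfit -e SomeTransient -m some_model -R -D nester   # run the nester until converged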
| 29.491614
| 77
| 0.536805
|
be23aa79adc8d79558ddcdc53353c22199cfefdb
| 2,938
|
py
|
Python
|
keras/layers/preprocessing/benchmarks/category_vocab_list_indicator_dense_benchmark.py
|
tsheaff/keras
|
ee227dda766d769b7499a5549e8ed77b5e88105b
|
[
"Apache-2.0"
] | 1
|
2022-03-21T02:42:58.000Z
|
2022-03-21T02:42:58.000Z
|
keras/layers/preprocessing/benchmarks/category_vocab_list_indicator_dense_benchmark.py
|
tsheaff/keras
|
ee227dda766d769b7499a5549e8ed77b5e88105b
|
[
"Apache-2.0"
] | 1
|
2022-03-31T16:40:51.000Z
|
2022-03-31T16:40:51.000Z
|
keras/layers/preprocessing/benchmarks/category_vocab_list_indicator_dense_benchmark.py
|
tsheaff/keras
|
ee227dda766d769b7499a5549e8ed77b5e88105b
|
[
"Apache-2.0"
] | 1
|
2020-12-13T22:14:48.000Z
|
2020-12-13T22:14:48.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of vocabulary columns + indicator from lists with dense inputs."""
import tensorflow.compat.v2 as tf
import keras
from tensorflow.python.eager.def_function import function as tf_function
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import string_lookup
from keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
"""Benchmark a variable-length embedding."""
# Data and constants.
vocab_size = 32768
vocab = fc_bm.create_vocabulary(vocab_size)
data = fc_bm.create_string_data(
max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)
# Keras implementation
model = keras.Sequential()
model.add(keras.Input(shape=(max_length,), name="data", dtype=tf.string))
model.add(string_lookup.StringLookup(vocabulary=vocab, mask_token=None))
model.add(
category_encoding.CategoryEncoding(
num_tokens=vocab_size + 1, output_mode="count"))
# FC implementation
fc = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
key="data", vocabulary_list=vocab, num_oov_buckets=1))
# Wrap the FC implementation in a tf.function for a fair comparison
@tf_function()
def fc_fn(tensors):
fc.transform_feature(tf.__internal__.feature_column.FeatureTransformationCache(tensors), None)
# Benchmark runs
keras_data = {
"data": data.to_tensor(default_value="", shape=(batch_size, max_length))
}
k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
fc_data = {
"data": data.to_tensor(default_value="", shape=(batch_size, max_length))
}
fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
"""Benchmark the layer forward pass."""
def benchmark_layer(self):
for batch in BATCH_SIZES:
name = "vocab_list_indicator|dense|batch_%s" % batch
k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
self.report(name, k_time, f_time, NUM_REPEATS)
if __name__ == "__main__":
tf.test.main()
| 36.271605
| 102
| 0.736215
|
e15c89f3fddcf39a3ae5722f091571510ba82164
| 191
|
py
|
Python
|
esp32/tilt switch example.py
|
Steve-Fisher/PiExperiments
|
81a08b99b53ed9921353cf436b09b40650032a2b
|
[
"MIT"
] | null | null | null |
esp32/tilt switch example.py
|
Steve-Fisher/PiExperiments
|
81a08b99b53ed9921353cf436b09b40650032a2b
|
[
"MIT"
] | null | null | null |
esp32/tilt switch example.py
|
Steve-Fisher/PiExperiments
|
81a08b99b53ed9921353cf436b09b40650032a2b
|
[
"MIT"
] | null | null | null |
from machine import Pin
import time
switch = Pin(23, Pin.IN)
while True:
state = not(switch.value())
print("Switch is " + ("on " if state else "off"), end='\r')
time.sleep(0.2)
| 19.1
| 63
| 0.617801
|
25a4e4993a953b1fc344d3e06ee53f78b695122e
| 12,304
|
py
|
Python
|
qp_klp/klp.py
|
antgonza/qp-knight-lab-processing
|
23e5646c6a47d894372e7d5d972169511f47570f
|
[
"BSD-3-Clause"
] | null | null | null |
qp_klp/klp.py
|
antgonza/qp-knight-lab-processing
|
23e5646c6a47d894372e7d5d972169511f47570f
|
[
"BSD-3-Clause"
] | null | null | null |
qp_klp/klp.py
|
antgonza/qp-knight-lab-processing
|
23e5646c6a47d894372e7d5d972169511f47570f
|
[
"BSD-3-Clause"
] | null | null | null |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from functools import partial
from inspect import stack
from os import environ, walk
from os import makedirs
from os.path import basename, join, exists
from qiita_client import ArtifactInfo
from sequence_processing_pipeline.ConvertJob import ConvertJob
from sequence_processing_pipeline.FastQCJob import FastQCJob
from sequence_processing_pipeline.GenPrepFileJob import GenPrepFileJob
from sequence_processing_pipeline.Pipeline import Pipeline
from sequence_processing_pipeline.PipelineError import PipelineError
from sequence_processing_pipeline.QCJob import QCJob
from sequence_processing_pipeline.SequenceDirectory import SequenceDirectory
from subprocess import Popen, PIPE
CONFIG_FP = environ["QP_KLP_CONFIG_FP"]
def sequence_processing_pipeline(qclient, job_id, parameters, out_dir):
"""Sequence Processing Pipeline command
Parameters
----------
qclient : tgp.qiita_client.QiitaClient
The Qiita server client
job_id : str
The job id
parameters : dict
The parameter values for this job
out_dir : str
The path to the job's output directory
Returns
-------
bool, list, str
The results of the job
"""
run_identifier = parameters.pop('run_identifier')
sample_sheet = parameters.pop('sample_sheet')
job_pool_size = 30
# checking if this is running as part of the unittest
# https://stackoverflow.com/a/25025987
skip_exec = True if [x for x in stack() if
'unittest' in x.filename] else False
success = True
ainfo = None
msg = None
qclient.update_job_step(job_id, "Step 1 of 6: Setting up pipeline")
if {'body', 'content_type', 'filename'} == set(sample_sheet):
# Create a Pipeline object
try:
pipeline = Pipeline(CONFIG_FP, run_identifier, out_dir, job_id)
except PipelineError as e:
# Pipeline is the object that finds the input fp, based on
# a search directory set in configuration.json and a run_id.
if str(e).endswith("could not be found"):
msg = f"A path for {run_identifier} could not be found."
return False, None, msg
else:
raise e
outpath = partial(join, out_dir)
final_results_path = outpath('final_results')
makedirs(final_results_path, exist_ok=True)
# the user is uploading a sample-sheet to us, but we need the
# sample-sheet as a file to pass to the Pipeline().
sample_sheet_path = outpath(sample_sheet['filename'])
with open(sample_sheet_path, 'w') as f:
f.write(sample_sheet['body'])
msgs, val_sheet = pipeline.validate(sample_sheet_path)
if val_sheet is None:
# only pass the top message to update_job_step, due to
# limited display width.
msg = str(msgs[0]) if msgs else "Sample sheet failed validation."
qclient.update_job_step(job_id, msg)
raise ValueError(msg)
else:
# if we're passed a val_sheet, assume any msgs are warnings only.
# unfortunately, we can only display the top msg.
            msg = msgs[0] if msgs else None
            if msg:
                qclient.update_job_step(job_id, f'warning: {msg}')
# get project names and their associated qiita ids
bioinformatics = val_sheet.Bioinformatics
lst = bioinformatics.to_dict('records')
sifs = pipeline.generate_sample_information_files(sample_sheet_path)
# find the uploads directory all trimmed files will need to be
# moved to.
results = qclient.get("/qiita_db/artifacts/types/")
# trimmed files are stored by qiita_id. Find the qiita_id
# associated with each project and ensure a subdirectory exists
# for when it comes time to move the trimmed files.
special_map = []
for result in lst:
project_name = result['Sample_Project']
qiita_id = result['QiitaID']
upload_path = join(results['uploads'], qiita_id)
makedirs(upload_path, exist_ok=True)
special_map.append((project_name, upload_path))
# Create a SequenceDirectory object
sdo = SequenceDirectory(pipeline.run_dir, sample_sheet_path)
qclient.update_job_step(job_id,
"Step 2 of 6: Converting BCL to fastq")
config = pipeline.configuration['bcl-convert']
convert_job = ConvertJob(pipeline.run_dir,
pipeline.output_path,
sdo.sample_sheet_path,
config['queue'],
config['nodes'],
config['nprocs'],
config['wallclock_time_in_hours'],
config['per_process_memory_limit'],
config['executable_path'],
config['modules_to_load'],
job_id)
        # if skip_exec is True, then each Job object will be initialized,
        # its assertions tested, and an ainfo will be returned to the
        # caller. However, the Jobs will not actually be executed. This is
        # useful for testing.
if not skip_exec:
convert_job.run()
qclient.update_job_step(job_id,
"Step 3 of 6: Adaptor & Host [optional] "
"trimming")
raw_fastq_files_path = join(pipeline.output_path, 'ConvertJob')
config = pipeline.configuration['qc']
qc_job = QCJob(raw_fastq_files_path,
pipeline.output_path,
sdo.sample_sheet_path,
config['mmi_db'],
config['queue'],
config['nodes'],
config['nprocs'],
config['wallclock_time_in_hours'],
config['job_total_memory_limit'],
config['fastp_executable_path'],
config['minimap2_executable_path'],
config['samtools_executable_path'],
config['modules_to_load'],
job_id,
job_pool_size,
config['job_max_array_length'])
if not skip_exec:
qc_job.run()
qclient.update_job_step(job_id, "Step 4 of 6: Generating FastQC & "
"MultiQC reports")
config = pipeline.configuration['fastqc']
raw_fastq_files_path = join(pipeline.output_path, 'ConvertJob')
processed_fastq_files_path = join(pipeline.output_path, 'QCJob')
fastqc_job = FastQCJob(pipeline.run_dir,
pipeline.output_path,
raw_fastq_files_path,
processed_fastq_files_path,
config['nprocs'],
config['nthreads'],
config['fastqc_executable_path'],
config['modules_to_load'],
job_id,
config['queue'],
config['nodes'],
config['wallclock_time_in_hours'],
config['job_total_memory_limit'],
job_pool_size,
config['multiqc_config_file_path'],
config['job_max_array_length'])
if not skip_exec:
fastqc_job.run()
project_list = fastqc_job.project_names
qclient.update_job_step(job_id, "Step 5 of 6: Generating Prep "
"Information Files")
config = pipeline.configuration['seqpro']
gpf_job = GenPrepFileJob(
pipeline.run_dir,
raw_fastq_files_path,
processed_fastq_files_path,
pipeline.output_path,
sdo.sample_sheet_path,
config['seqpro_path'],
project_list,
config['modules_to_load'],
job_id)
if not skip_exec:
gpf_job.run()
qclient.update_job_step(job_id, "Step 6 of 6: Copying results to "
"archive")
cmds = [f'cd {out_dir}; tar zcvf logs-ConvertJob.tgz ConvertJob/logs',
f'cd {out_dir}; tar zcvf reports-ConvertJob.tgz '
'ConvertJob/Reports ConvertJob/Logs',
f'cd {out_dir}; tar zcvf logs-QCJob.tgz QCJob/logs',
f'cd {out_dir}; tar zcvf logs-FastQCJob.tgz '
'FastQCJob/logs',
f'cd {out_dir}; tar zcvf reports-FastQCJob.tgz '
'FastQCJob/fastqc',
f'cd {out_dir}; tar zcvf logs-GenPrepFileJob.tgz '
'GenPrepFileJob/logs',
f'cd {out_dir}; tar zcvf prep-files.tgz '
'GenPrepFileJob/PrepFiles']
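        # the commands above (and any appended below) are executed later in
        # this function via Popen; the resulting .tgz archives are moved into
        # final_results before the ArtifactInfo is returned.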
# just use the filenames for tarballing the sifs.
# this will prevent the tarball from having an arbitrarily nested
# tree.
# the sifs should all be stored in the {out_dir} by default.
if sifs:
tmp = [basename(x) for x in sifs]
# convert sifs into a list of filenames.
tmp = ' '.join(tmp)
cmds.append(f'cd {out_dir}; tar zcvf sample-files.tgz {tmp}')
csv_fps = []
for root, dirs, files in walk(join(gpf_job.output_path, 'PrepFiles')):
for csv_file in files:
csv_fps.append(join(root, csv_file))
for project, upload_dir in special_map:
cmds.append(f'cd {out_dir}; tar zcvf reports-QCJob.tgz '
f'QCJob/{project}/fastp_reports_dir')
if exists(f'{out_dir}/QCJob/{project}/filtered_sequences'):
cmds.append(f'cd {out_dir}; mv '
f'QCJob/{project}/filtered_sequences/* '
f'{upload_dir}')
else:
cmds.append(f'cd {out_dir}; mv '
f'QCJob/{project}/trimmed_sequences/* '
f'{upload_dir}')
for csv_file in csv_fps:
if project in csv_file:
cmds.append(f'cd {out_dir}; mv {csv_file} {upload_dir}')
break
cmds.append(f'cd {out_dir}; mv *.tgz final_results')
cmds.append(f'cd {out_dir}; mv FastQCJob/multiqc final_results')
if sifs:
cmds.append(f'cd {out_dir}; mv sample-files.tgz {upload_dir}')
if skip_exec:
cmds = []
for cmd in cmds:
p = Popen(cmd, universal_newlines=True, shell=True,
stdout=PIPE, stderr=PIPE)
std_out, std_err = p.communicate()
return_code = p.returncode
if return_code != 0:
raise PipelineError(f"'{cmd}' returned {return_code}")
ainfo = [
ArtifactInfo('output', 'job-output-folder',
[(f'{final_results_path}/', 'directory')])
]
else:
success = False
msg = "This doesn't appear to be a valid sample sheet; please review."
qclient.update_job_step(job_id, "Main Pipeline Finished, processing "
"results")
return success, ainfo, msg
| 40.340984
| 79
| 0.556404
|
6a60b84b933a66c710be203379f56bf94339572d
| 3,837
|
py
|
Python
|
examples/scans.py
|
alanfung/Tenable.io-SDK-for-Python
|
21db906b652a700e11e0cd3435413f7dc7c9f6be
|
[
"MIT"
] | null | null | null |
examples/scans.py
|
alanfung/Tenable.io-SDK-for-Python
|
21db906b652a700e11e0cd3435413f7dc7c9f6be
|
[
"MIT"
] | null | null | null |
examples/scans.py
|
alanfung/Tenable.io-SDK-for-Python
|
21db906b652a700e11e0cd3435413f7dc7c9f6be
|
[
"MIT"
] | 1
|
2019-07-24T21:03:04.000Z
|
2019-07-24T21:03:04.000Z
|
import os
from datetime import datetime
from time import time
from tenable_io.api.models import Scan
from tenable_io.api.scans import ScanExportRequest
from tenable_io.client import TenableIOClient
from tenable_io.exceptions import TenableIOApiException
def example(test_name, test_file):
# Generate unique name and file.
scan_name = test_name(u'example scan')
test_nessus_file = test_file(u'example_report.nessus')
test_pdf_file = test_file(u'example_report.pdf')
'''
Instantiate an instance of the TenableIOClient.
'''
client = TenableIOClient()
'''
Create a scan.
'''
scan = client.scan_helper.create(
name=scan_name,
text_targets='tenable.com',
template='discovery'
)
assert scan.name() == scan_name
'''
Retrieve a scan by ID.
'''
scan_b = client.scan_helper.id(scan.id)
assert scan_b is not scan
assert scan_b.name() == scan_name
'''
Select scans by name.
'''
scans = client.scan_helper.scans(name=scan_name)
assert scans[0].name() == scan_name
'''
Select scans by name with regular expression.
'''
scans = client.scan_helper.scans(name_regex=r'.*test scan.*')
assert len(scans) > 0
'''
Launch a scan, then download when scan is completed.
Note: The `download` method blocks until the scan is completed and the report is downloaded.
'''
scan.launch().download(test_pdf_file)
assert os.path.isfile(test_pdf_file)
os.remove(test_pdf_file)
'''
Launch a scan, pause it, resume it, then stop it.
'''
scan.launch().pause()
assert scan.status() == Scan.STATUS_PAUSED
scan.resume().stop()
assert scan.status() == Scan.STATUS_CANCELED
'''
Stop a running scan if it does not complete within a specific duration.
'''
start = time()
scan.launch().wait_or_cancel_after(10)
assert time() - start >= 10
'''
Retrieve the history of a scan since a specific date or all.
    Note: The `since` argument is optional; all history is returned if omitted.
'''
histories = scan.histories(since=datetime(2016, 12, 1))
assert len(histories) > 0
'''
Download the report for a specific scan in history.
'''
scan.download(test_pdf_file, history_id=histories[0].history_id)
assert os.path.isfile(test_pdf_file)
os.remove(test_pdf_file)
'''
Create a new scan by copying a scan.
'''
scan_copy = scan.copy()
assert scan_copy.id != scan.id
assert scan_copy.status() == Scan.STATUS_EMPTY
'''
Export a scan into a NESSUS file.
'''
scan.download(test_nessus_file, format=ScanExportRequest.FORMAT_NESSUS)
assert os.path.isfile(test_nessus_file)
'''
Create a new scan by importing a NESSUS file.
'''
imported_scan = client.scan_helper.import_scan(test_nessus_file)
assert imported_scan.details().info.name == scan.details().info.name
os.remove(test_nessus_file)
'''
Stop all scans.
Note: Use with caution as this will stop all ongoing scans (including any automated test).
'''
# client.scan_helper.stop_all()
'''
Check if a target has recently been scanned (including running scans).
'''
activities = client.scan_helper.activities('tenable.com')
last_history_id = scan.last_history().history_id
assert [a for a in activities if last_history_id == a.history_id]
'''
Delete scans.
'''
scan.delete()
scan_copy.delete()
imported_scan.delete()
try:
scan.details()
assert False
except TenableIOApiException:
pass
try:
scan_copy.details()
assert False
except TenableIOApiException:
pass
try:
imported_scan.details()
assert False
except TenableIOApiException:
pass
| 26.462069
| 96
| 0.661976
|
76d1adb53a921e8a5efbd11a4a61f211baf77fb1
| 307
|
py
|
Python
|
tests/pwb/print_argv.py
|
ZabeMath/pywikibot
|
856a197c53efcb80b16475a8d203a4ecd79eee2f
|
[
"MIT"
] | 326
|
2017-11-21T07:04:19.000Z
|
2022-03-26T01:25:44.000Z
|
tests/pwb/print_argv.py
|
ZabeMath/pywikibot
|
856a197c53efcb80b16475a8d203a4ecd79eee2f
|
[
"MIT"
] | 17
|
2017-12-20T13:41:32.000Z
|
2022-02-16T16:42:41.000Z
|
tests/pwb/print_argv.py
|
ZabeMath/pywikibot
|
856a197c53efcb80b16475a8d203a4ecd79eee2f
|
[
"MIT"
] | 147
|
2017-11-22T19:13:40.000Z
|
2022-03-29T04:47:07.000Z
|
#!/usr/bin/python
"""Script that forms part of pwb_tests.
.. versionadded:: 7.0
"""
#
# (C) Pywikibot team, 2021
#
# Distributed under the terms of the MIT license.
#
import pywikibot
def main() -> None:
"""Print pywikibot.argvu."""
print(pywikibot.argvu)
if __name__ == '__main__':
main()
| 14.619048
| 49
| 0.648208
|
87437d93fb7b1083c4ba63db573a38d1489ca337
| 24,099
|
py
|
Python
|
blender/arm/utils.py
|
anadin/armory
|
a346af59e9d35d51e9b45cfcb6a99824719af295
|
[
"Zlib"
] | null | null | null |
blender/arm/utils.py
|
anadin/armory
|
a346af59e9d35d51e9b45cfcb6a99824719af295
|
[
"Zlib"
] | null | null | null |
blender/arm/utils.py
|
anadin/armory
|
a346af59e9d35d51e9b45cfcb6a99824719af295
|
[
"Zlib"
] | null | null | null |
import bpy
import json
import os
import glob
import platform
import re
import subprocess
import webbrowser
import numpy as np
import arm.lib.armpack
import arm.make_state as state
import arm.log as log
class NumpyEncoder(json.JSONEncoder):
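    # Serialize numpy arrays as plain Python lists when dumping to JSON.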
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def write_arm(filepath, output):
if filepath.endswith('.lz4'):
pass
else:
if bpy.data.worlds['Arm'].arm_minimize:
with open(filepath, 'wb') as f:
f.write(arm.lib.armpack.packb(output))
else:
filepath_json = filepath.split('.arm')[0] + '.json'
with open(filepath_json, 'w') as f:
f.write(json.dumps(output, sort_keys=True, indent=4, cls=NumpyEncoder))
def unpack_image(image, path, file_format='JPEG'):
print('Armory Info: Unpacking to ' + path)
image.filepath_raw = path
image.file_format = file_format
image.save()
def convert_image(image, path, file_format='JPEG'):
# Convert image to compatible format
print('Armory Info: Converting to ' + path)
ren = bpy.context.scene.render
orig_quality = ren.image_settings.quality
orig_file_format = ren.image_settings.file_format
orig_color_mode = ren.image_settings.color_mode
ren.image_settings.quality = 90
ren.image_settings.file_format = file_format
if file_format == 'PNG':
ren.image_settings.color_mode = 'RGBA'
image.save_render(path, scene=bpy.context.scene)
ren.image_settings.quality = orig_quality
ren.image_settings.file_format = orig_file_format
ren.image_settings.color_mode = orig_color_mode
def blend_name():
return bpy.path.basename(bpy.context.blend_data.filepath).rsplit('.')[0]
def build_dir():
return 'build_' + safestr(blend_name())
def get_fp():
wrd = bpy.data.worlds['Arm']
if wrd.arm_project_root != '':
return bpy.path.abspath(wrd.arm_project_root)
else:
s = bpy.data.filepath.split(os.path.sep)
s.pop()
return os.path.sep.join(s)
def get_fp_build():
return get_fp() + '/' + build_dir()
def get_os():
s = platform.system()
if s == 'Windows':
return 'win'
elif s == 'Darwin':
return 'mac'
else:
return 'linux'
def get_gapi():
wrd = bpy.data.worlds['Arm']
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
return getattr(item, target_to_gapi(item.arm_project_target))
if wrd.arm_runtime == 'Browser':
return 'webgl'
return arm.utils.get_player_gapi()
def get_rp():
wrd = bpy.data.worlds['Arm']
return wrd.arm_rplist[wrd.arm_rplist_index]
def bundled_sdk_path():
if get_os() == 'mac':
# SDK on MacOS is located in .app folder due to security
p = bpy.app.binary_path
if p.endswith('Contents/MacOS/blender'):
return p[:-len('Contents/MacOS/blender')] + '/armsdk/'
else:
return p[:-len('Contents/MacOS/./blender')] + '/armsdk/'
elif get_os() == 'linux':
# /blender
return bpy.app.binary_path.rsplit('/', 1)[0] + '/armsdk/'
else:
# /blender.exe
return bpy.app.binary_path.replace('\\', '/').rsplit('/', 1)[0] + '/armsdk/'
# Passed by load_post handler when armsdk is found in project folder
use_local_sdk = False
def get_sdk_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons["armory"].preferences
p = bundled_sdk_path()
if use_local_sdk:
return get_fp() + '/armsdk/'
elif os.path.exists(p) and addon_prefs.sdk_bundled:
return p
else:
return addon_prefs.sdk_path
def get_ide_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons["armory"].preferences
return '' if not hasattr(addon_prefs, 'ide_path') else addon_prefs.ide_path
def get_ffmpeg_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return addon_prefs.ffmpeg_path
def get_renderdoc_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
p = addon_prefs.renderdoc_path
if p == '' and get_os() == 'win':
pdefault = 'C:\\Program Files\\RenderDoc\\qrenderdoc.exe'
if os.path.exists(pdefault):
p = pdefault
return p
def get_player_gapi():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'opengl' if not hasattr(addon_prefs, 'player_gapi_' + get_os()) else getattr(addon_prefs, 'player_gapi_' + get_os())
def get_code_editor():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'kodestudio' if not hasattr(addon_prefs, 'code_editor') else addon_prefs.code_editor
def get_ui_scale():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 1.0 if not hasattr(addon_prefs, 'ui_scale') else addon_prefs.ui_scale
def get_khamake_threads():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 1 if not hasattr(addon_prefs, 'khamake_threads') else addon_prefs.khamake_threads
def get_compilation_server():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'compilation_server') else addon_prefs.compilation_server
def get_save_on_build():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'save_on_build') else addon_prefs.save_on_build
def get_viewport_controls():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'qwerty' if not hasattr(addon_prefs, 'viewport_controls') else addon_prefs.viewport_controls
def get_legacy_shaders():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'legacy_shaders') else addon_prefs.legacy_shaders
def get_relative_paths():
# Convert absolute paths to relative
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'relative_paths') else addon_prefs.relative_paths
def get_node_path():
if get_os() == 'win':
return get_sdk_path() + '/nodejs/node.exe'
elif get_os() == 'mac':
return get_sdk_path() + '/nodejs/node-osx'
else:
return get_sdk_path() + '/nodejs/node-linux64'
def get_kha_path():
if os.path.exists('Kha'):
return 'Kha'
return get_sdk_path() + '/Kha'
def get_haxe_path():
if get_os() == 'win':
return get_kha_path() + '/Tools/haxe/haxe.exe'
elif get_os() == 'mac':
return get_kha_path() + '/Tools/haxe/haxe-osx'
else:
return get_kha_path() + '/Tools/haxe/haxe-linux64'
def get_khamake_path():
return get_kha_path() + '/make'
def krom_paths(bin_ext=''):
sdk_path = get_sdk_path()
if arm.utils.get_os() == 'win':
krom_location = sdk_path + '/Krom'
krom_path = krom_location + '/Krom' + bin_ext + '.exe'
elif arm.utils.get_os() == 'mac':
krom_location = sdk_path + '/Krom/Krom.app/Contents/MacOS'
krom_path = krom_location + '/Krom' + bin_ext
else:
krom_location = sdk_path + '/Krom'
krom_path = krom_location + '/Krom' + bin_ext
return krom_location, krom_path
def fetch_bundled_script_names():
wrd = bpy.data.worlds['Arm']
wrd.arm_bundled_scripts_list.clear()
os.chdir(get_sdk_path() + '/armory/Sources/armory/trait')
for file in glob.glob('*.hx'):
wrd.arm_bundled_scripts_list.add().name = file.rsplit('.')[0]
script_props = {}
script_props_defaults = {}
def fetch_script_props(file):
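    # Parse a Haxe trait source file for '@prop' annotations and record each
    # annotated 'var' as a (name, type) tuple plus its default value in the
    # module-level script_props / script_props_defaults dicts, keyed by the
    # trait's dot-separated package path.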
with open(file) as f:
name = file.rsplit('.')[0]
if 'Sources' in name:
name = name[name.index('Sources')+8:]
if '/' in name:
name = name.replace('/','.')
if '\\' in file:
name = name.replace('\\','.')
script_props[name] = []
script_props_defaults[name] = []
lines = f.read().splitlines()
read_prop = False
for l in lines:
if not read_prop:
read_prop = l.lstrip().startswith('@prop')
if read_prop and 'var ' in l:
p = l.split('var ')[1]
valid_prop = False
# Has type
if ':' in p:
# Fetch default value
if '=' in p:
s = p.split('=')
ps = s[0].split(':')
prop = (ps[0].strip(), ps[1].split(';')[0].strip())
prop_value = s[1].split(';')[0].replace('\'', '').replace('"', '').strip()
valid_prop = True
else:
ps = p.split(':')
prop = (ps[0].strip(), ps[1].split(';')[0].strip())
prop_value = ''
valid_prop = True
# Fetch default value
elif '=' in p:
s = p.split('=')
prop = (s[0].strip(), None)
prop_value = s[1].split(';')[0].replace('\'', '').replace('"', '').strip()
valid_prop = True
# Register prop
if valid_prop:
script_props[name].append(prop)
script_props_defaults[name].append(prop_value)
read_prop = False
def fetch_script_names():
if bpy.data.filepath == "":
return
wrd = bpy.data.worlds['Arm']
# Sources
wrd.arm_scripts_list.clear()
sources_path = get_fp() + '/Sources/' + safestr(wrd.arm_project_package)
if os.path.isdir(sources_path):
os.chdir(sources_path)
# Glob supports recursive search since python 3.5 so it should cover both blender 2.79 and 2.8 integrated python
for file in glob.glob('**/*.hx', recursive=True):
name = file.rsplit('.')[0]
            # Replace path separators with package-style dots so the trait is
            # searchable in Blender's traits "Class" dropdown
wrd.arm_scripts_list.add().name = name.replace(os.sep, '.')
fetch_script_props(file)
# Canvas
wrd.arm_canvas_list.clear()
canvas_path = get_fp() + '/Bundled/canvas'
if os.path.isdir(canvas_path):
os.chdir(canvas_path)
for file in glob.glob('*.json'):
wrd.arm_canvas_list.add().name = file.rsplit('.')[0]
os.chdir(get_fp())
def fetch_wasm_names():
if bpy.data.filepath == "":
return
wrd = bpy.data.worlds['Arm']
# WASM modules
wrd.arm_wasm_list.clear()
sources_path = get_fp() + '/Bundled'
if os.path.isdir(sources_path):
os.chdir(sources_path)
for file in glob.glob('*.wasm'):
name = file.rsplit('.')[0]
wrd.arm_wasm_list.add().name = name
os.chdir(get_fp())
def fetch_trait_props():
for o in bpy.data.objects:
fetch_prop(o)
for s in bpy.data.scenes:
fetch_prop(s)
def fetch_prop(o):
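    # Synchronize the trait property list stored on a Blender object/scene
    # with the properties parsed from its Haxe source: remove props that no
    # longer exist, add newly declared ones with their defaults, and refresh
    # the displayed type when it changed.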
for item in o.arm_traitlist:
if item.name not in script_props:
continue
props = script_props[item.name]
defaults = script_props_defaults[item.name]
# Remove old props
for i in range(len(item.arm_traitpropslist) - 1, -1, -1):
ip = item.arm_traitpropslist[i]
# if ip.name not in props:
if ip.name.split('(')[0] not in [p[0] for p in props]:
item.arm_traitpropslist.remove(i)
# Add new props
for i in range(0, len(props)):
p = props[i]
found = False
for ip in item.arm_traitpropslist:
if ip.name.replace(')', '').split('(')[0] == p[0]:
found = ip
break
# Not in list
if not found:
prop = item.arm_traitpropslist.add()
prop.name = p[0] + ('(' + p[1] + ')' if p[1] else '')
prop.value = defaults[i]
if found:
prop = item.arm_traitpropslist[found.name]
f = found.name.replace(')', '').split('(')
# Default value added and current value is blank (no override)
if (not found.value and defaults[i]):
prop.value = defaults[i]
# Type has changed, update displayed name
if (len(f) == 1 or (len(f) > 1 and f[1] != p[1])):
prop.name = p[0] + ('(' + p[1] + ')' if p[1] else '')
def fetch_bundled_trait_props():
# Bundled script props
for o in bpy.data.objects:
for t in o.arm_traitlist:
if t.type_prop == 'Bundled Script':
file_path = get_sdk_path() + '/armory/Sources/armory/trait/' + t.name + '.hx'
if os.path.exists(file_path):
fetch_script_props(file_path)
fetch_prop(o)
def update_trait_collections():
for col in bpy.data.collections:
if col.name.startswith('Trait|'):
bpy.data.collections.remove(col)
for o in bpy.data.objects:
for t in o.arm_traitlist:
if 'Trait|' + t.name not in bpy.data.collections:
col = bpy.data.collections.new('Trait|' + t.name)
else:
col = bpy.data.collections['Trait|' + t.name]
col.objects.link(o)
def to_hex(val):
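    # val is an RGBA float color; the channels are packed alpha-first,
    # e.g. to_hex((1.0, 0.0, 0.0, 1.0)) -> '#ffff0000'.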
return '#%02x%02x%02x%02x' % (int(val[3] * 255), int(val[0] * 255), int(val[1] * 255), int(val[2] * 255))
def color_to_int(val):
return (int(val[3] * 255) << 24) + (int(val[0] * 255) << 16) + (int(val[1] * 255) << 8) + int(val[2] * 255)
def safesrc(s):
s = safestr(s).replace('.', '_').replace('-', '_').replace(' ', '')
if s[0].isdigit():
s = '_' + s
return s
def safestr(s):
for c in r'[]/\;,><&*:%=+@!#^()|?^':
s = s.replace(c, '_')
return ''.join([i if ord(i) < 128 else '_' for i in s])
def asset_name(bdata):
s = bdata.name
# Append library name if linked
if bdata.library != None:
s += '_' + bdata.library.name
return s
def asset_path(s):
return s[2:] if s[:2] == '//' else s # Remove leading '//'
def extract_filename(s):
return os.path.basename(asset_path(s))
def get_render_resolution(scene):
render = scene.render
scale = render.resolution_percentage / 100
return int(render.resolution_x * scale), int(render.resolution_y * scale)
def get_project_scene_name():
return get_active_scene().name
def get_active_scene():
if not state.is_export:
return bpy.context.scene
else:
wrd = bpy.data.worlds['Arm']
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
return item.arm_project_scene
def logic_editor_space(context_screen=None):
if context_screen == None:
context_screen = bpy.context.screen
if context_screen != None:
areas = context_screen.areas
for area in areas:
for space in area.spaces:
if space.type == 'NODE_EDITOR':
if space.node_tree != None and space.node_tree.bl_idname == 'ArmLogicTreeType':
return space
return None
def voxel_support():
# macos does not support opengl 4.5, needs metal
return state.target != 'html5' and get_os() != 'mac'
def get_cascade_size(rpdat):
cascade_size = int(rpdat.rp_shadowmap_cascade)
# Clamp to 4096 per cascade
if int(rpdat.rp_shadowmap_cascades) > 1 and cascade_size > 4096:
cascade_size = 4096
return cascade_size
def check_saved(self):
if bpy.data.filepath == "":
msg = "Save blend file first"
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
return True
def check_path(s):
for c in r'[];><&*%=+@!#^()|?^':
if c in s:
return False
for c in s:
if ord(c) > 127:
return False
return True
def check_sdkpath(self):
s = get_sdk_path()
if check_path(s) == False:
msg = "SDK path '{0}' contains special characters. Please move SDK to different path for now.".format(s)
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
else:
return True
def check_projectpath(self):
s = get_fp()
if check_path(s) == False:
msg = "Project path '{0}' contains special characters, build process may fail.".format(s)
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
else:
return True
def disp_enabled(target):
rpdat = get_rp()
if rpdat.arm_rp_displacement == 'Tessellation':
return target == 'krom' or target == 'native'
return rpdat.arm_rp_displacement != 'Off'
def is_object_animation_enabled(bobject):
# Checks if animation is present and enabled
if bobject.arm_animation_enabled == False or bobject.type == 'BONE' or bobject.type == 'ARMATURE':
return False
if bobject.animation_data and bobject.animation_data.action:
return True
return False
def is_bone_animation_enabled(bobject):
# Checks if animation is present and enabled for parented armature
if bobject.parent and bobject.parent.type == 'ARMATURE':
if bobject.parent.arm_animation_enabled == False:
return False
# Check for present actions
adata = bobject.parent.animation_data
has_actions = adata != None and adata.action != None
if not has_actions and adata != None:
if hasattr(adata, 'nla_tracks') and adata.nla_tracks != None:
for track in adata.nla_tracks:
if track.strips == None:
continue
for strip in track.strips:
if strip.action == None:
continue
has_actions = True
break
if has_actions:
break
if adata != None and has_actions:
return True
return False
def export_bone_data(bobject):
return bobject.find_armature() and is_bone_animation_enabled(bobject) and get_rp().arm_skin == 'On'
def kode_studio_mklink_win(sdk_path, ide_path):
# Fight long-path issues on Windows
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('mklink /J "%s" "%s"' % (source, target), shell=True)
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('mklink /J "%s" "%s"' % (source, target), shell=True)
def kode_studio_mklink_linux(sdk_path, ide_path):
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('ln -s "%s" "%s"' % (target, source), shell=True)
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('ln -s "%s" "%s"' % (target, source), shell=True)
def kode_studio_mklink_mac(sdk_path, ide_path):
if not os.path.exists(ide_path + '/Contents/Resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/Contents/Resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('ln -fs "%s" "%s"' % (target, source), shell=True)
if not os.path.exists(ide_path + '/Contents/Resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/Contents/Resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('ln -fs "%s" "%s"' % (target, source), shell=True)
def get_kode_path():
p = get_ide_path()
if p == '':
if get_os() == 'win':
p = get_sdk_path() + '/win32'
elif get_os() == 'mac':
p = get_sdk_path() + '/KodeStudio.app'
else:
p = get_sdk_path() + '/linux64'
return p
def get_kode_bin():
p = get_kode_path()
if get_os() == 'win':
return p + '/Kode Studio.exe'
elif get_os() == 'mac':
return p + '/Contents/MacOS/Electron'
else:
return p + '/kodestudio'
def get_vscode_bin():
p = get_kode_path()
if get_os() == 'win':
return p + '/Code.exe'
elif get_os() == 'mac':
return p + '/Contents/MacOS/Electron'
else:
return p + '/code'
def kode_studio(hx_path=None):
project_path = arm.utils.get_fp()
kode_bin = get_kode_bin()
if not os.path.exists(kode_bin):
kode_bin = get_vscode_bin()
if os.path.exists(kode_bin) and get_code_editor() == 'kodestudio':
if arm.utils.get_os() == 'win':
# kode_studio_mklink_win(get_sdk_path(), get_kode_path())
args = [kode_bin, arm.utils.get_fp()]
if hx_path != None:
args.append(hx_path)
subprocess.Popen(args)
elif arm.utils.get_os() == 'mac':
# kode_studio_mklink_mac(get_sdk_path(), get_kode_path())
args = ['"' + kode_bin + '"' + ' "' + arm.utils.get_fp() + '"']
if hx_path != None:
args[0] += ' "' + hx_path + '"'
subprocess.Popen(args, shell=True)
else:
# kode_studio_mklink_linux(get_sdk_path(), get_kode_path())
args = [kode_bin, arm.utils.get_fp()]
if hx_path != None:
args.append(hx_path)
subprocess.Popen(args)
else:
fp = hx_path if hx_path != None else arm.utils.get_fp()
webbrowser.open('file://' + fp)
def def_strings_to_array(strdefs):
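    # e.g. '_NoShadows_Deferred' -> ['_NoShadows', '_Deferred']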
defs = strdefs.split('_')
defs = defs[1:]
defs = ['_' + d for d in defs] # Restore _
return defs
def get_kha_target(target_name): # TODO: remove
if target_name == 'macos-hl':
return 'osx-hl'
elif target_name.startswith('krom'): # krom-windows
return 'krom'
elif target_name == 'custom':
return ''
return target_name
def target_to_gapi(arm_project_target):
# TODO: align target names
if arm_project_target == 'krom':
return 'arm_gapi_' + arm.utils.get_os()
elif arm_project_target == 'krom-windows':
return 'arm_gapi_win'
elif arm_project_target == 'windows-hl':
return 'arm_gapi_win'
elif arm_project_target == 'krom-linux':
return 'arm_gapi_linux'
elif arm_project_target == 'linux-hl':
return 'arm_gapi_linux'
elif arm_project_target == 'krom-macos':
return 'arm_gapi_mac'
elif arm_project_target == 'macos-hl':
return 'arm_gapi_mac'
elif arm_project_target == 'android-native-hl':
return 'arm_gapi_android'
elif arm_project_target == 'ios-hl':
return 'arm_gapi_ios'
elif arm_project_target == 'node':
return 'arm_gapi_html5'
else: # html5, custom
return 'arm_gapi_' + arm_project_target
def check_default_props():
wrd = bpy.data.worlds['Arm']
if len(wrd.arm_rplist) == 0:
wrd.arm_rplist.add()
wrd.arm_rplist_index = 0
if wrd.arm_project_name == '':
# Take blend file name
wrd.arm_project_name = arm.utils.blend_name()
def register(local_sdk=False):
global use_local_sdk
use_local_sdk = local_sdk
def unregister():
pass
| 35.808321
| 127
| 0.606332
|
119df43b10ab313670f0d71bdab1a4f24794532a
| 3,488
|
py
|
Python
|
update.py
|
tedder/ffmpeg
|
852756e95100f65e702d791c689d141fe4f5005e
|
[
"Apache-2.0"
] | null | null | null |
update.py
|
tedder/ffmpeg
|
852756e95100f65e702d791c689d141fe4f5005e
|
[
"Apache-2.0"
] | null | null | null |
update.py
|
tedder/ffmpeg
|
852756e95100f65e702d791c689d141fe4f5005e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Get latest release from ffmpeg.org
import os
import sys
import re
import urllib2
from distutils.version import StrictVersion
MIN_VERSION = '2.8'
VARIANTS = ['ubuntu', 'alpine', 'centos', 'scratch', 'vaapi']
FFMPEG_RELEASES = 'https://ffmpeg.org/releases/'
travis = []
response = urllib2.urlopen(FFMPEG_RELEASES)
ffmpeg_releases = response.read()
parse_re = re.compile(r'ffmpeg-([.0-9]+)\.tar\.bz2\.asc</a>\s+')
all_versions = parse_re.findall(ffmpeg_releases)
all_versions.sort(key=StrictVersion, reverse=True)
version, all_versions = all_versions[0], all_versions[1:]
last = version.split('.')
keep_version = ['snapshot']
keep_version.append(version)
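# keep 'snapshot' plus the newest release of each major/minor series down to
# MIN_VERSION (all_versions was sorted newest-first above)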
for cur in all_versions:
    if StrictVersion(cur) < StrictVersion(MIN_VERSION):
        break
    tmp = cur.split('.')
    # Check Minor
    if len(tmp) >= 2 and tmp[1].isdigit() and int(tmp[1]) < int(last[1]):
        keep_version.append(cur)
        last = tmp
    # Check Major
    elif len(tmp) > 1 and tmp[0].isdigit() and int(tmp[0]) < int(last[0]):
        keep_version.append(cur)
        last = tmp
for version in keep_version:
for variant in VARIANTS:
if version == 'snapshot':
dockerfile = 'docker-images/%s/%s/Dockerfile' % (
version, variant)
travis.append(' - VERSION=%s VARIANT=%s' % (version, variant))
else:
dockerfile = 'docker-images/%s/%s/Dockerfile' % (
version[0:3], variant)
travis.append(' - VERSION=%s VARIANT=%s' % (version[0:3], variant))
with open('templates/Dockerfile-env', 'r') as tmpfile:
env_content = tmpfile.read()
with open('templates/Dockerfile-template.' + variant, 'r') as tmpfile:
template = tmpfile.read()
with open('templates/Dockerfile-run', 'r') as tmpfile:
run_content = tmpfile.read()
env_content = env_content.replace('%%FFMPEG_VERSION%%', version)
docker_content = template.replace('%%ENV%%', env_content)
docker_content = docker_content.replace('%%RUN%%', run_content)
# OpenJpeg 2.1 is not supported in 2.8
if version[0:3] == '2.8':
docker_content = docker_content.replace('--enable-libopenjpeg', '')
docker_content = docker_content.replace('--enable-libkvazaar', '')
if (version != 'snapshot' and version[0:3] != '4.0') or variant == 'centos':
docker_content = re.sub(r"--enable-libaom [^\\]*", "", docker_content)
if (version == 'snapshot' or version[0] >= '3') and variant == 'vaapi':
docker_content = docker_content.replace('--disable-ffplay', '--disable-ffplay \\\n --enable-vaapi')
# FFmpeg 3.2 and earlier don't compile correctly on Ubuntu 18.04 due to openssl issues
if variant == 'vaapi' and (version[0] < '3' or (version[0] == '3' and version[2] < '3')):
docker_content = docker_content.replace('ubuntu:18.04', 'ubuntu:16.04')
docker_content = docker_content.replace('libva-drm2', 'libva-drm1')
docker_content = docker_content.replace('libva2', 'libva1')
d = os.path.dirname(dockerfile)
if not os.path.exists(d):
os.makedirs(d)
with open(dockerfile, 'w') as dfile:
dfile.write(docker_content)
with open('templates/travis.template', 'r') as tmpfile:
template = tmpfile.read()
travis = template.replace('%%VERSIONS%%', '\n'.join(travis))
with open('.travis.yml', 'w') as travisfile:
travisfile.write(travis)
| 37.106383
| 118
| 0.622133
|
f558917101aa855f947f41a8ff870dbdbc3c5f30
| 2,618
|
py
|
Python
|
download_futures_equities.py
|
westonplatter/ibdatafetcher
|
ba8f6d873cffeeb4ffc24cdccb4eed401c4a9b49
|
[
"BSD-3-Clause"
] | 10
|
2021-03-14T23:00:35.000Z
|
2021-11-19T22:46:48.000Z
|
download_futures_equities.py
|
westonplatter/ibdatafetcher
|
ba8f6d873cffeeb4ffc24cdccb4eed401c4a9b49
|
[
"BSD-3-Clause"
] | 1
|
2021-06-28T15:50:05.000Z
|
2021-06-28T15:50:05.000Z
|
download_futures_equities.py
|
westonplatter/ibdatafetcher
|
ba8f6d873cffeeb4ffc24cdccb4eed401c4a9b49
|
[
"BSD-3-Clause"
] | 5
|
2020-12-03T10:06:39.000Z
|
2022-03-08T17:23:36.000Z
|
from datetime import date
from dateutil.relativedelta import relativedelta
from ib_insync import Future
from loguru import logger
from ibdatafetcher.security_master import InMemSecuritiesMaster
from ibdatafetcher.spreads import Exchange
from ibdatafetcher.ib_client import gen_ib_client, fetch_data
from ibdatafetcher.models import (
Quote,
gen_engine,
init_db,
db_insert_df_conflict_on_do_nothing,
transform_rename_df_columns,
)
from ibdatafetcher.spreads import INDIVIDUAL_CONTRACT_DATA_POINTS
def save_df(sm, contract, value_type, df):
if df is None:
return
# transform
transform_rename_df_columns(df)
# manually fill in data
df["symbol"] = sm.get_symbol(contract)
df["local_symbol"] = sm.get_local_symbol(contract)
df["con_id"] = contract.conId
df["value_type"] = value_type
df["rth"] = True
# load / insert data in DB
db_insert_df_conflict_on_do_nothing(engine, df, Quote.__tablename__)
# sleep so we don't overload IB and get throttled
ib.sleep(2.02)
def execute_fetch(sm, contracts, yyyymmdd, value_types):
for contract in contracts:
for value_type in value_types:
            local_symbol = sm.get_local_symbol(contract)
            logger.debug(f"{yyyymmdd} - {local_symbol} - {value_type} - fetch")
            df = fetch_data(
                ib, sm, engine, contract, yyyymmdd, value_type, rth=True
            )
save_df(sm, contract, value_type, df)
def gen_contract(symbol, exp) -> Future:
return Future(
symbol=symbol,
lastTradeDateOrContractMonth=exp,
exchange=Exchange.GLOBEX.value,
)
def init_contracts(symbols, exps):
contracts = []
for symbol in symbols:
        for ex in exps:
contract = gen_contract(symbol, ex)
contracts.append(contract)
return contracts
def register_contracts_with_sec_master(sm, contracts):
for x in contracts:
sm.set_ref(x.conId, x.localSymbol)
ib = gen_ib_client()
engine = gen_engine()
init_db(engine)
sec_master = InMemSecuritiesMaster()
if __name__ == "__main__":
last_x_days = 20
symbols = ["/MES", "/M2K", "/MNQ"]
expirations = ["202106", "202103"]
contracts = init_contracts(symbols, expirations)
contracts = ib.qualifyContracts(*contracts)
register_contracts_with_sec_master(sec_master, contracts)
for i in range(1, last_x_days):
ago = date.today() - relativedelta(days=i)
yyyymmdd = ago.strftime("%Y%m%d")
execute_fetch(sec_master, contracts, yyyymmdd, INDIVIDUAL_CONTRACT_DATA_POINTS)
| 28.769231
| 87
| 0.699389
|
30e23fd60832ebf9257756d4d37c8dcc1572439c
| 26,276
|
py
|
Python
|
com.geonode.chloropleth/chloropleth/ElementarySchool/migrations/0001_initial.py
|
coderfromanotherearth/School-GIS
|
5675972f8dca8f28172da0cdfc3824a87b33d831
|
[
"MIT"
] | null | null | null |
com.geonode.chloropleth/chloropleth/ElementarySchool/migrations/0001_initial.py
|
coderfromanotherearth/School-GIS
|
5675972f8dca8f28172da0cdfc3824a87b33d831
|
[
"MIT"
] | 1
|
2020-06-05T22:36:02.000Z
|
2020-06-05T22:36:02.000Z
|
com.geonode.chloropleth/chloropleth/ElementarySchool/migrations/0001_initial.py
|
joelabrahamkeerickal/School-GIS
|
5675972f8dca8f28172da0cdfc3824a87b33d831
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-24 09:32
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='district_boundaries',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('district_n', models.CharField(blank=True, max_length=100, null=True)),
('district_c', models.CharField(blank=True, max_length=3, null=True)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(blank=True, null=True, srid=4326)),
],
),
migrations.CreateModel(
name='SchoolInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('txtbkyear', models.CharField(blank=True, max_length=200, null=True)),
('workdays_hsec', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_place', models.CharField(blank=True, max_length=200, null=True)),
('aad_f', models.CharField(blank=True, max_length=200, null=True)),
('acstartmnth', models.CharField(blank=True, max_length=200, null=True)),
('othrooms', models.CharField(blank=True, max_length=200, null=True)),
('subexp_f', models.CharField(blank=True, max_length=200, null=True)),
('cce_yn', models.CharField(blank=True, max_length=200, null=True)),
('toiletwater_b', models.CharField(blank=True, max_length=200, null=True)),
('edu_block', models.CharField(blank=True, max_length=200, null=True)),
('up_comp_set_fre_textbookrec', models.CharField(blank=True, max_length=200, null=True)),
('toiletwater_g', models.CharField(blank=True, max_length=200, null=True)),
('clgoods', models.CharField(blank=True, max_length=200, null=True)),
('subexp_m', models.CharField(blank=True, max_length=200, null=True)),
('par_in_pos_sec', models.CharField(blank=True, max_length=200, null=True)),
('clsunderconst', models.CharField(blank=True, max_length=200, null=True)),
('handwash_yn', models.CharField(blank=True, max_length=200, null=True)),
('totcomp_func', models.CharField(blank=True, max_length=200, null=True)),
('acname', models.CharField(blank=True, max_length=200, null=True)),
('yearrecog', models.CharField(blank=True, max_length=200, null=True)),
('schmgths', models.CharField(blank=True, max_length=200, null=True)),
('smg_e', models.CharField(blank=True, max_length=200, null=True)),
('meds1', models.CharField(blank=True, max_length=200, null=True)),
('latsec', models.CharField(blank=True, max_length=200, null=True)),
('meds4', models.CharField(blank=True, max_length=200, null=True)),
('pcr_shared', models.CharField(blank=True, max_length=200, null=True)),
('sec_resi_school_yn', models.CharField(blank=True, max_length=200, null=True)),
('p_playmat_av', models.CharField(blank=True, max_length=200, null=True)),
('toiletg_func', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_totev', models.CharField(blank=True, max_length=200, null=True)),
('londeg', models.CharField(blank=True, max_length=200, null=True)),
('smg_r', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_py_enrolled_b', models.CharField(blank=True, max_length=200, null=True)),
('txtbkmnth', models.CharField(blank=True, max_length=200, null=True)),
('sch_category', models.CharField(blank=True, max_length=200, null=True)),
('schhrschild_upr', models.CharField(blank=True, max_length=200, null=True)),
('clmajors', models.CharField(blank=True, max_length=200, null=True)),
('clminorhs', models.CharField(blank=True, max_length=200, null=True)),
('urinals_b', models.CharField(blank=True, max_length=200, null=True)),
('toilet_g', models.CharField(blank=True, max_length=200, null=True)),
('workdays_sec', models.CharField(blank=True, max_length=200, null=True)),
('smc_yn', models.CharField(blank=True, max_length=200, null=True)),
('boardhsec', models.CharField(blank=True, max_length=200, null=True)),
('ebmc_m', models.CharField(blank=True, max_length=200, null=True)),
('lang1', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_material_yn', models.CharField(blank=True, max_length=200, null=True)),
('lang3', models.CharField(blank=True, max_length=200, null=True)),
('lang2', models.CharField(blank=True, max_length=200, null=True)),
('workdays_upr', models.CharField(blank=True, max_length=200, null=True)),
('furntch', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_by', models.CharField(blank=True, max_length=200, null=True)),
('clgood', models.CharField(blank=True, max_length=200, null=True)),
('clmajor_tnt', models.CharField(blank=True, max_length=200, null=True)),
('bookbank_yn', models.CharField(blank=True, max_length=200, null=True)),
('bankname', models.CharField(blank=True, max_length=200, null=True)),
('reg_in_pos_hsec', models.CharField(blank=True, max_length=200, null=True)),
('clgood_tnt', models.CharField(blank=True, max_length=200, null=True)),
('water', models.CharField(blank=True, max_length=200, null=True)),
('toiletb_func', models.CharField(blank=True, max_length=200, null=True)),
('schhrschild_pr', models.CharField(blank=True, max_length=200, null=True)),
('handrails', models.CharField(blank=True, max_length=200, null=True)),
('anganwadi_yn', models.CharField(blank=True, max_length=200, null=True)),
('medh4', models.CharField(blank=True, max_length=200, null=True)),
('medh3', models.CharField(blank=True, max_length=200, null=True)),
('medh2', models.CharField(blank=True, max_length=200, null=True)),
('medh1', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_cy_provided_b', models.CharField(blank=True, max_length=200, null=True)),
('totcls11', models.CharField(blank=True, max_length=200, null=True)),
('totcls12', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_py_provided_b', models.CharField(blank=True, max_length=200, null=True)),
('assembly_constituency', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_cy_provided_g', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_py_provided_g', models.CharField(blank=True, max_length=200, null=True)),
('ac_yn', models.CharField(blank=True, max_length=200, null=True)),
('relminority_type', models.CharField(blank=True, max_length=200, null=True)),
('municipality', models.CharField(blank=True, max_length=200, null=True)),
('conti_e', models.CharField(blank=True, max_length=200, null=True)),
('toiletb', models.CharField(blank=True, max_length=200, null=True)),
('bankac_yn', models.CharField(blank=True, max_length=200, null=True)),
('toiletd', models.CharField(blank=True, max_length=200, null=True)),
('estdyear', models.CharField(blank=True, max_length=200, null=True)),
('conti_r', models.CharField(blank=True, max_length=200, null=True)),
('ptameeting', models.CharField(blank=True, max_length=200, null=True)),
('statname', models.CharField(blank=True, max_length=200, null=True)),
('txtbkrecd_yn', models.CharField(blank=True, max_length=200, null=True)),
('lowclass', models.CharField(blank=True, max_length=200, null=True)),
('website', models.CharField(blank=True, max_length=200, null=True)),
('scst_f', models.CharField(blank=True, max_length=200, null=True)),
('clmajor', models.CharField(blank=True, max_length=200, null=True)),
('clsuconst12', models.CharField(blank=True, max_length=200, null=True)),
('furn_yn10', models.CharField(blank=True, max_length=200, null=True)),
('clsuconst10', models.CharField(blank=True, max_length=200, null=True)),
('clsuconst11', models.CharField(blank=True, max_length=200, null=True)),
('scst_m', models.CharField(blank=True, max_length=200, null=True)),
('clminor_kuc', models.CharField(blank=True, max_length=200, null=True)),
('smcbankac_yn', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_py_enrolled_g', models.CharField(blank=True, max_length=200, null=True)),
('smcmem_f', models.CharField(blank=True, max_length=200, null=True)),
('clmajorhs', models.CharField(blank=True, max_length=200, null=True)),
('village', models.CharField(blank=True, max_length=200, null=True)),
('pri_resi_school_yn', models.CharField(blank=True, max_length=200, null=True)),
('rampsneeded_yn', models.CharField(blank=True, max_length=200, null=True)),
('smcmem_m', models.CharField(blank=True, max_length=200, null=True)),
('anganwadi_tch', models.CharField(blank=True, max_length=200, null=True)),
('visitscrc', models.CharField(blank=True, max_length=200, null=True)),
('sip_yn', models.CharField(blank=True, max_length=200, null=True)),
('deo_m', models.CharField(blank=True, max_length=200, null=True)),
('ramps_yn', models.CharField(blank=True, max_length=200, null=True)),
('osrc_e', models.CharField(blank=True, max_length=200, null=True)),
('totcls10', models.CharField(blank=True, max_length=200, null=True)),
('ifsc', models.CharField(blank=True, max_length=200, null=True)),
('cityname', models.CharField(blank=True, max_length=200, null=True)),
('campusplan_yn', models.CharField(blank=True, max_length=200, null=True)),
('distu', models.CharField(blank=True, max_length=200, null=True)),
('block_name', models.CharField(blank=True, max_length=200, null=True)),
('aidrecd', models.CharField(blank=True, max_length=200, null=True)),
('dists', models.CharField(blank=True, max_length=200, null=True)),
('ppstudent', models.CharField(blank=True, max_length=200, null=True)),
('up_palymat_av', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_evtrnd', models.CharField(blank=True, max_length=200, null=True)),
('p_tle_av', models.CharField(blank=True, max_length=200, null=True)),
('osrc_r', models.CharField(blank=True, max_length=200, null=True)),
('medchk_yn', models.CharField(blank=True, max_length=200, null=True)),
('relminority_yn', models.CharField(blank=True, max_length=200, null=True)),
('clminors', models.CharField(blank=True, max_length=200, null=True)),
('reg_tch_san_hsec', models.CharField(blank=True, max_length=200, null=True)),
('smsparents_m', models.CharField(blank=True, max_length=200, null=True)),
('totcls9', models.CharField(blank=True, max_length=200, null=True)),
('schmgt', models.CharField(blank=True, max_length=200, null=True)),
('smsparents_f', models.CharField(blank=True, max_length=200, null=True)),
('meds2', models.CharField(blank=True, max_length=200, null=True)),
('lonmin', models.CharField(blank=True, max_length=200, null=True)),
('clgood_ppu', models.CharField(blank=True, max_length=200, null=True)),
('up_tle_av', models.CharField(blank=True, max_length=200, null=True)),
('meds3', models.CharField(blank=True, max_length=200, null=True)),
('ebmc_f', models.CharField(blank=True, max_length=200, null=True)),
('schtypes', models.CharField(blank=True, max_length=200, null=True)),
('yearrecogs', models.CharField(blank=True, max_length=200, null=True)),
('deo_f', models.CharField(blank=True, max_length=200, null=True)),
('shift_yn', models.CharField(blank=True, max_length=200, null=True)),
('smschildrec_yn', models.CharField(blank=True, max_length=200, null=True)),
('smcbank', models.CharField(blank=True, max_length=200, null=True)),
('land4pground_yn', models.CharField(blank=True, max_length=200, null=True)),
('electric_yn', models.CharField(blank=True, max_length=200, null=True)),
('cwsnsch_yn', models.CharField(blank=True, max_length=200, null=True)),
('ifsccode', models.CharField(blank=True, max_length=200, null=True)),
('reg_tch_san_sec', models.CharField(blank=True, max_length=200, null=True)),
('anganwadi_stu', models.CharField(blank=True, max_length=200, null=True)),
('schhrstch_upr', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_type', models.CharField(blank=True, max_length=200, null=True)),
('cal_yn', models.CharField(blank=True, max_length=200, null=True)),
('pta_yn', models.CharField(blank=True, max_length=200, null=True)),
('ccesec_yn', models.CharField(blank=True, max_length=200, null=True)),
('ac_year', models.CharField(blank=True, max_length=200, null=True)),
('smdc_yn', models.CharField(blank=True, max_length=200, null=True)),
('custer_name', models.CharField(blank=True, max_length=200, null=True)),
('pground_yn', models.CharField(blank=True, max_length=200, null=True)),
('wsec25p_enrolled', models.CharField(blank=True, max_length=200, null=True)),
('medinstr4', models.CharField(blank=True, max_length=200, null=True)),
('noinspect', models.CharField(blank=True, max_length=200, null=True)),
('reg_in_pos_sec', models.CharField(blank=True, max_length=200, null=True)),
('medinstr1', models.CharField(blank=True, max_length=200, null=True)),
('medinstr3', models.CharField(blank=True, max_length=200, null=True)),
('medinstr2', models.CharField(blank=True, max_length=200, null=True)),
('schhrstch_pr', models.CharField(blank=True, max_length=200, null=True)),
('schname', models.CharField(blank=True, max_length=200, null=True)),
('parents_m', models.CharField(blank=True, max_length=200, null=True)),
('par_in_pos_up', models.CharField(blank=True, max_length=200, null=True)),
('furn_yn9', models.CharField(blank=True, max_length=200, null=True)),
('visitsrtcwsn', models.CharField(blank=True, max_length=200, null=True)),
('reg_in_pos_up', models.CharField(blank=True, max_length=200, null=True)),
('parents_f', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_cy_enrolled_b', models.CharField(blank=True, max_length=200, null=True)),
('clmajor_ppu', models.CharField(blank=True, max_length=200, null=True)),
('hmroom_yn', models.CharField(blank=True, max_length=200, null=True)),
('smcacname', models.CharField(blank=True, max_length=200, null=True)),
('ccehsec_yn', models.CharField(blank=True, max_length=200, null=True)),
('hostelboys', models.CharField(blank=True, max_length=200, null=True)),
('par_in_pos_p', models.CharField(blank=True, max_length=200, null=True)),
('tch_m', models.CharField(blank=True, max_length=200, null=True)),
('bankacno', models.CharField(blank=True, max_length=200, null=True)),
('sbc_yn', models.CharField(blank=True, max_length=200, null=True)),
('latdeg', models.CharField(blank=True, max_length=200, null=True)),
('yearrecogs_1', models.CharField(blank=True, max_length=200, null=True)),
('smdcmeeting', models.CharField(blank=True, max_length=200, null=True)),
('ahm_f', models.CharField(blank=True, max_length=200, null=True)),
('tlm_e', models.CharField(blank=True, max_length=200, null=True)),
('ahm_m', models.CharField(blank=True, max_length=200, null=True)),
('smcmeetings', models.CharField(blank=True, max_length=200, null=True)),
('approachbyroad', models.CharField(blank=True, max_length=200, null=True)),
('panchayat', models.CharField(blank=True, max_length=200, null=True)),
('tot_m', models.CharField(blank=True, max_length=200, null=True)),
('smcsmdc1_yn', models.CharField(blank=True, max_length=200, null=True)),
('spltrg_cy_enrolled_g', models.CharField(blank=True, max_length=200, null=True)),
('schtypehs', models.CharField(blank=True, max_length=200, null=True)),
('clminor', models.CharField(blank=True, max_length=200, null=True)),
('tot_f', models.CharField(blank=True, max_length=200, null=True)),
('furn_yn11', models.CharField(blank=True, max_length=200, null=True)),
('cp_m', models.CharField(blank=True, max_length=200, null=True)),
('hostelb_yn', models.CharField(blank=True, max_length=200, null=True)),
('lonsec', models.CharField(blank=True, max_length=200, null=True)),
('smcsdp_yn', models.CharField(blank=True, max_length=200, null=True)),
('up_resi_school_yn', models.CharField(blank=True, max_length=200, null=True)),
('par_in_pos_hsec', models.CharField(blank=True, max_length=200, null=True)),
('cp_f', models.CharField(blank=True, max_length=200, null=True)),
('clminor_tnt', models.CharField(blank=True, max_length=200, null=True)),
('residential_type', models.CharField(blank=True, max_length=200, null=True)),
('reg_in_pos_p', models.CharField(blank=True, max_length=200, null=True)),
('ppsec_yn', models.CharField(blank=True, max_length=200, null=True)),
('yearupgrdh', models.CharField(blank=True, max_length=200, null=True)),
('smcnomlocal_f', models.CharField(blank=True, max_length=200, null=True)),
('bldstatus', models.CharField(blank=True, max_length=200, null=True)),
('block_name_1', models.CharField(blank=True, max_length=200, null=True)),
('workdays_pr', models.CharField(blank=True, max_length=200, null=True)),
('smcnomlocal_m', models.CharField(blank=True, max_length=200, null=True)),
('schcd', models.CharField(blank=True, max_length=200, null=True)),
('upgrad_sec', models.CharField(blank=True, max_length=200, null=True)),
('p_comp_set_fre_textbookrec', models.CharField(blank=True, max_length=200, null=True)),
('reg_tch_san_p', models.CharField(blank=True, max_length=200, null=True)),
('clrooms', models.CharField(blank=True, max_length=200, null=True)),
('clsuconst9', models.CharField(blank=True, max_length=200, null=True)),
('librarian_yn', models.CharField(blank=True, max_length=200, null=True)),
('bankbranch', models.CharField(blank=True, max_length=200, null=True)),
('clmajor_kuc', models.CharField(blank=True, max_length=200, null=True)),
('hm_f', models.CharField(blank=True, max_length=200, null=True)),
('prevoccourse_yn', models.CharField(blank=True, max_length=200, null=True)),
('eduvocguide_yn', models.CharField(blank=True, max_length=200, null=True)),
('water_func_yn', models.CharField(blank=True, max_length=200, null=True)),
('spltrng_yn', models.CharField(blank=True, max_length=200, null=True)),
('hm_m', models.CharField(blank=True, max_length=200, null=True)),
('wsec25p_applied', models.CharField(blank=True, max_length=200, null=True)),
('stuadmitted', models.CharField(blank=True, max_length=200, null=True)),
('reg_tch_san_up', models.CharField(blank=True, max_length=200, null=True)),
('newspaper_yn', models.CharField(blank=True, max_length=200, null=True)),
('schtype', models.CharField(blank=True, max_length=200, null=True)),
('mtongue_yn', models.CharField(blank=True, max_length=200, null=True)),
('upgrad_up', models.CharField(blank=True, max_length=200, null=True)),
('highclass', models.CharField(blank=True, max_length=200, null=True)),
('bookinlib', models.CharField(blank=True, max_length=200, null=True)),
('hsec_resi_school_yn', models.CharField(blank=True, max_length=200, null=True)),
('schmgts', models.CharField(blank=True, max_length=200, null=True)),
('rururb', models.CharField(blank=True, max_length=200, null=True)),
('latmin', models.CharField(blank=True, max_length=200, null=True)),
('hostelgirls', models.CharField(blank=True, max_length=200, null=True)),
('habitation_name', models.CharField(blank=True, max_length=200, null=True)),
('clgoodhs', models.CharField(blank=True, max_length=200, null=True)),
('clgood_kuc', models.CharField(blank=True, max_length=200, null=True)),
('instr_rte_up', models.CharField(blank=True, max_length=200, null=True)),
('visitsbrc', models.CharField(blank=True, max_length=200, null=True)),
('boardsec', models.CharField(blank=True, max_length=200, null=True)),
('aad_m', models.CharField(blank=True, max_length=200, null=True)),
('ppteacher', models.CharField(blank=True, max_length=200, null=True)),
('women_m', models.CharField(blank=True, max_length=200, null=True)),
('tch_f', models.CharField(blank=True, max_length=200, null=True)),
('tlm_r', models.CharField(blank=True, max_length=200, null=True)),
('smcacno', models.CharField(blank=True, max_length=200, null=True)),
('smcbankbranch', models.CharField(blank=True, max_length=200, null=True)),
('land4cls_yn', models.CharField(blank=True, max_length=200, null=True)),
('city', models.CharField(blank=True, max_length=200, null=True)),
('furnstu', models.CharField(blank=True, max_length=200, null=True)),
('distname', models.CharField(blank=True, max_length=200, null=True)),
('bndrywall', models.CharField(blank=True, max_length=200, null=True)),
('furn_yn12', models.CharField(blank=True, max_length=200, null=True)),
('hostelg_yn', models.CharField(blank=True, max_length=200, null=True)),
('local_m', models.CharField(blank=True, max_length=200, null=True)),
('pcr_maintained', models.CharField(blank=True, max_length=200, null=True)),
('computer', models.CharField(blank=True, max_length=200, null=True)),
('clminor_ppu', models.CharField(blank=True, max_length=200, null=True)),
('local_f', models.CharField(blank=True, max_length=200, null=True)),
('urinals_g', models.CharField(blank=True, max_length=200, null=True)),
],
options={
'db_table': 'school_info',
},
),
migrations.CreateModel(
name='state_maharashtra',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('st_nm', models.CharField(max_length=75)),
('st_cen_cd', models.CharField(max_length=2)),
('dt_cen_cd', models.CharField(max_length=2)),
('district', models.CharField(max_length=50)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
),
migrations.CreateModel(
name='taluka_boundaries',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('district_n', models.CharField(max_length=100)),
('district_c', models.CharField(max_length=3)),
('taluka_nam', models.CharField(max_length=50)),
('taluka_cod', models.CharField(max_length=5)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
),
]
| 80.109756
| 114
| 0.613373
|
df268b6cb8c0f2c3ef1ff5dff8f2e241e9c4dbb4
| 6,642
|
py
|
Python
|
mnist_stdp_multiple_exploration_v19.py
|
nikhil-garg/VDSP_ocl
|
906867f8cd8a899a1ce309c5ec843fa1ce865373
|
[
"MIT"
] | null | null | null |
mnist_stdp_multiple_exploration_v19.py
|
nikhil-garg/VDSP_ocl
|
906867f8cd8a899a1ce309c5ec843fa1ce865373
|
[
"MIT"
] | null | null | null |
mnist_stdp_multiple_exploration_v19.py
|
nikhil-garg/VDSP_ocl
|
906867f8cd8a899a1ce309c5ec843fa1ce865373
|
[
"MIT"
] | 1
|
2021-03-17T20:04:08.000Z
|
2021-03-17T20:04:08.000Z
|
import itertools
import random
import logging
import numpy as np
import matplotlib.pyplot as plt
import os
from mnist_stdp_multiple_baseline import *
from utilis import *
from args_mnist import args as my_args
# from ax import optimize
import pandas as pd
from itertools import product
import time
if __name__ == '__main__':
args = my_args()
print(args.__dict__)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
    # Fix the seed of all random number generators
seed = 50
random.seed(seed)
np.random.seed(seed)
pwd = os.getcwd()
df = pd.DataFrame({
"amp_neuron":[],
"input_nbr":[],
"tau_in" :[],
"tau_out":[],
"alpha_p":[],
"alpha_n":[],
"beta_p":[],
"beta_n":[],
"tau_pre":[],
"tau_post":[],
"iterations":[],
"presentation_time":[],
"pause_time":[],
"dt":[],
"n_neurons":[],
"inhibition_time":[],
"tau_ref_in":[],
"tau_ref_out":[],
"inc_n":[],
"tau_n":[],
"synapse_layer_1":[],
"gain_in":[],
"gain_out":[],
"accuracy":[],
"accuracy_2":[]
})
if args.log_file_path is None:
log_dir = pwd+'/log_dir/'
else :
log_dir = args.log_file_path
df.to_csv(log_dir+'test.csv', index=False)
parameters = dict(
amp_neuron=[0.005]
,input_nbr=[60000]
,tau_in = [0.03]
,tau_out = [0.03]
, alpha_p= [0.7,0.8,0.9]
, alpha_n= [0.01,0.005,0.02]
, beta_p= [1.4,0.8,1.2]
, beta_n= [1,0.8,1.2]
, tau_pre= [0.06]
, tau_post= [0.08]
, iterations=[1]
, presentation_time = [0.20]
, pause_time = [0.1]
, dt = [0.005]
, n_neurons = [10]
, inhibition_time = [10]
, tau_ref_in = [0.005]
, tau_ref_out = [0.005]
, inc_n = [0.01]
, tau_n = [1]
, synapse_layer_1=[0.005]
, gain_in = [4]
, gain_out = [2]
, seed =[100]
)
param_values = [v for v in parameters.values()]
now = time.strftime("%Y%m%d-%H%M%S")
folder = os.getcwd()+"/MNIST_VDSP_explorartion"+now
os.mkdir(folder)
for args.amp_neuron,args.input_nbr,args.tau_in,args.tau_out,args.alpha_p,args.alpha_n,args.beta_p,args.beta_n,args.tau_pre,args.tau_post,args.iterations,args.presentation_time,args.pause_time, args.dt,args.n_neurons,args.inhibition_time,args.tau_ref_in,args.tau_ref_out,args.inc_n,args.tau_n,args.synapse_layer_1,args.gain_in,args.gain_out,args.seed in product(*param_values):
# args.pause_time = 0
# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)
args.filename = 'stdp-'+str(args.amp_neuron)+str(args.input_nbr)+str(args.tau_in)+str(args.tau_out)+str(args.alpha_p)+str(args.alpha_n)+str(args.beta_p)+str(args.beta_n)+str(args.tau_pre)+str(args.tau_post)
timestr = time.strftime("%Y%m%d-%H%M%S")
log_file_name = 'accuracy_log'+'.csv'
pwd = os.getcwd()
accuracy, accuracy_2,weights = evaluate_mnist_multiple_baseline(args)
df = df.append({
"amp_neuron":args.amp_neuron,
"input_nbr":args.input_nbr,
"tau_in":args.tau_in,
"tau_out": args.tau_out,
"alpha_p": args.alpha_p,
"alpha_n": args.alpha_n,
"beta_p":args.beta_p,
"beta_n": args.beta_n,
"tau_pre": args.tau_pre,
"tau_post": args.tau_post,
"iterations":args.iterations,
"presentation_time":args.presentation_time,
"pause_time":args.pause_time,
"dt":args.dt,
"n_neurons":args.n_neurons,
"seed":args.seed,
"inhibition_time":args.inhibition_time,
"tau_ref_in":args.tau_ref_in,
"tau_ref_out":args.tau_ref_out,
"inc_n":args.inc_n,
"tau_n":args.tau_n,
"synapse_layer_1":args.synapse_layer_1,
"gain_in":args.gain_in,
"bias_out":args.bias_out,
"accuracy":accuracy,
"accuracy_2":accuracy_2
},ignore_index=True)
plot = True
if plot :
print('accuracy', accuracy)
print(args.filename)
# weights = weights[-1]#Taking only the last weight for plotting
columns = int(args.n_neurons/5)
rows = int(args.n_neurons/columns)
fig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(columns*5,rows*5))
for i in range(0,(args.n_neurons)):
axes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)
axes[int(i/columns)][int(i%columns)].get_xaxis().set_visible(False)
axes[int(i/columns)][int(i%columns)].get_yaxis().set_visible(False)
plt.tight_layout()
plt.axis('off')
# fig, axes = plt.subplots(1,1, figsize=(3,3))
# fig = plt.figure()
# ax1 = fig.add_subplot()
# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)
# fig.colorbar(cax)
# plt.tight_layout()
if args.log_file_path is None:
log_dir = pwd+'/log_dir/'
else :
log_dir = args.log_file_path
df.to_csv(log_dir+log_file_name, index=False)
fig.savefig(log_dir+args.filename+'weights.png')
plt.close()
plt.clf()
plt.hist(weights.flatten())
plt.tight_layout()
plt.savefig(log_dir+args.filename+'histogram.png')
# plt.figure(figsize=(12,10))
# plt.subplot(2, 1, 1)
# plt.title('Input neurons')
# rasterplot(time_points, p_input_layer)
# plt.xlabel("Time [s]")
# plt.ylabel("Neuron index")
# plt.subplot(2, 1, 2)
# plt.title('Output neurons')
# rasterplot(time_points, p_layer_1)
# plt.xlabel("Time [s]")
# plt.ylabel("Neuron index")
# plt.tight_layout()
# plt.savefig(folder+'/raster'+str(args.filename)+'.png')
timestr = time.strftime("%Y%m%d-%H%M%S")
log_file_name = 'accuracy_log'+'.csv'
pwd = os.getcwd()
if args.log_file_path is None:
log_dir = pwd+'/log_dir/'
else :
log_dir = args.log_file_path
df.to_csv(log_dir+log_file_name, index=False)
df.to_csv(log_file_name, index=False)
logger.info('All done.')
| 30.46789
| 377
| 0.578139
|
7adeb8263019a02593844850c54be7d02cd04458
| 2,448
|
py
|
Python
|
code/tools/resample.py
|
Sirius291/trafficsign-cyclegan
|
181f794a7c31311ab4cb9b76df5f1ab3bc6ef64d
|
[
"MIT"
] | 3
|
2020-06-08T16:58:07.000Z
|
2020-12-29T17:13:58.000Z
|
code/tools/resample.py
|
Sirius291/trafficsign-cyclegan
|
181f794a7c31311ab4cb9b76df5f1ab3bc6ef64d
|
[
"MIT"
] | null | null | null |
code/tools/resample.py
|
Sirius291/trafficsign-cyclegan
|
181f794a7c31311ab4cb9b76df5f1ab3bc6ef64d
|
[
"MIT"
] | 5
|
2019-06-07T13:05:17.000Z
|
2022-02-28T16:57:02.000Z
|
'''
Application for resampling a set of files such that all classes are balanced.
Usage: resample in_path labels_path out_path [extension]
Dominic Spata,
Real-Time Computer Vision,
Institut fuer Neuroinformatik,
Ruhr University Bochum.
'''
import os
import sys
import shutil
import json
def resample(in_path, labels_path, out_path, extension = ""):
'''
Resample a collection of files such that all classes are balanced.
Arguments:
in_path -- The path to the directory from which to take the input files.
        labels_path -- The path to the JSON file containing the class labels for the files.
out_path -- The path to the directory where to output the resampled collection of images.
extension -- The file type extension of the files to resample.
'''
file_names = [file_name for file_name in os.listdir(in_path) if os.path.isfile(os.path.join(in_path, file_name)) and file_name.endswith(extension)]
with open(labels_path, 'r') as label_file: labels = json.load(label_file)
inverse_labels = dict()
for label in set(labels.values()):
inverse_labels[label] = list()
for file_name in file_names:
inverse_labels[labels[file_name]].append(file_name)
target_count = max([len(inverse_labels[label]) for label in inverse_labels.keys()])
new_labels = dict()
for label in inverse_labels.keys():
for i in range(target_count):
old_file_name = inverse_labels[label][i % len(inverse_labels[label])]
new_file_name = old_file_name[:old_file_name.rfind(extension)] + "_" + str(i // len(inverse_labels[label])) + extension
shutil.copyfile(os.path.join(in_path, old_file_name), os.path.join(out_path, new_file_name))
new_labels[new_file_name] = labels[old_file_name]
with open(os.path.join(out_path, "labels.json"), 'w') as labels_file: json.dump(new_labels, labels_file)
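# Illustrative sketch of the naming scheme used above (the file names, labels
# and counts below are hypothetical, not taken from any real data set): every
# class is oversampled by cyclic duplication until it reaches the size of the
# largest class, and each copy gets its duplication round appended to the name.
def _resample_naming_example(originals=("a.png", "b.png"), target_count=5, extension=".png"):
    resampled = []
    for i in range(target_count):
        old_file_name = originals[i % len(originals)]
        new_file_name = old_file_name[:old_file_name.rfind(extension)] + "_" + str(i // len(originals)) + extension
        resampled.append(new_file_name)
    return resampled    # ['a_0.png', 'b_0.png', 'a_1.png', 'b_1.png', 'a_2.png']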
def main(argv):
argv.pop(0)
if len(argv) == 0:
print("Input path:\t", end = '')
argv.append(input())
print("Labels path:\t", end = '')
argv.append(input())
print("Output path:\t", end = '')
argv.append(input())
print("Extension:\t", end = '')
argv.append(input())
if len(argv) in (3, 4):
resample(*argv)
else:
print("Usage: resample in_path labels_path out_path [extension]")
if __name__ == '__main__':
main(sys.argv)
| 33.081081
| 151
| 0.668709
|
9e051d57167dd132ef837720152a709124301272
| 622
|
py
|
Python
|
examples/test_parse_html_2.py
|
jkpubsrc/python-module-jk-xmlparser
|
eaf6141f1cb3ab2d90f2a8444175b779584f5efe
|
[
"Apache-1.1"
] | null | null | null |
examples/test_parse_html_2.py
|
jkpubsrc/python-module-jk-xmlparser
|
eaf6141f1cb3ab2d90f2a8444175b779584f5efe
|
[
"Apache-1.1"
] | null | null | null |
examples/test_parse_html_2.py
|
jkpubsrc/python-module-jk-xmlparser
|
eaf6141f1cb3ab2d90f2a8444175b779584f5efe
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/python3
import jk_xmlparser
from jk_simplexml import *
TEXT_DATA = """<!-- This is a comment -->
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="css/styles.css?v=1.0">
</head>
<body>
<hr/>
<script src="js/scripts.js"></script>
<h1>Heading 1</h1>
<p>
Test
< br />
<img src = "bla" width = "abc" height = 123 important />
</p>
</body>
</html>
"""
domParser = jk_xmlparser.HtmlDOMParser(True, True)
xRoot = domParser.parseText(TEXT_DATA, None)
xmlWriteSettings = XMLWriteSettings()
HSerializer.printDump(xRoot, xmlWriteSettings)
| 11.518519
| 59
| 0.628617
|
2d20ea6a2c93f1e7db9c1ce32661fb84e0f40354
| 6,278
|
py
|
Python
|
venv/Lib/site-packages/sqlalchemy/event/api.py
|
adityasagam/azurePy
|
bf6c61c1d6f52521602bae2ab3f06ffba4e30444
|
[
"MIT"
] | 5
|
2021-09-05T16:11:12.000Z
|
2022-03-20T12:28:42.000Z
|
venv/Lib/site-packages/sqlalchemy/event/api.py
|
adityasagam/azurePy
|
bf6c61c1d6f52521602bae2ab3f06ffba4e30444
|
[
"MIT"
] | 22
|
2019-01-18T02:57:36.000Z
|
2019-02-24T22:38:54.000Z
|
venv/Lib/site-packages/sqlalchemy/event/api.py
|
adityasagam/azurePy
|
bf6c61c1d6f52521602bae2ab3f06ffba4e30444
|
[
"MIT"
] | 6
|
2019-02-19T09:03:28.000Z
|
2019-02-21T06:38:35.000Z
|
# event/api.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions for the event system.
"""
from __future__ import absolute_import
from .base import _registrars
from .registry import _EventKey
from .. import exc
from .. import util
CANCEL = util.symbol("CANCEL")
NO_RETVAL = util.symbol("NO_RETVAL")
def _event_key(target, identifier, fn):
for evt_cls in _registrars[identifier]:
tgt = evt_cls._accept_with(target)
if tgt is not None:
return _EventKey(target, identifier, fn, tgt)
else:
raise exc.InvalidRequestError(
"No such event '%s' for target '%s'" % (identifier, target)
)
def listen(target, identifier, fn, *args, **kw):
"""Register a listener function for the given target.
The :func:`.listen` function is part of the primary interface for the
SQLAlchemy event system, documented at :ref:`event_toplevel`.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
event.listen(
UniqueConstraint,
"after_parent_attach",
unique_constraint_name)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
def on_config():
do_config()
event.listen(Mapper, "before_configure", on_config, once=True)
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
.. note::
The :func:`.listen` function cannot be called at the same time
that the target event is being run. This has implications
for thread safety, and also means an event cannot be added
from inside the listener function for itself. The list of
events to be run are present inside of a mutable collection
that can't be changed during iteration.
Event registration and removal is not intended to be a "high
velocity" operation; it is a configurational operation. For
systems that need to quickly associate and deassociate with
events at high scale, use a mutable structure that is handled
from inside of a single listener.
.. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
used as the container for the list of events, which explicitly
disallows collection mutation while the collection is being
iterated.
.. seealso::
:func:`.listens_for`
:func:`.remove`
"""
_event_key(target, identifier, fn).listen(*args, **kw)
def listens_for(target, identifier, *args, **kw):
"""Decorate a function as a listener for the given target + identifier.
The :func:`.listens_for` decorator is part of the primary interface for the
SQLAlchemy event system, documented at :ref:`event_toplevel`.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
@event.listens_for(UniqueConstraint, "after_parent_attach")
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
@event.listens_for(Mapper, "before_configure", once=True)
def on_config():
do_config()
.. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
.. seealso::
:func:`.listen` - general description of event listening
"""
def decorate(fn):
listen(target, identifier, fn, *args, **kw)
return fn
return decorate
def remove(target, identifier, fn):
"""Remove an event listener.
The arguments here should match exactly those which were sent to
:func:`.listen`; all the event registration which proceeded as a result
of this call will be reverted by calling :func:`.remove` with the same
arguments.
e.g.::
# if a function was registered like this...
@event.listens_for(SomeMappedClass, "before_insert", propagate=True)
def my_listener_function(*arg):
pass
# ... it's removed like this
event.remove(SomeMappedClass, "before_insert", my_listener_function)
Above, the listener function associated with ``SomeMappedClass`` was also
propagated to subclasses of ``SomeMappedClass``; the :func:`.remove`
function will revert all of these operations.
.. versionadded:: 0.9.0
.. note::
The :func:`.remove` function cannot be called at the same time
that the target event is being run. This has implications
for thread safety, and also means an event cannot be removed
from inside the listener function for itself. The list of
events to be run are present inside of a mutable collection
that can't be changed during iteration.
Event registration and removal is not intended to be a "high
velocity" operation; it is a configurational operation. For
systems that need to quickly associate and deassociate with
events at high scale, use a mutable structure that is handled
from inside of a single listener.
.. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now
used as the container for the list of events, which explicitly
disallows collection mutation while the collection is being
iterated.
.. seealso::
:func:`.listen`
"""
_event_key(target, identifier, fn).remove()
def contains(target, identifier, fn):
"""Return True if the given target/ident/fn is set up to listen.
.. versionadded:: 0.9.0
"""
return _event_key(target, identifier, fn).contains()
| 31.547739
| 79
| 0.652915
|
1c0a6b7bf41a5e50556cfd22e6fc91ee5ff016a8
| 2,183
|
py
|
Python
|
ptsemseg/models/unet.py
|
czha5168/pytorch-semseg
|
10e7b2d199bb6245fde3d1557632d8ee1ebac663
|
[
"MIT"
] | 28
|
2019-04-30T05:05:33.000Z
|
2021-07-15T08:04:28.000Z
|
ptsemseg/models/unet.py
|
linxi159/Pytorch-Semseg
|
5cd845f1c911ab6ee4cbc6a3ce90f01229326fab
|
[
"MIT"
] | 3
|
2019-10-18T07:50:22.000Z
|
2021-02-17T16:10:41.000Z
|
ptsemseg/models/unet.py
|
linxi159/Pytorch-Semseg
|
5cd845f1c911ab6ee4cbc6a3ce90f01229326fab
|
[
"MIT"
] | 7
|
2019-07-29T02:48:15.000Z
|
2020-11-23T13:22:49.000Z
|
import torch.nn as nn
from ptsemseg.models.utils import *
class unet(nn.Module):
def __init__(
self,
feature_scale=4,
n_classes=21,
is_deconv=True,
in_channels=3,
is_batchnorm=True,
):
super(unet, self).__init__()
self.is_deconv = is_deconv
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
filters = [64, 128, 256, 512, 1024]
filters = [int(x / self.feature_scale) for x in filters]
# downsampling
self.conv1 = unetConv2(self.in_channels, filters[0], self.is_batchnorm)
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.conv2 = unetConv2(filters[0], filters[1], self.is_batchnorm)
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
self.conv3 = unetConv2(filters[1], filters[2], self.is_batchnorm)
self.maxpool3 = nn.MaxPool2d(kernel_size=2)
self.conv4 = unetConv2(filters[2], filters[3], self.is_batchnorm)
self.maxpool4 = nn.MaxPool2d(kernel_size=2)
self.center = unetConv2(filters[3], filters[4], self.is_batchnorm)
# upsampling
self.up_concat4 = unetUp(filters[4], filters[3], self.is_deconv)
self.up_concat3 = unetUp(filters[3], filters[2], self.is_deconv)
self.up_concat2 = unetUp(filters[2], filters[1], self.is_deconv)
self.up_concat1 = unetUp(filters[1], filters[0], self.is_deconv)
# final conv (without any concat)
self.final = nn.Conv2d(filters[0], n_classes, 1)
def forward(self, inputs):
conv1 = self.conv1(inputs)
maxpool1 = self.maxpool1(conv1)
conv2 = self.conv2(maxpool1)
maxpool2 = self.maxpool2(conv2)
conv3 = self.conv3(maxpool2)
maxpool3 = self.maxpool3(conv3)
conv4 = self.conv4(maxpool3)
maxpool4 = self.maxpool4(conv4)
center = self.center(maxpool4)
up4 = self.up_concat4(conv4, center)
up3 = self.up_concat3(conv3, up4)
up2 = self.up_concat2(conv2, up3)
up1 = self.up_concat1(conv1, up2)
final = self.final(up1)
return final
| 31.185714
| 79
| 0.628035
|
53f9665e80f8ddaf2797bf11312dd4fe038d3cf5
| 35,406
|
py
|
Python
|
tspymfe/global_stats.py
|
FelSiq/ts-pymfe
|
61cc1f63fa055c7466151cfefa7baff8df1702b7
|
[
"MIT"
] | 9
|
2020-05-28T01:24:07.000Z
|
2021-09-03T11:33:04.000Z
|
tspymfe/global_stats.py
|
FelSiq/ts-pymfe
|
61cc1f63fa055c7466151cfefa7baff8df1702b7
|
[
"MIT"
] | 1
|
2021-02-18T19:27:50.000Z
|
2021-02-18T20:27:29.000Z
|
tspymfe/global_stats.py
|
FelSiq/ts-pymfe
|
61cc1f63fa055c7466151cfefa7baff8df1702b7
|
[
"MIT"
] | 2
|
2020-09-18T06:29:38.000Z
|
2021-02-17T23:07:18.000Z
|
"""Module dedicated to global statistics time-series meta-features."""
import typing as t
import warnings
import numpy as np
import nolds
import scipy.stats
import tspymfe._period as _period
import tspymfe._utils as _utils
import tspymfe._summary as _summary
class MFETSGlobalStats:
"""Extract time-series meta-features from Global Statistics group."""
@classmethod
def precompute_period(cls, ts: np.ndarray, **kwargs) -> t.Dict[str, int]:
"""Precompute the time-series period.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
kwargs:
Additional arguments and previous precomputed items. May
speed up this precomputation.
Returns
-------
dict
The following precomputed item is returned:
* ``ts_period`` (:obj:`int`): time-series period.
"""
precomp_vals = {} # type: t.Dict[str, int]
if "ts_period" not in kwargs:
precomp_vals["ts_period"] = _period.get_ts_period(ts=ts)
return precomp_vals
@classmethod
def ft_ioe_tdelta_mean(
cls,
ts: np.ndarray,
step_size: float = 0.05,
normalize: bool = True,
differentiate: bool = False,
ts_scaled: t.Optional[np.ndarray] = None,
) -> np.ndarray:
"""Mean change of interval length with iterative outlier exclusion.
This method calculates, at each iteration, the mean of the differences
of the timestamps of instances using the iterative outlier exclusion
strategy.
        In the iterative outlier exclusion, a uniformly spaced set of
        thresholds over the time-series range is built and, for each iteration,
        a statistic is calculated over the differences of the timestamps of the
        instances larger than or equal to the current threshold.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
ts_scaled : :obj:`np.ndarray`, optional
Standardized time-series values. Used to take advantage of
precomputations.
step_size : float, optional (default=0.05)
            Increase of the outlier threshold in each iteration. Must be a
            strictly positive number.
normalize : bool, optional (default=True)
If True, normalize the statistic in the [-1, 1] interval. If
False, return the raw mean timestamp values.
differentiate : bool, optional (default=False)
If True, differentiate the timestamps before calculating each
statistic. If False, all statistics will be calculated on the
raw timestamps.
Returns
-------
:obj:`np.ndarray`
If `differentiate` is False, the mean value of outlier timestamps
of all iterations of the iterative outlier exclusion process. If
`differentiate` is True, the mean value of the timestamps interval
of outliers for every iteration. Also, if `normalize` is True,
every value will be normalized to the [-1, 1] range.
References
----------
.. [1] B.D. Fulcher and N.S. Jones, "hctsa: A Computational Framework
for Automated Time-Series Phenotyping Using Massive Feature
Extraction, Cell Systems 5: 527 (2017).
DOI: 10.1016/j.cels.2017.10.001
.. [2] B.D. Fulcher, M.A. Little, N.S. Jones, "Highly comparative
time-series analysis: the empirical structure of time series and
their methods", J. Roy. Soc. Interface 10(83) 20130048 (2013).
DOI: 10.1098/rsif.2013.0048
"""
tdelta_it_mean = _utils.calc_ioe_stats(
ts=ts,
funcs=np.mean,
ts_scaled=ts_scaled,
step_size=step_size,
differentiate=differentiate,
)
if normalize:
tdelta_it_mean = 2 * tdelta_it_mean / ts.size - 1
return tdelta_it_mean
@classmethod
def ft_trend_strenght(
cls,
ts_residuals: np.ndarray,
ts_deseasonalized: np.ndarray,
ddof: int = 1,
) -> float:
"""Ratio of standard deviations of time-series and after detrend.
Parameters
----------
ts_residuals : :obj:`np.ndarray`
Residuals (random noise) of an one-dimensional time-series.
ts_deseasonalized: :obj:`np.ndarray`
One-dimensional deseasonalized time-series values.
ddof : int, optional (default=1)
Degrees of freedom for standard deviation.
Returns
-------
float
Ratio of standard deviation of the original time-series
and the standard deviation of the detrended version.
References
----------
.. [1] R. J. Hyndman, E. Wang and N. Laptev, "Large-Scale Unusual Time
Series Detection," 2015 IEEE International Conference on Data
Mining Workshop (ICDMW), Atlantic City, NJ, 2015, pp. 1616-1619,
doi: 10.1109/ICDMW.2015.104.
.. [2] Hyndman, R. J., Wang, E., Kang, Y., & Talagala, T. (2018).
tsfeatures: Time series feature extraction. R package version 0.1.
.. [3] Pablo Montero-Manso, George Athanasopoulos, Rob J. Hyndman,
Thiyanga S. Talagala, FFORMA: Feature-based forecast model
averaging, International Journal of Forecasting, Volume 36, Issue
1, 2020, Pages 86-92, ISSN 0169-2070,
https://doi.org/10.1016/j.ijforecast.2019.02.011.
"""
trend = 1.0 - (
np.var(ts_residuals, ddof=ddof)
/ np.var(ts_deseasonalized, ddof=ddof)
)
return min(1.0, max(0.0, trend))
@classmethod
def ft_season_strenght(
cls, ts_residuals: np.ndarray, ts_detrended: np.ndarray, ddof: int = 1
) -> float:
"""Ratio of standard deviations of time-series and after deseasoning.
Parameters
----------
ts_residuals : :obj:`np.ndarray`
Residuals (random noise) of an one-dimensional time-series.
        ts_detrended : :obj:`np.ndarray`
            One-dimensional detrended time-series values.
ddof : int, optional (default=1)
Degrees of freedom for standard deviation.
Returns
-------
float
Ratio of standard deviation of the original time-series
and the standard deviation of the deseasonalized version.
References
----------
.. [1] R. J. Hyndman, E. Wang and N. Laptev, "Large-Scale Unusual Time
Series Detection," 2015 IEEE International Conference on Data
Mining Workshop (ICDMW), Atlantic City, NJ, 2015, pp. 1616-1619,
doi: 10.1109/ICDMW.2015.104.
.. [2] Hyndman, R. J., Wang, E., Kang, Y., & Talagala, T. (2018).
tsfeatures: Time series feature extraction. R package version 0.1.
.. [3] Pablo Montero-Manso, George Athanasopoulos, Rob J. Hyndman,
Thiyanga S. Talagala, FFORMA: Feature-based forecast model
averaging, International Journal of Forecasting, Volume 36, Issue
1, 2020, Pages 86-92, ISSN 0169-2070,
https://doi.org/10.1016/j.ijforecast.2019.02.011.
"""
seas = 1.0 - (
np.var(ts_residuals, ddof=ddof) / np.var(ts_detrended, ddof=ddof)
)
return min(1.0, max(0.0, seas))
@classmethod
def ft_sd_residuals(cls, ts_residuals: np.ndarray, ddof: int = 1) -> float:
"""Compute the standard deviation of the time-series residuals.
Parameters
----------
ts_residuals : :obj:`np.ndarray`
Residuals (random noise) of an one-dimensional time-series.
ddof : int, optional (default=1)
Degrees of freedom for standard deviation.
Returns
-------
float
Standard deviation of the time-series residuals.
"""
return np.std(ts_residuals, ddof=ddof)
@classmethod
def ft_sd_diff(
cls, ts: np.ndarray, num_diff: int = 1, ddof: int = 1
) -> float:
"""Standard deviation of the nth-order differenced time-series.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
num_diff : int, optional (default=1)
Order of the differentiation.
ddof : float, optional (default=1)
Degrees of freedom for standard deviation.
Returns
-------
float
Standard deviation of the nth-order differenced time-series.
"""
return np.std(np.diff(ts, n=num_diff), ddof=ddof)
@classmethod
def ft_sd_sdiff(
cls, ts: np.ndarray, ddof: int = 1, ts_period: t.Optional[int] = None
) -> float:
"""Seasonal standard dev. of the first-order differenced time-series.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
ddof : int, optional (default=1)
Degrees of freedom for standard deviation.
ts_period : int, optional
Time-series period. Used to take advantage of precomputations.
Returns
-------
float
Standard deviation of the first-order difference of the lagged
time-series by its own period.
"""
_ts_period = _period.get_ts_period(ts=ts, ts_period=ts_period)
ts_sdiff = ts[_ts_period:] - ts[:-_ts_period]
return np.std(ts_sdiff, ddof=ddof)
@classmethod
def ft_skewness_residuals(
cls, ts_residuals: np.ndarray, method: int = 3, adjusted: bool = False
) -> float:
"""Compute the skewness of the time-series residuals.
Parameters
----------
ts_residuals : :obj:`np.ndarray`
Residuals (random noise) of an one-dimensional time-series.
method : int, optional (default=3)
            Defines the strategy used to estimate data skewness. This argument
            is used for compatibility with R package `e1071`. The options must
be one of the following:
+--------+-----------------------------------------------+
|Option | Formula |
+--------+-----------------------------------------------+
|1 | Skew_1 = m_3 / m_2**(3/2) |
| | (default of ``scipy.stats``) |
+--------+-----------------------------------------------+
|2 | Skew_2 = Skew_1 * sqrt(n(n-1)) / (n-2) |
+--------+-----------------------------------------------+
|3 | Skew_3 = m_3 / s**3 = Skew_1 ((n-1)/n)**(3/2) |
+--------+-----------------------------------------------+
Where `n` is the number of instances in ``ts``, `s` is the standard
deviation of each attribute in ``ts``, and `m_i` is the ith
statistical momentum of each attribute in ``ts``.
Note that if the selected method is unable to be calculated due to
division by zero, then the first method will be used instead.
adjusted : bool, optional
If True, then the calculations are corrected for statistical bias.
Returns
-------
float
Detrended time-series skewness.
References
----------
.. [1] Donald Michie, David J. Spiegelhalter, Charles C. Taylor, and
John Campbell. Machine Learning, Neural and Statistical
Classification, volume 37. Ellis Horwood Upper Saddle River, 1994.
"""
ts_skew = _summary.sum_skewness(
values=ts_residuals, method=method, bias=not adjusted
)
return float(ts_skew)
@classmethod
def ft_skewness_diff(
cls,
ts: np.ndarray,
num_diff: int = 1,
method: int = 3,
adjusted: bool = False,
) -> float:
"""Skewness of the nth-order differenced time-series.
This method calculates the skewness of the nth-order differenced
time-series (with lag = 1), with `n` being given by `num_diff`.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
num_diff : int, optional (default=1)
Order of the differentiation.
method : int, optional (default=3)
            Defines the strategy used to estimate data skewness. This argument
            is used for compatibility with R package `e1071`. The options must
be one of the following:
+--------+-----------------------------------------------+
|Option | Formula |
+--------+-----------------------------------------------+
|1 | Skew_1 = m_3 / m_2**(3/2) |
| | (default of ``scipy.stats``) |
+--------+-----------------------------------------------+
|2 | Skew_2 = Skew_1 * sqrt(n(n-1)) / (n-2) |
+--------+-----------------------------------------------+
|3 | Skew_3 = m_3 / s**3 = Skew_1 ((n-1)/n)**(3/2) |
+--------+-----------------------------------------------+
Where `n` is the number of instances in ``ts``, `s` is the standard
deviation of each attribute in ``ts``, and `m_i` is the ith
statistical momentum of each attribute in ``ts``.
Note that if the selected method is unable to be calculated due to
division by zero, then the first method will be used instead.
adjusted : bool, optional
If True, then the calculations are corrected for statistical bias.
Returns
-------
float
Skewness of the nth-order differenced time-series
References
----------
.. [1] Donald Michie, David J. Spiegelhalter, Charles C. Taylor, and
John Campbell. Machine Learning, Neural and Statistical
Classification, volume 37. Ellis Horwood Upper Saddle River, 1994.
"""
ts_diff = np.diff(ts, n=num_diff)
ts_skew = _summary.sum_skewness(
values=ts_diff, method=method, bias=not adjusted
)
return float(ts_skew)
@classmethod
def ft_skewness_sdiff(
cls,
ts: np.ndarray,
method: int = 3,
adjusted: bool = False,
ts_period: t.Optional[int] = None,
) -> float:
"""Seasonal skewness of the first-order differenced time-series.
This method calculates the skewness of the first-order differenced
time-series, lagged with its period.
If the time-series is not seasonal, then its period is assumed to be 1.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
method : int, optional (default=3)
            Defines the strategy used to estimate data skewness. This argument
            is used for compatibility with R package `e1071`. The options must
be one of the following:
+--------+-----------------------------------------------+
|Option | Formula |
+--------+-----------------------------------------------+
|1 | Skew_1 = m_3 / m_2**(3/2) |
| | (default of ``scipy.stats``) |
+--------+-----------------------------------------------+
|2 | Skew_2 = Skew_1 * sqrt(n(n-1)) / (n-2) |
+--------+-----------------------------------------------+
|3 | Skew_3 = m_3 / s**3 = Skew_1 ((n-1)/n)**(3/2) |
+--------+-----------------------------------------------+
Where `n` is the number of instances in ``ts``, `s` is the standard
deviation of each attribute in ``ts``, and `m_i` is the ith
statistical momentum of each attribute in ``ts``.
Note that if the selected method is unable to be calculated due to
division by zero, then the first method will be used instead.
adjusted : bool, optional
If True, then the calculations are corrected for statistical bias.
ts_period : int, optional
Time-series period. Used to take advantage of precomputations.
Returns
-------
float
Skewness of the first-order difference of the lagged time-series
by its own period.
"""
_ts_period = _period.get_ts_period(ts=ts, ts_period=ts_period)
ts_sdiff = ts[_ts_period:] - ts[:-_ts_period]
ts_skew = _summary.sum_skewness(
values=ts_sdiff, method=method, bias=not adjusted
)
return float(ts_skew)
@classmethod
def ft_kurtosis_residuals(
cls, ts_residuals: np.ndarray, method: int = 3, adjusted: bool = False
) -> float:
"""Compute the kurtosis of the time-series residuals.
Parameters
----------
ts_residuals : :obj:`np.ndarray`
Residuals (random noise) of an one-dimensional time-series.
method : int, optional (default=3)
            Defines the strategy used to estimate data kurtosis. Used for
total compatibility with R package ``e1071``. This option must be
one of the following:
+--------+-----------------------------------------------+
|Method | Formula |
+--------+-----------------------------------------------+
|1 | Kurt_1 = (m_4 / m_2**2 - 3) |
| | (default of `scipy.stats` package) |
+--------+-----------------------------------------------+
|2 | Kurt_2 = (((n+1) * Kurt_1 + 6) * (n-1) / f_2),|
| | f_2 = ((n-2)*(n-3)) |
+--------+-----------------------------------------------+
|3 | Kurt_3 = (m_4 / s**4 - 3) |
| | = ((Kurt_1+3) * (1 - 1/n)**2 - 3) |
+--------+-----------------------------------------------+
Where `n` is the number of instances in ``ts``, `s` is the standard
deviation of each attribute in ``ts``, and `m_i` is the ith
statistical momentum of each attribute in ``ts``.
Note that if the selected method is unable to be calculated due
to division by zero, then the first method is used instead.
adjusted : bool, optional
If True, then the calculations are corrected for statistical bias.
Returns
-------
float
Detrended time-series kurtosis.
References
----------
.. [1] Donald Michie, David J. Spiegelhalter, Charles C. Taylor, and
John Campbell. Machine Learning, Neural and Statistical
Classification, volume 37. Ellis Horwood Upper Saddle River, 1994.
"""
ts_kurt = _summary.sum_kurtosis(
values=ts_residuals, method=method, bias=not adjusted
)
return float(ts_kurt)
@classmethod
def ft_kurtosis_diff(
cls,
ts: np.ndarray,
num_diff: int = 1,
method: int = 3,
adjusted: bool = False,
) -> float:
"""Kurtosis of the nth-order differenced time-series.
This method calculates the kurtosis of the nth-order differenced
time-series (with lag = 1), with `n` being given by `num_diff`.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
num_diff : int, optional (default=1)
Order of the differentiation.
method : int, optional (default=3)
            Defines the strategy used to estimate data kurtosis. Used for
total compatibility with R package ``e1071``. This option must be
one of the following:
+--------+-----------------------------------------------+
|Method | Formula |
+--------+-----------------------------------------------+
|1 | Kurt_1 = (m_4 / m_2**2 - 3) |
| | (default of `scipy.stats` package) |
+--------+-----------------------------------------------+
|2 | Kurt_2 = (((n+1) * Kurt_1 + 6) * (n-1) / f_2),|
| | f_2 = ((n-2)*(n-3)) |
+--------+-----------------------------------------------+
|3 | Kurt_3 = (m_4 / s**4 - 3) |
| | = ((Kurt_1+3) * (1 - 1/n)**2 - 3) |
+--------+-----------------------------------------------+
Where `n` is the number of instances in ``ts``, `s` is the standard
deviation of each attribute in ``ts``, and `m_i` is the ith
statistical momentum of each attribute in ``ts``.
Note that if the selected method is unable to be calculated due
to division by zero, then the first method is used instead.
adjusted : bool, optional
If True, then the calculations are corrected for statistical bias.
Returns
-------
float
Kurtosis of the nth-order differenced time-series
"""
ts_diff = np.diff(ts, n=num_diff)
ts_kurt = _summary.sum_kurtosis(
values=ts_diff, method=method, bias=not adjusted
)
return float(ts_kurt)
@classmethod
def ft_kurtosis_sdiff(
cls,
ts: np.ndarray,
method: int = 3,
adjusted: bool = False,
ts_period: t.Optional[int] = None,
) -> float:
"""Seasonal kurtosis of the first-order differenced time-series.
This method calculates the kurtosis of the first-order differenced
time-series, lagged with its period.
If the time-series is not seasonal, then its period is assumed to be 1.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
method : int, optional (default=3)
            Defines the strategy used to estimate data kurtosis. Used for
total compatibility with R package ``e1071``. This option must be
one of the following:
+--------+-----------------------------------------------+
|Method | Formula |
+--------+-----------------------------------------------+
|1 | Kurt_1 = (m_4 / m_2**2 - 3) |
| | (default of `scipy.stats` package) |
+--------+-----------------------------------------------+
|2 | Kurt_2 = (((n+1) * Kurt_1 + 6) * (n-1) / f_2),|
| | f_2 = ((n-2)*(n-3)) |
+--------+-----------------------------------------------+
|3 | Kurt_3 = (m_4 / s**4 - 3) |
| | = ((Kurt_1+3) * (1 - 1/n)**2 - 3) |
+--------+-----------------------------------------------+
Where `n` is the number of instances in ``ts``, `s` is the standard
deviation of each attribute in ``ts``, and `m_i` is the ith
statistical momentum of each attribute in ``ts``.
Note that if the selected method is unable to be calculated due
to division by zero, then the first method is used instead.
adjusted : bool, optional
If True, then the calculations are corrected for statistical bias.
ts_period : int, optional
Time-series period. Used to take advantage of precomputations.
Returns
-------
float
Kurtosis of the first-order difference of the lagged time-series
by its own period.
"""
_ts_period = _period.get_ts_period(ts=ts, ts_period=ts_period)
ts_sdiff = ts[_ts_period:] - ts[:-_ts_period]
ts_kurt = _summary.sum_kurtosis(
values=ts_sdiff, method=method, bias=not adjusted
)
return float(ts_kurt)
@classmethod
def ft_exp_max_lyap(
cls, ts: np.ndarray, embed_dim: int = 10, lag: t.Optional[int] = None
) -> float:
"""Estimation of the maximum Lyapunov coefficient.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
embed_dim : int, optional (default=10)
Time-series embed dimension.
lag : int, optional
Lag of the embed.
Returns
-------
float
Estimation of the maximum Lyapunov coefficient.
References
----------
.. [1] H. E. Hurst, The problem of long-term storage in reservoirs,
International Association of Scientific Hydrology. Bulletin, vol.
1, no. 3, pp. 13–27, 1956.
.. [2] H. E. Hurst, A suggested statistical model of some time series
which occur in nature, Nature, vol. 180, p. 494, 1957.
.. [3] R. Weron, Estimating long-range dependence: finite sample
properties and confidence intervals, Physica A: Statistical
Mechanics and its Applications, vol. 312, no. 1, pp. 285–299,
2002.
.. [4] "nolds" Python package: https://pypi.org/project/nolds/
.. [5] Lemke, Christiane & Gabrys, Bogdan. (2010). Meta-learning for
time series forecasting and forecast combination. Neurocomputing.
73. 2006-2016. 10.1016/j.neucom.2009.09.020.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", module="nolds", category=RuntimeWarning
)
max_lyap_exp = nolds.lyap_r(data=ts, lag=lag, emb_dim=embed_dim)
return max_lyap_exp
@classmethod
def ft_exp_hurst(cls, ts: np.ndarray) -> float:
"""Estimation of the Hurst exponent.
Check `nolds.hurst_rs` documentation for a clear explanation about
the underlying function.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
Returns
-------
float
Estimation of the hurst exponent.
References
----------
.. [1] H. E. Hurst, The problem of long-term storage in reservoirs,
International Association of Scientific Hydrology. Bulletin, vol.
1, no. 3, pp. 13–27, 1956.
.. [2] H. E. Hurst, A suggested statistical model of some time series
which occur in nature, Nature, vol. 180, p. 494, 1957.
.. [3] R. Weron, Estimating long-range dependence: finite sample
properties and confidence intervals, Physica A: Statistical
Mechanics and its Applications, vol. 312, no. 1, pp. 285–299,
2002.
.. [4] "nolds" Python package: https://pypi.org/project/nolds/
"""
return nolds.hurst_rs(data=ts)
@classmethod
def ft_dfa(
cls, ts: np.ndarray, pol_order: int = 1, overlap_windows: bool = True
) -> float:
"""Calculate the Hurst parameter from Detrended fluctuation analysis.
Note that the ``Hurst parameter`` is not the same quantity as the
``Hurst exponent``. The Hurst parameter `H` is defined as the quantity
such that the following holds: std(ts, l * n) = l ** H * std(ts, n),
where `ts` is the time-series, `l` is a constant factor, `n` is some
window length of `ts`, and std(ts, k) is the standard deviation of
`ts` within a window of size `k`.
Check `nolds.dfa` documentation for a clear explanation about the
underlying function.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
pol_order : int, optional (default=1)
Order of the detrending polynomial within each window of the
analysis.
overlap_windows : bool, optional (default=True)
If True, overlap the windows used while performing the analysis.
Returns
-------
float
Hurst parameter.
References
----------
.. [1] C.-K. Peng, S. V. Buldyrev, S. Havlin, M. Simons,
H. E. Stanley, and A. L. Goldberger, Mosaic organization of
DNA nucleotides, Physical Review E, vol. 49, no. 2, 1994.
.. [2] R. Hardstone, S.-S. Poil, G. Schiavone, R. Jansen,
V. V. Nikulin, H. D. Mansvelder, and K. Linkenkaer-Hansen,
Detrended fluctuation analysis: A scale-free view on neuronal
oscillations, Frontiers in Physiology, vol. 30, 2012.
.. [3] "nolds" Python package: https://pypi.org/project/nolds/
"""
hurst_coeff = nolds.dfa(ts, order=pol_order, overlap=overlap_windows)
return hurst_coeff
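    # Minimal self-check sketch; the helper below is an editorial addition and
    # not one of the extractor's features (note the missing ``ft_`` prefix).
    # DFA of uncorrelated (white) noise is expected to yield a Hurst parameter
    # close to 0.5, which gives a quick sanity check for ``ft_dfa``.
    @classmethod
    def _dfa_white_noise_sketch(cls) -> float:
        rng = np.random.RandomState(0)
        white_noise = rng.normal(size=1024)
        return cls.ft_dfa(white_noise)  # expected to be close to 0.5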
@classmethod
def ft_corr_dim(cls, ts: np.ndarray, emb_dim: int = 1) -> float:
"""Correlation dimension of the time-series.
It is used the Grassberger-Procaccia algorithm for the correlation
dimension estimation.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
emb_dim : int, optional (default=1)
Embedding dimension to estimate the correlation dimension.
Returns
-------
float
Estimated correlation dimension.
References
----------
.. [1] P. Grassberger and I. Procaccia, Characterization of strange
attractors, Physical review letters, vol. 50, no. 5, p. 346,
1983.
.. [2] P. Grassberger and I. Procaccia, Measuring the strangeness of
strange attractors, Physica D: Nonlinear Phenomena, vol. 9,
no. 1, pp. 189–208, 1983.
.. [3] P. Grassberger, Grassberger-Procaccia algorithm,
Scholarpedia, vol. 2, no. 5, p. 3043.
.. [4] "nolds" Python package. URL: https://pypi.org/project/nolds/
"""
try:
corr_dim = nolds.corr_dim(ts, emb_dim=emb_dim)
except AssertionError:
corr_dim = np.nan
return corr_dim
@classmethod
def ft_opt_boxcox_coef(
cls, ts: np.ndarray, adjust_data: bool = True
) -> float:
"""Estimated optimal box-cox transformation coefficient.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
adjust_data : bool, optional (default=True)
If True, transform the data to y(t) = ts(t) - min(ts) + 1. This is
required for non-positive data. If False, estimate the coefficient
with the original data, possibly failing if the time-series have
non-positive data.
Returns
-------
float
Estimated optimal box-cox transformation coefficient.
References
----------
.. [1] Box, G. E. P. and Cox, D. R. (1964). An analysis of
transformations, Journal of the Royal Statistical Society, Series
B, 26, 211-252.
.. [2] Yanfei Kang, Rob J. Hyndman, Kate Smith-Miles, Visualising
forecasting algorithm performance using time series instance
spaces, International Journal of Forecasting, Volume 33, Issue 2,
2017, Pages 345-358, ISSN 0169-2070,
https://doi.org/10.1016/j.ijforecast.2016.09.004.
"""
if adjust_data:
ts = ts - ts.min() + 1
return scipy.stats.boxcox_normmax(ts, method="mle")
@classmethod
def ft_t_mean(cls, ts: np.ndarray, pcut: float = 0.02) -> float:
"""Trimmed mean of the time-series values.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
pcut : float, optional (default=0.02)
Proportion of outlier cut. Must be in [0, 0.5) range.
Returns
-------
float
Trimmed mean of time-series.
References
----------
.. [1] B.D. Fulcher and N.S. Jones, "hctsa: A Computational Framework
for Automated Time-Series Phenotyping Using Massive Feature
Extraction, Cell Systems 5: 527 (2017).
DOI: 10.1016/j.cels.2017.10.001
.. [2] B.D. Fulcher, M.A. Little, N.S. Jones, "Highly comparative
time-series analysis: the empirical structure of time series and
their methods", J. Roy. Soc. Interface 10(83) 20130048 (2013).
DOI: 10.1098/rsif.2013.0048
"""
return scipy.stats.trim_mean(ts, proportiontocut=pcut)
@classmethod
def ft_spikiness(
cls, ts_residuals: np.ndarray, ddof: int = 1
) -> np.ndarray:
"""Spikiness of the time-series residuals.
        The spikiness of the time-series residuals is the variance of the
        leave-one-out (jackknife) variances of the residuals. Here, in order
        to enable other types of summarization, we return all the jackknifed
        variances instead of their variance.
Parameters
----------
ts : :obj:`np.ndarray`
One-dimensional time-series values.
ddof : int, optional (default=1)
Degrees of freedom to calculate the variances.
Returns
-------
:obj:`np.ndarray`
Spikiness of the time-series residuals.
References
----------
.. [1] R. J. Hyndman, E. Wang and N. Laptev, "Large-Scale Unusual Time
Series Detection," 2015 IEEE International Conference on Data
Mining Workshop (ICDMW), Atlantic City, NJ, 2015, pp. 1616-1619,
doi: 10.1109/ICDMW.2015.104.
.. [2] Hyndman, R. J., Wang, E., Kang, Y., & Talagala, T. (2018).
tsfeatures: Time series feature extraction. R package version 0.1.
.. [3] Pablo Montero-Manso, George Athanasopoulos, Rob J. Hyndman,
Thiyanga S. Talagala, FFORMA: Feature-based forecast model
averaging, International Journal of Forecasting, Volume 36, Issue
1, 2020, Pages 86-92, ISSN 0169-2070,
https://doi.org/10.1016/j.ijforecast.2019.02.011.
"""
vars_ = np.array(
[
np.var(np.delete(ts_residuals, i), ddof=ddof)
for i in np.arange(ts_residuals.size)
]
)
# Note: on the original reference paper, the spikiness is calculated
# as the variance of the 'vars_'. However, to enable summarization,
# here we return the full array.
return vars_
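# Minimal usage sketch; the function name and the choice of summarizing with a
# plain variance are editorial assumptions rather than part of the extractor
# API. The scalar spikiness used in the reference papers can be recovered by
# taking the variance of the array returned by ``ft_spikiness``.
def _spikiness_scalar_sketch():
    rng = np.random.RandomState(0)
    ts_residuals = rng.normal(size=128)
    jackknifed_vars = MFETSGlobalStats.ft_spikiness(ts_residuals)
    return np.var(jackknifed_vars)  # single spikiness value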
| 38.235421
| 79
| 0.529995
|
5878b3deb1f47f8cf5a7d7f8dd37338142b4616c
| 1,897
|
py
|
Python
|
bindings/python/examples/types.py
|
mmccarty/legion
|
30e00fa6016527c4cf60025a461fb7865f8def6b
|
[
"Apache-2.0"
] | 555
|
2015-01-19T07:50:27.000Z
|
2022-03-22T11:35:48.000Z
|
bindings/python/examples/types.py
|
mmccarty/legion
|
30e00fa6016527c4cf60025a461fb7865f8def6b
|
[
"Apache-2.0"
] | 1,157
|
2015-01-07T18:34:23.000Z
|
2022-03-31T19:45:27.000Z
|
bindings/python/examples/types.py
|
mmccarty/legion
|
30e00fa6016527c4cf60025a461fb7865f8def6b
|
[
"Apache-2.0"
] | 145
|
2015-02-03T02:31:42.000Z
|
2022-02-28T12:03:51.000Z
|
#!/usr/bin/env python3
# Copyright 2021 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import pygion
from pygion import task, WD
@task(return_type=pygion.complex64)
def complex_plus_one(x):
return x + 1
@task(privileges=[WD])
def do_local_fills(R):
R.b.fill(False)
R.c64.fill(1+2j)
R.c128.fill(3+4j)
R.f32.fill(3.45)
R.f64.fill(6.78)
R.i8.fill(-1)
R.i16.fill(-123)
R.i32.fill(-123456)
R.i64.fill(-123456789)
R.u8.fill(1)
R.u16.fill(123)
R.u32.fill(123456)
R.u64.fill(123456789)
print('value of R.c64[0] after local fill %s' % R.c64[0])
@task
def main():
R = pygion.Region(
[10],
{
'b': pygion.bool_,
'c64': pygion.complex64,
'c128': pygion.complex128,
'f32': pygion.float32,
'f64': pygion.float64,
'i8': pygion.int8,
'i16': pygion.int16,
'i32': pygion.int32,
'i64': pygion.int64,
'u8': pygion.uint8,
'u16': pygion.uint16,
'u32': pygion.uint32,
'u64': pygion.uint64,
})
do_local_fills(R)
pygion.fill(R, 'c64', 5+6j)
    print('value of R.c64[1] after remote fill %s' % R.c64[1])
x = complex_plus_one(3+4j)
print(x.get())
if __name__ == '__main__':
main()
| 24.960526
| 74
| 0.60991
|
1e9a9a2e679327d9bce896d8099417518451fecc
| 3,858
|
py
|
Python
|
The_Loan_Qualifier_Application/01_Test_the_Calculator/Completed/app.py
|
jpweldon/Module_2_Practice
|
cb546bbfcf5ffb7c6388f854e0eb8873834cfab9
|
[
"MIT"
] | null | null | null |
The_Loan_Qualifier_Application/01_Test_the_Calculator/Completed/app.py
|
jpweldon/Module_2_Practice
|
cb546bbfcf5ffb7c6388f854e0eb8873834cfab9
|
[
"MIT"
] | null | null | null |
The_Loan_Qualifier_Application/01_Test_the_Calculator/Completed/app.py
|
jpweldon/Module_2_Practice
|
cb546bbfcf5ffb7c6388f854e0eb8873834cfab9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Loan Qualifier Application.
This is a command line application to match applicants with qualifying loans.
Example:
$ python app.py
"""
import sys
from pathlib import Path
import fire
import questionary
from qualifier.utils.fileio import load_csv, save_csv
from qualifier.utils.calculators import (
calculate_monthly_debt_ratio,
calculate_loan_to_value_ratio
)
from qualifier.filters.max_loan_size import filter_max_loan_size
from qualifier.filters.credit_score import filter_credit_score
from qualifier.filters.debt_to_income import filter_debt_to_income
from qualifier.filters.loan_to_value import filter_loan_to_value
def load_bank_data():
"""Ask for the file path to the latest banking data and load the CSV file.
Returns:
The bank data from the data rate sheet CSV file.
"""
csvpath = questionary.text("Enter a file path to a rate-sheet (.csv):").ask()
csvpath = Path(csvpath)
if not csvpath.exists():
sys.exit(f"Oops! Can't find this path: {csvpath}")
return load_csv(csvpath)
def get_applicant_info():
"""Prompt dialog to get the applicant's financial information.
Returns:
Returns the applicant's financial information.
"""
credit_score = questionary.text("What's your credit score?").ask()
debt = questionary.text("What's your current amount of monthly debt?").ask()
income = questionary.text("What's your total monthly income?").ask()
loan_amount = questionary.text("What's your desired loan amount?").ask()
home_value = questionary.text("What's your home value?").ask()
credit_score = int(credit_score)
debt = float(debt)
income = float(income)
loan_amount = float(loan_amount)
home_value = float(home_value)
return credit_score, debt, income, loan_amount, home_value
def find_qualifying_loans(bank_data, credit_score, debt, income, loan, home_value):
"""Determine which loans the user qualifies for.
    Loan qualification criteria are based on:
        - Credit Score
        - Loan Size
        - Debt to Income ratio (calculated)
        - Loan to Value ratio (calculated)
Args:
bank_data (list): A list of bank data.
credit_score (int): The applicant's current credit score.
debt (float): The applicant's total monthly debt payments.
income (float): The applicant's total monthly income.
loan (float): The total loan amount applied for.
home_value (float): The estimated home value.
Returns:
A list of the banks willing to underwrite the loan.
"""
# Calculate the monthly debt ratio
monthly_debt_ratio = calculate_monthly_debt_ratio(debt, income)
print(f"The monthly debt to income ratio is {monthly_debt_ratio:.02f}")
# Calculate loan to value ratio
loan_to_value_ratio = calculate_loan_to_value_ratio(loan, home_value)
print(f"The loan to value ratio is {loan_to_value_ratio:.02f}.")
# Run qualification filters
bank_data_filtered = filter_max_loan_size(loan, bank_data)
bank_data_filtered = filter_credit_score(credit_score, bank_data_filtered)
bank_data_filtered = filter_debt_to_income(monthly_debt_ratio, bank_data_filtered)
bank_data_filtered = filter_loan_to_value(loan_to_value_ratio, bank_data_filtered)
print(f"Found {len(bank_data_filtered)} qualifying loans")
return bank_data_filtered
def run():
"""The main function for running the script."""
# Load the latest Bank data
bank_data = load_bank_data()
# Get the applicant's information
credit_score, debt, income, loan_amount, home_value = get_applicant_info()
# Find qualifying loans
qualifying_loans = find_qualifying_loans(
bank_data, credit_score, debt, income, loan_amount, home_value
)
if __name__ == "__main__":
fire.Fire(run)
| 32.420168
| 86
| 0.724469
|
7d7effeb46a34fb666e66222e60d5fe071ccf8f2
| 5,912
|
py
|
Python
|
wiki/web/user.py
|
jzengerling/WikiWiki5
|
72f23187afe03dbb1fde3f2d98c083e7adc7bd06
|
[
"BSD-3-Clause"
] | null | null | null |
wiki/web/user.py
|
jzengerling/WikiWiki5
|
72f23187afe03dbb1fde3f2d98c083e7adc7bd06
|
[
"BSD-3-Clause"
] | 1
|
2018-04-17T22:54:33.000Z
|
2018-04-17T22:54:33.000Z
|
wiki/web/user.py
|
jzengerling/WikiWiki5
|
72f23187afe03dbb1fde3f2d98c083e7adc7bd06
|
[
"BSD-3-Clause"
] | 1
|
2018-04-05T22:07:48.000Z
|
2018-04-05T22:07:48.000Z
|
"""
User classes & helpers
~~~~~~~~~~~~~~~~~~~~~~
"""
import sqlite3
import os
import binascii
import hashlib
from functools import wraps
from flask import current_app
from flask_login import current_user
class UserManager(object):
def __init__(self, defaultAuthenticationMethod = "hash", path="."):
self.defaultAuthenticationMethod = defaultAuthenticationMethod
self.path = path
def database(f):
def _exec(self, *args, **argd):
connection = sqlite3.connect(self.path + '/users.db')
connection.execute("PRAGMA foreign_keys = ON")
cursor = connection.cursor()
returnVal = None
try:
cursor.execute('''create table if not exists Users
(name TEXT PRIMARY KEY, password TEXT,
authenticated INTEGER, active INTEGER,
authentication_method TEXT)''')
cursor.execute('''create table if not exists Roles
(name TEXT, role TEXT, PRIMARY KEY (name, role),
FOREIGN KEY(name) REFERENCES Users(name) ON DELETE CASCADE)''')
returnVal = f(self, cursor, *args, **argd)
            except Exception:
connection.rollback()
raise
else:
                connection.commit()  # commit only if no exception was raised
finally:
connection.close()
return returnVal
return _exec
@database
def add_user(self, cursor, name, password, active=True, roles=[], authentication_method=None):
if self.get_user(name) != None:
return False
dbpassword = ""
if authentication_method is None:
authentication_method = self.defaultAuthenticationMethod
if authentication_method == 'hash':
dbpassword = make_salted_hash(password)
elif authentication_method == 'cleartext':
dbpassword = password
else:
raise NotImplementedError(authentication_method)
cursor.execute('INSERT INTO Users VALUES (?,?,?,?,?)', (name, dbpassword, False, active, authentication_method))
for role in roles:
cursor.execute('INSERT INTO Roles VALUES (?,?)', (name, role))
@database
def get_user(self, cursor, name):
cursor.execute('SELECT * FROM Users WHERE name=?', (name,))
user = cursor.fetchone()
if user is None:
return None
else:
cursor.execute('SELECT * FROM Roles WHERE name=?', (name,))
roleRows = cursor.fetchall()
roles=[]
for role in roleRows:
roles.append(role[1])
data = {};
data["password"] = user[1]
data["authenticated"] = user[2]
data["active"] = user[3]
data["authentication_method"] = user[4]
data["roles"] = roles
return User(self, user[0], data)
@database
def delete_user(self, cursor, name):
cursor.execute('DELETE FROM Users WHERE name=?', (name,))
if cursor.rowcount == 0:
return False
return True
@database
def update(self, cursor, name, userdata):
pw = userdata["password"]
auth = userdata["authenticated"]
active = userdata["active"]
authmethod = userdata["authentication_method"]
roles = userdata["roles"]
cursor.execute('''
UPDATE Users
SET password = ?, authenticated = ?,
active = ?, authentication_method = ?
WHERE name = ?
''', (pw, auth, active, authmethod, name))
cursor.execute('DELETE FROM Roles WHERE name=?', (name,))
for role in roles:
cursor.execute('INSERT INTO Roles VALUES (?,?)', (name, role))
class User(object):
def __init__(self, manager, name, data):
self.manager = manager
self.name = name
self.data = data
def get(self, option):
return self.data.get(option)
def set(self, option, value):
self.data[option] = value
self.save()
def save(self):
self.manager.update(self.name, self.data)
def is_authenticated(self):
return self.data.get('authenticated')
def is_active(self):
return self.data.get('active')
def is_anonymous(self):
return False
def get_id(self):
return self.name
def check_password(self, password):
"""Return True, return False, or raise NotImplementedError if the
authentication_method is missing or unknown."""
authentication_method = self.data.get('authentication_method', None)
if authentication_method is None:
            authentication_method = self.manager.defaultAuthenticationMethod
        # Dispatch on the stored authentication method, as in UserManager.add_user.
if authentication_method == 'hash':
            result = check_hashed_password(password, self.get('password'))
elif authentication_method == 'cleartext':
result = (self.get('password') == password)
else:
raise NotImplementedError(authentication_method)
return result
def make_salted_hash(password, salt=None):
if not salt:
salt = os.urandom(64)
d = hashlib.sha512()
d.update(salt[:32])
d.update(password)
d.update(salt[32:])
return binascii.hexlify(salt) + d.hexdigest()
def check_hashed_password(password, salted_hash):
salt = binascii.unhexlify(salted_hash[:128])
return make_salted_hash(password, salt) == salted_hash
def protect(f):
@wraps(f)
def wrapper(*args, **kwargs):
if current_app.config.get('PRIVATE') and not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
return f(*args, **kwargs)
return wrapper
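# A minimal usage sketch of UserManager under Python 2 (which the password
# hashing helpers above assume, since they hash byte strings); the directory,
# user name and password below are placeholders chosen for illustration.
def _example_usage():
    import tempfile
    workdir = tempfile.mkdtemp()
    manager = UserManager(path=workdir)
    # Passwords are stored as salted SHA-512 hashes by default ("hash" method).
    manager.add_user('alice', 's3cret', roles=['admin'])
    user = manager.get_user('alice')
    assert user.get_id() == 'alice'
    assert user.check_password('s3cret')
    assert not user.check_password('wrong')
    manager.delete_user('alice')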
| 30.791667
| 120
| 0.58931
|
a873f49b61c91fd515e237dbd92683bb01b859e2
| 103
|
py
|
Python
|
time_utils.py
|
nicktien007/Nick.Udic.Kcm
|
ba5fb48fc1b2cfe1eb40085f0165d5d7c172c627
|
[
"MIT"
] | null | null | null |
time_utils.py
|
nicktien007/Nick.Udic.Kcm
|
ba5fb48fc1b2cfe1eb40085f0165d5d7c172c627
|
[
"MIT"
] | null | null | null |
time_utils.py
|
nicktien007/Nick.Udic.Kcm
|
ba5fb48fc1b2cfe1eb40085f0165d5d7c172c627
|
[
"MIT"
] | null | null | null |
from datetime import datetime
def show_current_time():
return datetime.now().strftime("%H:%M:%S")
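# A trivial usage sketch: print the current wall-clock time, e.g. "14:05:09".
if __name__ == "__main__":
    print(show_current_time())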
| 20.6
| 46
| 0.718447
|
a80e27b4fe643e0f6d40076ddf5c6297504e056c
| 925
|
py
|
Python
|
addons/oejia_wx/models/wx_config.py
|
marionumza/vocal_v12
|
480990e919c9410903e06e7813ee92800bd6a569
|
[
"Unlicense"
] | null | null | null |
addons/oejia_wx/models/wx_config.py
|
marionumza/vocal_v12
|
480990e919c9410903e06e7813ee92800bd6a569
|
[
"Unlicense"
] | null | null | null |
addons/oejia_wx/models/wx_config.py
|
marionumza/vocal_v12
|
480990e919c9410903e06e7813ee92800bd6a569
|
[
"Unlicense"
] | 1
|
2021-05-05T07:59:08.000Z
|
2021-05-05T07:59:08.000Z
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from .menu_about_models import ACTION_OPTION
class WxConfig(models.Model):
_name = 'wx.config'
    _description = u'公众号配置'  # "Official account configuration"
    action = fields.Reference(string='关注后的自动回复', selection=ACTION_OPTION)  # "Automatic reply after subscribing"
@api.multi
def write(self, vals):
result = super(WxConfig, self).write(vals)
from ..controllers import client
entry = client.wxenv(self.env)
if self.action:
entry.subscribe_auto_msg = self.action.get_wx_reply()
return result
@api.one
def _compute_handler_url(self):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
self.handler_url = '%s/app_handler'%base_url
@api.model
def get_cur(self):
return self.env.ref('oejia_wx.wx_config_data_1')
@api.multi
def name_get(self):
        return [(e.id, u'公众号配置') for e in self]  # "Official account configuration"
| 25.694444
| 83
| 0.648649
|
3616917c3113516a728b3ef1f7e7a2593cc1c35e
| 1,299
|
py
|
Python
|
catalog/admin.py
|
omegatrix/django-local-library-tutorial
|
2f6d05d88cba19e9bf1c4d66742e63edb609c461
|
[
"MIT"
] | null | null | null |
catalog/admin.py
|
omegatrix/django-local-library-tutorial
|
2f6d05d88cba19e9bf1c4d66742e63edb609c461
|
[
"MIT"
] | null | null | null |
catalog/admin.py
|
omegatrix/django-local-library-tutorial
|
2f6d05d88cba19e9bf1c4d66742e63edb609c461
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Author, Book, BookInstance, Genre, Language
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
list_display = (
"last_name",
"first_name",
"date_of_birth",
"date_of_death",
)
fields = [
"first_name",
"last_name",
(
"date_of_birth",
"date_of_death",
),
]
# Register the admin class with the associated model
# admin.site.register(Author, AuthorAdmin)
class BooksInstanceInline(admin.TabularInline):
model = BookInstance
extra = 0
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
list_display = (
"title",
"author",
"display_genre",
)
inlines = [
BooksInstanceInline,
]
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
list_display = ("book", "status", "borrower", "due_back", "id")
list_filter = ("status", "due_back")
fieldsets = (
(None, {"fields": ("book", "imprint", "id")}),
("Availability", {"fields": ("status", "due_back", "borrower")}),
)
# admin.site.register(Author)
# admin.site.register(Book)
# admin.site.register(BookInstance)
admin.site.register(Genre)
admin.site.register(Language)
| 21.295082
| 73
| 0.620477
|
f2a79941032a5b995adfb8c69d35e2a241d1cb9a
| 17,103
|
py
|
Python
|
grr/client/vfs_handlers/registry.py
|
theGreenJedi/grr
|
d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39
|
[
"Apache-2.0"
] | null | null | null |
grr/client/vfs_handlers/registry.py
|
theGreenJedi/grr
|
d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39
|
[
"Apache-2.0"
] | null | null | null |
grr/client/vfs_handlers/registry.py
|
theGreenJedi/grr
|
d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Implement access to the windows registry."""
import ctypes
import ctypes.wintypes
import exceptions
import os
import stat
import StringIO
import _winreg
from grr.client import vfs
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
# Difference between 1 Jan 1601 and 1 Jan 1970, in seconds.
WIN_UNIX_DIFF_MSECS = 11644473600
# KEY_READ = STANDARD_RIGHTS_READ | KEY_QUERY_VALUE |
# KEY_ENUMERATE_SUB_KEYS | KEY_NOTIFY
# Also see: http://msdn.microsoft.com/en-us/library/windows/desktop/
# ms724878(v=vs.85).aspx
KEY_READ = 0x20019
def CanonicalPathToLocalPath(path):
path = path.replace("/", "\\")
return path.strip("\\")
# _winreg is broken on Python 2.x and doesn't support unicode registry values.
# We provide some replacement functions here.
advapi32 = ctypes.windll.advapi32
LPDWORD = ctypes.POINTER(ctypes.wintypes.DWORD)
LPBYTE = ctypes.POINTER(ctypes.wintypes.BYTE)
ERROR_SUCCESS = 0
ERROR_MORE_DATA = 234
class FileTime(ctypes.Structure):
_fields_ = [("dwLowDateTime", ctypes.wintypes.DWORD),
("dwHighDateTime", ctypes.wintypes.DWORD)]
RegCloseKey = advapi32.RegCloseKey # pylint: disable=g-bad-name
RegCloseKey.restype = ctypes.c_long
RegCloseKey.argtypes = [ctypes.c_void_p]
RegEnumKeyEx = advapi32.RegEnumKeyExW # pylint: disable=g-bad-name
RegEnumKeyEx.restype = ctypes.c_long
RegEnumKeyEx.argtypes = [ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.c_wchar_p, LPDWORD, LPDWORD, ctypes.c_wchar_p,
LPDWORD, ctypes.POINTER(FileTime)]
RegEnumValue = advapi32.RegEnumValueW # pylint: disable=g-bad-name
RegEnumValue.restype = ctypes.c_long
RegEnumValue.argtypes = [ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.c_wchar_p, LPDWORD, LPDWORD, LPDWORD, LPBYTE,
LPDWORD]
RegOpenKeyEx = advapi32.RegOpenKeyExW # pylint: disable=g-bad-name
RegOpenKeyEx.restype = ctypes.c_long
RegOpenKeyEx.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_ulong,
ctypes.c_ulong, ctypes.POINTER(ctypes.c_void_p)]
RegQueryInfoKey = advapi32.RegQueryInfoKeyW # pylint: disable=g-bad-name
RegQueryInfoKey.restype = ctypes.c_long
RegQueryInfoKey.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD,
LPDWORD, LPDWORD, LPDWORD, LPDWORD, LPDWORD,
LPDWORD, LPDWORD, ctypes.POINTER(FileTime)]
RegQueryValueEx = advapi32.RegQueryValueExW # pylint: disable=g-bad-name
RegQueryValueEx.restype = ctypes.c_long
RegQueryValueEx.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD,
LPBYTE, LPDWORD]
class KeyHandle(object):
"""A wrapper class for a registry key handle."""
def __init__(self, value=0):
if value:
self.handle = ctypes.c_void_p(value)
else:
self.handle = ctypes.c_void_p()
def __enter__(self):
return self
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
self.Close()
return False
def Close(self):
if not self.handle:
return
if RegCloseKey is None:
return # Globals become None during exit.
rc = RegCloseKey(self.handle)
self.handle = ctypes.c_void_p()
if rc != ERROR_SUCCESS:
raise ctypes.WinError(2)
def __del__(self):
self.Close()
def OpenKey(key, sub_key):
"""This calls the Windows OpenKeyEx function in a Unicode safe way."""
new_key = KeyHandle()
# Don't use KEY_WOW64_64KEY (0x100) since it breaks on Windows 2000
rc = RegOpenKeyEx(key.handle, sub_key, 0, KEY_READ, ctypes.cast(
ctypes.byref(new_key.handle), ctypes.POINTER(ctypes.c_void_p)))
if rc != ERROR_SUCCESS:
raise ctypes.WinError(2)
return new_key
def CloseKey(key):
rc = RegCloseKey(key)
if rc != ERROR_SUCCESS:
raise ctypes.WinError(2)
def QueryInfoKey(key):
"""This calls the Windows RegQueryInfoKey function in a Unicode safe way."""
null = LPDWORD()
num_sub_keys = ctypes.wintypes.DWORD()
num_values = ctypes.wintypes.DWORD()
ft = FileTime()
rc = RegQueryInfoKey(key.handle, ctypes.c_wchar_p(), null, null,
ctypes.byref(num_sub_keys), null, null,
ctypes.byref(num_values), null, null, null,
ctypes.byref(ft))
if rc != ERROR_SUCCESS:
raise ctypes.WinError(2)
return (num_sub_keys.value, num_values.value, ft.dwLowDateTime
| (ft.dwHighDateTime << 32))
def QueryValueEx(key, value_name):
"""This calls the Windows QueryValueEx function in a Unicode safe way."""
size = 256
data_type = ctypes.wintypes.DWORD()
while True:
tmp_size = ctypes.wintypes.DWORD(size)
buf = ctypes.create_string_buffer(size)
rc = RegQueryValueEx(key.handle, value_name, LPDWORD(),
ctypes.byref(data_type), ctypes.cast(buf, LPBYTE),
ctypes.byref(tmp_size))
if rc != ERROR_MORE_DATA:
break
# We limit the size here to ~10 MB so the response doesn't get too big.
if size > 10 * 1024 * 1024:
raise exceptions.WindowsError("Value too big to be read by GRR.")
size *= 2
if rc != ERROR_SUCCESS:
raise ctypes.WinError(2)
return (Reg2Py(buf, tmp_size.value, data_type.value), data_type.value)
def EnumKey(key, index):
"""This calls the Windows RegEnumKeyEx function in a Unicode safe way."""
buf = ctypes.create_unicode_buffer(257)
length = ctypes.wintypes.DWORD(257)
rc = RegEnumKeyEx(key.handle, index, ctypes.cast(buf, ctypes.c_wchar_p),
ctypes.byref(length), LPDWORD(), ctypes.c_wchar_p(),
LPDWORD(), ctypes.POINTER(FileTime)())
if rc != 0:
raise ctypes.WinError(2)
return ctypes.wstring_at(buf, length.value).rstrip(u"\x00")
def EnumValue(key, index):
"""This calls the Windows RegEnumValue function in a Unicode safe way."""
null = ctypes.POINTER(ctypes.wintypes.DWORD)()
value_size = ctypes.wintypes.DWORD()
data_size = ctypes.wintypes.DWORD()
rc = RegQueryInfoKey(key.handle, ctypes.c_wchar_p(), null, null, null, null,
null, null, ctypes.byref(value_size),
ctypes.byref(data_size), null,
ctypes.POINTER(FileTime)())
if rc != ERROR_SUCCESS:
raise ctypes.WinError(2)
value_size.value += 1
data_size.value += 1
value = ctypes.create_unicode_buffer(value_size.value)
while True:
data = ctypes.create_string_buffer(data_size.value)
tmp_value_size = ctypes.wintypes.DWORD(value_size.value)
tmp_data_size = ctypes.wintypes.DWORD(data_size.value)
data_type = ctypes.wintypes.DWORD()
rc = RegEnumValue(key.handle, index, ctypes.cast(value, ctypes.c_wchar_p),
ctypes.byref(tmp_value_size), null,
ctypes.byref(data_type), ctypes.cast(data, LPBYTE),
ctypes.byref(tmp_data_size))
if rc != ERROR_MORE_DATA:
break
data_size.value *= 2
if rc != ERROR_SUCCESS:
raise ctypes.WinError(2)
return (value.value, Reg2Py(data, tmp_data_size.value, data_type.value),
data_type.value)
def Reg2Py(data, size, data_type):
if data_type == _winreg.REG_DWORD:
if size == 0:
return 0
return ctypes.cast(data, ctypes.POINTER(ctypes.c_int)).contents.value
elif data_type == _winreg.REG_SZ or data_type == _winreg.REG_EXPAND_SZ:
return ctypes.wstring_at(data, size // 2).rstrip(u"\x00")
elif data_type == _winreg.REG_MULTI_SZ:
return ctypes.wstring_at(data, size // 2).rstrip(u"\x00").split(u"\x00")
else:
if size == 0:
return None
return ctypes.string_at(data, size)
class RegistryFile(vfs.VFSHandler):
"""Emulate registry access through the VFS."""
supported_pathtype = rdf_paths.PathSpec.PathType.REGISTRY
auto_register = True
value = None
value_type = _winreg.REG_NONE
hive = None
hive_name = None
last_modified = 0
is_directory = True
fd = None
# Maps the registry types to protobuf enums
registry_map = {
_winreg.REG_NONE: rdf_client.StatEntry.RegistryType.REG_NONE,
_winreg.REG_SZ: rdf_client.StatEntry.RegistryType.REG_SZ,
_winreg.REG_EXPAND_SZ: rdf_client.StatEntry.RegistryType.REG_EXPAND_SZ,
_winreg.REG_BINARY: rdf_client.StatEntry.RegistryType.REG_BINARY,
_winreg.REG_DWORD: rdf_client.StatEntry.RegistryType.REG_DWORD,
_winreg.REG_DWORD_LITTLE_ENDIAN: (
rdf_client.StatEntry.RegistryType.REG_DWORD_LITTLE_ENDIAN),
_winreg.REG_DWORD_BIG_ENDIAN: (
rdf_client.StatEntry.RegistryType.REG_DWORD_BIG_ENDIAN),
_winreg.REG_LINK: rdf_client.StatEntry.RegistryType.REG_LINK,
_winreg.REG_MULTI_SZ: rdf_client.StatEntry.RegistryType.REG_MULTI_SZ,
}
def __init__(self,
base_fd,
pathspec=None,
progress_callback=None,
full_pathspec=None):
super(RegistryFile, self).__init__(base_fd,
pathspec=pathspec,
full_pathspec=full_pathspec,
progress_callback=progress_callback)
if base_fd is None:
self.pathspec.Append(pathspec)
elif base_fd.IsDirectory():
self.pathspec.last.path = utils.JoinPath(self.pathspec.last.path,
pathspec.path)
else:
raise IOError("Registry handler can not be stacked on another handler.")
path_components = filter(None, self.pathspec.last.path.split("/"))
try:
# The first component MUST be a hive
self.hive_name = path_components[0]
self.hive = KeyHandle(getattr(_winreg, self.hive_name))
except AttributeError:
raise IOError("Unknown hive name %s" % self.hive_name)
except IndexError:
# A hive is not specified, we just list all the hives.
return
# Normalize the path casing if needed
self.key_name = "/".join(path_components[1:])
self.local_path = CanonicalPathToLocalPath(self.key_name)
try:
# Maybe its a value
key_name, value_name = os.path.split(self.local_path)
with OpenKey(self.hive, key_name) as key:
self.value, self.value_type = QueryValueEx(key, value_name)
# We are a value and therefore not a directory.
self.is_directory = False
except exceptions.WindowsError:
try:
# Try to get the default value for this key
with OpenKey(self.hive, self.local_path) as key:
# Check for default value.
try:
self.value, self.value_type = QueryValueEx(key, "")
except exceptions.WindowsError:
# Empty default value
self.value = ""
self.value_type = _winreg.REG_NONE
except exceptions.WindowsError:
raise IOError("Unable to open key %s" % self.key_name)
def Stat(self):
return self._Stat("", self.value, self.value_type)
def _Stat(self, name, value, value_type):
response = rdf_client.StatEntry()
response_pathspec = self.pathspec.Copy()
# No matter how we got here, there is no need to do case folding from now on
# since this is the exact filename casing.
response_pathspec.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL
response_pathspec.last.path = utils.JoinPath(response_pathspec.last.path,
name)
response.pathspec = response_pathspec
if self.IsDirectory():
response.st_mode = stat.S_IFDIR
else:
response.st_mode = stat.S_IFREG
response.st_mtime = self.last_modified
response.st_size = len(utils.SmartStr(value))
if value_type is not None:
response.registry_type = self.registry_map.get(value_type, 0)
response.registry_data = rdf_protodict.DataBlob().SetValue(value)
return response
def _Walk(self, depth=0, hive=None, hive_name=None, top=""):
if depth < 0:
return
if hive is None:
hives = sorted([(name, KeyHandle(getattr(_winreg, name)))
for name in dir(_winreg) if name.startswith("HKEY_")])
yield "", [name for name, _ in hives], []
for new_hive_name, new_hive in hives:
for tup in self._Walk(depth - 1, new_hive, new_hive_name):
yield tup
else:
keys, value_names = [], []
try:
with OpenKey(hive, top[1:]) as key:
(number_of_keys, number_of_values,
unused_last_modified) = QueryInfoKey(key)
# First keys
for i in xrange(number_of_keys):
try:
keys.append(EnumKey(key, i))
except exceptions.WindowsError:
pass
keys.sort()
# Now Values
for i in xrange(number_of_values):
try:
name, unused_value, unused_value_type = EnumValue(key, i)
value_names.append(name)
except exceptions.WindowsError:
pass
value_names.sort()
except exceptions.WindowsError:
pass
yield "%s%s" % (hive_name, top), keys, value_names
for key in keys:
for tup in self._Walk(depth - 1, hive, hive_name,
r"%s\%s" % (top, key)):
yield tup
def RecursiveListNames(self, depth=0):
if not self.IsDirectory():
return iter(())
if self.hive is None:
return self._Walk(depth)
return self._Walk(depth, self.hive, self.hive_name, self.local_path)
def ListNames(self):
"""List the names of all keys and values."""
if not self.IsDirectory():
return
# Handle the special case where no hive is specified and just list the hives
if self.hive is None:
for name in dir(_winreg):
if name.startswith("HKEY_"):
yield name
return
try:
with OpenKey(self.hive, self.local_path) as key:
(self.number_of_keys, self.number_of_values,
self.last_modified) = QueryInfoKey(key)
self.last_modified = self.last_modified / 10000000 - WIN_UNIX_DIFF_MSECS
# First keys
for i in xrange(self.number_of_keys):
try:
yield EnumKey(key, i)
except exceptions.WindowsError:
pass
# Now Values
for i in xrange(self.number_of_values):
try:
name, unused_value, unused_value_type = EnumValue(key, i)
yield name
except exceptions.WindowsError:
pass
except exceptions.WindowsError as e:
raise IOError("Unable to list key %s: %s" % (self.key_name, e))
def ListFiles(self):
"""A generator of all keys and values."""
if not self.IsDirectory():
return
if self.hive is None:
for name in dir(_winreg):
if name.startswith("HKEY_"):
response = rdf_client.StatEntry(st_mode=stat.S_IFDIR)
response_pathspec = self.pathspec.Copy()
response_pathspec.last.path = utils.JoinPath(
response_pathspec.last.path, name)
response.pathspec = response_pathspec
yield response
return
try:
with OpenKey(self.hive, self.local_path) as key:
(self.number_of_keys, self.number_of_values,
self.last_modified) = QueryInfoKey(key)
self.last_modified = self.last_modified / 10000000 - WIN_UNIX_DIFF_MSECS
# First keys - These will look like directories.
for i in xrange(self.number_of_keys):
try:
name = EnumKey(key, i)
key_name = utils.JoinPath(self.local_path, name)
try:
# Store the default value in the stat response for values.
with OpenKey(self.hive, key_name) as subkey:
value, value_type = QueryValueEx(subkey, "")
except exceptions.WindowsError:
value, value_type = None, None
response = self._Stat(name, value, value_type)
# Keys look like Directories in the VFS.
response.st_mode = stat.S_IFDIR
yield response
except exceptions.WindowsError:
pass
# Now Values - These will look like files.
for i in xrange(self.number_of_values):
try:
name, value, value_type = EnumValue(key, i)
response = self._Stat(name, value, value_type)
# Values look like files in the VFS.
response.st_mode = stat.S_IFREG
yield response
except exceptions.WindowsError:
pass
except exceptions.WindowsError as e:
raise IOError("Unable to list key %s: %s" % (self.key_name, e))
def IsDirectory(self):
return self.is_directory
def Read(self, length):
if not self.fd:
self.fd = StringIO.StringIO(utils.SmartStr(self.value))
return self.fd.read(length)
def Seek(self, offset, whence=0):
if not self.fd:
self.fd = StringIO.StringIO(utils.SmartStr(self.value))
return self.fd.seek(offset, whence)
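# A minimal sketch of the Unicode-safe wrappers defined above, assuming a
# Windows host running Python 2 (as this module requires); the key and value
# names are common examples and are not guaranteed to exist on every system.
def _example_read_product_name():
    hive = KeyHandle(_winreg.HKEY_LOCAL_MACHINE)
    with OpenKey(hive, u"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion") as key:
        # QueryValueEx returns (value, value_type); QueryInfoKey returns
        # (number_of_subkeys, number_of_values, last_modified_filetime).
        product_name, unused_value_type = QueryValueEx(key, u"ProductName")
        num_sub_keys, num_values, unused_modified = QueryInfoKey(key)
        return product_name, num_sub_keys, num_values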
| 32.639313
| 80
| 0.651991
|
2db7ab26bc06e67fb858f1ebabea79c6eb2b43d2
| 37,483
|
py
|
Python
|
sdk/python/pulumi_azure_native/securityinsights/v20210301preview/watchlist.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/securityinsights/v20210301preview/watchlist.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/securityinsights/v20210301preview/watchlist.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WatchlistArgs', 'Watchlist']
@pulumi.input_type
class WatchlistArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
items_search_key: pulumi.Input[str],
operational_insights_resource_provider: pulumi.Input[str],
provider: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
source: pulumi.Input[Union[str, 'Source']],
workspace_name: pulumi.Input[str],
content_type: Optional[pulumi.Input[str]] = None,
created: Optional[pulumi.Input[str]] = None,
created_by: Optional[pulumi.Input['WatchlistUserInfoArgs']] = None,
default_duration: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
is_deleted: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
number_of_lines_to_skip: Optional[pulumi.Input[int]] = None,
raw_content: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
updated: Optional[pulumi.Input[str]] = None,
updated_by: Optional[pulumi.Input['WatchlistUserInfoArgs']] = None,
upload_status: Optional[pulumi.Input[str]] = None,
watchlist_alias: Optional[pulumi.Input[str]] = None,
watchlist_id: Optional[pulumi.Input[str]] = None,
watchlist_items_count: Optional[pulumi.Input[int]] = None,
watchlist_type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Watchlist resource.
:param pulumi.Input[str] display_name: The display name of the watchlist
:param pulumi.Input[str] items_search_key: The search key is used to optimize query performance when using watchlists for joins with other data. For example, enable a column with IP addresses to be the designated SearchKey field, then use this field as the key field when joining to other event data by IP address.
:param pulumi.Input[str] operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param pulumi.Input[str] provider: The provider of the watchlist
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Union[str, 'Source']] source: The source of the watchlist
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[str] content_type: The content type of the raw content. Example : text/csv or text/tsv
:param pulumi.Input[str] created: The time the watchlist was created
:param pulumi.Input['WatchlistUserInfoArgs'] created_by: Describes a user that created the watchlist
:param pulumi.Input[str] default_duration: The default duration of a watchlist (in ISO 8601 duration format)
:param pulumi.Input[str] description: A description of the watchlist
:param pulumi.Input[str] etag: Etag of the azure resource
:param pulumi.Input[bool] is_deleted: A flag that indicates if the watchlist is deleted or not
:param pulumi.Input[Sequence[pulumi.Input[str]]] labels: List of labels relevant to this watchlist
:param pulumi.Input[int] number_of_lines_to_skip: The number of lines in a csv/tsv content to skip before the header
        :param pulumi.Input[str] raw_content: The raw content that represents the watchlist items to create. For csv/tsv content types, this is the content of the file that will be parsed by the endpoint
:param pulumi.Input[str] tenant_id: The tenantId where the watchlist belongs to
:param pulumi.Input[str] updated: The last time the watchlist was updated
:param pulumi.Input['WatchlistUserInfoArgs'] updated_by: Describes a user that updated the watchlist
        :param pulumi.Input[str] upload_status: The status of the Watchlist upload: New, InProgress or Complete. Please note: while a Watchlist upload status is InProgress, the Watchlist cannot be deleted
:param pulumi.Input[str] watchlist_alias: The alias of the watchlist
:param pulumi.Input[str] watchlist_id: The id (a Guid) of the watchlist
:param pulumi.Input[int] watchlist_items_count: The number of Watchlist Items in the Watchlist
:param pulumi.Input[str] watchlist_type: The type of the watchlist
"""
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "items_search_key", items_search_key)
pulumi.set(__self__, "operational_insights_resource_provider", operational_insights_resource_provider)
pulumi.set(__self__, "provider", provider)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "source", source)
pulumi.set(__self__, "workspace_name", workspace_name)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if created is not None:
pulumi.set(__self__, "created", created)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if default_duration is not None:
pulumi.set(__self__, "default_duration", default_duration)
if description is not None:
pulumi.set(__self__, "description", description)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if is_deleted is not None:
pulumi.set(__self__, "is_deleted", is_deleted)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if number_of_lines_to_skip is not None:
pulumi.set(__self__, "number_of_lines_to_skip", number_of_lines_to_skip)
if raw_content is not None:
pulumi.set(__self__, "raw_content", raw_content)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if updated is not None:
pulumi.set(__self__, "updated", updated)
if updated_by is not None:
pulumi.set(__self__, "updated_by", updated_by)
if upload_status is not None:
pulumi.set(__self__, "upload_status", upload_status)
if watchlist_alias is not None:
pulumi.set(__self__, "watchlist_alias", watchlist_alias)
if watchlist_id is not None:
pulumi.set(__self__, "watchlist_id", watchlist_id)
if watchlist_items_count is not None:
pulumi.set(__self__, "watchlist_items_count", watchlist_items_count)
if watchlist_type is not None:
pulumi.set(__self__, "watchlist_type", watchlist_type)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
The display name of the watchlist
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="itemsSearchKey")
def items_search_key(self) -> pulumi.Input[str]:
"""
The search key is used to optimize query performance when using watchlists for joins with other data. For example, enable a column with IP addresses to be the designated SearchKey field, then use this field as the key field when joining to other event data by IP address.
"""
return pulumi.get(self, "items_search_key")
@items_search_key.setter
def items_search_key(self, value: pulumi.Input[str]):
pulumi.set(self, "items_search_key", value)
@property
@pulumi.getter(name="operationalInsightsResourceProvider")
def operational_insights_resource_provider(self) -> pulumi.Input[str]:
"""
The namespace of workspaces resource provider- Microsoft.OperationalInsights.
"""
return pulumi.get(self, "operational_insights_resource_provider")
@operational_insights_resource_provider.setter
def operational_insights_resource_provider(self, value: pulumi.Input[str]):
pulumi.set(self, "operational_insights_resource_provider", value)
@property
@pulumi.getter
def provider(self) -> pulumi.Input[str]:
"""
The provider of the watchlist
"""
return pulumi.get(self, "provider")
@provider.setter
def provider(self, value: pulumi.Input[str]):
pulumi.set(self, "provider", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def source(self) -> pulumi.Input[Union[str, 'Source']]:
"""
The source of the watchlist
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: pulumi.Input[Union[str, 'Source']]):
pulumi.set(self, "source", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
The name of the workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
The content type of the raw content. Example : text/csv or text/tsv
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def created(self) -> Optional[pulumi.Input[str]]:
"""
The time the watchlist was created
"""
return pulumi.get(self, "created")
@created.setter
def created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created", value)
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[pulumi.Input['WatchlistUserInfoArgs']]:
"""
Describes a user that created the watchlist
"""
return pulumi.get(self, "created_by")
@created_by.setter
def created_by(self, value: Optional[pulumi.Input['WatchlistUserInfoArgs']]):
pulumi.set(self, "created_by", value)
@property
@pulumi.getter(name="defaultDuration")
def default_duration(self) -> Optional[pulumi.Input[str]]:
"""
The default duration of a watchlist (in ISO 8601 duration format)
"""
return pulumi.get(self, "default_duration")
@default_duration.setter
def default_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_duration", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the watchlist
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter(name="isDeleted")
def is_deleted(self) -> Optional[pulumi.Input[bool]]:
"""
A flag that indicates if the watchlist is deleted or not
"""
return pulumi.get(self, "is_deleted")
@is_deleted.setter
def is_deleted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_deleted", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of labels relevant to this watchlist
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="numberOfLinesToSkip")
def number_of_lines_to_skip(self) -> Optional[pulumi.Input[int]]:
"""
The number of lines in a csv/tsv content to skip before the header
"""
return pulumi.get(self, "number_of_lines_to_skip")
@number_of_lines_to_skip.setter
def number_of_lines_to_skip(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "number_of_lines_to_skip", value)
@property
@pulumi.getter(name="rawContent")
def raw_content(self) -> Optional[pulumi.Input[str]]:
"""
        The raw content that represents the watchlist items to create. For csv/tsv content types, this is the content of the file that will be parsed by the endpoint
"""
return pulumi.get(self, "raw_content")
@raw_content.setter
def raw_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "raw_content", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The tenantId where the watchlist belongs to
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter
def updated(self) -> Optional[pulumi.Input[str]]:
"""
The last time the watchlist was updated
"""
return pulumi.get(self, "updated")
@updated.setter
def updated(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "updated", value)
@property
@pulumi.getter(name="updatedBy")
def updated_by(self) -> Optional[pulumi.Input['WatchlistUserInfoArgs']]:
"""
Describes a user that updated the watchlist
"""
return pulumi.get(self, "updated_by")
@updated_by.setter
def updated_by(self, value: Optional[pulumi.Input['WatchlistUserInfoArgs']]):
pulumi.set(self, "updated_by", value)
@property
@pulumi.getter(name="uploadStatus")
def upload_status(self) -> Optional[pulumi.Input[str]]:
"""
        The status of the Watchlist upload: New, InProgress or Complete. Please note: while a Watchlist upload status is InProgress, the Watchlist cannot be deleted
"""
return pulumi.get(self, "upload_status")
@upload_status.setter
def upload_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "upload_status", value)
@property
@pulumi.getter(name="watchlistAlias")
def watchlist_alias(self) -> Optional[pulumi.Input[str]]:
"""
The alias of the watchlist
"""
return pulumi.get(self, "watchlist_alias")
@watchlist_alias.setter
def watchlist_alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "watchlist_alias", value)
@property
@pulumi.getter(name="watchlistId")
def watchlist_id(self) -> Optional[pulumi.Input[str]]:
"""
The id (a Guid) of the watchlist
"""
return pulumi.get(self, "watchlist_id")
@watchlist_id.setter
def watchlist_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "watchlist_id", value)
@property
@pulumi.getter(name="watchlistItemsCount")
def watchlist_items_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of Watchlist Items in the Watchlist
"""
return pulumi.get(self, "watchlist_items_count")
@watchlist_items_count.setter
def watchlist_items_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "watchlist_items_count", value)
@property
@pulumi.getter(name="watchlistType")
def watchlist_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the watchlist
"""
return pulumi.get(self, "watchlist_type")
@watchlist_type.setter
def watchlist_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "watchlist_type", value)
class Watchlist(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
content_type: Optional[pulumi.Input[str]] = None,
created: Optional[pulumi.Input[str]] = None,
created_by: Optional[pulumi.Input[pulumi.InputType['WatchlistUserInfoArgs']]] = None,
default_duration: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
is_deleted: Optional[pulumi.Input[bool]] = None,
items_search_key: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
number_of_lines_to_skip: Optional[pulumi.Input[int]] = None,
operational_insights_resource_provider: Optional[pulumi.Input[str]] = None,
provider: Optional[pulumi.Input[str]] = None,
raw_content: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Union[str, 'Source']]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
updated: Optional[pulumi.Input[str]] = None,
updated_by: Optional[pulumi.Input[pulumi.InputType['WatchlistUserInfoArgs']]] = None,
upload_status: Optional[pulumi.Input[str]] = None,
watchlist_alias: Optional[pulumi.Input[str]] = None,
watchlist_id: Optional[pulumi.Input[str]] = None,
watchlist_items_count: Optional[pulumi.Input[int]] = None,
watchlist_type: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Represents a Watchlist in Azure Security Insights.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] content_type: The content type of the raw content. Example : text/csv or text/tsv
:param pulumi.Input[str] created: The time the watchlist was created
:param pulumi.Input[pulumi.InputType['WatchlistUserInfoArgs']] created_by: Describes a user that created the watchlist
:param pulumi.Input[str] default_duration: The default duration of a watchlist (in ISO 8601 duration format)
:param pulumi.Input[str] description: A description of the watchlist
:param pulumi.Input[str] display_name: The display name of the watchlist
:param pulumi.Input[str] etag: Etag of the azure resource
:param pulumi.Input[bool] is_deleted: A flag that indicates if the watchlist is deleted or not
:param pulumi.Input[str] items_search_key: The search key is used to optimize query performance when using watchlists for joins with other data. For example, enable a column with IP addresses to be the designated SearchKey field, then use this field as the key field when joining to other event data by IP address.
:param pulumi.Input[Sequence[pulumi.Input[str]]] labels: List of labels relevant to this watchlist
:param pulumi.Input[int] number_of_lines_to_skip: The number of lines in a csv/tsv content to skip before the header
:param pulumi.Input[str] operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param pulumi.Input[str] provider: The provider of the watchlist
        :param pulumi.Input[str] raw_content: The raw content that represents the watchlist items to create. For csv/tsv content types, this is the content of the file that will be parsed by the endpoint
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Union[str, 'Source']] source: The source of the watchlist
:param pulumi.Input[str] tenant_id: The tenantId where the watchlist belongs to
:param pulumi.Input[str] updated: The last time the watchlist was updated
:param pulumi.Input[pulumi.InputType['WatchlistUserInfoArgs']] updated_by: Describes a user that updated the watchlist
        :param pulumi.Input[str] upload_status: The status of the Watchlist upload: New, InProgress or Complete. Please note: while a Watchlist upload status is InProgress, the Watchlist cannot be deleted
:param pulumi.Input[str] watchlist_alias: The alias of the watchlist
:param pulumi.Input[str] watchlist_id: The id (a Guid) of the watchlist
:param pulumi.Input[int] watchlist_items_count: The number of Watchlist Items in the Watchlist
:param pulumi.Input[str] watchlist_type: The type of the watchlist
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WatchlistArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a Watchlist in Azure Security Insights.
:param str resource_name: The name of the resource.
:param WatchlistArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WatchlistArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
content_type: Optional[pulumi.Input[str]] = None,
created: Optional[pulumi.Input[str]] = None,
created_by: Optional[pulumi.Input[pulumi.InputType['WatchlistUserInfoArgs']]] = None,
default_duration: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
is_deleted: Optional[pulumi.Input[bool]] = None,
items_search_key: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
number_of_lines_to_skip: Optional[pulumi.Input[int]] = None,
operational_insights_resource_provider: Optional[pulumi.Input[str]] = None,
provider: Optional[pulumi.Input[str]] = None,
raw_content: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[Union[str, 'Source']]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
updated: Optional[pulumi.Input[str]] = None,
updated_by: Optional[pulumi.Input[pulumi.InputType['WatchlistUserInfoArgs']]] = None,
upload_status: Optional[pulumi.Input[str]] = None,
watchlist_alias: Optional[pulumi.Input[str]] = None,
watchlist_id: Optional[pulumi.Input[str]] = None,
watchlist_items_count: Optional[pulumi.Input[int]] = None,
watchlist_type: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WatchlistArgs.__new__(WatchlistArgs)
__props__.__dict__["content_type"] = content_type
__props__.__dict__["created"] = created
__props__.__dict__["created_by"] = created_by
__props__.__dict__["default_duration"] = default_duration
__props__.__dict__["description"] = description
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__.__dict__["display_name"] = display_name
__props__.__dict__["etag"] = etag
__props__.__dict__["is_deleted"] = is_deleted
if items_search_key is None and not opts.urn:
raise TypeError("Missing required property 'items_search_key'")
__props__.__dict__["items_search_key"] = items_search_key
__props__.__dict__["labels"] = labels
__props__.__dict__["number_of_lines_to_skip"] = number_of_lines_to_skip
if operational_insights_resource_provider is None and not opts.urn:
raise TypeError("Missing required property 'operational_insights_resource_provider'")
__props__.__dict__["operational_insights_resource_provider"] = operational_insights_resource_provider
if provider is None and not opts.urn:
raise TypeError("Missing required property 'provider'")
__props__.__dict__["provider"] = provider
__props__.__dict__["raw_content"] = raw_content
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if source is None and not opts.urn:
raise TypeError("Missing required property 'source'")
__props__.__dict__["source"] = source
__props__.__dict__["tenant_id"] = tenant_id
__props__.__dict__["updated"] = updated
__props__.__dict__["updated_by"] = updated_by
__props__.__dict__["upload_status"] = upload_status
__props__.__dict__["watchlist_alias"] = watchlist_alias
__props__.__dict__["watchlist_id"] = watchlist_id
__props__.__dict__["watchlist_items_count"] = watchlist_items_count
__props__.__dict__["watchlist_type"] = watchlist_type
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityinsights/v20210301preview:Watchlist"), pulumi.Alias(type_="azure-native:securityinsights:Watchlist"), pulumi.Alias(type_="azure-nextgen:securityinsights:Watchlist"), pulumi.Alias(type_="azure-native:securityinsights/v20190101preview:Watchlist"), pulumi.Alias(type_="azure-nextgen:securityinsights/v20190101preview:Watchlist"), pulumi.Alias(type_="azure-native:securityinsights/v20210401:Watchlist"), pulumi.Alias(type_="azure-nextgen:securityinsights/v20210401:Watchlist")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Watchlist, __self__).__init__(
'azure-native:securityinsights/v20210301preview:Watchlist',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Watchlist':
"""
Get an existing Watchlist resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WatchlistArgs.__new__(WatchlistArgs)
__props__.__dict__["content_type"] = None
__props__.__dict__["created"] = None
__props__.__dict__["created_by"] = None
__props__.__dict__["default_duration"] = None
__props__.__dict__["description"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["is_deleted"] = None
__props__.__dict__["items_search_key"] = None
__props__.__dict__["labels"] = None
__props__.__dict__["name"] = None
__props__.__dict__["number_of_lines_to_skip"] = None
__props__.__dict__["provider"] = None
__props__.__dict__["raw_content"] = None
__props__.__dict__["source"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tenant_id"] = None
__props__.__dict__["type"] = None
__props__.__dict__["updated"] = None
__props__.__dict__["updated_by"] = None
__props__.__dict__["upload_status"] = None
__props__.__dict__["watchlist_alias"] = None
__props__.__dict__["watchlist_id"] = None
__props__.__dict__["watchlist_items_count"] = None
__props__.__dict__["watchlist_type"] = None
return Watchlist(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> pulumi.Output[Optional[str]]:
"""
The content type of the raw content. Example : text/csv or text/tsv
"""
return pulumi.get(self, "content_type")
@property
@pulumi.getter
def created(self) -> pulumi.Output[Optional[str]]:
"""
The time the watchlist was created
"""
return pulumi.get(self, "created")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> pulumi.Output[Optional['outputs.WatchlistUserInfoResponse']]:
"""
Describes a user that created the watchlist
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="defaultDuration")
def default_duration(self) -> pulumi.Output[Optional[str]]:
"""
The default duration of a watchlist (in ISO 8601 duration format)
"""
return pulumi.get(self, "default_duration")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description of the watchlist
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
The display name of the watchlist
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="isDeleted")
def is_deleted(self) -> pulumi.Output[Optional[bool]]:
"""
A flag that indicates if the watchlist is deleted or not
"""
return pulumi.get(self, "is_deleted")
@property
@pulumi.getter(name="itemsSearchKey")
def items_search_key(self) -> pulumi.Output[str]:
"""
The search key is used to optimize query performance when using watchlists for joins with other data. For example, enable a column with IP addresses to be the designated SearchKey field, then use this field as the key field when joining to other event data by IP address.
"""
return pulumi.get(self, "items_search_key")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of labels relevant to this watchlist
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="numberOfLinesToSkip")
def number_of_lines_to_skip(self) -> pulumi.Output[Optional[int]]:
"""
The number of lines in a csv/tsv content to skip before the header
"""
return pulumi.get(self, "number_of_lines_to_skip")
@property
@pulumi.getter
def provider(self) -> pulumi.Output[str]:
"""
The provider of the watchlist
"""
return pulumi.get(self, "provider")
@property
@pulumi.getter(name="rawContent")
def raw_content(self) -> pulumi.Output[Optional[str]]:
"""
        The raw content that represents the watchlist items to create. For csv/tsv content types, this is the content of the file that will be parsed by the endpoint
"""
return pulumi.get(self, "raw_content")
@property
@pulumi.getter
def source(self) -> pulumi.Output[str]:
"""
The source of the watchlist
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
The tenantId where the watchlist belongs to
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def updated(self) -> pulumi.Output[Optional[str]]:
"""
The last time the watchlist was updated
"""
return pulumi.get(self, "updated")
@property
@pulumi.getter(name="updatedBy")
def updated_by(self) -> pulumi.Output[Optional['outputs.WatchlistUserInfoResponse']]:
"""
Describes a user that updated the watchlist
"""
return pulumi.get(self, "updated_by")
@property
@pulumi.getter(name="uploadStatus")
def upload_status(self) -> pulumi.Output[Optional[str]]:
"""
        The status of the Watchlist upload: New, InProgress or Complete. Please note: while a Watchlist upload status is InProgress, the Watchlist cannot be deleted
"""
return pulumi.get(self, "upload_status")
@property
@pulumi.getter(name="watchlistAlias")
def watchlist_alias(self) -> pulumi.Output[Optional[str]]:
"""
The alias of the watchlist
"""
return pulumi.get(self, "watchlist_alias")
@property
@pulumi.getter(name="watchlistId")
def watchlist_id(self) -> pulumi.Output[Optional[str]]:
"""
The id (a Guid) of the watchlist
"""
return pulumi.get(self, "watchlist_id")
@property
@pulumi.getter(name="watchlistItemsCount")
def watchlist_items_count(self) -> pulumi.Output[Optional[int]]:
"""
The number of Watchlist Items in the Watchlist
"""
return pulumi.get(self, "watchlist_items_count")
@property
@pulumi.getter(name="watchlistType")
def watchlist_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of the watchlist
"""
return pulumi.get(self, "watchlist_type")
| 44.997599
| 569
| 0.652189
|
73cd37d073f4f86da6162439d5c3faa3c139e6d1
| 977
|
py
|
Python
|
labour_common_qualifications/management/commands/setup_labour_common_qualifications.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 13
|
2015-11-29T12:19:12.000Z
|
2021-02-21T15:42:11.000Z
|
labour_common_qualifications/management/commands/setup_labour_common_qualifications.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 23
|
2015-04-29T19:43:34.000Z
|
2021-02-10T05:50:17.000Z
|
labour_common_qualifications/management/commands/setup_labour_common_qualifications.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 11
|
2015-09-20T18:59:00.000Z
|
2020-02-07T08:47:34.000Z
|
from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType
from labour.models import Qualification
from ...models import JVKortti
class Command(BaseCommand):
args = ''
help = 'Setup common labour qualifications'
    def handle(self, *args, **options):
content_type = ContentType.objects.get_for_model(JVKortti)
Qualification.objects.get_or_create(
slug='jv-kortti',
defaults=dict(
name="JV-kortti",
qualification_extra_content_type=content_type
)
)
for slug, name in [
('b-ajokortti', "Henkilöauton ajokortti (B)"),
('c-ajokortti', "Kuorma-auton ajokortti (C)"),
('ea1', "Ensiapukoulutus EA1"),
('ea2', "Ensiapukoulutus EA2"),
('hygieniapassi', "Hygieniapassi"),
]:
Qualification.objects.get_or_create(slug=slug, defaults=dict(name=name))
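# A minimal invocation sketch, assuming a configured Django project with the
# labour and labour_common_qualifications apps installed; it runs the command
# defined above programmatically instead of via manage.py.
def _example_invoke():
    from django.core.management import call_command
    call_command("setup_labour_common_qualifications")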
| 33.689655
| 84
| 0.62129
|
b66cd9fcfcca9c806c69454c4314c46bd591d0dc
| 7,598
|
py
|
Python
|
youtuatools/extractor/jamendo.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 47
|
2021-01-02T07:44:50.000Z
|
2022-02-28T22:02:13.000Z
|
youtuatools/extractor/jamendo.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 4
|
2021-02-07T03:35:13.000Z
|
2021-10-31T19:23:53.000Z
|
youtuatools/extractor/jamendo.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 8
|
2021-01-03T05:44:39.000Z
|
2021-11-01T05:46:32.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import hashlib
import random
from ..compat import compat_str
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
try_get,
)
class JamendoIE(InfoExtractor):
_VALID_URL = r"""(?x)
https?://
(?:
licensing\.jamendo\.com/[^/]+|
(?:www\.)?jamendo\.com
)
/track/(?P<id>[0-9]+)(?:/(?P<display_id>[^/?#&]+))?
"""
_TESTS = [
{
"url": "https://www.jamendo.com/track/196219/stories-from-emona-i",
"md5": "6e9e82ed6db98678f171c25a8ed09ffd",
"info_dict": {
"id": "196219",
"display_id": "stories-from-emona-i",
"ext": "flac",
# 'title': 'Maya Filipič - Stories from Emona I',
"title": "Stories from Emona I",
# 'artist': 'Maya Filipič',
"track": "Stories from Emona I",
"duration": 210,
"thumbnail": r"re:^https?://.*\.jpg",
"timestamp": 1217438117,
"upload_date": "20080730",
"license": "by-nc-nd",
"view_count": int,
"like_count": int,
"average_rating": int,
"tags": ["piano", "peaceful", "newage", "strings", "upbeat"],
},
},
{
"url": "https://licensing.jamendo.com/en/track/1496667/energetic-rock",
"only_matching": True,
},
]
def _call_api(self, resource, resource_id):
path = "/api/%ss" % resource
rand = compat_str(random.random())
return self._download_json(
"https://www.jamendo.com" + path,
resource_id,
query={
"id[]": resource_id,
},
headers={
"X-Jam-Call": "$%s*%s~"
% (hashlib.sha1((path + rand).encode()).hexdigest(), rand)
},
)[0]
def _real_extract(self, url):
track_id, display_id = self._VALID_URL_RE.match(url).groups()
# webpage = self._download_webpage(
# 'https://www.jamendo.com/track/' + track_id, track_id)
# models = self._parse_json(self._html_search_regex(
# r"data-bundled-models='([^']+)",
# webpage, 'bundled models'), track_id)
# track = models['track']['models'][0]
track = self._call_api("track", track_id)
title = track_name = track["name"]
# get_model = lambda x: try_get(models, lambda y: y[x]['models'][0], dict) or {}
# artist = get_model('artist')
# artist_name = artist.get('name')
# if artist_name:
# title = '%s - %s' % (artist_name, title)
# album = get_model('album')
formats = [
{
"url": "https://%s.jamendo.com/?trackid=%s&format=%s&from=app-97dab294"
% (sub_domain, track_id, format_id),
"format_id": format_id,
"ext": ext,
"quality": quality,
}
for quality, (format_id, sub_domain, ext) in enumerate(
(
("mp31", "mp3l", "mp3"),
("mp32", "mp3d", "mp3"),
("ogg1", "ogg", "ogg"),
("flac", "flac", "flac"),
)
)
]
self._sort_formats(formats)
urls = []
thumbnails = []
for covers in (track.get("cover") or {}).values():
for cover_id, cover_url in covers.items():
if not cover_url or cover_url in urls:
continue
urls.append(cover_url)
size = int_or_none(cover_id.lstrip("size"))
thumbnails.append(
{
"id": cover_id,
"url": cover_url,
"width": size,
"height": size,
}
)
tags = []
for tag in track.get("tags") or []:
tag_name = tag.get("name")
if not tag_name:
continue
tags.append(tag_name)
stats = track.get("stats") or {}
license = track.get("licenseCC") or []
return {
"id": track_id,
"display_id": display_id,
"thumbnails": thumbnails,
"title": title,
"description": track.get("description"),
"duration": int_or_none(track.get("duration")),
# 'artist': artist_name,
"track": track_name,
# 'album': album.get('name'),
"formats": formats,
"license": "-".join(license) if license else None,
"timestamp": int_or_none(track.get("dateCreated")),
"view_count": int_or_none(stats.get("listenedAll")),
"like_count": int_or_none(stats.get("favorited")),
"average_rating": int_or_none(stats.get("averageNote")),
"tags": tags,
}
class JamendoAlbumIE(JamendoIE):
_VALID_URL = r"https?://(?:www\.)?jamendo\.com/album/(?P<id>[0-9]+)"
_TESTS = [
{
"url": "https://www.jamendo.com/album/121486/duck-on-cover",
"info_dict": {
"id": "121486",
"title": "Duck On Cover",
"description": "md5:c2920eaeef07d7af5b96d7c64daf1239",
},
"playlist": [
{
"md5": "e1a2fcb42bda30dfac990212924149a8",
"info_dict": {
"id": "1032333",
"ext": "flac",
"title": "Shearer - Warmachine",
"artist": "Shearer",
"track": "Warmachine",
"timestamp": 1368089771,
"upload_date": "20130509",
},
},
{
"md5": "1f358d7b2f98edfe90fd55dac0799d50",
"info_dict": {
"id": "1032330",
"ext": "flac",
"title": "Shearer - Without Your Ghost",
"artist": "Shearer",
"track": "Without Your Ghost",
"timestamp": 1368089771,
"upload_date": "20130509",
},
},
],
"params": {"playlistend": 2},
}
]
def _real_extract(self, url):
album_id = self._match_id(url)
album = self._call_api("album", album_id)
album_name = album.get("name")
entries = []
for track in album.get("tracks") or []:
track_id = track.get("id")
if not track_id:
continue
track_id = compat_str(track_id)
entries.append(
{
"_type": "url_transparent",
"url": "https://www.jamendo.com/track/" + track_id,
"ie_key": JamendoIE.ie_key(),
"id": track_id,
"album": album_name,
}
)
return self.playlist_result(
entries,
album_id,
album_name,
clean_html(try_get(album, lambda x: x["description"]["en"], compat_str)),
)
| 34.694064
| 88
| 0.43801
|
956669b868c5279339f881b8d3369268cee5a474
| 4,575
|
py
|
Python
|
python/metaspore/nn/wide_and_deep.py
|
meta-soul/MetaSpore
|
e6fbc12c6a3139df76c87215b16f9dba65962ec7
|
[
"Apache-2.0"
] | 32
|
2022-03-30T10:24:00.000Z
|
2022-03-31T16:19:15.000Z
|
python/metaspore/nn/wide_and_deep.py
|
meta-soul/MetaSpore
|
e6fbc12c6a3139df76c87215b16f9dba65962ec7
|
[
"Apache-2.0"
] | null | null | null |
python/metaspore/nn/wide_and_deep.py
|
meta-soul/MetaSpore
|
e6fbc12c6a3139df76c87215b16f9dba65962ec7
|
[
"Apache-2.0"
] | 3
|
2022-03-30T10:28:57.000Z
|
2022-03-30T11:37:39.000Z
|
#
# Copyright 2022 DMetaSoul
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from ..updater import FTRLTensorUpdater
from ..initializer import NormalTensorInitializer
from ..embedding import EmbeddingSumConcat
from .normalization import Normalization
class WideAndDeepModule(torch.nn.Module):
def __init__(self,
wide_embedding_size=16,
wide_column_name_path=None,
wide_combine_schema_path=None,
wide_updater=None,
wide_initializer=None,
deep_sparse_embedding_size=16,
deep_sparse_column_name_path=None,
deep_sparse_combine_schema_path=None,
deep_sparse_updater=None,
deep_sparse_initializer=None,
deep_dense_hidden_units=(1024, 512),
):
super().__init__()
if wide_column_name_path is None:
raise ValueError("wide_column_name_path is required")
if wide_combine_schema_path is None:
raise ValueError("wide_combine_schema_path is required")
if wide_updater is None:
wide_updater = FTRLTensorUpdater()
if wide_initializer is None:
wide_initializer = NormalTensorInitializer(var=0.01)
if deep_sparse_column_name_path is None:
raise ValueError("deep_sparse_column_name_path is required")
if deep_sparse_combine_schema_path is None:
raise ValueError("deep_sparse_combine_schema_path is required")
if deep_sparse_updater is None:
deep_sparse_updater = FTRLTensorUpdater()
if deep_sparse_initializer is None:
deep_sparse_initializer = NormalTensorInitializer(var=0.01)
if not deep_dense_hidden_units:
raise ValueError("deep_dense_hidden_units can not be empty")
self._wide_embedding_size = wide_embedding_size
self._wide_column_name_path = wide_column_name_path
self._wide_combine_schema_path = wide_combine_schema_path
self._wide = EmbeddingSumConcat(self._wide_embedding_size,
self._wide_column_name_path,
self._wide_combine_schema_path)
self._wide.updater = wide_updater
self._wide.initializer = wide_initializer
self._deep_sparse_embedding_size = deep_sparse_embedding_size
self._deep_sparse_column_name_path = deep_sparse_column_name_path
self._deep_sparse_combine_schema_path = deep_sparse_combine_schema_path
self._deep_sparse = EmbeddingSumConcat(self._deep_sparse_embedding_size,
self._deep_sparse_column_name_path,
self._deep_sparse_combine_schema_path)
self._deep_sparse.updater = deep_sparse_updater
self._deep_sparse.initializer = deep_sparse_initializer
modules = []
deep_dense_input_units = self._deep_sparse.feature_count * self._deep_sparse_embedding_size
modules.append(Normalization(deep_dense_input_units))
modules.append(torch.nn.Linear(deep_dense_input_units, deep_dense_hidden_units[0]))
modules.append(torch.nn.ReLU())
for i in range(len(deep_dense_hidden_units)):
input_units = deep_dense_hidden_units[i]
if i != len(deep_dense_hidden_units) - 1:
output_units = deep_dense_hidden_units[i + 1]
else:
output_units = 1
modules.append(torch.nn.Linear(input_units, output_units))
if i != len(deep_dense_hidden_units) - 1:
modules.append(torch.nn.ReLU())
self._deep_dense = torch.nn.Sequential(*modules)
def forward(self, inputs):
wide_outputs = self._wide(inputs)
wide_outputs = torch.sum(wide_outputs, dim=1, keepdim=True)
deep_sparse_outputs = self._deep_sparse(inputs)
deep_outputs = self._deep_dense(deep_sparse_outputs)
return torch.sigmoid(wide_outputs + deep_outputs)
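# A minimal usage sketch of the WideAndDeepModule defined above. The four schema
# paths are hypothetical placeholders: EmbeddingSumConcat reads them (its
# feature_count is consulted in __init__), so real column-name/combine-schema
# files from a MetaSpore pipeline are needed in practice.
def _build_example_wide_and_deep():
    module = WideAndDeepModule(
        wide_column_name_path='schema/wide_column_names.txt',              # hypothetical path
        wide_combine_schema_path='schema/wide_combine_schema.txt',         # hypothetical path
        deep_sparse_column_name_path='schema/deep_column_names.txt',       # hypothetical path
        deep_sparse_combine_schema_path='schema/deep_combine_schema.txt',  # hypothetical path
        deep_dense_hidden_units=(1024, 512),
    )
    # forward() consumes the framework's batched sparse inputs and returns a
    # sigmoid click-probability tensor (see forward() above).
    return module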
| 48.670213
| 99
| 0.679563
|
d312a07203cf82c5a0b9192f623b6ea89204cb97
| 2,368
|
py
|
Python
|
alipay/aop/api/domain/AlipayUserGroupbuyingSyncModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayUserGroupbuyingSyncModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayUserGroupbuyingSyncModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserGroupbuyingSyncModel(object):
def __init__(self):
self._group_expire = None
self._group_id = None
self._havana_id = None
self._item_id = None
@property
def group_expire(self):
return self._group_expire
@group_expire.setter
def group_expire(self, value):
self._group_expire = value
@property
def group_id(self):
return self._group_id
@group_id.setter
def group_id(self, value):
self._group_id = value
@property
def havana_id(self):
return self._havana_id
@havana_id.setter
def havana_id(self, value):
self._havana_id = value
@property
def item_id(self):
return self._item_id
@item_id.setter
def item_id(self, value):
self._item_id = value
def to_alipay_dict(self):
params = dict()
if self.group_expire:
if hasattr(self.group_expire, 'to_alipay_dict'):
params['group_expire'] = self.group_expire.to_alipay_dict()
else:
params['group_expire'] = self.group_expire
if self.group_id:
if hasattr(self.group_id, 'to_alipay_dict'):
params['group_id'] = self.group_id.to_alipay_dict()
else:
params['group_id'] = self.group_id
if self.havana_id:
if hasattr(self.havana_id, 'to_alipay_dict'):
params['havana_id'] = self.havana_id.to_alipay_dict()
else:
params['havana_id'] = self.havana_id
if self.item_id:
if hasattr(self.item_id, 'to_alipay_dict'):
params['item_id'] = self.item_id.to_alipay_dict()
else:
params['item_id'] = self.item_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserGroupbuyingSyncModel()
if 'group_expire' in d:
o.group_expire = d['group_expire']
if 'group_id' in d:
o.group_id = d['group_id']
if 'havana_id' in d:
o.havana_id = d['havana_id']
if 'item_id' in d:
o.item_id = d['item_id']
return o
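# A minimal round-trip sketch for the model above, showing that to_alipay_dict()
# and from_alipay_dict() mirror each other. The id values are made-up examples,
# not real Alipay identifiers.
if __name__ == '__main__':
    example = AlipayUserGroupbuyingSyncModel()
    example.group_id = '1234567890'    # hypothetical group id
    example.item_id = '987654321'      # hypothetical item id
    params = example.to_alipay_dict()  # plain dict, ready for serialization
    restored = AlipayUserGroupbuyingSyncModel.from_alipay_dict(params)
    assert restored.group_id == example.group_id and restored.item_id == example.item_id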
| 27.534884
| 75
| 0.58277
|
382bfc8743ca87587ae01978807ddbbdf0950339
| 5,395
|
py
|
Python
|
thingsboard_gateway/connectors/modbus/bytes_modbus_downlink_converter.py
|
hguomin/thingsboard-gateway
|
e776c4454d9a1a037517046f3ebdfe7900cb02ef
|
[
"Apache-2.0"
] | 1
|
2021-09-27T05:32:26.000Z
|
2021-09-27T05:32:26.000Z
|
thingsboard_gateway/connectors/modbus/bytes_modbus_downlink_converter.py
|
hguomin/thingsboard-gateway
|
e776c4454d9a1a037517046f3ebdfe7900cb02ef
|
[
"Apache-2.0"
] | null | null | null |
thingsboard_gateway/connectors/modbus/bytes_modbus_downlink_converter.py
|
hguomin/thingsboard-gateway
|
e776c4454d9a1a037517046f3ebdfe7900cb02ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymodbus.constants import Endian
from pymodbus.payload import BinaryPayloadBuilder
from thingsboard_gateway.connectors.modbus.modbus_converter import ModbusConverter, log
class BytesModbusDownlinkConverter(ModbusConverter):
def __init__(self, config):
self.__config = config
def convert(self, config, data):
byte_order_str = config.get("byteOrder", "BIG")
word_order_str = config.get("wordOrder", "LITTLE")
byte_order = Endian.Big if byte_order_str.upper() == "BIG" else Endian.Little
word_order = Endian.Big if word_order_str.upper() == "BIG" else Endian.Little
repack = config.get("repack", False)
builder = BinaryPayloadBuilder(byteorder=byte_order, wordorder=word_order, repack=repack)
builder_functions = {"string": builder.add_string,
"bits": builder.add_bits,
"8int": builder.add_8bit_int,
"16int": builder.add_16bit_int,
"32int": builder.add_32bit_int,
"64int": builder.add_64bit_int,
"8uint": builder.add_8bit_uint,
"16uint": builder.add_16bit_uint,
"32uint": builder.add_32bit_uint,
"64uint": builder.add_64bit_uint,
"16float": builder.add_16bit_float,
"32float": builder.add_32bit_float,
"64float": builder.add_64bit_float}
value = None
if data.get("data") and data["data"].get("params") is not None:
value = data["data"]["params"]
else:
value = config.get("value", 0)
lower_type = config.get("type", config.get("tag", "error")).lower()
if lower_type == "error":
log.error('"type" and "tag" - not found in configuration.')
variable_size = config.get("objectsCount", config.get("registersCount", config.get("registerCount", 1))) * 16
if lower_type in ["integer", "dword", "dword/integer", "word", "int"]:
lower_type = str(variable_size) + "int"
assert builder_functions.get(lower_type) is not None
builder_functions[lower_type](int(value))
elif lower_type in ["uint", "unsigned", "unsigned integer", "unsigned int"]:
lower_type = str(variable_size) + "uint"
assert builder_functions.get(lower_type) is not None
builder_functions[lower_type](int(value))
elif lower_type in ["float", "double"]:
lower_type = str(variable_size) + "float"
assert builder_functions.get(lower_type) is not None
builder_functions[lower_type](float(value))
elif lower_type in ["coil", "bits", "coils", "bit"]:
assert builder_functions.get("bits") is not None
if variable_size / 8 > 1.0:
builder_functions["bits"](value)
else:
return bytes(int(value))
elif lower_type in ["string"]:
assert builder_functions.get("string") is not None
builder_functions[lower_type](value)
elif lower_type in builder_functions and 'int' in lower_type:
builder_functions[lower_type](int(value))
elif lower_type in builder_functions and 'float' in lower_type:
builder_functions[lower_type](float(value))
elif lower_type in builder_functions:
builder_functions[lower_type](value)
else:
log.error("Unknown variable type")
builder_converting_functions = {5: builder.to_coils,
15: builder.to_coils,
6: builder.to_registers,
16: builder.to_registers}
function_code = config["functionCode"]
if function_code in builder_converting_functions:
builder = builder_converting_functions[function_code]()
log.debug(builder)
if "Exception" in str(builder):
log.exception(builder)
builder = str(builder)
if variable_size <= 16:
if isinstance(builder, list) and len(builder) not in (8, 16, 32, 64):
builder = builder[0]
else:
if isinstance(builder, list) and len(builder) not in (2, 4):
log.warning("There is a problem with the value builder. Only the firt register is written.")
builder = builder[0]
return builder
log.warning("Unsupported function code, for the device %s in the Modbus Downlink converter", config["device"])
return None
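# A minimal usage sketch for the converter above: write the value 1234 as a
# 32-bit integer (two holding registers) via Modbus function code 16. The config
# keys mirror the ones convert() reads; all values here are made-up examples.
if __name__ == '__main__':
    example_config = {
        "byteOrder": "BIG",
        "wordOrder": "LITTLE",
        "type": "integer",
        "registersCount": 2,          # 2 x 16 bit -> handled as "32int"
        "functionCode": 16,           # write multiple holding registers
        "device": "example-device",
    }
    example_data = {"data": {"params": 1234}}
    converter = BytesModbusDownlinkConverter(example_config)
    payload = converter.convert(example_config, example_data)
    print(payload)                    # a list with two register values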
| 49.953704
| 118
| 0.596293
|
a154e610d7dc8f07e2ccbd8801812a4b72e61423
| 971
|
py
|
Python
|
pastry_shop/blog/models.py
|
Raekker/pastry-shop
|
27e4e98594c57cbe5825a6571c6f93ad97dc1eb3
|
[
"MIT"
] | null | null | null |
pastry_shop/blog/models.py
|
Raekker/pastry-shop
|
27e4e98594c57cbe5825a6571c6f93ad97dc1eb3
|
[
"MIT"
] | null | null | null |
pastry_shop/blog/models.py
|
Raekker/pastry-shop
|
27e4e98594c57cbe5825a6571c6f93ad97dc1eb3
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
from django.utils.translation import gettext_lazy as _
from pastry_shop.users.models import User
class Post(models.Model):
title = models.CharField(_("Title"), max_length=64)
content = models.TextField(_("Content"))
author = models.ForeignKey(User, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("-created",)
def __str__(self):
return self.title
class Comment(models.Model):
content = models.TextField(_("Content"))
author = models.ForeignKey(User, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
post = models.ForeignKey("Post", on_delete=models.CASCADE, related_name="comments")
def __str__(self):
return f"{self.content[:20]} - {self.author.username}"
| 30.34375
| 87
| 0.717817
|
4f520305fad753c5af1e9e6038c3469122a64819
| 24
|
py
|
Python
|
btd6_memory_info/generated/Assets/Scripts/Unity/UI_New/Main/DailyRewards/daily_rewards.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/Assets/Scripts/Unity/UI_New/Main/DailyRewards/daily_rewards.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/Assets/Scripts/Unity/UI_New/Main/DailyRewards/daily_rewards.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
class DailyRewards: pass
| 24
| 24
| 0.875
|
6a49a84d8bc67d095db3bc31c4a03e6ea67a3a5d
| 6,715
|
py
|
Python
|
selfdrive/debug/test_fw_query_on_routes.py
|
Basketkase/openpilot
|
769e1cf7a8322ca83d1a86a2f547acf5e3a5a52e
|
[
"MIT"
] | 73
|
2017-10-28T20:52:31.000Z
|
2022-03-05T21:41:34.000Z
|
selfdrive/debug/test_fw_query_on_routes.py
|
Basketkase/openpilot
|
769e1cf7a8322ca83d1a86a2f547acf5e3a5a52e
|
[
"MIT"
] | 60
|
2020-09-03T15:23:38.000Z
|
2021-12-17T12:39:50.000Z
|
selfdrive/debug/test_fw_query_on_routes.py
|
Basketkase/openpilot
|
769e1cf7a8322ca83d1a86a2f547acf5e3a5a52e
|
[
"MIT"
] | 130
|
2020-08-19T04:20:02.000Z
|
2022-03-24T23:05:22.000Z
|
#!/usr/bin/env python3
# type: ignore
from collections import defaultdict
import argparse
import os
import traceback
from tqdm import tqdm
from tools.lib.logreader import LogReader
from tools.lib.route import Route
from selfdrive.car.car_helpers import interface_names
from selfdrive.car.fw_versions import match_fw_to_car_exact, match_fw_to_car_fuzzy, build_fw_dict
from selfdrive.car.toyota.values import FW_VERSIONS as TOYOTA_FW_VERSIONS
from selfdrive.car.honda.values import FW_VERSIONS as HONDA_FW_VERSIONS
from selfdrive.car.hyundai.values import FW_VERSIONS as HYUNDAI_FW_VERSIONS
from selfdrive.car.volkswagen.values import FW_VERSIONS as VW_FW_VERSIONS
from selfdrive.car.mazda.values import FW_VERSIONS as MAZDA_FW_VERSIONS
NO_API = "NO_API" in os.environ
SUPPORTED_CARS = set(interface_names['toyota'])
SUPPORTED_CARS |= set(interface_names['honda'])
SUPPORTED_CARS |= set(interface_names['hyundai'])
SUPPORTED_CARS |= set(interface_names['volkswagen'])
SUPPORTED_CARS |= set(interface_names['mazda'])
try:
from xx.pipeline.c.CarState import migration
except ImportError:
migration = {}
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run FW fingerprint on Qlog of route or list of routes')
parser.add_argument('route', help='Route or file with list of routes')
parser.add_argument('--car', help='Force comparison fingerprint to known car')
args = parser.parse_args()
if os.path.exists(args.route):
routes = list(open(args.route))
else:
routes = [args.route]
mismatches = defaultdict(list)
not_fingerprinted = 0
solved_by_fuzzy = 0
good_exact = 0
wrong_fuzzy = 0
good_fuzzy = 0
dongles = []
for route in tqdm(routes):
route = route.rstrip()
dongle_id, time = route.split('|')
if dongle_id in dongles:
continue
if NO_API:
qlog_path = f"cd:/{dongle_id}/{time}/0/qlog.bz2"
else:
route = Route(route)
qlog_path = route.qlog_paths()[0]
if qlog_path is None:
continue
try:
lr = LogReader(qlog_path)
dongles.append(dongle_id)
for msg in lr:
if msg.which() == "pandaState":
if msg.pandaState.pandaType not in ['uno', 'blackPanda', 'dos']:
break
elif msg.which() == "carParams":
bts = msg.carParams.as_builder().to_bytes()
car_fw = msg.carParams.carFw
if len(car_fw) == 0:
break
live_fingerprint = msg.carParams.carFingerprint
live_fingerprint = migration.get(live_fingerprint, live_fingerprint)
if args.car is not None:
live_fingerprint = args.car
if live_fingerprint not in SUPPORTED_CARS:
break
fw_versions_dict = build_fw_dict(car_fw)
exact_matches = match_fw_to_car_exact(fw_versions_dict)
fuzzy_matches = match_fw_to_car_fuzzy(fw_versions_dict)
if (len(exact_matches) == 1) and (list(exact_matches)[0] == live_fingerprint):
good_exact += 1
print(f"Correct! Live: {live_fingerprint} - Fuzzy: {fuzzy_matches}")
# Check if fuzzy match was correct
if len(fuzzy_matches) == 1:
if list(fuzzy_matches)[0] != live_fingerprint:
wrong_fuzzy += 1
print(f"{dongle_id}|{time}")
print("Fuzzy match wrong! Fuzzy:", fuzzy_matches, "Live:", live_fingerprint)
else:
good_fuzzy += 1
break
print(f"{dongle_id}|{time}")
print("Old style:", live_fingerprint, "Vin", msg.carParams.carVin)
print("New style (exact):", exact_matches)
print("New style (fuzzy):", fuzzy_matches)
for version in car_fw:
subaddr = None if version.subAddress == 0 else hex(version.subAddress)
print(f" (Ecu.{version.ecu}, {hex(version.address)}, {subaddr}): [{version.fwVersion}],")
print("Mismatches")
found = False
for car_fws in [TOYOTA_FW_VERSIONS, HONDA_FW_VERSIONS, HYUNDAI_FW_VERSIONS, VW_FW_VERSIONS, MAZDA_FW_VERSIONS]:
if live_fingerprint in car_fws:
found = True
expected = car_fws[live_fingerprint]
for (_, expected_addr, expected_sub_addr), v in expected.items():
for version in car_fw:
sub_addr = None if version.subAddress == 0 else version.subAddress
addr = version.address
if (addr, sub_addr) == (expected_addr, expected_sub_addr):
if version.fwVersion not in v:
print(f"({hex(addr)}, {'None' if sub_addr is None else hex(sub_addr)}) - {version.fwVersion}")
# Add to global list of mismatches
mismatch = (addr, sub_addr, version.fwVersion)
if mismatch not in mismatches[live_fingerprint]:
mismatches[live_fingerprint].append(mismatch)
# No FW versions for this car yet, add them all to mismatch list
if not found:
for version in car_fw:
sub_addr = None if version.subAddress == 0 else version.subAddress
addr = version.address
mismatch = (addr, sub_addr, version.fwVersion)
if mismatch not in mismatches[live_fingerprint]:
mismatches[live_fingerprint].append(mismatch)
print()
not_fingerprinted += 1
if len(fuzzy_matches) == 1:
if list(fuzzy_matches)[0] == live_fingerprint:
solved_by_fuzzy += 1
else:
wrong_fuzzy += 1
print("Fuzzy match wrong! Fuzzy:", fuzzy_matches, "Live:", live_fingerprint)
break
except Exception:
traceback.print_exc()
except KeyboardInterrupt:
break
print()
  # Print FW versions that need to be added separated out by car and address
for car, m in sorted(mismatches.items()):
print(car)
addrs = defaultdict(list)
for (addr, sub_addr, version) in m:
addrs[(addr, sub_addr)].append(version)
for (addr, sub_addr), versions in addrs.items():
print(f" ({hex(addr)}, {'None' if sub_addr is None else hex(sub_addr)}): [")
for v in versions:
print(f" {v},")
print(" ]")
print()
print()
print(f"Number of dongle ids checked: {len(dongles)}")
print(f"Fingerprinted: {good_exact}")
print(f"Not fingerprinted: {not_fingerprinted}")
print(f" of which had a fuzzy match: {solved_by_fuzzy}")
print()
print(f"Correct fuzzy matches: {good_fuzzy}")
print(f"Wrong fuzzy matches: {wrong_fuzzy}")
print()
| 34.973958
| 121
| 0.633656
|
129f7982c5811f20fe920f65044ceb59c534df6a
| 11,224
|
py
|
Python
|
_BACKUPS_V4/v4_5/LightPicture_Test.py
|
nagame/LightPicture
|
f9b00a39bc16aea4abac60c0dd0aab2acac5adcf
|
[
"Unlicense"
] | null | null | null |
_BACKUPS_V4/v4_5/LightPicture_Test.py
|
nagame/LightPicture
|
f9b00a39bc16aea4abac60c0dd0aab2acac5adcf
|
[
"Unlicense"
] | null | null | null |
_BACKUPS_V4/v4_5/LightPicture_Test.py
|
nagame/LightPicture
|
f9b00a39bc16aea4abac60c0dd0aab2acac5adcf
|
[
"Unlicense"
] | null | null | null |
from LightPicture import *
import unittest
import random
import time
class TestConstructor_Vertex(unittest.TestCase):
"""
Test Vertex class calls
"""
def test_none(self):
"""
Calling Vertex class with no key (key = None)
"""
v0 = Vertex()
self.assertIsNot(v0, None)
self.assertIsInstance(v0, Vertex)
def test_iterable_simple(self):
"""
Calling Vertex class with key containing simple types
"""
# self.assertRaises(TypeError, Vertex, [1])
# self.assertRaises(TypeError, Vertex, ['asc'])
v1 = Vertex([1, 2, 3])
self.assertIsNot(v1, None)
self.assertIsInstance(v1, Vertex)
def test_iterable_specific(self):
"""
Calling Vertex class with key containing specific types
"""
# Call Vertex class with Triangle object as key
t = Triangle()
v = Vertex(t)
self.assertIsInstance(v, Vertex)
v_parents = v.parents()
self.assertTrue(t in v_parents)
class TestConstructor_Triangle(unittest.TestCase):
"""
Test Triangle class call
"""
def test_none(self):
"""
Calling Triangle class with no key (key = None)
"""
t0 = Triangle()
self.assertIsNot(t0, None)
self.assertIsInstance(t0, Triangle)
def test_iterable(self):
"""
        Calling Triangle class with iterable key
"""
# simple types iterables
t1 = Triangle([1, 2, 3])
self.assertIsNot(t1, None)
self.assertIsInstance(t1, Triangle)
t2 = Triangle('xyz')
self.assertIsNot(t2, None)
self.assertIsInstance(t2, Triangle)
t3 = Triangle(['x', 'y', 'z'])
self.assertIsNot(t3, None)
self.assertIsInstance(t3, Triangle)
# check vertices assignment
t1 = Triangle([1001, 1002, 1003])
self.assertIsNot(t1, None)
self.assertIsInstance(t1, Triangle)
result = t1.vertices()
self.assertIsInstance(result, list)
[r0, r1, r2] = result
self.assertEqual(r0, 1001)
self.assertEqual(r1, 1002)
self.assertEqual(r2, 1003)
t2 = Triangle('xyz')
self.assertIsNot(t2, None)
self.assertIsInstance(t2, Triangle)
t3 = Triangle(['x', 'y', 'z'])
self.assertIsNot(t3, None)
self.assertIsInstance(t3, Triangle)
def test_iterable_specific(self):
"""
        Calling Triangle class with key containing specific types
"""
# create triangle using iterable of Vertex
v0 = Vertex([0, 0, 0])
v1 = Vertex([1, 1, 1])
v2 = Vertex([2, 2, 2])
t0 = Triangle([v0, v1, v2])
self.assertIsInstance(t0, Triangle)
vertices = t0.vertices()
self.assertIs(v0, vertices[0])
self.assertIs(v1, vertices[1])
self.assertIs(v2, vertices[2])
# create Triangle recursive Vertex construction
t1 = Triangle([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
self.assertIsInstance(t1, Triangle)
vertices = t1.vertices()
cv_0 = vertices[0].coordinates()
cv_1 = vertices[1].coordinates()
cv_2 = vertices[2].coordinates()
self.assertEqual([1, 2, 3], cv_0)
self.assertEqual([4, 5, 6], cv_1)
self.assertEqual([7, 8, 9], cv_2)
class TestConstructor_TriangleMesh(unittest.TestCase):
"""
    Test TriangleMesh class calls
"""
def test_none(self):
"""
Calling TriangleMesh class with no key (key = None)
"""
m = TriangleMesh()
self.assertIsInstance(m, TriangleMesh)
def test_iterable(self):
"""
Calling TriangleMesh class with iterable key
"""
t0 = Triangle()
t1 = Triangle()
t2 = Triangle()
m0 = TriangleMesh([t0, t1, t2])
m0_triangles = m0.triangles()
self.assertIs(t0, m0_triangles[0])
self.assertIs(t1, m0_triangles[1])
self.assertIs(t2, m0_triangles[2])
def test_iterable_specific(self):
"""
Calling TriangleMesh class with key containing specific types
"""
# check TriangleMesh constructor with triangles as key
t0 = Triangle([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
t1 = Triangle([[3, 3, 3], [4, 4, 4], [5, 5, 5]])
t2 = Triangle([[6, 6, 6], [7, 7, 7], [8, 8, 8]])
m0 = TriangleMesh([t0, t1, t2])
m0_triangles = m0.triangles()
self.assertIs(t0, m0_triangles[0])
self.assertIs(t1, m0_triangles[1])
self.assertIs(t2, m0_triangles[2])
t0_v = m0_triangles[0].vertices()
t1_v = m0_triangles[1].vertices()
t2_v = m0_triangles[2].vertices()
self.assertEqual(t0_v[0].coordinates(), [0, 0, 0])
self.assertEqual(t0_v[1].coordinates(), [1, 1, 1])
self.assertEqual(t0_v[2].coordinates(), [2, 2, 2])
self.assertEqual(t1_v[0].coordinates(), [3, 3, 3])
self.assertEqual(t1_v[1].coordinates(), [4, 4, 4])
self.assertEqual(t1_v[2].coordinates(), [5, 5, 5])
self.assertEqual(t2_v[0].coordinates(), [6, 6, 6])
self.assertEqual(t2_v[1].coordinates(), [7, 7, 7])
self.assertEqual(t2_v[2].coordinates(), [8, 8, 8])
class TestTemporary(unittest.TestCase):
"""
Temporary tests or test currently in development
"""
def test_draft(self):
pass
def test_draft2(self):
pass
# # test with every triangle having distinct vertices
# t0 = Triangle([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
# t1 = Triangle([[3, 3, 3], [4, 4, 4], [5, 5, 5]])
# t2 = Triangle([[6, 6, 6], [7, 7, 7], [8, 8, 8]])
# m0 = TriangleMesh([t0, t1, t2])
# writer = Xml3mfWriter(m0)
# writer.write()
    def test_draft3(self):
        pass
        # # test with common vertex objects between triangles
# v0 = Vertex([0, 0, 0])
# v1 = Vertex([1, 1, 1])
# v2 = Vertex([2, 2, 2])
# t0 = Triangle([v0, v1, v2])
# t1 = Triangle([v0, v2, v2])
# t2 = Triangle([v0, v0, v0])
# t3 = Triangle([v0, v1, v0])
#
# # throw in another 'dynamic' triangle
# t4 = Triangle([[6, 6, 6], [7, 7, 7], [8, 8, 8]])
#
# m0 = TriangleMesh([t0, t1, t2, t3, t4])
#
# writer = Xml3mfWriter(m0)
# writer.write_triangle_mesh()
def test_create_sample_file(self):
pass
# # create list of triangles defined by 3 points in space [p1, p2, p3]
# # the order of points is important
# triangles = [ [[x, x, x], [x+1, x+1, x+1], [x+2, x+2, x+2]] for x in range(3) ]
#
# # create mesh builder object
# # it will keep track of 'used points in space' and generate Triangle and Vertex objects and connect them
        # # this abstraction layer frees us from thinking about which points have already been used
# # and the objects structure for writing 3mf file is being created 'on the fly'
# # pass [x, y, z] as space size
# tmb = TriangleMeshBuilder([200, 200, 200])
# # give all triangles to the mesh builder
# for t in triangles:
# tmb.triangle(t)
#
# tmb.write_3mf_file()
#
# # # # alternative way of doing the above by hand
# # # get the mesh from the builder
# # mesh = tmb.triangle_mesh
# # # create 3mf writer
# # writer = Xml3mfWriter(mesh)
# # # give the created mesh to the writer
# # writer.write_triangle_mesh()
def test_create_sample_file_2(self):
pass
# # create list of triangles defined by 3 points in space [p1, p2, p3]
# # the order of points is important
#
# triangles = [ [[x%400, x%400, x%400], [(x+1)%400, (x+1)%400, (x+1)%400], [(x+2)%400, (x+2)%400, (x+2)%400]] \
# for x in range(1000) ]
# # triangles = [[[1, 1, 1], [1, 1, 1], [1, 1, 1]] for x in range(10)]
#
# # create mesh builder object
# # it will keep track of 'used points in space' and generate Triangle and Vertex objects and connect them
        # # this abstraction layer frees us from thinking about which points have already been used
# # and the objects structure for writing 3mf file is being created 'on the fly'
# # pass [x, y, z] as space size
# tmb = TriangleMeshBuilder([400, 400, 400])
# # give all triangles to the mesh builder
# for t in triangles:
# tmb.triangle(t)
#
# tmb.write_3mf_file()
def test_create_sample_mesh(self):
pass
# # create list of triangles defined by 3 points in space [p1, p2, p3]
# # the order of points is important
#
# # [
# # [], [], []
# # ],
#
# triangles = [
# [
# [0, 0, 0], [10, 0, 0], [5, 5, 0]
# ],
# [
# [0, 0, 0], [5, 5, 0], [2, 2, 20]
# ],
# [
# [2, 2, 20], [5, 5, 0], [10, 0, 0]
# ],
# [
# [0, 0, 0], [2, 2, 20], [10, 0, 0]
# ]
# ]
#
#
# # create mesh builder object
# tmb = TriangleMeshBuilder([400, 400, 400])
# # give all triangles to the mesh builder
# for t in triangles:
# tmb.triangle(t)
#
# tmb.write_3mf_file()
def test_random_big_file(self):
pass
        # # generate a significant amount of triangles and save them to 3mf
# # generate random triangels
# t0 = time.perf_counter()
# rand = random.random
# for n in range(100_000):
# triangles.append([
# [int(rand() * x), int(rand() * y), int(rand() * z)],
# [int(rand() * x), int(rand() * y), int(rand() * z)],
# [int(rand() * x), int(rand() * y), int(rand() * z)]
# ])
# t0 = time.perf_counter() - t0
#
# # create mesh builder object
# t1 = time.perf_counter()
# tmb = TriangleMeshBuilder([x, y, z])
# t1 = time.perf_counter() - t1
#
# # give all triangles to the mesh builder
# t2 = time.perf_counter()
# for t in triangles:
# tmb.triangle(t)
# t2 = time.perf_counter() - t2
#
# # build xml structure
# t3 = time.perf_counter()
# tmb.mesh_to_xml()
# t3 = time.perf_counter() - t3
#
# # save 3mf file
# t4 = time.perf_counter()
# tmb.save_3mf()
# t4 = time.perf_counter() - t4
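# A consolidated, hedged sketch of the TriangleMeshBuilder workflow that the
# commented-out tests above describe. It assumes TriangleMeshBuilder (and its
# triangle()/write_3mf_file() methods) are provided by the star import from
# LightPicture; the coordinates are made-up example data.
def example_mesh_builder_workflow():
    triangles = [[[x, x, x], [x + 1, x + 1, x + 1], [x + 2, x + 2, x + 2]] for x in range(3)]
    tmb = TriangleMeshBuilder([200, 200, 200])  # [x, y, z] space size, per the comments above
    for t in triangles:
        tmb.triangle(t)
    tmb.write_3mf_file()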
if __name__ == '__main__':
unittest.main()
| 34.324159
| 120
| 0.511226
|
c61784f5227ccf6ae3c6be9edfbda0becb696f07
| 1,214
|
py
|
Python
|
sdf_timing/sdfparse.py
|
chipsalliance/python-sdf-timing
|
119eee06c5a8700da594c5e066259a96858ececa
|
[
"Apache-2.0"
] | 1
|
2022-02-22T09:41:42.000Z
|
2022-02-22T09:41:42.000Z
|
sdf_timing/sdfparse.py
|
chipsalliance/f4pga-sdf-timing
|
119eee06c5a8700da594c5e066259a96858ececa
|
[
"Apache-2.0"
] | null | null | null |
sdf_timing/sdfparse.py
|
chipsalliance/f4pga-sdf-timing
|
119eee06c5a8700da594c5e066259a96858ececa
|
[
"Apache-2.0"
] | 1
|
2022-02-27T18:09:31.000Z
|
2022-02-27T18:09:31.000Z
|
#!/usr/bin/env python3
# coding: utf-8
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
from . import sdflex
from . import sdfyacc
from . import sdfwrite
def init():
sdfyacc.timings = dict()
sdfyacc.header = dict()
sdfyacc.delays_list = list()
sdfyacc.cells = dict()
sdfyacc.tmp_delay_list = list()
sdfyacc.tmp_equation = list()
sdfyacc.tmp_constr_list = list()
sdflex.lexer.lineno = 1
def emit(input, timescale='1ps'):
return sdfwrite.emit_sdf(input, timescale)
def parse(input):
init()
sdflex.input_data = input
sdfyacc.parser.parse(sdflex.input_data)
return sdfyacc.timings
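# A minimal usage sketch for the module API above. "design.sdf" is a hypothetical
# input path, and emit() is assumed here to accept the timing structure returned
# by parse().
if __name__ == '__main__':
    with open('design.sdf') as sdf_file:       # hypothetical SDF file
        timings = parse(sdf_file.read())       # parse() resets parser state via init()
    print(emit(timings, timescale='1ps'))      # serialize back to SDF text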
| 25.291667
| 74
| 0.720758
|
c3148a5580a5bee60640531f3799f422807c1316
| 299
|
py
|
Python
|
src/views/web.py
|
Dourv/tornado-mongo
|
95dbd1151abac2831d98b6d768a86f59b11c273d
|
[
"MIT"
] | 2
|
2015-04-21T14:49:05.000Z
|
2015-04-21T15:15:40.000Z
|
src/views/web.py
|
Dourv/tornado-mongo
|
95dbd1151abac2831d98b6d768a86f59b11c273d
|
[
"MIT"
] | null | null | null |
src/views/web.py
|
Dourv/tornado-mongo
|
95dbd1151abac2831d98b6d768a86f59b11c273d
|
[
"MIT"
] | null | null | null |
import calendar
import config
import hashlib
import json
import os
import pymongo
import tornado
import base # Inheriting from base for all views.
from bson.objectid import ObjectId
from datetime import datetime, timedelta
class home(base.base):
def get(self):
self._render('index.html')
| 18.6875
| 55
| 0.795987
|
2b94a593790c06ec36614b382aacd4e5c002daf3
| 5,308
|
py
|
Python
|
src/garage/tf/policies/categorical_mlp_policy_with_model.py
|
researchai/unsupervised_meta_rl
|
9ca4b41438277ef6cfea047482b98de9da07815a
|
[
"MIT"
] | 1
|
2019-07-31T06:53:38.000Z
|
2019-07-31T06:53:38.000Z
|
src/garage/tf/policies/categorical_mlp_policy_with_model.py
|
researchai/unsupervised_meta_rl
|
9ca4b41438277ef6cfea047482b98de9da07815a
|
[
"MIT"
] | null | null | null |
src/garage/tf/policies/categorical_mlp_policy_with_model.py
|
researchai/unsupervised_meta_rl
|
9ca4b41438277ef6cfea047482b98de9da07815a
|
[
"MIT"
] | null | null | null |
"""CategoricalMLPPolicy with model."""
import akro
import tensorflow as tf
from garage.misc.overrides import overrides
from garage.tf.distributions import Categorical
from garage.tf.models import MLPModel
from garage.tf.policies.base2 import StochasticPolicy2
class CategoricalMLPPolicyWithModel(StochasticPolicy2):
"""
CategoricalMLPPolicy with model.
    A policy that contains an MLP to make predictions based on
    a categorical distribution.
    It only works with akro.Discrete action spaces.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Policy name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this policy consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
name='CategoricalMLPPolicy',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.glorot_uniform_initializer(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=tf.nn.softmax,
output_w_init=tf.glorot_uniform_initializer(),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
assert isinstance(env_spec.action_space, akro.Discrete), (
'CategoricalMLPPolicy only works with akro.Discrete action '
'space.')
super().__init__(name, env_spec)
self.obs_dim = env_spec.observation_space.flat_dim
self.action_dim = env_spec.action_space.n
self.model = MLPModel(
output_dim=self.action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization,
name='MLPModel')
self._initialize()
def _initialize(self):
state_input = tf.compat.v1.placeholder(
tf.float32, shape=(None, self.obs_dim))
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
self.model.build(state_input)
self._f_prob = tf.compat.v1.get_default_session().make_callable(
self.model.networks['default'].outputs,
feed_list=[self.model.networks['default'].input])
@property
def vectorized(self):
"""Vectorized or not."""
return True
@overrides
def dist_info_sym(self, obs_var, state_info_vars=None, name=None):
"""Symbolic graph of the distribution."""
with tf.compat.v1.variable_scope(self._variable_scope):
prob = self.model.build(obs_var, name=name)
return dict(prob=prob)
@overrides
def dist_info(self, obs, state_infos=None):
"""Distribution info."""
prob = self._f_prob(obs)
return dict(prob=prob)
@overrides
def get_action(self, observation):
"""Return a single action."""
flat_obs = self.observation_space.flatten(observation)
prob = self._f_prob([flat_obs])[0]
action = self.action_space.weighted_sample(prob)
return action, dict(prob=prob)
def get_actions(self, observations):
"""Return multiple actions."""
flat_obs = self.observation_space.flatten_n(observations)
probs = self._f_prob(flat_obs)
actions = list(map(self.action_space.weighted_sample, probs))
return actions, dict(prob=probs)
@property
def distribution(self):
"""Policy distribution."""
return Categorical(self.action_dim)
def __getstate__(self):
"""Object.__getstate__."""
new_dict = super().__getstate__()
del new_dict['_f_prob']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__."""
super().__setstate__(state)
self._initialize()
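# A minimal, hedged usage sketch for the policy above. The EnvSpec construction
# and the zero observation are assumptions for illustration (the docstring only
# states that env_spec comes from garage.envs.env_spec.EnvSpec); the policy must
# be built inside an active tf.compat.v1 session because _initialize() calls
# get_default_session().
def _example_categorical_policy():
    import numpy as np
    from garage.envs.env_spec import EnvSpec
    spec = EnvSpec(observation_space=akro.Box(low=-1.0, high=1.0, shape=(4,)),
                   action_space=akro.Discrete(3))
    with tf.compat.v1.Session().as_default() as sess:
        policy = CategoricalMLPPolicyWithModel(env_spec=spec, hidden_sizes=(32, 32))
        sess.run(tf.compat.v1.global_variables_initializer())
        action, info = policy.get_action(np.zeros(4, dtype=np.float32))
        return action, info['prob']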
| 37.914286
| 78
| 0.648644
|
cd0185096a030d018ce0c4b5b59a1a3e6b1a98fe
| 3,990
|
py
|
Python
|
lib/surface/service_directory/endpoints/create.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/surface/service_directory/endpoints/create.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/service_directory/endpoints/create.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud service-directory endpoints create` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.service_directory import endpoints
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.service_directory import flags
from googlecloudsdk.command_lib.service_directory import resource_args
from googlecloudsdk.command_lib.service_directory import util
from googlecloudsdk.core import log
_RESOURCE_TYPE = 'endpoint'
_ENDPOINT_LIMIT = 512
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
"""Creates an endpoint."""
detailed_help = {
'EXAMPLES':
"""\
To create a Service Directory endpoint, run:
$ {command} my-endpoint --service=my-service --namespace=my-namespace --location=us-east1 --address=1.2.3.4 --port=5 --annotations=a=b,c=d
""",
}
@staticmethod
def Args(parser):
resource_args.AddEndpointResourceArg(
parser,
"""to create. The endpoint id must be 1-63 characters long and match
the regular expression `[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?` which means
the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.""")
flags.AddAddressFlag(parser)
flags.AddPortFlag(parser)
flags.AddAnnotationsFlag(parser, _RESOURCE_TYPE, _ENDPOINT_LIMIT)
def Run(self, args):
client = endpoints.EndpointsClient()
endpoint_ref = args.CONCEPTS.endpoint.Parse()
annotations = util.ParseAnnotationsArg(args.annotations, _RESOURCE_TYPE)
result = client.Create(endpoint_ref, args.address, args.port, annotations)
log.CreatedResource(endpoint_ref.endpointsId, _RESOURCE_TYPE)
return result
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class CreateBeta(base.CreateCommand):
"""Creates an endpoint."""
detailed_help = {
'EXAMPLES':
"""\
To create a Service Directory endpoint, run:
$ {command} my-endpoint --service=my-service --namespace=my-namespace --location=us-east1 --address=1.2.3.4 --port=5 --metadata=a=b,c=d --network=projects/123456789/locations/global/networks/default
""",
}
@staticmethod
def Args(parser):
resource_args.AddEndpointResourceArg(
parser,
"""to create. The endpoint id must be 1-63 characters long and match
the regular expression `[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?` which means
the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.""")
flags.AddAddressFlag(parser)
flags.AddPortFlag(parser)
flags.AddMetadataFlag(parser, _RESOURCE_TYPE, _ENDPOINT_LIMIT)
flags.AddNetworkFlag(parser)
def Run(self, args):
client = endpoints.EndpointsClientBeta()
endpoint_ref = args.CONCEPTS.endpoint.Parse()
metadata = util.ParseMetadataArg(args.metadata, _RESOURCE_TYPE)
result = client.Create(endpoint_ref, args.address, args.port, metadata,
args.network)
log.CreatedResource(endpoint_ref.endpointsId, _RESOURCE_TYPE)
return result
| 37.641509
| 210
| 0.72005
|
e866a1eda0f45a4ee1330431811c5b8591679963
| 1,639
|
py
|
Python
|
augly/video/augmenters/ffmpeg/pad.py
|
Ierezell/AugLy
|
a7dca8c36bc05dbd7694373fe9b883d6ff720f56
|
[
"MIT"
] | 1
|
2021-09-29T21:27:50.000Z
|
2021-09-29T21:27:50.000Z
|
augly/video/augmenters/ffmpeg/pad.py
|
Ierezell/AugLy
|
a7dca8c36bc05dbd7694373fe9b883d6ff720f56
|
[
"MIT"
] | null | null | null |
augly/video/augmenters/ffmpeg/pad.py
|
Ierezell/AugLy
|
a7dca8c36bc05dbd7694373fe9b883d6ff720f56
|
[
"MIT"
] | 1
|
2021-07-02T13:08:55.000Z
|
2021-07-02T13:08:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Dict, Tuple
from augly.utils import validate_rgb_color
from augly.video.augmenters.ffmpeg import BaseFFMPEGAugmenter
from augly.video.helpers import get_video_info
from ffmpeg.nodes import FilterableStream
class VideoAugmenterByPadding(BaseFFMPEGAugmenter):
def __init__(self, w_factor: float, h_factor: float, color: Tuple[int, int, int]):
assert w_factor >= 0, "w_factor cannot be a negative number"
assert h_factor >= 0, "h_factor cannot be a negative number"
validate_rgb_color(color)
self.w_factor = w_factor
self.h_factor = h_factor
self.hex_color = "%02x%02x%02x" % color
def add_augmenter(
self, in_stream: FilterableStream, **kwargs
) -> Tuple[FilterableStream, Dict]:
"""
Adds padding to the video
@param in_stream: the FFMPEG object of the video
@returns: a tuple containing the FFMPEG object with the augmentation
applied and a dictionary with any output arguments as necessary
"""
video_info = get_video_info(kwargs["video_path"])
left = int(video_info["width"] * self.w_factor)
top = int(video_info["height"] * self.h_factor)
return (
in_stream.video.filter(
"pad",
**{
"width": f"iw+{left*2}",
"height": f"ih+{top*2}",
"x": left,
"y": top,
"color": self.hex_color,
},
),
{},
)
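# A minimal construction sketch for the augmenter above: pad each side by 25% of
# the frame size and fill with black. The factors and color are example values;
# applying the augmenter to a video is handled by the BaseFFMPEGAugmenter pipeline.
def example_padding_augmenter() -> VideoAugmenterByPadding:
    return VideoAugmenterByPadding(w_factor=0.25, h_factor=0.25, color=(0, 0, 0))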
| 32.137255
| 86
| 0.594875
|
184509d0ff629360ccc3f12be014115d963bc99d
| 4,661
|
py
|
Python
|
cifar_data.py
|
xiaoyoyoada/MNIST_CIFAR_Classification_TF
|
edad97f58a5259896b0c2bd677e75c4a703b54cb
|
[
"MIT"
] | null | null | null |
cifar_data.py
|
xiaoyoyoada/MNIST_CIFAR_Classification_TF
|
edad97f58a5259896b0c2bd677e75c4a703b54cb
|
[
"MIT"
] | null | null | null |
cifar_data.py
|
xiaoyoyoada/MNIST_CIFAR_Classification_TF
|
edad97f58a5259896b0c2bd677e75c4a703b54cb
|
[
"MIT"
] | null | null | null |
from urllib.request import urlretrieve
import tarfile
import os
import pickle
import numpy as np
from tqdm import tqdm
class DownloadProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
def load_label_names():
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def normalize(x):
min_val = np.min(x)
max_val = np.max(x)
x = (x - min_val) / (max_val - min_val)
return x
def one_hot_encode(x):
encoded = np.zeros((len(x), 10))
for idx, val in enumerate(x):
encoded[idx][val] = 1
return encoded
def process_and_save(normalizer, one_hot_encoder, features, labels, filename):
features = normalizer(features)
labels = one_hot_encoder(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def load_cifar10_batch(cifar10_dataset_folder_path, batch_id):
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
# note the encoding type is 'latin1'
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def preprocess_and_save_data(cifar10_dataset_folder_path, normalizer, one_hot_encoder, save_path):
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cifar10_batch(cifar10_dataset_folder_path, batch_i)
        # find the index that splits off the last 10% of this batch as validation data
index_of_validation = int(len(features) * 0.1)
process_and_save(normalizer, one_hot_encoder, features[:-index_of_validation], labels[:-index_of_validation],
save_path + 'batch_' + str(batch_i) + '.pkl')
valid_features.extend(features[-index_of_validation:])
valid_labels.extend(labels[-index_of_validation:])
    # preprocess all the stacked validation data
process_and_save(normalizer, one_hot_encoder, np.array(valid_features), np.array(valid_labels),
save_path + 'valid.pkl')
# load the test dataset
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# preprocess the testing data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all testing data
process_and_save(normalizer, one_hot_encoder, np.array(test_features), np.array(test_labels),
save_path + 'test.pkl')
def maybe_download_and_extract():
parent_folder = "./data/"
if not os.path.exists(parent_folder):
os.makedirs(parent_folder)
cifar10_dataset_zip_path = os.path.join(parent_folder, "cifar-10-python.tar.gz")
    # download the dataset (if it does not exist yet)
if not os.path.isfile(cifar10_dataset_zip_path):
with DownloadProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve('https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', cifar10_dataset_zip_path, pbar.hook)
    # extract the archive if not yet extracted
cifar10_dataset_folder_path = os.path.join(parent_folder, "cifar-10-batches-py")
if not os.path.isdir(cifar10_dataset_folder_path):
with tarfile.open(cifar10_dataset_zip_path) as tar:
tar.extractall(path=parent_folder)
tar.close()
    # preprocess if the preprocessed files do not exist yet
save_path = os.path.join(parent_folder, "cifar_pickle/")
if not os.path.exists(save_path):
os.makedirs(save_path)
if not os.path.exists(os.path.join(save_path, "batch_1.pkl")):
preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode, save_path)
return save_path
def batch_features_labels(features, labels, batch_size):
"""Split features and labels into batches"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_training_batch(batch_id, batch_size):
"""Load the Preprocessed Training data and return them in batches of <batch_size> or less"""
filename = 'data/cifar_pickle/' + 'batch_' + str(batch_id) + '.pkl'
features, labels = pickle.load(open(filename, mode='rb'))
return batch_features_labels(features, labels, batch_size)
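# A minimal end-to-end sketch tying the helpers above together: download, extract
# and preprocess CIFAR-10 once, then stream the first preprocessed training batch
# in mini-batches of 64. Note that the first run downloads roughly 170 MB.
if __name__ == '__main__':
    maybe_download_and_extract()
    for batch_features, batch_labels in load_training_batch(batch_id=1, batch_size=64):
        print(batch_features.shape, batch_labels.shape)  # (64, 32, 32, 3) and (64, 10)
        break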
| 41.247788
| 119
| 0.692555
|
191c5808b589c8a2ea58a45e445ff272718c3b60
| 17,529
|
py
|
Python
|
qmplot/modules/_manhattan.py
|
julibeg/qmplot
|
6fad9dffeb068aa5d88cb91ec0399bdc0c359ef6
|
[
"MIT"
] | 24
|
2021-02-20T08:03:00.000Z
|
2021-12-04T08:11:43.000Z
|
qmplot/modules/_manhattan.py
|
julibeg/qmplot
|
6fad9dffeb068aa5d88cb91ec0399bdc0c359ef6
|
[
"MIT"
] | 6
|
2021-05-28T14:13:51.000Z
|
2022-03-25T07:50:01.000Z
|
qmplot/modules/_manhattan.py
|
julibeg/qmplot
|
6fad9dffeb068aa5d88cb91ec0399bdc0c359ef6
|
[
"MIT"
] | 7
|
2021-02-21T17:21:29.000Z
|
2021-11-01T20:12:14.000Z
|
"""Plotting functions for manhattan plot.
Copyright (c) Shujia Huang
Date: 2021-02-21
This model is based on brentp's script on github:
https://github.com/brentp/bio-playground/blob/master/plots/manhattan-plot.py
Thanks for Brentp's contributions
"""
from itertools import cycle
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
from ._utils import General
from ..utils import adjust_text
# learn something from "https://github.com/reneshbedre/bioinfokit/blob/38fb4966827337f00421119a69259b92bb67a7d0/bioinfokit/visuz.py"
def manhattanplot(data, chrom="#CHROM", pos="POS", pv="P", snp="ID", logp=True, ax=None,
marker=".", color="#3B5488,#53BBD5", alpha=0.8,
title=None, xlabel="Chromosome", ylabel=r"$-log_{10}{(P)}$",
xtick_label_set=None, CHR=None, xticklabel_kws=None,
suggestiveline=1e-5, genomewideline=5e-8, sign_line_cols="#D62728,#2CA02C", hline_kws=None,
sign_marker_p=None, sign_marker_color="r",
is_annotate_topsnp=False, highlight_other_SNPs_indcs=None,
highlight_other_SNPs_color='r', highlight_other_SNPs_kwargs=None,
text_kws=None, ld_block_size=50000,
is_show=None, dpi=300, figname=None, **kwargs):
"""Creates a manhattan plot from PLINK assoc output (or any data frame with chromosome, position, and p-value).
Parameters
----------
data : DataFrame.
A DataFrame with columns "#CHROM," "POS," "P," and optionally, "SNP."
chrom : string, default is "#CHROM", optional
        A string denoting the column name for chromosome. Defaults to PLINK2.x's "#CHROM".
Said column must be a character.
pos : string, default is "POS", optional.
        A string denoting the column name for chromosomal position. Defaults to PLINK2.x's "POS".
Said column must be numeric.
pv : string, default is "P", optional.
        A string denoting the column name for chromosomal p-value. Defaults to PLINK2.x's "P".
Said column must be float type.
snp : string, default is "ID", optional.
A string denoting the column name for the SNP name (rs number) or the column which you want to
        represent the variants. Defaults to PLINK2.x's "ID". Said column should be a character.
logp : bool, optional
If TRUE, the -log10 of the p-value is plotted. It isn't very useful
to plot raw p-values, but plotting the raw value could be useful for
other genome-wide plots, for example, peak heights, bayes factors, test
statistics, other "scores," etc. default: True
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis.
    marker : matplotlib markers for scatter plot, default is ".", optional
color : matplotlib color, optional, default: color_palette('colorful', 4)
Color used for the plot elements. Could hex-code or rgb,
e.g: '#3B5488,#53BBD5' or 'rb'
alpha : float scalar, default is 0.8, optional
The alpha blending value, between 0(transparent) and 1(opaque)
title : string, or None, optional
Set the title of the current plot.
xlabel: string, optional
Set the x axis label of the current axis.
ylabel: string, optional
Set the y axis label of the current axis.
xtick_label_set : a set. optional
Set the current x axis ticks of the current axis.
CHR : string, or None, optional
Select a specific chromosome to plot. And the x-axis will be the
position of this chromosome instead of the chromosome id.
CAUTION: this parameter could not be used with ``xtick_label_set``
together.
xticklabel_kws : key, value pairings, or None, optional
Other keyword arguments are passed to set xtick labels in
maplotlib.axis.Axes.set_xticklabels.
suggestiveline : float or None, default is 1e-5, optional
        Where to draw a suggestive ax.axhline. Set to None to disable.
genomewideline : float or None, default is 5e-8
        Where to draw a genome-wide significant ax.axhline. Set to None to disable.
sign_line_cols : matplotlib color, default: "#D62728,#2CA02C", optional.
Color used for ``suggestiveline`` and ``genomewideline``.
Could be hex-code or rgb, e.g: "#D62728,#2CA02C" or 'rb'
hline_kws : key, value pairings, or None, optional
keyword arguments for plotting ax.axhline(``suggestiveline`` and ``genomewideline``)
except the "color" key-pair.
sign_marker_p : float or None, default None, optional.
A P-value threshold (suggestive to be 1e-6) for marking the significant SNP sites.
sign_marker_color : matplotlib color, default: "r", optional.
Define a color code for significant SNP sites.
is_annotate_topsnp : boolean, default is False, optional.
        Whether to annotate the top SNP of each significant locus.
highlight_other_SNPs_indcs : iterable, or None, optional
Numerical indices of other SNPs (i.e. not the top SNP) to highlight.
highlight_other_SNPs_color : matplotlib color, default: "r", optional.
Define a color code for other highlighted SNP sites.
    highlight_other_SNPs_kwargs : Dict, or None, optional
Dict of keyword arguments passed to the command highlighting the other SNPs.
text_kws: key, value pairings, or None, optional
        keyword arguments passed to ``matplotlib.axes.Axes.text(x, y, s, fontdict=None, **kwargs)``
ld_block_size : integer, default is 50000, optional
        Set the size of the LD block used for finding the top SNP. The top SNP's annotation represents the whole block.
    is_show : boolean or None, default is None, optional.
        Display the plot on screen or not.
        You can set this parameter explicitly; otherwise it is set to True automatically
        when both ``is_show`` and ``figname`` are None.
dpi : float or 'figure', default is 300, optional.
        The resolution in dots-per-inch for the plot. If 'figure', use the figure's dpi value.
figname : string, or None, optional
Output plot file name.
kwargs : key, value pairings, optional
Other keyword arguments are passed to ``plt.scatter()`` or
``plt.vlines()`` (in matplotlib.pyplot) depending on whether
a scatter or line plot is being drawn.
Returns
-------
ax : matplotlib Axes
Axes object with the manhattanplot.
Notes
-----
    1. This plot function is not only suited for GWAS manhattan plots;
       it can also be used for any input dataframe that has chromosome,
       position and p-value columns.
2. The right and top spines of the plot have been set to be
invisible by hand.
Examples
--------
    Plot a basic manhattan plot from PLINK2.x association output and return the figure:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> from qmplot import manhattanplot
>>> df = pd.read_table("tests/data/gwas_plink_result.tsv", sep="\t")
>>> df = df.dropna(how="any", axis=0) # clean data
>>> ax = manhattanplot(data=df)
Plot a basic manhattan plot with horizontal xtick labels and save the plot
to a file name "manhattan.png":
.. plot::
:context: close-figs
>>> xtick = set(['chr' + i for i in list(map(str, range(1, 10))) + ['11', '13', '15', '18', '21', 'X']])
>>> manhattanplot(data=df, xlabel="Chromosome", ylabel=r"$-log_{10}{(P)}$",
... xtick_label_set=xtick, figname="manhattan.png")
    Style the horizontal significance lines with ``linestyle="--"`` and
    ``linewidth=1.3`` via ``hline_kws``:
.. plot::
:context: close-figs
>>> manhattanplot(data=df,
... hline_kws={"linestyle": "--", "lw": 1.3},
... xlabel="Chromosome",
... ylabel=r"$-log_{10}{(P)}$",
... xtick_label_set = xtick)
Rotate the x-axis ticklabel by setting ``xticklabel_kws``:
.. plot::
:context: close-figs
>>> manhattanplot(data=df,
... hline_kws={"linestyle": "--", "lw": 1.3},
... xlabel="Chromosome",
... ylabel=r"$-log_{10}{(P)}$",
... xticklabel_kws={"rotation": "vertical"})
Plot a better one with genome-wide significant mark and annotate the Top SNP and save
the figure to "output_manhattan_plot.png":
.. plot::
:context: close-figs
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(figsize=(12, 4), facecolor="w", edgecolor="k")  # define a plot
>>> manhattanplot(data=df,
... marker=".",
... sign_marker_p=1e-6, # Genome wide significant p-value
... sign_marker_color="r",
... snp="ID",
... title="Test",
... xtick_label_set=xtick,
... xlabel="Chromosome",
... ylabel=r"$-log_{10}{(P)}$",
... sign_line_cols=["#D62728", "#2CA02C"],
... hline_kws={"linestyle": "--", "lw": 1.3},
... is_annotate_topsnp=True,
... ld_block_size=50000, # 50000 bp
... text_kws={"fontsize": 12, # The fontsize of annotate text
... "arrowprops": dict(arrowstyle="-", color="k", alpha=0.6)},
... dpi=300, # set the resolution of plot figure
... is_show=False, # do not show the figure
... figname="output_manhattan_plot.png",
... ax=ax)
"""
if not isinstance(data, DataFrame):
raise ValueError("[ERROR] Input data must be a pandas.DataFrame.")
if chrom not in data:
raise ValueError("[ERROR] Column \"%s\" not found!" % chrom)
if pos not in data:
raise ValueError("[ERROR] Column \"%s\" not found!" % pos)
if pv not in data:
raise ValueError("[ERROR] Column \"%s\" not found!" % pv)
if is_annotate_topsnp and (snp not in data):
raise ValueError("[ERROR] You're trying to annotate a set of SNPs but "
"NO SNP \"%s\" column found!" % snp)
if CHR is not None and xtick_label_set is not None:
raise ValueError("[ERROR] ``CHR`` and ``xtick_label_set`` can't be set simultaneously.")
    data[[chrom]] = data[[chrom]].astype(str)  # make sure all the chromosome ids are strings.
# Draw the plot and return the Axes
if ax is None:
# ax = plt.gca()
_, ax = plt.subplots(figsize=(9, 3), facecolor="w", edgecolor="k") # default
if xticklabel_kws is None:
xticklabel_kws = {}
if hline_kws is None:
hline_kws = {}
if text_kws is None:
text_kws = {}
if "," in color:
color = color.split(",")
colors = cycle(color)
last_xpos = 0
xs_by_id = [] # use for collecting chromosome's position on x-axis
x, y, c = [], [], []
sign_snp_sites = []
for seqid, group_data in data.groupby(by=chrom, sort=False): # keep the raw order of chromosome
if (CHR is not None) and (seqid != CHR):
continue
color = next(colors)
for i, (site, p_value) in enumerate(zip(group_data[pos], group_data[pv])):
y_value = -np.log10(p_value) if logp else p_value
x.append(last_xpos + site)
y.append(y_value)
c.append(sign_marker_color if ((sign_marker_p is not None) and (p_value <= sign_marker_p)) else color)
if (sign_marker_p is not None) and (p_value <= sign_marker_p):
snp_id = group_data[snp].iloc[i]
sign_snp_sites.append([last_xpos + site, y_value, snp_id]) # x_pos, y_value, text
        # ``xs_by_id`` is used for setting up positions and ticks. Ticks should
        # be placed in the middle of each chromosome. A running sum of the
        # positions of each successive chromosome is kept so that they are laid
        # out one after another along the x-axis.
xs_by_id.append([seqid, last_xpos + (group_data[pos].iloc[0] + group_data[pos].iloc[-1]) / 2])
        last_xpos = x[-1]  # keep track so that successive chromosomes do not overlap in the plot.
if not x:
raise ValueError("zero-size array to reduction operation minimum which has no "
"identity. This could be caused by zero-size array of ``x`` "
"in the ``manhattanplot(...)`` function.")
if "marker" not in kwargs:
kwargs["marker"] = marker
# plot the main manhattan dot plot
ax.scatter(x, y, c=c, alpha=alpha, edgecolors="none", **kwargs)
    if is_annotate_topsnp:
index = _find_SNPs_which_overlap_sign_neighbour_region(
sign_snp_neighbour_region=_sign_snp_regions(sign_snp_sites, ld_block_size),
x=x)
        # reset the color of all SNPs located near the top SNPs.
for i in index:
ax.scatter(x[i], y[i], c=sign_marker_color, alpha=alpha, edgecolors="none", **kwargs)
highlight_other_SNPs_kwargs = dict() if highlight_other_SNPs_kwargs is \
None else highlight_other_SNPs_kwargs
# highlight other SNPs
if highlight_other_SNPs_indcs is not None:
for i in highlight_other_SNPs_indcs:
ax.scatter(x[i], y[i], c=highlight_other_SNPs_color,
alpha=alpha, edgecolors="none", **highlight_other_SNPs_kwargs)
# Add GWAS significant lines
if "color" in hline_kws:
hline_kws.pop("color")
sign_line_cols = sign_line_cols.split(",") if "," in sign_line_cols else sign_line_cols
if suggestiveline is not None:
ax.axhline(y=-np.log10(suggestiveline) if logp else suggestiveline, color=sign_line_cols[0], **hline_kws)
if genomewideline is not None:
ax.axhline(y=-np.log10(genomewideline) if logp else genomewideline, color=sign_line_cols[1], **hline_kws)
# Plotting the Top SNP for each significant block
if is_annotate_topsnp:
sign_top_snp = _find_top_snp(sign_snp_sites, ld_block_size=ld_block_size, is_get_biggest=logp)
if sign_top_snp: # not empty
texts = [ax.text(_x, _y, _text) for _x, _y, _text in sign_top_snp]
adjust_text(texts, ax=ax, **text_kws)
if CHR is None:
if xtick_label_set is not None:
ax.set_xticks([v for c, v in xs_by_id if c in xtick_label_set])
ax.set_xticklabels([c for c, v in xs_by_id if c in xtick_label_set], **xticklabel_kws)
else:
ax.set_xticks([v for c, v in xs_by_id])
ax.set_xticklabels([c for c, v in xs_by_id], **xticklabel_kws)
else:
# show the whole chromosomal position without scientific notation
        # if you are only interested in this chromosome.
ax.get_xaxis().get_major_formatter().set_scientific(False)
ax.set_xlim(0, x[-1])
ax.set_ylim(ymin=min(y), ymax=1.2 * max(y))
if title:
ax.set_title(title)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
if (is_show is None) and (figname is None):
is_show = True
General.get_figure(is_show, fig_name=figname, dpi=dpi)
return ax
def _find_top_snp(sign_snp_data, ld_block_size, is_get_biggest=True):
"""
:param sign_snp_data: A 2D array: [[xpos1, yvalue1, text1], [xpos2, yvalue2, text2], ...]
"""
top_snp = []
tmp_cube = []
for i, (_x, _y, text) in enumerate(sign_snp_data):
if i == 0:
tmp_cube.append([_x, _y, text])
continue
if _x > tmp_cube[-1][0] + ld_block_size:
            # Sort by y_value (descending when ``is_get_biggest``) and keep only
            # the first element [0], which is the top SNP of this block.
top_snp.append(sorted(tmp_cube, key=(lambda x: x[1]), reverse=is_get_biggest)[0])
tmp_cube = []
tmp_cube.append([_x, _y, text])
    if tmp_cube:  # deal with the last block
        top_snp.append(sorted(tmp_cube, key=(lambda x: x[1]), reverse=is_get_biggest)[0])
return top_snp
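# Rough example (values are illustrative, not from real data): with
# ld_block_size=10, two significant sites 5 bp apart fall in the same block,
# so only the site with the larger y value (here -log10(P)) is kept:
#   _find_top_snp([[100, 6.1, "rs1"], [105, 7.3, "rs2"]], ld_block_size=10)
#   # => [[105, 7.3, "rs2"]]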
def _sign_snp_regions(sign_snp_data, ld_block_size):
"""Create region according to the coordinate of sign_snp_data."""
regions = []
for i, (_x, _y, _t) in enumerate(sign_snp_data):
if i == 0:
regions.append([_x - ld_block_size, _x])
continue
if _x > regions[-1][1] + ld_block_size:
regions[-1][1] += ld_block_size
regions.append([_x - ld_block_size, _x])
else:
regions[-1][1] = _x
# The last
if regions:
regions[-1][1] += ld_block_size
return regions
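# Rough example (values are illustrative): two nearby significant sites are
# merged into a single region, padded by ld_block_size on each end:
#   _sign_snp_regions([[100, 6.1, "rs1"], [105, 7.3, "rs2"]], ld_block_size=10)
#   # => [[90, 115]]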
def _find_SNPs_which_overlap_sign_neighbour_region(sign_snp_neighbour_region, x):
"""
"""
x_size = len(x)
reg_size = len(sign_snp_neighbour_region)
index = []
tmp_index = 0
for i in range(x_size):
_x = x[i]
is_overlap = False
iter_index = range(tmp_index, reg_size)
for j in iter_index:
if _x > sign_snp_neighbour_region[j][1]: continue
if _x < sign_snp_neighbour_region[j][0]: break
tmp_index = j
is_overlap = True
break
if is_overlap:
index.append(i)
# return the index
return index
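# Rough example (values are illustrative): only the x position that falls
# inside a neighbour region is reported, by index:
#   _find_SNPs_which_overlap_sign_neighbour_region([[90, 115]], x=[50, 100, 200])
#   # => [1]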
| 39.127232
| 132
| 0.618518
|
ce92d2bbc5c110ae686f5899b6af721fdfb1fab5
| 3,706
|
py
|
Python
|
mindspore/ops/_op_impl/_custom_op/correction_mul.py
|
ZephyrChenzf/mindspore
|
8f191847cf71e12715ced96bc3575914f980127a
|
[
"Apache-2.0"
] | 1
|
2020-06-17T07:05:45.000Z
|
2020-06-17T07:05:45.000Z
|
mindspore/ops/_op_impl/_custom_op/correction_mul.py
|
ZephyrChenzf/mindspore
|
8f191847cf71e12715ced96bc3575914f980127a
|
[
"Apache-2.0"
] | null | null | null |
mindspore/ops/_op_impl/_custom_op/correction_mul.py
|
ZephyrChenzf/mindspore
|
8f191847cf71e12715ced96bc3575914f980127a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CorrectionMul op"""
import te.lang.cce
from te import tvm
from te.platform.fusion_manager import fusion_manager
from topi import generic
from topi.cce import util
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
SHAPE_SIZE_LIMIT = 2147483648
correction_mul_op_info = TBERegOp("CorrectionMul") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("correction_mul.so") \
.compute_cost(10) \
.kernel_name("correction_mul") \
.partial_flag(True) \
.op_pattern("formatAgnostic") \
.attr("channel_axis", "optional", "int", "all") \
.input(0, "x", None, "required", None) \
.input(1, "batch_std", None, "required", None) \
.input(2, "running_std", None, "required", None) \
.output(0, "y", True, "required", "all") \
.dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
.dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \
.get_op_info()
@op_info_register(correction_mul_op_info)
def _correction_mul_tbe():
"""CorrectionMul TBE register"""
return
@fusion_manager.register("correction_mul")
def correction_mul_compute(x, batch_std, running_std, kernel_name="correction_mul"):
"""CorrectionMul compute"""
shape_x = te.lang.cce.util.shape_to_list(x.shape)
factor = te.lang.cce.vdiv(batch_std, running_std)
factor_b = te.lang.cce.broadcast(factor, shape_x)
res = te.lang.cce.vmul(x, factor_b)
return res
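# Note: in plain NumPy terms the compute above is roughly
# y = x * (batch_std / running_std), with the std ratio broadcast from the
# channel shape up to the full shape of x (a sketch of the math only, not of
# the generated TBE schedule).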
@util.check_input_type(dict, dict, dict, dict, int, str)
def correction_mul(x, batch_std, running_std, y, channel, kernel_name="correction_mul"):
"""CorrectionMul op"""
shape = x.get("shape")
data_format = x.get("format")
util.check_kernel_name(kernel_name)
util.check_shape_rule(shape)
util.check_shape_size(shape, SHAPE_SIZE_LIMIT)
check_list = ["float16", "float32"]
inp_dtype = x.get("dtype").lower()
if not inp_dtype in check_list:
raise RuntimeError("Dtype of input only support float16, float32")
# shape = util.shape_refine(shape)
x_t = tvm.placeholder(shape, name="x", dtype=inp_dtype)
shape_c = [1] * len(shape)
shape_c[channel] = batch_std.get("ori_shape")[0]
if data_format == "NC1HWC0" and channel == 1:
shape_c = batch_std.get("shape")
batch_std_t = tvm.placeholder(shape_c, name="batch_std", dtype=inp_dtype)
running_std_t = tvm.placeholder(shape_c, name="running_std", dtype=inp_dtype)
res = correction_mul_compute(x_t, batch_std_t, running_std_t, kernel_name)
with tvm.target.cce():
sch = generic.auto_schedule(res)
config = {"print_ir": False,
"name": kernel_name,
"tensor_list": [x_t, batch_std_t, running_std_t, res]}
te.lang.cce.cce_build_code(sch, config)
| 39.849462
| 107
| 0.702105
|
ac78e1b19bff863c157491ad0153d38fd0f75fd5
| 9,343
|
py
|
Python
|
tests/conftest.py
|
theonlyklas/vyper
|
e3d802fd471e663d75847bdfebea81edda59b472
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
theonlyklas/vyper
|
e3d802fd471e663d75847bdfebea81edda59b472
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
theonlyklas/vyper
|
e3d802fd471e663d75847bdfebea81edda59b472
|
[
"MIT"
] | null | null | null |
import eth_tester
import logging
import pytest
import web3
from functools import wraps
from eth_tester import (
EthereumTester,
)
from eth_tester.exceptions import (
TransactionFailed
)
from web3.providers.eth_tester import (
EthereumTesterProvider,
)
from web3 import (
Web3,
)
from web3.contract import (
ConciseContract,
ConciseMethod
)
from vyper.parser.parser_utils import (
LLLnode
)
from vyper import (
compile_lll,
compiler,
optimizer,
)
class VyperMethod(ConciseMethod):
ALLOWED_MODIFIERS = {'call', 'estimateGas', 'transact', 'buildTransaction'}
def __call__(self, *args, **kwargs):
return self.__prepared_function(*args, **kwargs)
def __prepared_function(self, *args, **kwargs):
if not kwargs:
modifier, modifier_dict = 'call', {}
fn_abi = [x for x in self._function.contract_abi if x['name'] == self._function.function_identifier].pop()
modifier_dict.update({'gas': fn_abi['gas'] + 50000}) # To make tests faster just supply some high gas value.
elif len(kwargs) == 1:
modifier, modifier_dict = kwargs.popitem()
if modifier not in self.ALLOWED_MODIFIERS:
raise TypeError(
"The only allowed keyword arguments are: %s" % self.ALLOWED_MODIFIERS)
else:
raise TypeError("Use up to one keyword argument, one of: %s" % self.ALLOWED_MODIFIERS)
return getattr(self._function(*args), modifier)(modifier_dict)
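# Rough usage sketch (the contract and method names here are hypothetical):
# with a VyperContract instance ``c``, ``c.get_count()`` defaults to an
# eth_call with a padded gas value, while ``c.increment(transact={'from': acct})``
# sends a transaction instead.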
class VyperContract(ConciseContract):
def __init__(self, classic_contract, method_class=VyperMethod):
super().__init__(classic_contract, method_class)
############
# PATCHING #
############
setattr(eth_tester.backends.pyevm.main, 'GENESIS_GAS_LIMIT', 10**9)
setattr(eth_tester.backends.pyevm.main, 'GENESIS_DIFFICULTY', 1)
def set_evm_verbose_logging():
logger = logging.getLogger('evm')
logger.setLevel('TRACE')
# Useful options to comment out whilst working:
# set_evm_verbose_logging()
# vdb.set_evm_opcode_debugger()
@pytest.fixture(autouse=True)
def patch_log_filter_remove(monkeypatch):
def Filter_remove(self, *values):
def get_key(v):
return v.get('transaction_hash'), v.get('log_index'), v.get('transaction_index')
values_to_remove = set([
get_key(value)
for value in values
])
queued_values = self.get_changes()
self.values = [
value
for value
in self.get_all()
if get_key(value) not in values_to_remove
]
for value in queued_values:
if get_key(value) in values_to_remove:
continue
self.queue.put_nowait(value)
monkeypatch.setattr(eth_tester.utils.filters.Filter, 'remove', Filter_remove)
@pytest.fixture(autouse=True)
def patch_is_encodeable_for_fixed(monkeypatch):
original_is_encodable = web3.utils.abi.is_encodable
def utils_abi_is_encodable(_type, value):
from eth_utils import is_integer
from eth_abi.abi import process_type
try:
base, sub, arrlist = _type
except ValueError:
base, sub, arrlist = process_type(_type)
if not arrlist:
if base == 'fixed' and not arrlist:
return True
elif base == 'int':
if not is_integer(value):
return False
exp = int(sub)
if value < -1 * 2**(exp - 1) or value > 2**(exp - 1) + 1:
return False
return True
# default behaviour
return original_is_encodable(_type, value)
monkeypatch.setattr(web3.utils.abi, 'is_encodable', utils_abi_is_encodable)
@pytest.fixture(scope="module")
def tester():
t = EthereumTester()
return t
def zero_gas_price_strategy(web3, transaction_params=None):
return 0 # zero gas price makes testing simpler.
@pytest.fixture(scope="module")
def w3(tester):
w3 = Web3(EthereumTesterProvider(tester))
w3.eth.setGasPriceStrategy(zero_gas_price_strategy)
return w3
@pytest.fixture
def keccak():
return Web3.sha3
@pytest.fixture
def bytes_helper():
def bytes_helper(str, length):
return bytes(str, 'utf-8') + bytearray(length - len(str))
return bytes_helper
@pytest.fixture
def get_contract_from_lll(w3):
def lll_compiler(lll, *args, **kwargs):
lll = optimizer.optimize(LLLnode.from_list(lll))
bytecode = compile_lll.assembly_to_evm(compile_lll.compile_to_assembly(lll))
abi = []
contract = w3.eth.contract(bytecode=bytecode, abi=abi)
deploy_transaction = {
'data': contract._encode_constructor_data(args, kwargs)
}
tx = w3.eth.sendTransaction(deploy_transaction)
address = w3.eth.getTransactionReceipt(tx)['contractAddress']
contract = w3.eth.contract(address, abi=abi, bytecode=bytecode, ContractFactoryClass=VyperContract)
return contract
return lll_compiler
def _get_contract(w3, source_code, *args, **kwargs):
abi = compiler.mk_full_signature(source_code)
bytecode = '0x' + compiler.compile(source_code).hex()
contract = w3.eth.contract(abi=abi, bytecode=bytecode)
value = kwargs.pop('value', 0)
value_in_eth = kwargs.pop('value_in_eth', 0)
value = value_in_eth * 10**18 if value_in_eth else value # Handle deploying with an eth value.
gasPrice = kwargs.pop('gasPrice', 0)
deploy_transaction = {
'from': w3.eth.accounts[0],
'data': contract._encode_constructor_data(args, kwargs),
'value': value,
'gasPrice': gasPrice,
}
tx = w3.eth.sendTransaction(deploy_transaction)
address = w3.eth.getTransactionReceipt(tx)['contractAddress']
contract = w3.eth.contract(address, abi=abi, bytecode=bytecode, ContractFactoryClass=VyperContract)
# Filter logs.
contract._logfilter = w3.eth.filter({
'fromBlock': w3.eth.blockNumber - 1,
'address': contract.address
})
return contract
@pytest.fixture
def get_contract(w3):
def get_contract(source_code, *args, **kwargs):
return _get_contract(w3, source_code, *args, **kwargs)
return get_contract
def get_compiler_gas_estimate(code, func):
if func:
return compiler.gas_estimate(code)[func] + 22000
else:
return sum(compiler.gas_estimate(code).values()) + 22000
def check_gas_on_chain(w3, tester, code, func=None, res=None):
gas_estimate = get_compiler_gas_estimate(code, func)
gas_actual = tester.get_block_by_number('latest')['gas_used']
# Computed upper bound on the gas consumption should
# be greater than or equal to the amount of gas used
if gas_estimate < gas_actual:
raise Exception("Gas upper bound fail: bound %d actual %d" % (gas_estimate, gas_actual))
print('Function name: {} - Gas estimate {}, Actual: {}'.format(
func, gas_estimate, gas_actual)
)
def gas_estimation_decorator(w3, tester, fn, source_code, func):
def decorator(*args, **kwargs):
@wraps(fn)
def decorated_function(*args, **kwargs):
result = fn(*args, **kwargs)
if 'transact' in kwargs:
check_gas_on_chain(w3, tester, source_code, func, res=result)
return result
return decorated_function(*args, **kwargs)
return decorator
def set_decorator_to_contract_function(w3, tester, contract, source_code, func):
func_definition = getattr(contract, func)
func_with_decorator = gas_estimation_decorator(
w3, tester, func_definition, source_code, func
)
setattr(contract, func, func_with_decorator)
@pytest.fixture
def get_contract_with_gas_estimation(tester, w3):
def get_contract_with_gas_estimation(source_code, *args, **kwargs):
contract = _get_contract(w3, source_code, *args, **kwargs)
for abi in contract._classic_contract.functions.abi:
if abi['type'] == 'function':
set_decorator_to_contract_function(
w3, tester, contract, source_code, abi['name']
)
return contract
return get_contract_with_gas_estimation
@pytest.fixture
def get_contract_with_gas_estimation_for_constants(w3):
def get_contract_with_gas_estimation_for_constants(
source_code,
*args, **kwargs):
return _get_contract(w3, source_code, *args, **kwargs)
return get_contract_with_gas_estimation_for_constants
@pytest.fixture
def assert_tx_failed(tester):
def assert_tx_failed(function_to_test, exception=TransactionFailed):
snapshot_id = tester.take_snapshot()
with pytest.raises(exception):
function_to_test()
tester.revert_to_snapshot(snapshot_id)
return assert_tx_failed
@pytest.fixture
def assert_compile_failed():
def assert_compile_failed(function_to_test, exception=Exception):
with pytest.raises(exception):
function_to_test()
return assert_compile_failed
@pytest.fixture
def get_logs(w3):
def get_logs(tx_hash, c, event_name):
tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
logs = c._classic_contract.events[event_name]().processReceipt(tx_receipt)
return logs
return get_logs
| 30.632787
| 121
| 0.670341
|
e01a70d5313911173aa592ea2cc70e8aa47e0ed2
| 595
|
py
|
Python
|
L1Trigger/L1TTrackMatch/python/L1GTTInputProducer_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 2
|
2018-06-01T05:18:55.000Z
|
2021-04-08T21:44:06.000Z
|
L1Trigger/L1TTrackMatch/python/L1GTTInputProducer_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 26
|
2018-10-30T12:47:58.000Z
|
2022-03-29T08:39:00.000Z
|
L1Trigger/L1TTrackMatch/python/L1GTTInputProducer_cfi.py
|
p2l1pfp/cmssw
|
9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9
|
[
"Apache-2.0"
] | 1
|
2021-11-23T09:25:45.000Z
|
2021-11-23T09:25:45.000Z
|
import FWCore.ParameterSet.Config as cms
L1GTTInputProducer = cms.EDProducer('L1GTTInputProducer',
l1TracksInputTag = cms.InputTag("TTTracksFromTrackletEmulation", "Level1TTTracks"),
outputCollectionName = cms.string("Level1TTTracksConverted"),
debug = cms.int32(0) # Verbosity levels: 0, 1, 2, 3
)
L1GTTInputProducerExtended = cms.EDProducer('L1GTTInputProducer',
l1TracksInputTag = cms.InputTag("TTTracksFromExtendedTrackletEmulation", "Level1TTTracks"),
outputCollectionName = cms.string("Level1TTTracksExtendedConverted"),
debug = cms.int32(0) # Verbosity levels: 0, 1, 2, 3
)
| 42.5
| 93
| 0.784874
|
b7bbedf98e3c95b50e91b2339bc0c7317fa7cf8a
| 536
|
py
|
Python
|
examples/basic/texturecubes.py
|
leftwillow/vedo
|
b2e2cfc3453bbd118b6c81a2227b8ce6f1d22b7b
|
[
"CC0-1.0"
] | 1
|
2021-04-25T06:28:01.000Z
|
2021-04-25T06:28:01.000Z
|
examples/basic/texturecubes.py
|
leftwillow/vedo
|
b2e2cfc3453bbd118b6c81a2227b8ce6f1d22b7b
|
[
"CC0-1.0"
] | null | null | null |
examples/basic/texturecubes.py
|
leftwillow/vedo
|
b2e2cfc3453bbd118b6c81a2227b8ce6f1d22b7b
|
[
"CC0-1.0"
] | null | null | null |
"""
Show a cube for each available texture name.
Any jpg file can be used as texture.
"""
from vedo import settings, Plotter, Cube, Text2D
from vedo.settings import textures, textures_path
print(__doc__)
print('textures_path:', settings.textures_path)
print('textures:', settings.textures)
settings.immediateRendering = False
vp = Plotter(N=len(settings.textures), axes=0)
for i, name in enumerate(settings.textures):
if i>30: break
cb = Cube().texture(name)
vp.show(cb, name, at=i, azimuth=1)
vp.show(interactive=True)
| 25.52381
| 49
| 0.740672
|
84540425aad038e7ecf2f4a40e52fffd74b48ce8
| 4,638
|
py
|
Python
|
doc/conf.py
|
rfoliva/tmuxp
|
5c1d9bc7f4fce8b68a50838c9c6d08c08a4dad92
|
[
"MIT"
] | null | null | null |
doc/conf.py
|
rfoliva/tmuxp
|
5c1d9bc7f4fce8b68a50838c9c6d08c08a4dad92
|
[
"MIT"
] | null | null | null |
doc/conf.py
|
rfoliva/tmuxp
|
5c1d9bc7f4fce8b68a50838c9c6d08c08a4dad92
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import inspect
import os
import sys
from os.path import dirname, relpath
import alagitpull
import tmuxp
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# package data
about = {}
with open("../tmuxp/__about__.py") as fp:
exec(fp.read(), about)
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinxcontrib.napoleon',
'sphinx.ext.linkcode',
'aafig',
'releases',
'alagitpull',
]
releases_unstable_prehistory = True
releases_document_name = "history"
releases_issue_uri = "https://github.com/tmux-python/tmuxp/issues/%s"
releases_release_uri = "https://github.com/tmux-python/tmuxp/tree/v%s"
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = about['__title__']
copyright = about['__copyright__']
version = '%s' % ('.'.join(about['__version__'].split('.'))[:2])
release = '%s' % (about['__version__'])
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme_path = [alagitpull.get_path()]
html_favicon = '_static/favicon.ico'
html_theme = 'alagitpull'
html_static_path = ['_static']
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'more.html',
'book.html',
'searchbox.html',
]
}
html_theme_options = {
'logo': 'img/tmuxp.svg',
'github_user': 'tmux-python',
'github_repo': 'tmuxp',
'github_type': 'star',
'github_banner': True,
'projects': alagitpull.projects,
'project_name': about['__title__'],
}
alagitpull_internal_hosts = ['tmuxp.git-pull.com', '0.0.0.0']
alagitpull_external_hosts_new_window = True
htmlhelp_basename = '%sdoc' % about['__title__']
latex_documents = [
(
'index',
'{0}.tex'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'],
'manual',
)
]
man_pages = [
(
'index',
about['__package_name__'],
'{0} Documentation'.format(about['__title__']),
about['__author__'],
1,
)
]
texinfo_documents = [
(
'index',
'{0}'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'],
about['__package_name__'],
about['__description__'],
'Miscellaneous',
)
]
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'libtmux': ('https://libtmux.readthedocs.io/en/latest', None),
'click': ('http://click.pocoo.org/5', None),
}
# aafig format, try to get working with pdf
aafig_format = dict(latex='pdf', html='gif')
aafig_default_options = dict(scale=.75, aspect=0.5, proportional=True)
def linkcode_resolve(domain, info): # NOQA: C901
"""
Determine the URL corresponding to Python object
Notes
-----
From https://github.com/numpy/numpy/blob/v1.15.1/doc/source/conf.py, 7c49cfa
on Jul 31. License BSD-3. https://github.com/numpy/numpy/blob/v1.15.1/LICENSE.txt
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# strip decorators, which would resolve to the source of the decorator
# possibly an upstream bug in getsourcefile, bpo-1764286
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(tmuxp.__file__))
if 'dev' in about['__version__']:
return "%s/blob/master/%s/%s%s" % (
about['__github__'],
about['__package_name__'],
fn,
linespec,
)
else:
return "%s/blob/v%s/%s/%s%s" % (
about['__github__'],
about['__version__'],
about['__package_name__'],
fn,
linespec,
)
| 23.306533
| 85
| 0.603924
|
7df01957682ccacc16f049051d9ddcd71fbcb756
| 852
|
py
|
Python
|
example.py
|
JakeRoggenbuck/mongofastlogger
|
92b23e8fb7134c3a01b24f273f3a7774c10c716f
|
[
"MIT"
] | null | null | null |
example.py
|
JakeRoggenbuck/mongofastlogger
|
92b23e8fb7134c3a01b24f273f3a7774c10c716f
|
[
"MIT"
] | 1
|
2020-11-28T06:41:26.000Z
|
2020-11-28T06:41:26.000Z
|
example.py
|
JakeRoggenbuck/mongofastlogger
|
92b23e8fb7134c3a01b24f273f3a7774c10c716f
|
[
"MIT"
] | null | null | null |
from mongofastlogger import Logger, LogViewer
# Make logger
logger = Logger()
# Log message with tag of "Something"
logger.log("Something", "This is bad as well i guess but i dont actually know")
# Log message with tag of "Something" and display log in console
logger.log("Something", "This is a message", display=True)
# Make Viewer
viewer = LogViewer()
# Print all logs
viewer.view_log()
# Search logs that have the tag "Something"
viewer.search_logs_by_tag("Something")
# Search logs in the last 3 days
viewer.check_by_time("days", 3)
# Export logs to example.log
viewer.export_log("example.log")
print("Production")
# Make logger with name
production_logger = Logger("Production")
production_logger.log("Error", "Critical error in production")
# Make viewer with name
production_viewer = LogViewer("Production")
production_viewer.view_log()
| 26.625
| 79
| 0.762911
|
885b9722cd8350c97b3a388a9eb240207fb87c4c
| 4,130
|
py
|
Python
|
ament_package/templates.py
|
DLu/ament_package
|
f5cc3b4bc4ceb1937d8e15a85035f14270f5af71
|
[
"Apache-2.0"
] | 6
|
2016-11-20T21:33:10.000Z
|
2020-03-19T21:21:33.000Z
|
ament_package/templates.py
|
DLu/ament_package
|
f5cc3b4bc4ceb1937d8e15a85035f14270f5af71
|
[
"Apache-2.0"
] | 95
|
2015-03-10T16:28:45.000Z
|
2021-12-20T20:00:02.000Z
|
ament_package/templates.py
|
DLu/ament_package
|
f5cc3b4bc4ceb1937d8e15a85035f14270f5af71
|
[
"Apache-2.0"
] | 16
|
2016-06-24T17:06:12.000Z
|
2021-11-15T14:07:26.000Z
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
try:
import importlib.resources as importlib_resources
except ModuleNotFoundError:
import importlib_resources
IS_WINDOWS = os.name == 'nt'
def get_environment_hook_template_path(name):
with importlib_resources.path('ament_package.template.environment_hook', name) as path:
return str(path)
def get_package_level_template_names(all_platforms=False):
names = ['local_setup.%s.in' % ext for ext in [
'bash',
'bat',
'sh',
'zsh',
]]
if not all_platforms:
names = [name for name in names if _is_platform_specific_extension(name)]
return names
def get_package_level_template_path(name):
with importlib_resources.path('ament_package.template.package_level', name) as path:
return str(path)
def get_prefix_level_template_names(*, all_platforms=False):
extensions = [
'bash',
'bat.in',
'sh.in',
'zsh',
]
names = ['local_setup.%s' % ext for ext in extensions] + \
['setup.%s' % ext for ext in extensions] + \
['_local_setup_util.py']
if not all_platforms:
names = [name for name in names if _is_platform_specific_extension(name)]
return names
def get_prefix_level_template_path(name):
with importlib_resources.path('ament_package.template.prefix_level', name) as path:
return str(path)
def get_isolated_prefix_level_template_names(*, all_platforms=False):
extensions = [
'bash',
'bat.in',
'sh.in',
'zsh',
]
names = ['local_setup.%s' % ext for ext in extensions] + \
['_order_isolated_packages.py']
# + ['setup.%s' % ext for ext in extensions]
if not all_platforms:
names = [name for name in names if _is_platform_specific_extension(name)]
return names
def get_isolated_prefix_level_template_path(name):
with importlib_resources.path('ament_package.template.isolated_prefix_level', name) as path:
return str(path)
def configure_file(template_file, environment):
"""
Evaluate a .in template file used in CMake with configure_file.
:param template_file: path to the template, ``str``
:param environment: dictionary of placeholders to substitute,
``dict``
    :returns: string with the evaluated template; placeholders in the
        template which are not in the environment are replaced with an
        empty string
"""
with open(template_file, 'r') as f:
template = f.read()
return configure_string(template, environment)
def configure_string(template, environment):
"""
Substitute variables enclosed by @ characters.
:param template: the template, ``str``
:param environment: dictionary of placeholders to substitute,
``dict``
    :returns: string with the evaluated template; placeholders in the
        template which are not in the environment are replaced with an
        empty string
"""
def substitute(match):
var = match.group(0)[1:-1]
if var in environment:
return environment[var]
return ''
return re.sub(r'\@[a-zA-Z0-9_]+\@', substitute, template)
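# Rough usage example (values are illustrative):
#   configure_string('prefix=@PREFIX@ name=@MISSING@', {'PREFIX': '/opt/ament'})
#   # => 'prefix=/opt/ament name=' (unknown placeholders become empty strings)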
def _is_platform_specific_extension(filename):
if filename.endswith('.in'):
filename = filename[:-3]
if not IS_WINDOWS and filename.endswith('.bat'):
# On non-Windows system, ignore .bat
return False
if IS_WINDOWS and os.path.splitext(filename)[1] not in ['.bat', '.py']:
# On Windows, ignore anything other than .bat and .py
return False
return True
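# Rough example: on a non-Windows host, 'local_setup.bat.in' is filtered out
# while 'local_setup.sh.in' is kept:
#   _is_platform_specific_extension('local_setup.bat.in')  # => False
#   _is_platform_specific_extension('local_setup.sh.in')   # => True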
| 31.052632
| 96
| 0.680872
|
e1b224931b177f86ed6b976f16cff2d8db30eecb
| 114
|
py
|
Python
|
tests/test_version.py
|
cariad/naughtty
|
76822b016dc71ca1705384fcdddc3292f857699a
|
[
"MIT"
] | null | null | null |
tests/test_version.py
|
cariad/naughtty
|
76822b016dc71ca1705384fcdddc3292f857699a
|
[
"MIT"
] | 3
|
2021-09-19T08:08:12.000Z
|
2021-10-05T14:47:24.000Z
|
tests/test_version.py
|
cariad/naughtty
|
76822b016dc71ca1705384fcdddc3292f857699a
|
[
"MIT"
] | null | null | null |
from naughtty.version import get_version
def test_get_version() -> None:
assert get_version() == "-1.-1.-1"
| 19
| 40
| 0.692982
|
b7681b5861e3ef4cf3f5a0eead8fe14283ecc7d7
| 6,961
|
py
|
Python
|
clients/client/python/ory_client/model/ui_nodes.py
|
extraymond/sdk
|
d4e9ffe7335648b7af3fb8d4363e7991d1ba36b5
|
[
"Apache-2.0"
] | null | null | null |
clients/client/python/ory_client/model/ui_nodes.py
|
extraymond/sdk
|
d4e9ffe7335648b7af3fb8d4363e7991d1ba36b5
|
[
"Apache-2.0"
] | null | null | null |
clients/client/python/ory_client/model/ui_nodes.py
|
extraymond/sdk
|
d4e9ffe7335648b7af3fb8d4363e7991d1ba36b5
|
[
"Apache-2.0"
] | null | null | null |
"""
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.15
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from ory_client.model.ui_node import UiNode
globals()['UiNode'] = UiNode
class UiNodes(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'value': ([UiNode],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""UiNodes - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([UiNode]): # noqa: E501
Keyword Args:
value ([UiNode]): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
| 37.424731
| 194
| 0.575492
|
417916767ba7ab49d889aac227ed5d61a0e8b5d9
| 2,081
|
py
|
Python
|
tests/snippets/types_snippet.py
|
psy2848048/RustPython
|
ff6935b37f6d93d3cd016d805772ebbb7303a326
|
[
"CC-BY-4.0",
"MIT"
] | 1
|
2019-08-16T06:53:06.000Z
|
2019-08-16T06:53:06.000Z
|
tests/snippets/types_snippet.py
|
psy2848048/RustPython
|
ff6935b37f6d93d3cd016d805772ebbb7303a326
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
tests/snippets/types_snippet.py
|
psy2848048/RustPython
|
ff6935b37f6d93d3cd016d805772ebbb7303a326
|
[
"CC-BY-4.0",
"MIT"
] | 1
|
2022-02-28T08:54:49.000Z
|
2022-02-28T08:54:49.000Z
|
try:
import gc
except ImportError:
gc = None
assert type(type) is type
assert type(object) is type
assert type(object()) is object
new_type = type('New', (object,), {})
assert type(new_type) is type
assert type(new_type()) is new_type
metaclass = type('MCl', (type,), {})
cls = metaclass('Cls', (object,), {})
inst = cls()
assert type(inst) is cls
assert type(cls) is metaclass
assert type(metaclass) is type
assert issubclass(metaclass, type)
assert isinstance(cls, type)
assert inst.__class__ is cls
assert cls.__class__ is metaclass
assert metaclass.__class__ is type
assert type.__class__ is type
assert None.__class__ is type(None)
assert isinstance(type, type)
assert issubclass(type, type)
assert not isinstance(type, (int, float))
assert isinstance(type, (int, object))
assert not issubclass(type, (int, float))
assert issubclass(type, (int, type))
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
assert A.__subclasses__() == [B, C]
assert B.__subclasses__() == [D]
assert C.__subclasses__() == [D]
assert D.__subclasses__() == []
assert D.__bases__ == (B, C)
assert A.__bases__ == (object,)
assert B.__bases__ == (A,)
del D
if gc:
# gc sweep is needed here for CPython...
gc.collect()
# ...while RustPython doesn't have `gc` yet.
if gc:
# D.__new__ is a method bound to the D type, so just deleting D
# from globals won't actually invalidate the weak reference that
# subclasses holds. TODO: implement a proper tracing gc
assert B.__subclasses__() == []
assert C.__subclasses__() == []
assert type in object.__subclasses__()
assert cls.__name__ == 'Cls'
# mro
assert int.mro() == [int, object]
assert bool.mro() == [bool, int, object]
assert object.mro() == [object]
class A:
pass
class B(A):
pass
assert A.mro() == [A, object]
assert B.mro() == [B, A, object]
class AA:
pass
class BB(AA):
pass
class C(B, BB):
pass
assert C.mro() == [C, B, A, BB, AA, object]
assert type(Exception.args).__name__ == 'getset_descriptor'
assert type(None).__bool__(None) is False
| 20.401961
| 68
| 0.681884
|
05610beef60c7102cba0bea3b1b8e5625e0ce535
| 1,673
|
py
|
Python
|
tests/test_proxy.py
|
toke-kogama/proxy-python
|
e0153a3df8e7df936b2b656dfbf8f86832b0032b
|
[
"MIT"
] | null | null | null |
tests/test_proxy.py
|
toke-kogama/proxy-python
|
e0153a3df8e7df936b2b656dfbf8f86832b0032b
|
[
"MIT"
] | null | null | null |
tests/test_proxy.py
|
toke-kogama/proxy-python
|
e0153a3df8e7df936b2b656dfbf8f86832b0032b
|
[
"MIT"
] | 1
|
2019-06-12T10:37:44.000Z
|
2019-06-12T10:37:44.000Z
|
import proxy
import time
import unittest
from proxy.proxy import ClientState
from proxy.compat import _urlparse
class ClientStateTest(unittest.TestCase):
def test_should_try_online(self):
state = ClientState()
self.assertEquals(state.should_try(), True)
def test_should_try_new_error(self):
state = ClientState()
state.status = state.ERROR
state.last_check = time.time()
state.retry_number = 1
self.assertEquals(state.should_try(), False)
def test_should_try_time_passed_error(self):
state = ClientState()
state.status = state.ERROR
state.last_check = time.time() - 10
state.retry_number = 1
self.assertEquals(state.should_try(), True)
def test_set_fail(self):
state = ClientState()
state.set_fail()
self.assertEquals(state.status, state.ERROR)
self.assertNotEquals(state.last_check, None)
self.assertEquals(state.retry_number, 1)
def test_set_success(self):
state = ClientState()
state.status = state.ERROR
state.last_check = 'foo'
state.retry_number = 0
state.set_success()
self.assertEquals(state.status, state.ONLINE)
self.assertEquals(state.last_check, None)
self.assertEquals(state.retry_number, 0)
class TrackTestCase(unittest.TestCase):
pass
#def test_http_proxy(self):
# uri = _urlparse.urljoin('http://localhost:6063/shop/v1/', 'product/')
# h = proxy.RESTProxy(uri)
# for x in range(10):
# rv = h.getmany(params={'profile_id': 1})
# print '{}ms'.format(rv.elapsed.microseconds/1000.0)
| 32.803922
| 78
| 0.655111
|
b7366981120047b736934c90ecd9f5bcb9227b0c
| 585
|
py
|
Python
|
mc/__init__.py
|
iximeow/binja-m16c
|
debf368e5df90a96d6c8b0bc128626a9d6834bb4
|
[
"0BSD"
] | 12
|
2020-01-15T00:51:06.000Z
|
2021-10-02T12:45:50.000Z
|
mc/__init__.py
|
iximeow/binja-m16c
|
debf368e5df90a96d6c8b0bc128626a9d6834bb4
|
[
"0BSD"
] | 2
|
2020-02-03T08:26:26.000Z
|
2020-07-01T19:51:44.000Z
|
mc/__init__.py
|
iximeow/binja-m16c
|
debf368e5df90a96d6c8b0bc128626a9d6834bb4
|
[
"0BSD"
] | 4
|
2020-02-03T07:51:12.000Z
|
2021-02-14T19:13:07.000Z
|
from binaryninja import log
from .coding import *
from .instr import Instruction
from . import opcodes
__all__ = ['decode', 'encode']
def decode(data, addr):
decoder = Decoder(data)
try:
instr = Instruction(decoder)
instr.decode(decoder, addr)
return instr
except KeyError:
log.log_warn('At address {:05x}: unknown encoding {}'
.format(addr, data.hex()))
except coding.BufferTooShort:
pass
def encode(instr, addr):
encoder = Encoder()
instr.encode(encoder, addr)
return bytes(encoder.buf)
| 20.892857
| 61
| 0.632479
|
be0dcc59423ec46feab51ed104f2edd188a947fd
| 2,517
|
py
|
Python
|
python3/rplugin.py
|
lambdalisue/vim-rplugin
|
4352c403356228c4716d3cb223620a3eddace0b5
|
[
"MIT"
] | 10
|
2016-09-29T03:22:14.000Z
|
2020-04-01T14:39:42.000Z
|
python3/rplugin.py
|
lambdalisue/vim-rplugin
|
4352c403356228c4716d3cb223620a3eddace0b5
|
[
"MIT"
] | null | null | null |
python3/rplugin.py
|
lambdalisue/vim-rplugin
|
4352c403356228c4716d3cb223620a3eddace0b5
|
[
"MIT"
] | null | null | null |
import vim
# NOTE:
# vim.options['encoding'] returns bytes so use vim.eval('&encoding')
ENCODING = vim.eval('&encoding')
def reform_bytes(value):
if isinstance(value, bytes):
return value.decode(ENCODING, 'surrogateescape')
elif isinstance(value, (dict, vim.Dictionary, vim.Options)):
return {
reform_bytes(k): reform_bytes(v) for k, v in value.items()
}
elif isinstance(value, (list, tuple, vim.List)):
return list(map(reform_bytes, value))
else:
return value
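# Rough example (assuming an ASCII-compatible &encoding): bytes are decoded
# recursively through containers, other values pass through unchanged:
#   reform_bytes([b'name', {b'key': b'value'}, 42])
#   # => ['name', {'key': 'value'}, 42]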
class Proxy:
def __init__(self, component):
self._component = component
self.__class__ = build_proxy(self, component)
def __getattr__(self, name):
value = getattr(self._component, name)
return decorate(value)
class ContainerProxy(Proxy):
def __getitem__(self, key):
return reform_bytes(self._component[key])
def __setitem__(self, key, value):
if isinstance(value, str):
value = value.encode(ENCODING, 'surrogateescape')
self._component[key] = value
class FuncNamespace:
__slots__ = ['vim']
def __init__(self, vim):
self.vim = vim
def __getattr__(self, name):
fn = self.vim.Function(name)
return lambda *args: reform_bytes(fn(*args))
class Neovim(Proxy):
def __init__(self, vim):
self.funcs = FuncNamespace(vim)
super().__init__(vim)
def call(self, name, *args):
return reform_bytes(self.Function(name)(*args))
def build_proxy(child, parent):
proxy = type(
"%s:%s" % (
type(parent).__name__,
child.__class__.__name__,
),
(child.__class__,), {}
)
child_class = child.__class__
parent_class = parent.__class__
def bind(attr):
if hasattr(child_class, attr) or not hasattr(parent_class, attr):
return
ori = getattr(parent_class, attr)
def mod(self, *args, **kwargs):
return ori(self._component, *args, **kwargs)
setattr(proxy, attr, mod)
for attr in parent_class.__dict__.keys():
bind(attr)
return proxy
def decorate(component):
if component in (vim.buffers, vim.windows, vim.tabpages, vim.current):
return Proxy(component)
elif isinstance(component, (vim.Buffer, vim.Window, vim.TabPage)):
return Proxy(component)
elif isinstance(component, (vim.List, vim.Dictionary, vim.Options)):
return ContainerProxy(component)
return component
| 25.683673
| 74
| 0.629718
|
8f9f20588dffd0243d574cf6a40d2ce9a9a50f5b
| 7,597
|
py
|
Python
|
sktime/forecasting/theta.py
|
pabworks/sktime
|
3ab1ecc20ab2e5d2a19f3df13f07fa88b7c8d652
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T00:31:02.000Z
|
2021-02-04T00:31:02.000Z
|
sktime/forecasting/theta.py
|
pabworks/sktime
|
3ab1ecc20ab2e5d2a19f3df13f07fa88b7c8d652
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/forecasting/theta.py
|
pabworks/sktime
|
3ab1ecc20ab2e5d2a19f3df13f07fa88b7c8d652
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
__all__ = ["ThetaForecaster"]
__author__ = ["@big-o", "Markus Löning"]
from warnings import warn
import numpy as np
import pandas as pd
from scipy.stats import norm
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.transformations.series.detrend import Deseasonalizer
from sktime.utils.slope_and_trend import _fit_trend
from sktime.utils.validation.forecasting import check_sp
from sktime.utils.validation.forecasting import check_y_X
class ThetaForecaster(ExponentialSmoothing):
"""
Theta method of forecasting.
    The theta method as defined in [1]_ is equivalent to simple exponential
    smoothing (SES) with drift. This is demonstrated in [2]_.
    The series is tested for seasonality using the test outlined in A&N. If
    deemed seasonal, the series is seasonally adjusted using a classical
    multiplicative decomposition before applying the theta method. The
    resulting forecasts are then reseasonalised.
    In cases where SES results in a constant forecast, the theta forecaster
    will revert to predicting the SES constant plus a linear trend derived
    from the training data.
    Prediction intervals are computed using the underlying state space model.
Parameters
----------
    initial_level : float, optional
        The alpha value of the simple exponential smoothing, if the value is
        set then this will be used, otherwise it will be estimated from the
        data.
    deseasonalize : bool, optional (default=True)
        If True, data is seasonally adjusted.
    sp : int, optional (default=1)
        The number of observations that constitute a seasonal period for a
        multiplicative deseasonaliser, which is used if seasonality is
        detected in the training data. Ignored if a deseasonaliser
        transformer is provided. Default is 1 (no seasonality).
Attributes
----------
initial_level_ : float
The estimated alpha value of the SES fit.
drift_ : float
The estimated drift of the fitted model.
se_ : float
The standard error of the predictions. Used to calculate prediction
intervals.
References
----------
    .. [1] `Assimakopoulos, V. and Nikolopoulos, K. The theta model: a
       decomposition approach to forecasting. International Journal of
       Forecasting 16, 521-530, 2000.
       <https://www.sciencedirect.com/science/article/pii/S0169207000000662>`_
    .. [2] `Hyndman, Rob J., and Billah, Baki. Unmasking the Theta method.
       International J. Forecasting, 19, 287-290, 2003.
       <https://www.sciencedirect.com/science/article/pii/S0169207001001431>`_
"""
_fitted_param_names = ("initial_level", "smoothing_level")
def __init__(self, initial_level=None, deseasonalize=True, sp=1):
self.sp = sp
self.deseasonalize = deseasonalize
self.deseasonalizer_ = None
self.trend_ = None
self.initial_level_ = None
self.drift_ = None
self.se_ = None
super(ThetaForecaster, self).__init__(initial_level=initial_level, sp=sp)
def fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
            The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
y, _ = check_y_X(y, X)
sp = check_sp(self.sp)
if sp > 1 and not self.deseasonalize:
warn("`sp` is ignored when `deseasonalise`=False")
if self.deseasonalize:
self.deseasonalizer_ = Deseasonalizer(sp=self.sp, model="multiplicative")
y = self.deseasonalizer_.fit_transform(y)
# fit exponential smoothing forecaster
# find theta lines: Theta lines are just SES + drift
super(ThetaForecaster, self).fit(y, fh=fh)
self.initial_level_ = self._fitted_forecaster.params["smoothing_level"]
# compute trend
self.trend_ = self._compute_trend(y)
self._is_fitted = True
return self
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""
Make forecasts.
Parameters
----------
fh : array-like
            The forecaster's horizon with the steps ahead to predict.
            Default is a one-step ahead forecast, i.e. np.array([1]).
Returns
-------
y_pred : pandas.Series
Returns series of predicted values.
"""
y_pred = super(ThetaForecaster, self)._predict(
fh, X, return_pred_int=False, alpha=alpha
)
# Add drift.
drift = self._compute_drift()
y_pred += drift
if self.deseasonalize:
y_pred = self.deseasonalizer_.inverse_transform(y_pred)
if return_pred_int:
pred_int = self.compute_pred_int(y_pred=y_pred, alpha=alpha)
return y_pred, pred_int
return y_pred
@staticmethod
def _compute_trend(y):
# Trend calculated through least squares regression.
coefs = _fit_trend(y.values.reshape(1, -1), order=1)
return coefs[0, 0] / 2
def _compute_drift(self):
fh = self.fh.to_relative(self.cutoff)
if np.isclose(self.initial_level_, 0.0):
# SES was constant, so revert to simple trend
drift = self.trend_ * fh
else:
# Calculate drift from SES parameters
n_timepoints = len(self._y)
drift = self.trend_ * (
fh
+ (1 - (1 - self.initial_level_) ** n_timepoints) / self.initial_level_
)
return drift
def _compute_pred_err(self, alphas):
"""
Get the prediction errors for the forecast.
"""
self.check_is_fitted()
n_timepoints = len(self._y)
self.sigma_ = np.sqrt(self._fitted_forecaster.sse / (n_timepoints - 1))
sem = self.sigma_ * np.sqrt(
self.fh.to_relative(self.cutoff) * self.initial_level_ ** 2 + 1
)
errors = []
for alpha in alphas:
z = _zscore(1 - alpha)
error = z * sem
errors.append(pd.Series(error, index=self.fh.to_absolute(self.cutoff)))
return errors
def update(self, y, X=None, update_params=True):
super(ThetaForecaster, self).update(y, X, update_params=update_params)
if update_params:
if self.deseasonalize:
y = self.deseasonalizer_.transform(y)
self.initial_level_ = self._fitted_forecaster.params["smoothing_level"]
self.trend_ = self._compute_trend(y)
return self
def _zscore(level: float, two_tailed: bool = True) -> float:
"""
Calculate a z-score from a confidence level.
Parameters
----------
level : float
A confidence level, in the open interval (0, 1).
two_tailed : bool (default=True)
If True, return the two-tailed z score.
Returns
-------
z : float
The z score.
"""
alpha = 1 - level
if two_tailed:
alpha /= 2
return -norm.ppf(alpha)
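# Rough sanity check: a two-tailed 95% confidence level maps to the familiar
# z value of about 1.96, i.e. _zscore(0.95) ~= 1.959964.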
| 30.388
| 87
| 0.625247
|
017ffada3473be5d4df9f3b0b0e96bbe6d67e6b1
| 1,874
|
py
|
Python
|
bob/pipelines/tests/test_datasets.py
|
bioidiap/bob.pipelines
|
cbefdaf3b384ee11cb26a279281f007adc2d8f19
|
[
"BSD-3-Clause"
] | 1
|
2020-10-13T19:58:44.000Z
|
2020-10-13T19:58:44.000Z
|
bob/pipelines/tests/test_datasets.py
|
bioidiap/bob.pipelines
|
cbefdaf3b384ee11cb26a279281f007adc2d8f19
|
[
"BSD-3-Clause"
] | null | null | null |
bob/pipelines/tests/test_datasets.py
|
bioidiap/bob.pipelines
|
cbefdaf3b384ee11cb26a279281f007adc2d8f19
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
"""Test code for datasets"""
import os
import numpy as np
import pkg_resources
import pytest
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from bob.pipelines.datasets import FileListDatabase
from bob.pipelines.transformers import Str_To_Types
def iris_data_tranform(samples):
for s in samples:
data = np.array(
[s.sepal_length, s.sepal_width, s.petal_length, s.petal_width]
)
s.data = data
return samples
def test_iris_list_database():
dataset_protocol_path = pkg_resources.resource_filename(
__name__, os.path.join("data", "iris_database")
)
database = FileListDatabase(dataset_protocol_path, None)
assert database.protocol == "default"
assert database.protocols() == ["default"]
assert database.groups() == ["test", "train"]
with pytest.raises(ValueError):
database.protocol = "none"
samples = database.samples()
assert len(samples) == 150
assert samples[0].data is None
assert samples[0].sepal_length == "5"
assert samples[0].petal_width == "0.2"
assert samples[0].target == "Iris-setosa"
with pytest.raises(ValueError):
database.samples(groups="random")
database.transformer = make_pipeline(
Str_To_Types(
fieldtypes=dict(
sepal_length=float,
sepal_width=float,
petal_length=float,
petal_width=float,
)
),
FunctionTransformer(iris_data_tranform),
)
samples = database.samples(groups="train")
assert len(samples) == 75
np.testing.assert_allclose(samples[0].data, [5.1, 3.5, 1.4, 0.2])
assert samples[0].sepal_length == 5.1
assert samples[0].petal_width == 0.2
assert samples[0].target == "Iris-setosa"
| 27.970149
| 74
| 0.661153
|
a39d4a34a3c676dbd81f374e2ca5858869effc68
| 2,967
|
py
|
Python
|
quantitative/variable_transforms.py
|
sujason/quantitative
|
42b7fdea05629934f513a7b15e5b03c7697c5c46
|
[
"MIT"
] | 7
|
2016-02-21T21:30:52.000Z
|
2022-01-07T18:17:55.000Z
|
quantitative/variable_transforms.py
|
sujason/quantitative
|
42b7fdea05629934f513a7b15e5b03c7697c5c46
|
[
"MIT"
] | null | null | null |
quantitative/variable_transforms.py
|
sujason/quantitative
|
42b7fdea05629934f513a7b15e5b03c7697c5c46
|
[
"MIT"
] | 3
|
2016-01-27T09:52:38.000Z
|
2020-05-19T03:59:39.000Z
|
import numpy as np
def normalize_transform(z, match_array=None, total=1.):
"""
    Takes an array and turns it into positive weights summing to ``total``. This transform is scale invariant.
"""
if match_array is not None:
z, _ = np.broadcast_arrays(z, match_array)
w = z**2.
return total*w/w.sum()
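# Rough example: normalize_transform(np.array([1., 2.])) => array([0.2, 0.8]),
# since the squared values 1 and 4 are rescaled to sum to total=1.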
def normalize_alternate_transform(z, total=1., add_element=False):
"""
    Takes an array and turns it into positive weights. This transform is sensitive to scale.
Due to this, the last element is determined by the rest of the array as the sum must be 1.
add_element=False -- ignores the last given value and overwrites it with the remainder
add_element=True -- adds an element to the end of the array for the remainder
"""
s = np.sin(z)**2.
if add_element:
s = np.append(s, 1.)
else:
s[-1] = 1.
c = np.cumprod(np.cos(z)**2.)
if add_element:
c = np.append(1., c)
else:
c = np.roll(c, 1)
c[0] = 1.
return total*s*c
def squared_transform(z, offset=0.):
"""
Takes an array and makes it non-negative (or >= offset) via squaring.
"""
return z**2. + offset
def ascending_nonnegative_transform(z, offset=0., nonnegative_transform=squared_transform):
"""
    Takes an array and makes it ascending: each output element is offset plus the cumulative sum of the nonnegative_transform-ed values up to and including that position.
"""
x = nonnegative_transform(z)
y = np.roll(x, 1)
y[0] = offset
return x + y.cumsum()
def sine_bounded_transform(z, lower=-1., upper=1.):
"""
    Takes an array and makes it bound by lower and upper limits via sine:
    z = -1 corresponds to the lower limit and z = 1 to the upper.
"""
center = (upper+lower) / 2.
full_width = upper - lower
return full_width/2.*np.sin(np.pi/2.*z) + center
def sigmoid_bounded_transform(z, lower=-1., upper=1.):
"""
    Takes an array and makes it bound by lower and upper limits via sigmoid:
    z -> -inf approaches the lower limit and z -> +inf the upper; z = 0 maps to the midpoint.
"""
center = (upper+lower) / 2.
full_width = upper - lower
return full_width*(1./(1.+np.exp(-z))-0.5) + center
def reciprocal_quadratic_bounded_transform(z, lower=-1., upper=1.):
    """
    Takes an array and makes it bound by lower and upper limits via 1/(z^2 + 1):
    z = 0 corresponds to the upper limit and |z| -> inf approaches the lower limit.
    """
center = (upper+lower) / 2.
full_width = upper - lower
return full_width*(1./(z**2+1)-0.5) + center
def ascending_bounded_box_transform(z, lower=-1., upper=1., bounded_transform=sine_bounded_transform):
"""
    Takes an array and makes each element successively lower-bounded by the previous value via sine (by default).
lower and upper correspond to the global lower and upper limit on the vector, i.e. lower affects the
first element and upper bounds all of them.
"""
    x = np.zeros_like(z, dtype=float)  # np.float was removed in recent NumPy; the builtin float is equivalent
for i, val in enumerate(z):
if i == 0:
prev = lower
else:
prev = x[i-1]
x[i] = bounded_transform(val, lower=prev, upper=upper)
return x
# TODO write unit tests
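# A minimal, hedged sketch of such tests; the input values are illustrative
# assumptions and the checks only exercise the properties documented above.
if __name__ == "__main__":
    z = np.array([0.3, 1.2, -0.7])
    w = normalize_alternate_transform(z, total=1.)
    assert np.isclose(w.sum(), 1.) and np.all(w >= 0)      # weights telescope to total
    r = ascending_nonnegative_transform(np.array([0.5, 1.0, 2.0]))
    assert np.all(r >= 0) and np.all(np.diff(r) > 0)       # non-negative and ascending
    b = sine_bounded_transform(np.array([-1., 0., 1.]), lower=2., upper=6.)
    assert np.allclose(b, [2., 4., 6.])                    # z=-1 -> lower, z=1 -> upper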
| 31.231579
| 114
| 0.644085
|
2589bb5c89d75adf0b6d3f8041bb478a01f9efeb
| 3,020
|
py
|
Python
|
rally_os/plugins/openstack/scenario.py
|
vishnu-kumar/PeformanceFramework
|
79daebe41ee654aa6d9ca3c37aa0c3d77858dadc
|
[
"Apache-2.0"
] | null | null | null |
rally_os/plugins/openstack/scenario.py
|
vishnu-kumar/PeformanceFramework
|
79daebe41ee654aa6d9ca3c37aa0c3d77858dadc
|
[
"Apache-2.0"
] | null | null | null |
rally_os/plugins/openstack/scenario.py
|
vishnu-kumar/PeformanceFramework
|
79daebe41ee654aa6d9ca3c37aa0c3d77858dadc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import osclients
from rally.task import scenario
# NOTE(boris-42): Shortcut to remove import of both rally.task.scenario and
# rally.plugins.openstack.scenario
configure = scenario.configure
class OpenStackScenario(scenario.Scenario):
"""Base class for all OpenStack scenarios."""
# TODO(stpierre): this is still used by some cleanup routines;
# remove it when they're using the new random name generator
RESOURCE_NAME_PREFIX = "rally_"
def __init__(self, context=None, admin_clients=None, clients=None):
super(OpenStackScenario, self).__init__(context)
if context:
if "admin" in context:
self._admin_clients = osclients.Clients(
context["admin"]["endpoint"])
if "user" in context:
self._clients = osclients.Clients(context["user"]["endpoint"])
if admin_clients:
if hasattr(self, "_admin_clients"):
raise ValueError(
"Only one of context[\"admin\"] or admin_clients"
" must be supplied")
self._admin_clients = admin_clients
if clients:
if hasattr(self, "_clients"):
raise ValueError(
"Only one of context[\"user\"] or clients"
" must be supplied")
self._clients = clients
def clients(self, client_type, version=None):
"""Returns a python openstack client of the requested type.
The client will be that for one of the temporary non-administrator
users created before the benchmark launch.
:param client_type: Client type ("nova"/"glance" etc.)
:param version: client version ("1"/"2" etc.)
:returns: Standard python OpenStack client instance
"""
client = getattr(self._clients, client_type)
return client(version) if version is not None else client()
def admin_clients(self, client_type, version=None):
"""Returns a python admin openstack client of the requested type.
:param client_type: Client type ("nova"/"glance" etc.)
:param version: client version ("1"/"2" etc.)
:returns: Python openstack client object
"""
client = getattr(self._admin_clients, client_type)
return client(version) if version is not None else client()
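# A hedged usage sketch: a hypothetical subclass whose name and method are
# illustrative assumptions; only clients()/admin_clients() come from this module.
class _ExampleServerListScenario(OpenStackScenario):

    def list_server_names(self):
        """Return the names of the current user's servers via novaclient."""
        nova = self.clients("nova")
        return [server.name for server in nova.servers.list()]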
| 38.717949
| 78
| 0.646358
|
ec1f89d06411a4f3a7b1f1c0eeba614677e592fa
| 444
|
py
|
Python
|
src/supplier/migrations/0011_auto_20191204_0821.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
src/supplier/migrations/0011_auto_20191204_0821.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
src/supplier/migrations/0011_auto_20191204_0821.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 2.0.7 on 2019-12-04 02:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('supplier', '0010_auto_20191013_1630'),
]
operations = [
migrations.AlterField(
model_name='supplier',
name='supplier_id',
field=models.CharField(default='SID', max_length=10, primary_key=True, serialize=False),
),
]
| 23.368421
| 100
| 0.626126
|
f370427bf7ea3e2fcc458b911c9e9698162a63f6
| 859
|
py
|
Python
|
tests/test_dbscan.py
|
kristinebilgrav/SVDB
|
ebb4b189a6a6e70725781dcfb87771e6c37b5a35
|
[
"MIT"
] | 20
|
2017-03-07T13:58:07.000Z
|
2022-03-18T14:03:10.000Z
|
tests/test_dbscan.py
|
kristinebilgrav/SVDB
|
ebb4b189a6a6e70725781dcfb87771e6c37b5a35
|
[
"MIT"
] | 48
|
2017-02-01T10:11:41.000Z
|
2022-03-31T02:13:15.000Z
|
tests/test_dbscan.py
|
kristinebilgrav/SVDB
|
ebb4b189a6a6e70725781dcfb87771e6c37b5a35
|
[
"MIT"
] | 13
|
2016-07-08T11:35:45.000Z
|
2022-03-28T08:43:56.000Z
|
import unittest
import numpy
from svdb.DBSCAN import main
class TestReadVCFLine(unittest.TestCase):
#test that distant points are not merged
def test_distant_points(self):
data = numpy.array([[1,1],[1,101]])
epsilon=100
m=2
result=main(data,epsilon,m)
assert (result[0] == -1 and result[1] == -1)
#test that close points are merged
def test_close_points(self):
data = numpy.array([[1,1],[1,101]])
epsilon=200
m=2
result=main(data,epsilon,m)
assert (result[0] == 0 and result[1] == 0)
#test that small clusters smaller than m are not merged
def test_small_cluster(self):
data = numpy.array([[1,1],[1,1],[1,101],[1,101]])
epsilon=100
m=3
result=main(data,epsilon,m)
assert (result[0] == -1 and result[1] == -1)
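    # A hedged extra check with illustrative coordinates (not part of the original
    # suite): three nearby points share one cluster label, the far point stays noise.
    def test_cluster_with_outlier(self):
        data = numpy.array([[1, 1], [1, 2], [1, 3], [1, 1000]])
        result = main(data, 100, 2)
        assert result[0] == result[1] == result[2] != -1
        assert result[3] == -1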
| 26.84375
| 59
| 0.592549
|
1cba2a599023cc9a587e568323d555ddd049367c
| 1,016
|
py
|
Python
|
ScheduledDeliveryWebApplication/app/resources/order_resource.py
|
leitao-bcc/MovileNext3_Backend_LucasLeitao
|
15bdd8a96711a2e305078cd2f152b86374dbe276
|
[
"Unlicense"
] | null | null | null |
ScheduledDeliveryWebApplication/app/resources/order_resource.py
|
leitao-bcc/MovileNext3_Backend_LucasLeitao
|
15bdd8a96711a2e305078cd2f152b86374dbe276
|
[
"Unlicense"
] | null | null | null |
ScheduledDeliveryWebApplication/app/resources/order_resource.py
|
leitao-bcc/MovileNext3_Backend_LucasLeitao
|
15bdd8a96711a2e305078cd2f152b86374dbe276
|
[
"Unlicense"
] | null | null | null |
from flask_restful import Resource, reqparse
class OrderResource(Resource):
parser = reqparse.RequestParser()
parser.add_argument('customerId', type=str, required=True)
parser.add_argument('merchantId', type=str, required=True)
parser.add_argument('deliveryAddress', type=str, required=True)
parser.add_argument('deliveryDateTime', type=str, required=True)
parser.add_argument('items', type=str, required=True)
def post(self):
from app.transformers.base_transform import BaseTransform
data = OrderResource.parser.parse_args(strict=True)
transform = BaseTransform()
order = transform.transform_canonic_order(data)
if not order:
return '', 400
return str(order.id), 201
class OrderConfirmResource(Resource):
def post(self, order_id):
from app.providers.ifood_provider import IfoodProvider
provider = IfoodProvider()
data = {"orderId": order_id}
return provider.post_order(**data)
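# A hedged wiring sketch: registering these resources on a Flask application.
# The route paths below are illustrative assumptions, not taken from this project.
def create_example_app():
    from flask import Flask
    from flask_restful import Api

    app = Flask(__name__)
    api = Api(app)
    api.add_resource(OrderResource, "/orders")
    api.add_resource(OrderConfirmResource, "/orders/<string:order_id>/confirm")
    return app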
| 27.459459
| 68
| 0.697835
|
bf621ea543e556fea7933b5127a18a99b2564d9d
| 1,357
|
py
|
Python
|
Evaluation/helper.py
|
jnice-81/FpgaHbmForDaCe
|
b80749524264b4884cbd852d2db825cf8a6007aa
|
[
"BSD-3-Clause"
] | null | null | null |
Evaluation/helper.py
|
jnice-81/FpgaHbmForDaCe
|
b80749524264b4884cbd852d2db825cf8a6007aa
|
[
"BSD-3-Clause"
] | null | null | null |
Evaluation/helper.py
|
jnice-81/FpgaHbmForDaCe
|
b80749524264b4884cbd852d2db825cf8a6007aa
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import List
from dace import memlet
from dace.sdfg.state import SDFGState
from dace.transformation.optimizer import Optimizer
from hbm_bank_split import HbmBankSplit
from hbm_transform import set_shape
from dace.sdfg import graph, nodes, propagation, utils
def get_first_node(state: SDFGState, cond):
for node, state in state.all_nodes_recursive():
if cond(node):
return node
def distribute_along_dim0(sdfg, array_list: List[str]):
for array in array_list:
desc = sdfg.arrays[array]
if len(desc.shape) > 2:
new_shape = [desc.shape[0] * desc.shape[1], *desc.shape[2:]]
else:
new_shape = [desc.shape[0] * desc.shape[1]]
set_shape(desc, new_shape)
for match in Optimizer(sdfg).get_pattern_matches(patterns=HbmBankSplit):
match.apply(sdfg)
def update_access(state: SDFGState, old_acc_node: nodes.AccessNode, new_data: str, new_memlet: memlet.Memlet):
old_edge = state.all_edges(old_acc_node)[0]
path = state.memlet_path(old_edge)
if path[0] == old_edge:
path[-1].data = new_memlet
else:
path[0].data = new_memlet
old_acc_node.data = new_data
def get_nodes_of_path(path: List[graph.MultiConnectorEdge]):
nodes = []
nodes.append(path[0].src)
for e in path:
nodes.append(e.dst)
return nodes
| 33.097561
| 110
| 0.690494
|
418253a108f32f7b3792a40f796b4eb26df21b09
| 8,682
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/monitoring/grafana_plugin.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/monitoring/grafana_plugin.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/monitoring/grafana_plugin.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Thierry Sallé (@seuf)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: grafana_plugin
author:
- Thierry Sallé (@seuf)
version_added: "2.5"
short_description: Manage Grafana plugins via grafana-cli
description:
- Install and remove Grafana plugins.
- See U(https://grafana.com/docs/plugins/installation/) for upstream documentation.
options:
name:
description:
- Name of the plugin.
required: true
version:
description:
- Version of the plugin to install.
- Defaults to C(latest).
grafana_plugins_dir:
description:
- Directory where the Grafana plugin will be installed.
- If omitted, defaults to C(/var/lib/grafana/plugins).
grafana_repo:
description:
- URL to the Grafana plugin repository.
- "If omitted, grafana-cli will use the default value: U(https://grafana.com/api/plugins)."
grafana_plugin_url:
description:
- Full URL to the plugin zip file instead of downloading the file from U(https://grafana.com/api/plugins).
- Requires grafana 4.6.x or later.
state:
description:
- Whether the plugin should be installed.
choices:
- present
- absent
default: present
'''
EXAMPLES = '''
---
- name: Install/update Grafana piechart panel plugin
grafana_plugin:
name: grafana-piechart-panel
version: latest
state: present
'''
RETURN = '''
---
version:
description: version of the installed/removed/updated plugin.
type: str
returned: always
'''
import base64
import json
import os
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
class GrafanaCliException(Exception):
pass
def grafana_cli_bin(params):
'''
Get the grafana-cli binary path with global options.
Raise a GrafanaCliException if the grafana-cli is not present or not in PATH
:param params: ansible module params. Used to fill grafana-cli global params.
'''
program = 'grafana-cli'
grafana_cli = None
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
grafana_cli = program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
grafana_cli = exe_file
break
if grafana_cli is None:
raise GrafanaCliException('grafana-cli binary is not present or not in PATH')
else:
if 'grafana_plugin_url' in params and params['grafana_plugin_url']:
grafana_cli = '{0} {1} {2}'.format(grafana_cli, '--pluginUrl', params['grafana_plugin_url'])
if 'grafana_plugins_dir' in params and params['grafana_plugins_dir']:
grafana_cli = '{0} {1} {2}'.format(grafana_cli, '--pluginsDir', params['grafana_plugins_dir'])
if 'grafana_repo' in params and params['grafana_repo']:
grafana_cli = '{0} {1} {2}'.format(grafana_cli, '--repo', params['grafana_repo'])
if 'validate_certs' in params and params['validate_certs'] is False:
grafana_cli = '{0} {1}'.format(grafana_cli, '--insecure')
return '{0} {1}'.format(grafana_cli, 'plugins')
def get_grafana_plugin_version(module, params):
'''
Fetch grafana installed plugin version. Return None if plugin is not installed.
:param module: ansible module object. used to run system commands.
:param params: ansible module params.
'''
grafana_cli = grafana_cli_bin(params)
rc, stdout, stderr = module.run_command('{0} ls'.format(grafana_cli))
stdout_lines = stdout.split("\n")
for line in stdout_lines:
if line.find(' @ ') != -1:
line = line.rstrip()
plugin_name, plugin_version = line.split(' @ ')
if plugin_name == params['name']:
return plugin_version
return None
def get_grafana_plugin_version_latest(module, params):
'''
Fetch the latest version available from grafana-cli.
Return the newest version number or None not found.
:param module: ansible module object. used to run system commands.
:param params: ansible module params.
'''
grafana_cli = grafana_cli_bin(params)
rc, stdout, stderr = module.run_command('{0} list-versions {1}'.format(grafana_cli,
params['name']))
stdout_lines = stdout.split("\n")
if stdout_lines[0]:
return stdout_lines[0].rstrip()
return None
def grafana_plugin(module, params):
'''
Install update or remove grafana plugin
:param module: ansible module object. used to run system commands.
:param params: ansible module params.
'''
grafana_cli = grafana_cli_bin(params)
if params['state'] == 'present':
grafana_plugin_version = get_grafana_plugin_version(module, params)
if grafana_plugin_version is not None:
if 'version' in params and params['version']:
if params['version'] == grafana_plugin_version:
return {'msg': 'Grafana plugin already installed',
'changed': False,
'version': grafana_plugin_version}
else:
if params['version'] == 'latest' or params['version'] is None:
latest_version = get_grafana_plugin_version_latest(module, params)
if latest_version == grafana_plugin_version:
return {'msg': 'Grafana plugin already installed',
'changed': False,
'version': grafana_plugin_version}
cmd = '{0} update {1}'.format(grafana_cli, params['name'])
else:
cmd = '{0} install {1} {2}'.format(grafana_cli, params['name'], params['version'])
else:
return {'msg': 'Grafana plugin already installed',
'changed': False,
'version': grafana_plugin_version}
else:
if 'version' in params:
if params['version'] == 'latest' or params['version'] is None:
cmd = '{0} install {1}'.format(grafana_cli, params['name'])
else:
cmd = '{0} install {1} {2}'.format(grafana_cli, params['name'], params['version'])
else:
cmd = '{0} install {1}'.format(grafana_cli, params['name'])
else:
cmd = '{0} uninstall {1}'.format(grafana_cli, params['name'])
rc, stdout, stderr = module.run_command(cmd)
if rc == 0:
stdout_lines = stdout.split("\n")
for line in stdout_lines:
if line.find(params['name']):
if line.find(' @ ') != -1:
line = line.rstrip()
plugin_name, plugin_version = line.split(' @ ')
else:
plugin_version = None
return {'msg': 'Grafana plugin {0} installed : {1}'.format(params['name'], cmd),
'changed': True,
'version': plugin_version}
else:
raise GrafanaCliException("'{0}' execution returned an error : [{1}] {2} {3}".format(cmd, rc, stdout, stderr))
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True,
type='str'),
version=dict(type='str'),
grafana_plugins_dir=dict(type='str'),
grafana_repo=dict(type='str'),
grafana_plugin_url=dict(type='str'),
state=dict(choices=['present', 'absent'],
default='present')
),
supports_check_mode=False
)
try:
result = grafana_plugin(module, module.params)
except GrafanaCliException as e:
module.fail_json(
failed=True,
msg="{0}".format(e)
)
return
except Exception as e:
module.fail_json(
failed=True,
msg="{0} : {1} ".format(type(e), e)
)
return
module.exit_json(
failed=False,
**result
)
return
if __name__ == '__main__':
main()
| 33.651163
| 118
| 0.590187
|
6aa1d360e22530723943e4fd2d32da6fe0b4f614
| 3,091
|
py
|
Python
|
databuilder/wm_data_loaders/wm_databuilder/extractor/wm_redshift_metadata_extractor.py
|
jkomar-wm/amundsen
|
4381e47ade902f2614358be7a6b59586616cb7c0
|
[
"Apache-2.0"
] | null | null | null |
databuilder/wm_data_loaders/wm_databuilder/extractor/wm_redshift_metadata_extractor.py
|
jkomar-wm/amundsen
|
4381e47ade902f2614358be7a6b59586616cb7c0
|
[
"Apache-2.0"
] | null | null | null |
databuilder/wm_data_loaders/wm_databuilder/extractor/wm_redshift_metadata_extractor.py
|
jkomar-wm/amundsen
|
4381e47ade902f2614358be7a6b59586616cb7c0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import ( # noqa: F401
Any, Dict, Iterator, Union,
)
from pyhocon import ConfigFactory, ConfigTree # noqa: F401
from .wm_base_postgres_metadata_extractor import BasePostgresMetadataExtractor
class RedshiftMetadataExtractor(BasePostgresMetadataExtractor):
"""
Extracts Redshift table and column metadata from underlying meta store database using SQLAlchemyExtractor
This differs from the PostgresMetadataExtractor because in order to support Redshift's late binding views,
we need to join the INFORMATION_SCHEMA data against the function PG_GET_LATE_BINDING_VIEW_COLS().
"""
def get_sql_statement(self, use_catalog_as_cluster_name: bool, where_clause_suffix: str) -> str:
if use_catalog_as_cluster_name:
cluster_source = "CURRENT_DATABASE()"
else:
cluster_source = f"'{self._cluster}'"
return """
WITH table1 AS (
SELECT
{cluster_source} as cluster,
c.table_schema as schema,
c.table_name as name,
pgtd.description as description,
c.column_name as col_name,
c.data_type as col_type,
pgcd.description as col_description,
ordinal_position as col_sort_order
FROM INFORMATION_SCHEMA.COLUMNS c
INNER JOIN
pg_catalog.pg_statio_all_tables as st on c.table_schema=st.schemaname and c.table_name=st.relname
LEFT JOIN
pg_catalog.pg_description pgcd on pgcd.objoid=st.relid and pgcd.objsubid=c.ordinal_position
LEFT JOIN
pg_catalog.pg_description pgtd on pgtd.objoid=st.relid and pgtd.objsubid=0
UNION
SELECT
{cluster_source} as cluster,
view_schema as schema,
view_name as name,
NULL as description,
column_name as col_name,
data_type as col_type,
NULL as col_description,
ordinal_position as col_sort_order
FROM
PG_GET_LATE_BINDING_VIEW_COLS()
COLS(view_schema NAME, view_name NAME, column_name NAME, data_type VARCHAR, ordinal_position INT)
UNION
SELECT
{cluster_source} AS cluster,
schemaname AS schema,
tablename AS name,
NULL AS description,
columnname AS col_name,
external_type AS col_type,
NULL AS col_description,
columnnum AS col_sort_order
FROM svv_external_columns
)
SELECT
*
FROM
table1
WHERE
{where_clause_suffix}
ORDER by cluster, schema, name, col_sort_order ;
""".format(
cluster_source=cluster_source,
where_clause_suffix=where_clause_suffix,
)
def get_scope(self) -> str:
return 'extractor.redshift_metadata'
| 34.730337
| 117
| 0.622452
|
f9a6c92c3f8017a4b3a57b2eea931e655fcfc772
| 20,733
|
py
|
Python
|
tests/metarl/tf/models/test_lstm.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 2
|
2020-03-15T14:35:15.000Z
|
2021-02-15T16:38:00.000Z
|
tests/metarl/tf/models/test_lstm.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | null | null | null |
tests/metarl/tf/models/test_lstm.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 1
|
2020-02-24T03:04:23.000Z
|
2020-02-24T03:04:23.000Z
|
import numpy as np
import pytest
import tensorflow as tf
from metarl.tf.models.lstm import lstm
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_lstm
class TestLSTM(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.batch_size = 2
self.hidden_dim = 2
self._step_hidden_var = tf.compat.v1.placeholder(
shape=(self.batch_size, self.hidden_dim),
name='initial_hidden',
dtype=tf.float32)
self._step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
self.hidden_dim),
name='initial_cell',
dtype=tf.float32)
self.lstm_cell = tf.keras.layers.LSTMCell(
units=self.hidden_dim,
activation=tf.nn.tanh,
kernel_initializer=tf.constant_initializer(1),
recurrent_activation=tf.nn.sigmoid,
recurrent_initializer=tf.constant_initializer(1),
name='lstm_layer')
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init, cell_init', [
(1, 1, 1, 0, 0), # noqa: E122
(1, 1, 3, 0, 0),
(1, 3, 1, 0, 0),
(3, 1, 1, 0, 0),
(3, 3, 1, 0, 0),
(3, 3, 3, 0, 0),
(1, 1, 1, 0.5, 0.5),
(1, 1, 3, 0.5, 0.5),
(1, 3, 1, 0.5, 0.5),
(3, 1, 1, 0.5, 0.5),
(3, 3, 1, 0.5, 0.5),
(3, 3, 3, 0.5, 0.5),
])
# yapf: enable
def test_output_shapes(self, time_step, input_dim, output_dim, hidden_init,
cell_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
_step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='input')
_output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('LSTM'):
self.lstm = lstm(
all_input_var=_input_var,
name='lstm',
lstm_cell=self.lstm_cell,
step_input_var=_step_input_var,
step_hidden_var=self._step_hidden_var,
step_cell_var=self._step_cell_var,
hidden_state_init=tf.constant_initializer(hidden_init),
cell_state_init=tf.constant_initializer(cell_init),
output_nonlinearity_layer=_output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the lstm cell
outputs_t, output_t, h_t, c_t, hidden_init, cell_init = self.lstm
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
for _ in range(time_step):
output, hidden, cell = self.sess.run(
[output_t, h_t, c_t],
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
assert output.shape == (self.batch_size, output_dim)
assert hidden.shape == (self.batch_size, self.hidden_dim)
assert cell.shape == (self.batch_size, self.hidden_dim)
full_output = self.sess.run(outputs_t,
feed_dict={_input_var: obs_inputs})
assert full_output.shape == (self.batch_size, time_step, output_dim)
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init, cell_init', [
(1, 1, 1, 0, 0), # noqa: E122
(1, 1, 3, 0, 0),
(1, 3, 1, 0, 0),
(3, 1, 1, 0, 0),
(3, 3, 1, 0, 0),
(3, 3, 3, 0, 0),
(1, 1, 1, 0.5, 0.5),
(1, 1, 3, 0.5, 0.5),
(1, 3, 1, 0.5, 0.5),
(3, 1, 1, 0.5, 0.5),
(3, 3, 1, 0.5, 0.5),
(3, 3, 3, 0.5, 0.5),
])
# yapf: enable
def test_output_value(self, time_step, input_dim, output_dim, hidden_init,
cell_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
_step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='input')
_output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('LSTM'):
self.lstm = lstm(
all_input_var=_input_var,
name='lstm',
lstm_cell=self.lstm_cell,
step_input_var=_step_input_var,
step_hidden_var=self._step_hidden_var,
step_cell_var=self._step_cell_var,
hidden_state_init=tf.constant_initializer(hidden_init),
cell_state_init=tf.constant_initializer(cell_init),
output_nonlinearity_layer=_output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the lstm cell
outputs_t, output_t, h_t, c_t, hidden_init, cell_init = self.lstm
hidden1 = hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell1 = cell2 = np.full((self.batch_size, self.hidden_dim),
cell_init.eval())
for i in range(time_step):
output1, hidden1, cell1 = self.sess.run(
[output_t, h_t, c_t],
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden1,
self._step_cell_var: cell1
})
hidden2, cell2 = recurrent_step_lstm(
input_val=obs_input,
num_units=self.hidden_dim,
step_hidden=hidden2,
step_cell=cell2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. / (1. + np.exp(-x)))
output_nonlinearity = np.full(
(np.prod(hidden2.shape[1:]), output_dim), 1.)
output2 = np.matmul(hidden2, output_nonlinearity)
assert np.allclose(output1, output2)
assert np.allclose(hidden1, hidden2)
assert np.allclose(cell1, cell2)
full_output1 = self.sess.run(outputs_t,
feed_dict={_input_var: obs_inputs})
hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell2 = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
stack_hidden = None
for i in range(time_step):
hidden2, cell2 = recurrent_step_lstm(
input_val=obs_inputs[:, i, :],
num_units=self.hidden_dim,
step_hidden=hidden2,
step_cell=cell2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. / (1. + np.exp(-x)))
if stack_hidden is None:
stack_hidden = hidden2[:, np.newaxis, :]
else:
stack_hidden = np.concatenate(
(stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
1.)
full_output2 = np.matmul(stack_hidden, output_nonlinearity)
assert np.allclose(full_output1, full_output2)
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim', [
(1, 1, 1),
(1, 1, 3),
(1, 3, 1),
(3, 1, 1),
(3, 3, 1),
(3, 3, 3),
])
# yapf: enable
def test_output_value_trainable_hidden_and_cell(self, time_step, input_dim,
output_dim):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
_step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='input')
_output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('LSTM'):
self.lstm = lstm(all_input_var=_input_var,
name='lstm',
lstm_cell=self.lstm_cell,
step_input_var=_step_input_var,
step_hidden_var=self._step_hidden_var,
step_cell_var=self._step_cell_var,
hidden_state_init_trainable=True,
cell_state_init_trainable=True,
output_nonlinearity_layer=_output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the lstm cell
outputs_t, _, h_t, c_t, hidden_init, cell_init = self.lstm
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
hidden, cell = self.sess.run(
[h_t, c_t],
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
with tf.compat.v1.variable_scope('LSTM/lstm', reuse=True):
hidden_init_var = tf.compat.v1.get_variable(name='initial_hidden')
cell_init_var = tf.compat.v1.get_variable(name='initial_cell')
assert hidden_init_var in tf.compat.v1.trainable_variables()
assert cell_init_var in tf.compat.v1.trainable_variables()
full_output1 = self.sess.run(outputs_t,
feed_dict={_input_var: obs_inputs})
hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell2 = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
stack_hidden = None
for i in range(time_step):
hidden2, cell2 = recurrent_step_lstm(
input_val=obs_inputs[:, i, :],
num_units=self.hidden_dim,
step_hidden=hidden2,
step_cell=cell2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. / (1. + np.exp(-x)))
if stack_hidden is None:
stack_hidden = hidden2[:, np.newaxis, :]
else:
stack_hidden = np.concatenate(
(stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
1.)
full_output2 = np.matmul(stack_hidden, output_nonlinearity)
assert np.allclose(full_output1, full_output2)
def test_gradient_paths(self):
time_step = 3
input_dim = 2
output_dim = 4
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
_step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='input')
_output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('LSTM'):
self.lstm = lstm(all_input_var=_input_var,
name='lstm',
lstm_cell=self.lstm_cell,
step_input_var=_step_input_var,
step_hidden_var=self._step_hidden_var,
step_cell_var=self._step_cell_var,
output_nonlinearity_layer=_output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the lstm cell
outputs_t, output_t, h_t, c_t, hidden_init, cell_init = self.lstm
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
grads_step_o_i = tf.gradients(output_t, _step_input_var)
grads_step_o_h = tf.gradients(output_t, self._step_hidden_var)
grads_step_o_c = tf.gradients(output_t, self._step_cell_var)
grads_step_h = tf.gradients(h_t, _step_input_var)
grads_step_c = tf.gradients(c_t, _step_input_var)
self.sess.run(
[
grads_step_o_i, grads_step_o_h, grads_step_o_c, grads_step_h,
grads_step_c
],
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
grads_full = tf.gradients(outputs_t, _input_var)
self.sess.run(grads_full, feed_dict={_input_var: obs_inputs})
grads_step_o_i = tf.gradients(outputs_t, _step_input_var)
grads_step_o_h = tf.gradients(outputs_t, self._step_hidden_var)
grads_step_o_c = tf.gradients(outputs_t, self._step_cell_var)
grads_step_h = tf.gradients(h_t, _input_var)
grads_step_c = tf.gradients(c_t, _input_var)
# No gradient flow
with pytest.raises(TypeError):
self.sess.run(grads_step_o_i,
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
with pytest.raises(TypeError):
self.sess.run(grads_step_o_h,
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
with pytest.raises(TypeError):
self.sess.run(grads_step_o_c,
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
with pytest.raises(TypeError):
self.sess.run(grads_step_h, feed_dict={_input_var: obs_inputs})
with pytest.raises(TypeError):
self.sess.run(grads_step_c, feed_dict={_input_var: obs_inputs})
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init, cell_init', [
(1, 1, 1, 0, 0), # noqa: E122
(1, 1, 3, 0, 0),
(1, 3, 1, 0, 0),
(3, 1, 1, 0, 0),
(3, 3, 1, 0, 0),
(3, 3, 3, 0, 0),
(1, 1, 1, 0.5, 0.5),
(1, 1, 3, 0.5, 0.5),
(1, 3, 1, 0.5, 0.5),
(3, 1, 1, 0.5, 0.5),
(3, 3, 1, 0.5, 0.5),
(3, 3, 3, 0.5, 0.5),
])
# yapf: enable
def test_output_same_as_rnn(self, time_step, input_dim, output_dim,
hidden_init, cell_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
_step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='input')
_output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('LSTM'):
self.lstm = lstm(
all_input_var=_input_var,
name='lstm',
lstm_cell=self.lstm_cell,
step_input_var=_step_input_var,
step_hidden_var=self._step_hidden_var,
step_cell_var=self._step_cell_var,
hidden_state_init=tf.constant_initializer(hidden_init),
cell_state_init=tf.constant_initializer(cell_init),
output_nonlinearity_layer=_output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Create a RNN and compute the entire outputs
rnn_layer = tf.keras.layers.RNN(cell=self.lstm_cell,
return_sequences=True,
return_state=True)
# Set initial state to all 0s
hidden_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.batch_size, self.hidden_dim),
initializer=tf.constant_initializer(hidden_init),
trainable=False,
dtype=tf.float32)
cell_var = tf.compat.v1.get_variable(
name='initial_cell',
shape=(self.batch_size, self.hidden_dim),
initializer=tf.constant_initializer(cell_init),
trainable=False,
dtype=tf.float32)
outputs, hiddens, cells = rnn_layer(
_input_var, initial_state=[hidden_var, cell_var])
outputs = _output_nonlinearity(outputs)
self.sess.run(tf.compat.v1.global_variables_initializer())
outputs, hiddens, cells = self.sess.run(
[outputs, hiddens, cells], feed_dict={_input_var: obs_inputs})
# Compute output by doing t step() on the lstm cell
hidden = np.full((self.batch_size, self.hidden_dim), hidden_init)
cell = np.full((self.batch_size, self.hidden_dim), cell_init)
_, output_t, hidden_t, cell_t, _, _ = self.lstm
for i in range(time_step):
output, hidden, cell = self.sess.run(
[output_t, hidden_t, cell_t],
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
# The output from i-th timestep
assert np.array_equal(output, outputs[:, i, :])
assert np.array_equal(hidden, hiddens)
assert np.array_equal(cell, cells)
# Also the full output from lstm
full_outputs = self.sess.run(self.lstm[0],
feed_dict={_input_var: obs_inputs})
assert np.array_equal(outputs, full_outputs)
| 43.374477
| 79
| 0.526504
|
aaf88cb5cfdfdaf26657d75bb059665258b8930f
| 1,949
|
py
|
Python
|
omas/examples/connect_gkdb.py
|
gafusion/omas
|
8e9b725483655db0ccbf9a4f7aa9eba7c6c04864
|
[
"MIT"
] | 20
|
2017-11-07T14:36:21.000Z
|
2021-03-27T19:14:17.000Z
|
omas/examples/connect_gkdb.py
|
Reksoatr/omas
|
6740fd040d6af59e0aec54f977637b221733bd07
|
[
"MIT"
] | 170
|
2017-11-09T06:40:11.000Z
|
2022-03-29T17:33:26.000Z
|
omas/examples/connect_gkdb.py
|
Reksoatr/omas
|
6740fd040d6af59e0aec54f977637b221733bd07
|
[
"MIT"
] | 6
|
2017-11-22T14:44:54.000Z
|
2022-01-10T19:52:47.000Z
|
#!/usr/bin/env python
# # -*- coding: utf-8 -*-
"""
Interface with GKDB
===================
Use OMAS to interface with Gyro-Kinetic DataBase (GKDB) https://gitlab.com/gkdb/gkdb
GKDB is a publicly accessible database of delta-f flux-tube gyro-kinetic simulations of tokamak plasmas
which stores its data according to the `gyrokinetic` IMAS IDS https://gafusion.github.io/omas/schema/schema_gyrokinetics.html
"""
from omas import ODS, omas_dir, omas_testdir
from pprint import pprint
import sys
# load a sample GKDB sample json file
sample_filename = omas_dir + 'samples/gkdb_linear_eigenvalue.json'
ods = ODS()
# warn about `gyrokinetics.fluxes_integrated_norm = []` and drop it
ods['gyrokinetics'].load(sample_filename, consistency_check='warn_drop')
# show content
pprint(ods.pretty_paths())
# save a copy
try:
__file__
except NameError:
import inspect
__file__ = inspect.getfile(lambda: None)
filename = omas_testdir(__file__) + '/gkdb_linear_initialvalue.json'
ods['gyrokinetics'].save(filename)
# load the newly saved copy
ods1 = ODS()
ods1['gyrokinetics'].load(filename)
# look for differences between original GKDB json and OMAS json
differences = ods.diff(ods1, ignore_type=True)
if not differences:
    print('\nNo differences found: save/load of GKDB json file worked\n')
else:
pprint(differences)
raise RuntimeError('Save/Load of GKDB on json file failed')
# the gkdb library is optional: skip the GKDB validation below if it cannot be imported
try:
import gkdb.core.model
except ImportError as _excp:
print('Could not import gkdb library: %s' % repr(_excp))
else:
# Check that GKDB file written by OMAS is valid also according to GKDB
if gkdb.core.ids_checks.check_json(filename, only_input=False):
print('json file saved via OMAS is valid for gkdb')
# This requires an account on the GKDB server
if False:
gkdb.core.model.connect_to_gkdb()
gkdb.core.model.Ids_properties.from_json(filename)
| 32.483333
| 125
| 0.741919
|
b2644caed688a96ba4da4820bf5c6f16f8e1c8fa
| 384
|
py
|
Python
|
ngo_cms/wsgi.py
|
MicroPyramid/ngo-cms
|
5f0baf69ce646ab6b895d3ae2f49b782630c9959
|
[
"MIT"
] | 5
|
2019-08-12T17:56:25.000Z
|
2021-08-31T04:36:42.000Z
|
ngo_cms/wsgi.py
|
MicroPyramid/ngo-cms
|
5f0baf69ce646ab6b895d3ae2f49b782630c9959
|
[
"MIT"
] | 12
|
2020-02-12T00:38:11.000Z
|
2022-03-11T23:50:12.000Z
|
ngo_cms/wsgi.py
|
MicroPyramid/ngo-cms
|
5f0baf69ce646ab6b895d3ae2f49b782630c9959
|
[
"MIT"
] | 8
|
2019-06-19T18:54:02.000Z
|
2021-01-05T19:31:30.000Z
|
"""
WSGI config for blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cjws.settings")
application = get_wsgi_application()
| 24
| 78
| 0.783854
|
16377b1d054031285830631099162d4c9d1a63c7
| 952
|
py
|
Python
|
43_Hammurabi/python/test_hamurabi.py
|
Omega7379/basic-computer-games
|
37033c40ebd267dc56de3db9813a23940445142f
|
[
"Unlicense"
] | 1
|
2022-03-24T17:56:31.000Z
|
2022-03-24T17:56:31.000Z
|
43_Hammurabi/python/test_hamurabi.py
|
Omega7379/basic-computer-games
|
37033c40ebd267dc56de3db9813a23940445142f
|
[
"Unlicense"
] | 1
|
2022-03-24T20:16:26.000Z
|
2022-03-24T20:16:26.000Z
|
43_Hammurabi/python/test_hamurabi.py
|
Omega7379/basic-computer-games
|
37033c40ebd267dc56de3db9813a23940445142f
|
[
"Unlicense"
] | 1
|
2022-03-11T14:14:06.000Z
|
2022-03-11T14:14:06.000Z
|
import io
import hamurabi
def test_main(monkeypatch, capsys):
monkeypatch.setattr("sys.stdin", io.StringIO("100\n100\n100"))
hamurabi.main()
captured = capsys.readouterr()
actual_lines = captured.out.splitlines()
expected_lines = [
"HAMURABI", # 0
"CREATIVE COMPUTING MORRISTOWN, NEW JERSEY", # 1
"", # 2
"", # 3
"", # 4
"", # 5
"TRY YOUR HAND AT GOVERNING ANCIENT SUMERIA", # 6
"FOR A TEN-YEAR TERM OF OFFICE.", # 7
"", # 8
"", # 9
"", # 10
"", # 11
"HAMURABI: I BEG TO REPORT TO YOU\n", # 12
"IN YEAR 1 , 0 PEOPLE STARVED, 5 CAME TO THE CITY,\n", # 13
"POPULATION IS NOW 100\n", # 14
"THE CITY NOW OWNS 1000.0 ACRES.", # 15
]
for i, (actual, expected) in enumerate(zip(actual_lines, expected_lines)):
assert actual.strip() == expected.strip(), f"Line {i} is wrong"
| 30.709677
| 78
| 0.537815
|
cce7a09d2d557d9ad28ef8f69a29b750fe3e70e5
| 3,868
|
py
|
Python
|
pymc3/tuning/scaling.py
|
cowirihy/pymc3
|
f0b95773047af12f3c0ded04d707f02ddc4d4f6b
|
[
"Apache-2.0"
] | 1
|
2020-09-30T06:26:53.000Z
|
2020-09-30T06:26:53.000Z
|
pymc3/tuning/scaling.py
|
cowirihy/pymc3
|
f0b95773047af12f3c0ded04d707f02ddc4d4f6b
|
[
"Apache-2.0"
] | null | null | null |
pymc3/tuning/scaling.py
|
cowirihy/pymc3
|
f0b95773047af12f3c0ded04d707f02ddc4d4f6b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy import exp, log, sqrt
from ..model import modelcontext, Point
from ..theanof import hessian_diag, inputvars
from ..blocking import DictToArrayBijection, ArrayOrdering
from ..util import get_var_name
__all__ = ['find_hessian', 'trace_cov', 'guess_scaling']
def fixed_hessian(point, vars=None, model=None):
"""
    Returns a fixed diagonal Hessian approximation (a constant vector) for any chain location.
Parameters
----------
model: Model (optional if in `with` context)
point: dict
vars: list
Variables for which Hessian is to be calculated.
"""
model = modelcontext(model)
if vars is None:
vars = model.cont_vars
vars = inputvars(vars)
point = Point(point, model=model)
bij = DictToArrayBijection(ArrayOrdering(vars), point)
rval = np.ones(bij.map(point).size) / 10
return rval
def find_hessian(point, vars=None, model=None):
"""
Returns Hessian of logp at the point passed.
Parameters
----------
model: Model (optional if in `with` context)
point: dict
vars: list
Variables for which Hessian is to be calculated.
"""
model = modelcontext(model)
H = model.fastd2logp(vars)
return H(Point(point, model=model))
def find_hessian_diag(point, vars=None, model=None):
"""
    Returns the diagonal of the Hessian of logp at the point passed.
Parameters
----------
model: Model (optional if in `with` context)
point: dict
vars: list
Variables for which Hessian is to be calculated.
"""
model = modelcontext(model)
H = model.fastfn(hessian_diag(model.logpt, vars))
return H(Point(point, model=model))
def guess_scaling(point, vars=None, model=None, scaling_bound=1e-8):
model = modelcontext(model)
try:
h = find_hessian_diag(point, vars, model=model)
except NotImplementedError:
h = fixed_hessian(point, vars, model=model)
return adjust_scaling(h, scaling_bound)
def adjust_scaling(s, scaling_bound):
if s.ndim < 2:
return adjust_precision(s, scaling_bound)
else:
val, vec = np.linalg.eigh(s)
val = adjust_precision(val, scaling_bound)
return eig_recompose(val, vec)
def adjust_precision(tau, scaling_bound=1e-8):
mag = sqrt(abs(tau))
bounded = bound(log(mag), log(scaling_bound), log(1./scaling_bound))
return exp(bounded)**2
def bound(a, l, u):
return np.maximum(np.minimum(a, u), l)
def eig_recompose(val, vec):
return vec.dot(np.diag(val)).dot(vec.T)
def trace_cov(trace, vars=None, model=None):
"""
Calculate the flattened covariance matrix using a sample trace
Useful if you want to base your covariance matrix for further sampling on some initial samples.
Parameters
----------
trace: Trace
vars: list
variables for which to calculate covariance matrix
Returns
-------
r: array (n,n)
covariance matrix
"""
model = modelcontext(model)
if model is not None:
vars = model.free_RVs
elif vars is None:
vars = trace.varnames
def flat_t(var):
x = trace[get_var_name(var)]
return x.reshape((x.shape[0], np.prod(x.shape[1:], dtype=int)))
return np.cov(np.concatenate(list(map(flat_t, vars)), 1).T)
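# A hedged numeric sketch of the scaling helpers above (illustrative values only):
if __name__ == "__main__":
    # bound clamps element-wise into [l, u]
    assert bound(np.array([0.5, 5.0]), 1.0, 3.0).tolist() == [1.0, 3.0]
    # adjust_precision takes sqrt(|tau|), clamps it in log-space, then squares it,
    # so well-scaled values round-trip: adjust_precision(4.) ~= 4.
    assert np.isclose(adjust_precision(np.array([4.0]))[0], 4.0)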
| 27.048951
| 99
| 0.66727
|
48de49b7f548b12936c40b7822febcc1c38724d3
| 4,035
|
py
|
Python
|
influxdb_client/domain/lesser_threshold.py
|
MASIFAYUB/influxdb-client-python
|
a067fa5670a6fbc600db2ac4e54e29e1b7124998
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/lesser_threshold.py
|
MASIFAYUB/influxdb-client-python
|
a067fa5670a6fbc600db2ac4e54e29e1b7124998
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/lesser_threshold.py
|
MASIFAYUB/influxdb-client-python
|
a067fa5670a6fbc600db2ac4e54e29e1b7124998
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from influxdb_client.domain.threshold_base import ThresholdBase
class LesserThreshold(ThresholdBase):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'value': 'float',
'level': 'CheckStatusLevel',
'all_values': 'bool'
}
attribute_map = {
'type': 'type',
'value': 'value',
'level': 'level',
'all_values': 'allValues'
}
def __init__(self, type="lesser", value=None, level=None, all_values=None): # noqa: E501,D401,D403
"""LesserThreshold - a model defined in OpenAPI.""" # noqa: E501
ThresholdBase.__init__(self, level=level, all_values=all_values) # noqa: E501
self._type = None
self._value = None
self.discriminator = None
self.type = type
self.value = value
@property
def type(self):
"""Get the type of this LesserThreshold.
:return: The type of this LesserThreshold.
:rtype: str
""" # noqa: E501
return self._type
@type.setter
def type(self, type):
"""Set the type of this LesserThreshold.
:param type: The type of this LesserThreshold.
:type: str
""" # noqa: E501
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def value(self):
"""Get the value of this LesserThreshold.
:return: The value of this LesserThreshold.
:rtype: float
""" # noqa: E501
return self._value
@value.setter
def value(self, value):
"""Set the value of this LesserThreshold.
:param value: The value of this LesserThreshold.
:type: float
""" # noqa: E501
if value is None:
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, LesserThreshold):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
| 28.415493
| 159
| 0.566543
|
d6ac06429b2d87afdbfce0c1ef279e76f7da24da
| 19,774
|
py
|
Python
|
code/datasets.py
|
1o0ko/StackGAN-v2
|
b3b94d0ddf2372b849d7bb79db78469a4cce77ca
|
[
"MIT"
] | null | null | null |
code/datasets.py
|
1o0ko/StackGAN-v2
|
b3b94d0ddf2372b849d7bb79db78469a4cce77ca
|
[
"MIT"
] | null | null | null |
code/datasets.py
|
1o0ko/StackGAN-v2
|
b3b94d0ddf2372b849d7bb79db78469a4cce77ca
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path
import random
import six
import sys
from collections import Counter
import h5py
import numpy as np
import pandas as pd
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image
from miscc.config import cfg
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def transform_img(img, imsize, transform=None, normalize=None):
if transform is not None:
img = transform(img)
ret = []
for i in range(cfg.TREE.BRANCH_NUM):
if i < (cfg.TREE.BRANCH_NUM - 1):
re_img = transforms.Scale(imsize[i])(img)
else:
re_img = img
ret.append(normalize(re_img))
return ret
def get_imgs(img_path, imsize, bbox=None,
transform=None, normalize=None):
img = Image.open(img_path).convert('RGB')
width, height = img.size
if bbox is not None:
r = int(np.maximum(bbox[2], bbox[3]) * 0.75)
center_x = int((2 * bbox[0] + bbox[2]) / 2)
center_y = int((2 * bbox[1] + bbox[3]) / 2)
y1 = np.maximum(0, center_y - r)
y2 = np.minimum(height, center_y + r)
x1 = np.maximum(0, center_x - r)
x2 = np.minimum(width, center_x + r)
img = img.crop([x1, y1, x2, y2])
if transform is not None:
img = transform(img)
ret = []
for i in range(cfg.TREE.BRANCH_NUM):
if i < (cfg.TREE.BRANCH_NUM - 1):
re_img = transforms.Scale(imsize[i])(img)
else:
re_img = img
ret.append(normalize(re_img))
return ret
class ImageFolder(data.Dataset):
def __init__(self, root, split_dir='train', custom_classes=None,
base_size=64, transform=None, target_transform=None):
root = os.path.join(root, split_dir)
classes, class_to_idx = self.find_classes(root, custom_classes)
imgs = self.make_dataset(classes, class_to_idx)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.classes = classes
self.num_classes = len(classes)
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.norm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
self.imsize = []
for i in range(cfg.TREE.BRANCH_NUM):
self.imsize.append(base_size)
base_size = base_size * 2
print('num_classes', self.num_classes)
def find_classes(self, dir, custom_classes):
classes = []
for d in os.listdir(dir):
            if os.path.isdir(os.path.join(dir, d)):  # actually check the entry; the bare function object is always truthy
if custom_classes is None or d in custom_classes:
classes.append(os.path.join(dir, d))
print('Valid classes: ', len(classes), classes)
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(self, classes, class_to_idx):
images = []
for d in classes:
for root, _, fnames in sorted(os.walk(d)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
item = (path, class_to_idx[d])
images.append(item)
print('The number of images: ', len(images))
return images
def __getitem__(self, index):
path, target = self.imgs[index]
imgs_list = get_imgs(path, self.imsize,
transform=self.transform,
normalize=self.norm)
return imgs_list
def __len__(self):
return len(self.imgs)
class LSUNClass(data.Dataset):
def __init__(self, db_path, base_size=64,
transform=None, target_transform=None):
import lmdb
self.db_path = db_path
self.env = lmdb.open(db_path, max_readers=1, readonly=True, lock=False,
readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.length = txn.stat()['entries']
print('length: ', self.length)
cache_file = db_path + '/cache'
if os.path.isfile(cache_file):
self.keys = pickle.load(open(cache_file, "rb"))
print('Load:', cache_file, 'keys: ', len(self.keys))
else:
with self.env.begin(write=False) as txn:
self.keys = [key for key, _ in txn.cursor()]
pickle.dump(self.keys, open(cache_file, "wb"))
self.imsize = []
for i in range(cfg.TREE.BRANCH_NUM):
self.imsize.append(base_size)
base_size = base_size * 2
self.transform = transform
self.target_transform = target_transform
self.norm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def __getitem__(self, index):
env = self.env
with env.begin(write=False) as txn:
imgbuf = txn.get(self.keys[index])
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
imgs = get_imgs(buf, self.imsize,
transform=self.transform,
normalize=self.norm)
return imgs
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')'
class TextDataset(data.Dataset):
def __init__(self, data_dir, split='train', embedding_type='cnn-rnn',
base_size=64, transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.norm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
self.imsize = []
for i in range(cfg.TREE.BRANCH_NUM):
self.imsize.append(base_size)
base_size = base_size * 2
self.data = []
self.data_dir = data_dir
if data_dir.find('birds') != -1:
self.bbox = self.load_bbox()
else:
self.bbox = None
split_dir = os.path.join(data_dir, split)
self.filenames = self.load_filenames(split_dir)
self.embeddings = self.load_embedding(split_dir, embedding_type)
self.class_id = self.load_class_id(split_dir, len(self.filenames))
self.captions = self.load_all_captions()
if cfg.TRAIN.FLAG:
self.iterator = self.prepair_training_pairs
else:
self.iterator = self.prepair_test_pairs
def load_bbox(self):
data_dir = self.data_dir
bbox_path = os.path.join(data_dir, 'CUB_200_2011/bounding_boxes.txt')
df_bounding_boxes = pd.read_csv(bbox_path,
delim_whitespace=True,
header=None).astype(int)
#
filepath = os.path.join(data_dir, 'CUB_200_2011/images.txt')
df_filenames = \
pd.read_csv(filepath, delim_whitespace=True, header=None)
filenames = df_filenames[1].tolist()
print('Total filenames: ', len(filenames), filenames[0])
#
filename_bbox = {img_file[:-4]: [] for img_file in filenames}
numImgs = len(filenames)
for i in xrange(0, numImgs):
# bbox = [x-left, y-top, width, height]
bbox = df_bounding_boxes.iloc[i][1:].tolist()
key = filenames[i][:-4]
filename_bbox[key] = bbox
#
return filename_bbox
def load_all_captions(self):
def load_captions(caption_name): # self,
cap_path = caption_name
with open(cap_path, "r") as f:
captions = f.read().decode('utf8').split('\n')
captions = [cap.replace("\ufffd\ufffd", " ")
for cap in captions if len(cap) > 0]
return captions
caption_dict = {}
for key in self.filenames:
caption_name = '%s/text/%s.txt' % (self.data_dir, key)
captions = load_captions(caption_name)
caption_dict[key] = captions
return caption_dict
def load_embedding(self, data_dir, embedding_type):
if embedding_type == 'cnn-rnn':
embedding_filename = '/char-CNN-RNN-embeddings.pickle'
elif embedding_type == 'cnn-gru':
embedding_filename = '/char-CNN-GRU-embeddings.pickle'
elif embedding_type == 'skip-thought':
embedding_filename = '/skip-thought-embeddings.pickle'
with open(data_dir + embedding_filename, 'rb') as f:
embeddings = pickle.load(f)
embeddings = np.array(embeddings)
# embedding_shape = [embeddings.shape[-1]]
print('embeddings: ', embeddings.shape)
return embeddings
def load_class_id(self, data_dir, total_num):
if os.path.isfile(data_dir + '/class_info.pickle'):
with open(data_dir + '/class_info.pickle', 'rb') as f:
class_id = pickle.load(f)
else:
class_id = np.arange(total_num)
return class_id
def load_filenames(self, data_dir):
filepath = os.path.join(data_dir, 'filenames.pickle')
with open(filepath, 'rb') as f:
filenames = pickle.load(f)
print('Load filenames from: %s (%d)' % (filepath, len(filenames)))
return filenames
def prepair_training_pairs(self, index):
key = self.filenames[index]
if self.bbox is not None:
bbox = self.bbox[key]
data_dir = '%s/CUB_200_2011' % self.data_dir
else:
bbox = None
data_dir = self.data_dir
# captions = self.captions[key]
embeddings = self.embeddings[index, :, :]
img_name = '%s/images/%s.jpg' % (data_dir, key)
imgs = get_imgs(img_name, self.imsize,
bbox, self.transform, normalize=self.norm)
wrong_ix = random.randint(0, len(self.filenames) - 1)
if(self.class_id[index] == self.class_id[wrong_ix]):
wrong_ix = random.randint(0, len(self.filenames) - 1)
wrong_key = self.filenames[wrong_ix]
if self.bbox is not None:
wrong_bbox = self.bbox[wrong_key]
else:
wrong_bbox = None
wrong_img_name = '%s/images/%s.jpg' % \
(data_dir, wrong_key)
wrong_imgs = get_imgs(wrong_img_name, self.imsize,
wrong_bbox, self.transform, normalize=self.norm)
embedding_ix = random.randint(0, embeddings.shape[0] - 1)
embedding = embeddings[embedding_ix, :]
if self.target_transform is not None:
embedding = self.target_transform(embedding)
return imgs, wrong_imgs, embedding, key # captions
def prepair_test_pairs(self, index):
key = self.filenames[index]
if self.bbox is not None:
bbox = self.bbox[key]
data_dir = '%s/CUB_200_2011' % self.data_dir
else:
bbox = None
data_dir = self.data_dir
# captions = self.captions[key]
embeddings = self.embeddings[index, :, :]
img_name = '%s/images/%s.jpg' % (data_dir, key)
imgs = get_imgs(img_name, self.imsize,
bbox, self.transform, normalize=self.norm)
if self.target_transform is not None:
embeddings = self.target_transform(embeddings)
return imgs, embeddings, key # captions
def __getitem__(self, index):
return self.iterator(index)
def __len__(self):
return len(self.filenames)
class SplitType:
train = 'train'
valid = 'validation'
baby = 'baby'
class Dictionary(object):
''' Holds info about vocabulary '''
def __init__(self, path):
print(path)
self.word2idx = {'UNK': 0, '<eos>': 1, '<pad>': 2}
self.idx2word = ['UNK', '<eos>', '<pad>']
self.counter = Counter()
self.total = len(self.idx2word)
        # The vocabulary file is latin-1 encoded; read it as text directly
        # (the original Python 2 code decoded each raw line instead).
        with open(path, 'r', encoding='latin1') as vocab:
            for line in vocab:
                word = line.strip().split('\t')[0]
self.add_word(word)
print("Loaded dictionary with %d words" % len(self.idx2word))
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
token_id = self.word2idx[word]
self.counter[token_id] += 1
self.total += 1
return self.word2idx[word]
    def _word2id(self, word):
        if word not in self.word2idx:
            # Unknown words fall back to the 'UNK' id.
            return self.word2idx['UNK']
        return self.word2idx[word]
    def words2ids(self, words):
        # Materialize a list first: on Python 3, np.asarray(map(...)) would wrap
        # the map iterator in a 0-d object array instead of an array of ids.
        ids = np.asarray([self._word2id(w) for w in words])
        return ids
def __len__(self):
return len(self.idx2word)
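# SsenseDataset: image/description pairs stored in one HDF5 file per split.
# Descriptions are tokenized against a fixed vocabulary (Dictionary) and
# padded/truncated to max_len; categories are used to sample mismatched
# ("wrong") images during training.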
class SsenseDataset(data.Dataset):
def __init__(self, data_dir, vocab_path, max_len, dataset_name,
base_size=64, split_name='train', transform=None):
self.category2idx = {
'POCKET SQUARES & TIE BARS': 38, 'WALLETS & CARD HOLDERS': 48, 'FINE JEWELRY': 19, 'JACKETS & COATS': 5,
'HATS': 10, 'TOPS': 0, 'SOCKS': 39, 'SHOULDER BAGS': 21, 'LOAFERS': 37, 'SHIRTS': 1, 'TIES': 8,
'BRIEFCASES': 40, 'BELTS & SUSPENDERS': 14, 'TOTE BAGS': 27, 'TRAVEL BAGS': 47,
'DUFFLE & TOP HANDLE BAGS': 32, 'BAG ACCESSORIES': 46, 'KEYCHAINS': 26,
'DUFFLE BAGS': 45, 'SNEAKERS': 17, 'PANTS': 3, 'SWEATERS': 4,
'JEWELRY': 23, 'SHORTS': 2, 'ESPADRILLES': 43, 'MESSENGER BAGS': 44,
'EYEWEAR': 31, 'HEELS': 41, 'MONKSTRAPS': 36, 'MESSENGER BAGS & SATCHELS': 42,
'FLATS': 33, 'BLANKETS': 22, 'POUCHES & DOCUMENT HOLDERS': 29,
'DRESSES': 11, 'JUMPSUITS': 13, 'UNDERWEAR & LOUNGEWEAR': 25,
'BOAT SHOES & MOCCASINS': 28, 'CLUTCHES & POUCHES': 20, 'JEANS': 6,
'SWIMWEAR': 12, 'SUITS & BLAZERS': 7, 'LINGERIE': 16, 'GLOVES': 18, 'BOOTS': 34,
'LACE UPS': 35, 'SCARVES': 15, 'SANDALS': 30, 'BACKPACKS': 24, 'SKIRTS': 9
}
self.max_desc_length = max_len
self.dictionary = Dictionary(vocab_path)
self.split_name = split_name
self.transform = transform
self.norm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
self.imsize = []
for i in range(cfg.TREE.BRANCH_NUM):
self.imsize.append(base_size)
base_size = base_size * 2
self.data_size = 0
self.dataset_name = dataset_name
split_dir = os.path.join(data_dir, self.split_name)
print("Split Dir: %s" % split_dir)
self.images = self.load_h5_images(split_dir)
self.categories = self.load_categories(split_dir)
self.descriptions = self.load_descriptions(split_dir)
if cfg.TRAIN.FLAG:
self.iterator = self.prepair_training_pairs
else:
self.iterator = self.prepair_test_pairs
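    # pad_sequence: force every description to exactly max_desc_length ids,
    # ending with <eos>; short captions are padded with <pad>, long ones truncated.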
def pad_sequence(self, seq):
eos_id = self.dictionary.word2idx['<eos>']
pad_id = self.dictionary.word2idx['<pad>']
if len(seq) < self.max_desc_length:
seq = np.concatenate([seq, [eos_id], [pad_id] * (self.max_desc_length - len(seq) - 1)])
# seq = np.concatenate([seq, [eos_id] * (self.max_desc_length - len(seq))])
return seq
elif len(seq) >= self.max_desc_length:
seq = np.concatenate([seq[:self.max_desc_length - 1], [eos_id]])
return seq
def load_descriptions(self, data_dir):
filename = '%s_%s.h5' % (self.dataset_name, self.split_name)
print("Loading descriptions file from %s" % filename)
        # Open read-only; Dataset.value was removed in h5py 3.0, use [()] instead.
        with h5py.File(os.path.join(data_dir, filename), 'r') as data_file:
            descriptions = np.asarray(data_file['input_description'][()])
print('Loaded descriptions, shape: ', descriptions.shape)
return descriptions
def load_categories(self, data_dir):
filename = '%s_%s.h5' % (self.dataset_name, self.split_name)
print("Loading Categories file from %s" % filename)
        with h5py.File(os.path.join(data_dir, filename), 'r') as data_file:
            categories = np.asarray(data_file['input_category'][()])
print('loaded Categories, shape: ', categories.shape)
return categories
def load_h5_images(self, data_dir):
filename = '%s_%s.h5' % (self.dataset_name, self.split_name)
print("Loading image file from %s" % filename)
        with h5py.File(os.path.join(data_dir, filename), 'r') as data_file:
            images = np.asarray(data_file['input_image'][()])
print('loaded images, shape: ', images.shape)
self.data_size = images.shape[0]
return images
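    # old__getitem__ is kept for reference only; __getitem__ below dispatches to
    # prepair_training_pairs / prepair_test_pairs instead.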
def old__getitem__(self, index):
img = self.images[index]
img = Image.fromarray(img.astype('uint8'), 'RGB').convert('RGB')
img = self.get_img(img)
desc = self.descriptions[index][0].decode('latin1')
desc_ids = self.dictionary.words2ids(desc.split())
desc_ids = self.pad_sequence(desc_ids)
desc_tensor = torch.from_numpy(desc_ids).type(torch.LongTensor)
img_tensor = img.type(torch.FloatTensor)
return img_tensor, desc_tensor, desc
def prepair_training_pairs(self, index):
img = self.images[index]
img = Image.fromarray(img.astype('uint8'), 'RGB').convert('RGB')
imgs = transform_img(img, self.imsize, self.transform, normalize=self.norm)
desc = self.descriptions[index][0].decode('latin1')
desc_ids = self.dictionary.words2ids(desc.split())
desc_ids = self.pad_sequence(desc_ids)
desc_tensor = torch.from_numpy(desc_ids).type(torch.LongTensor)
wrong_ix = random.randint(0, self.data_size - 1)
if(self.categories[index] == self.categories[wrong_ix]):
wrong_ix = random.randint(0, self.data_size - 1)
        # Index with wrong_ix (not index) so the mismatched pair uses a different image.
        wrong_img = self.images[wrong_ix]
wrong_img = Image.fromarray(wrong_img.astype('uint8'), 'RGB').convert('RGB')
wrong_imgs = transform_img(wrong_img, self.imsize, self.transform, normalize=self.norm)
return imgs, wrong_imgs, desc_tensor, desc # captions
def prepair_test_pairs(self, index):
img = self.images[index]
img = Image.fromarray(img.astype('uint8'), 'RGB').convert('RGB')
imgs = transform_img(img, self.imsize, self.transform, normalize=self.norm)
desc = self.descriptions[index][0].decode('latin1')
desc_ids = self.dictionary.words2ids(desc.split())
desc_ids = self.pad_sequence(desc_ids)
desc_tensor = torch.from_numpy(desc_ids).type(torch.LongTensor)
return imgs, desc_tensor, desc # captions
def __getitem__(self, index):
return self.iterator(index)
def __len__(self):
return self.data_size
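# Illustrative usage sketch (not part of the original module; the paths,
# max_len and dataset_name below are placeholders, and cfg is assumed to be
# initialised with TREE.BRANCH_NUM and TRAIN.FLAG):
#
#   dataset = SsenseDataset(data_dir='data/ssense', vocab_path='data/vocab.tsv',
#                           max_len=50, dataset_name='ssense', split_name='train')
#   loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
#   imgs, wrong_imgs, desc_tensor, desc = next(iter(loader))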
avg_line_length: 36.754647 | max_line_length: 116 | alphanum_fraction: 0.592697

hexsha: 5e88f31760bc1e31cfaa60f3a56835c9e826c9c5 | size: 6764 | ext: py | lang: Python
max_stars_repo_path: project/User/api/views.py | max_stars_repo_name: mrvafa/flask-user-jwt-api-token | max_stars_repo_head_hexsha: 80379e3e8eca8e78a82d31b3f851768181430931 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: project/User/api/views.py | max_issues_repo_name: mrvafa/flask-user-jwt-api-token | max_issues_repo_head_hexsha: 80379e3e8eca8e78a82d31b3f851768181430931 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: project/User/api/views.py | max_forks_repo_name: mrvafa/flask-user-jwt-api-token | max_forks_repo_head_hexsha: 80379e3e8eca8e78a82d31b3f851768181430931 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from flask import request, make_response, jsonify
from flask.views import MethodView
from werkzeug.routing import ValidationError
from project import db, bcrypt
from project.User.models import User
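# Three MethodView-based endpoints: RegisterAPI creates a user, LoginAPI
# exchanges username/email + password for a JWT, and UserProfileAPI exposes
# read (by id) and authenticated update of a profile.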
class RegisterAPI(MethodView):
def post(self):
post_data = request.get_json()
username = post_data.get('username') if post_data and 'username' in post_data else None
email = post_data.get('email') if post_data and 'email' in post_data else None
password = post_data.get('password') if post_data and 'password' in post_data else None
if not username or not email or not password:
response_object = {
                'error': 'username, email and password are required',
}
return make_response(jsonify(response_object)), 400
user_username = User.query.filter_by(username=username).first()
user_email = User.query.filter_by(email=email).first()
if not user_username and not user_email:
try:
User.create(
username=username,
email=email,
password=password
)
response_object = {
'success': 'User has been created. now try to login.'
}
return make_response(jsonify(response_object)), 201
except ValidationError as e:
response_object = {
'error': f'{e}'
}
return make_response(jsonify(response_object)), 400
else:
response_object = {
                'error': 'User already exists. Please log in.',
}
return make_response(jsonify(response_object)), 400
class LoginAPI(MethodView):
def post(self):
post_data = request.get_json()
username = post_data.get('username') if post_data and 'username' in post_data else None
email = post_data.get('email') if post_data and 'email' in post_data else None
password = post_data.get('password') if post_data and 'password' in post_data else None
if not password or not (username or email):
response_object = {
                'error': 'Password and either email or username are required.',
}
return make_response(jsonify(response_object)), 400
if username and not email:
user = User.query.filter_by(
username=username
).first()
if user and bcrypt.check_password_hash(
user.password, password
):
token = user.encode_auth_token()
response_object = {
'token': token.decode()
}
return make_response(jsonify(response_object)), 200
if email and not username:
user = User.query.filter_by(
email=email
).first()
if user and bcrypt.check_password_hash(
user.password, password
):
response_object = {
'token': user.encode_auth_token().decode(),
}
return make_response(jsonify(response_object)), 200
if email and username:
user = User.query.filter_by(username=username).first()
if user and User.query.filter_by(email=email).first() == user:
if user and bcrypt.check_password_hash(
user.password, password
):
token = user.encode_auth_token()
response_object = {
'token': token.decode()
}
return make_response(jsonify(response_object)), 200
response_object = {
            'error': 'Username/password combination is wrong.'
}
return make_response(jsonify(response_object)), 400
class UserProfileAPI(MethodView):
def get(self, user_id):
user = User.query.filter_by(id=user_id).first()
if user:
response_object = {
'user_id': user.id,
'username': user.username,
'email': user.email,
'registered_on': user.registered_on
}
return make_response(jsonify(response_object)), 200
return make_response(jsonify({"error": "page not found"})), 404
def put(self):
auth_header = request.headers.get('Authorization')
auth_token = ''
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
response_object = {
'error': 'Bearer token malformed.'
}
return make_response(jsonify(response_object)), 401
if auth_token:
resp = User.decode_auth_token(auth_token)
if not isinstance(resp, str):
user = User.query.filter_by(id=resp).first()
post_data = request.get_json()
username = post_data.get('username') if post_data and 'username' in post_data else None
email = post_data.get('email') if post_data and 'email' in post_data else None
errors = []
if email:
if (User.query.filter_by(email=email).first() and user.email == email) \
or not User.query.filter_by(email=email).first():
user.email = email
else:
                        errors.append('email has already been taken')
if username:
if (User.query.filter_by(username=username).first() and user.username == username) \
or not User.query.filter_by(username=username).first():
user.username = username
else:
                        errors.append('username has already been taken')
if errors:
return make_response(jsonify({"errors": errors})), 400
else:
db.session.commit()
response_object = {
'user_id': user.id,
'username': user.username,
'email': user.email,
'registered_on': user.registered_on
}
return make_response(jsonify(response_object)), 202
response_object = {
'error': resp
}
return make_response(jsonify(response_object)), 401
else:
response_object = {
'error': 'Provide a valid auth token.'
}
return make_response(jsonify(response_object)), 401
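# Illustrative registration sketch (not part of this module; the blueprint
# name and URL paths are assumptions, and PUT would need its own rule since
# UserProfileAPI.put takes no user_id):
#
#   from flask import Blueprint
#   user_blueprint = Blueprint('user', __name__)
#   user_blueprint.add_url_rule('/auth/register',
#                               view_func=RegisterAPI.as_view('register'),
#                               methods=['POST'])
#   user_blueprint.add_url_rule('/auth/login',
#                               view_func=LoginAPI.as_view('login'),
#                               methods=['POST'])
#   user_blueprint.add_url_rule('/user/<int:user_id>',
#                               view_func=UserProfileAPI.as_view('profile'),
#                               methods=['GET'])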
avg_line_length: 40.746988 | max_line_length: 104 | alphanum_fraction: 0.532821

hexsha: c754a0d82dd11c7998c134455e3d336ddc9ab702 | size: 1516 | ext: py | lang: Python
max_stars_repo_path: Section5/Part1/login_user.py | max_stars_repo_name: ochsec/Hands-On-Amazon-DynamoDB-for-Developers-V- | max_stars_repo_head_hexsha: 4700a372c2de017b015f947815fc555b862fc8a5 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2020-03-25T08:17:35.000Z | max_stars_repo_stars_event_max_datetime: 2021-10-04T04:52:59.000Z
max_issues_repo_path: Section5/Part1/login_user.py | max_issues_repo_name: ochsec/Hands-On-Amazon-DynamoDB-for-Developers-V- | max_issues_repo_head_hexsha: 4700a372c2de017b015f947815fc555b862fc8a5 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2021-03-25T22:35:27.000Z | max_issues_repo_issues_event_max_datetime: 2021-03-25T22:35:58.000Z
max_forks_repo_path: Section5/Part1/login_user.py | max_forks_repo_name: ochsec/Hands-On-Amazon-DynamoDB-for-Developers-V- | max_forks_repo_head_hexsha: 4700a372c2de017b015f947815fc555b862fc8a5 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2020-08-27T04:14:28.000Z | max_forks_repo_forks_event_max_datetime: 2022-01-27T20:40:36.000Z
import hashlib
import json
import os
import time
import traceback
import uuid
import boto3
from boto3.dynamodb.conditions import Key
dynamodb = boto3.resource('dynamodb')
table_name = os.environ['USER_TABLE']
table = dynamodb.Table(table_name)
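# Handler flow: look up the user item by user_id (the table name comes from the
# USER_TABLE environment variable), hash the submitted password with SHA-256 and
# compare it with the stored hash; respond 404, 403 or 200 accordingly.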
def lambda_handler(event, context):
request_body = json.loads(event["body"])
user_id = event["pathParameters"]["userid"]
print("Loading user ID " + user_id)
user = __get_user_by_id(user_id)
print("User is " + str(user))
if user is None:
return {
"statusCode": 404,
}
request_password = request_body["password"]
encrypted_request_password = __encrypt_password(request_password)
user_password = user["password"]
if encrypted_request_password != user_password:
return {
"statusCode": 403,
}
return {
"statusCode": 200,
"body": json.dumps(
{
"User ID": user["user_id"],
"First Name": user["first_name"],
"Last Name": user["last_name"],
"Email Address": user["email_address"]
}
)
}
def __get_user_by_id(user_id):
response = table.query(
KeyConditionExpression=Key('user_id').eq(user_id)
)
if len(response['Items']) != 1:
return None
return response['Items'][0]
def __encrypt_password(candidate_password):
sha_signature = \
hashlib.sha256(candidate_password.encode()).hexdigest()
return sha_signature
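# Example invocation payload (illustrative only; assumes an API Gateway
# proxy-style event and a made-up user id):
#
#   event = {
#       "pathParameters": {"userid": "1234"},
#       "body": json.dumps({"password": "correct horse battery staple"})
#   }
#   print(lambda_handler(event, None))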
avg_line_length: 23.6875 | max_line_length: 69 | alphanum_fraction: 0.623351

hexsha: 79e2d324eb015069b500704118b53e1800a9701d | size: 1593 | ext: py | lang: Python
max_stars_repo_path: PollingExample.py | max_stars_repo_name: Poohl/Gamepad | max_stars_repo_head_hexsha: ca3884211e03b867138b6b7135d6fe4d4314af78 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 50 | max_stars_repo_stars_event_min_datetime: 2019-10-17T21:52:51.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T20:20:39.000Z
max_issues_repo_path: PollingExample.py | max_issues_repo_name: Poohl/Gamepad | max_issues_repo_head_hexsha: ca3884211e03b867138b6b7135d6fe4d4314af78 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 6 | max_issues_repo_issues_event_min_datetime: 2020-01-30T08:31:22.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-21T09:42:44.000Z
max_forks_repo_path: PollingExample.py | max_forks_repo_name: Poohl/Gamepad | max_forks_repo_head_hexsha: ca3884211e03b867138b6b7135d6fe4d4314af78 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 20 | max_forks_repo_forks_event_min_datetime: 2020-01-30T08:38:10.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-16T14:59:09.000Z
#!/usr/bin/env python
# coding: utf-8
# Load the gamepad and time libraries
import Gamepad
import time
# Gamepad settings
gamepadType = Gamepad.PS4
buttonHappy = 'CROSS'
buttonBeep = 'CIRCLE'
buttonExit = 'PS'
joystickSpeed = 'LEFT-Y'
joystickSteering = 'RIGHT-X'
# Wait for a connection
if not Gamepad.available():
print('Please connect your gamepad...')
while not Gamepad.available():
time.sleep(1.0)
gamepad = gamepadType()
print('Gamepad connected')
# Set some initial state
speed = 0.0
steering = 0.0
# Handle joystick updates one at a time
while gamepad.isConnected():
# Wait for the next event
eventType, control, value = gamepad.getNextEvent()
# Determine the type
if eventType == 'BUTTON':
# Button changed
if control == buttonHappy:
# Happy button (event on press and release)
if value:
print(':)')
else:
print(':(')
elif control == buttonBeep:
# Beep button (event on press)
if value:
print('BEEP')
elif control == buttonExit:
# Exit button (event on press)
if value:
print('EXIT')
break
elif eventType == 'AXIS':
# Joystick changed
if control == joystickSpeed:
# Speed control (inverted)
speed = -value
elif control == joystickSteering:
# Steering control (not inverted)
steering = value
print('%+.1f %% speed, %+.1f %% steering' % (speed * 100, steering * 100))
avg_line_length: 26.55 | max_line_length: 82 | alphanum_fraction: 0.57941

hexsha: 33dea30317028a208c0c06d9078f544879efe2f7 | size: 456 | ext: py | lang: Python
max_stars_repo_path: config/variable/string.py | max_stars_repo_name: phirasit/TestcaseGenerator | max_stars_repo_head_hexsha: 443f320e927a606d9d64933b60591c67c83b6630 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: config/variable/string.py | max_issues_repo_name: phirasit/TestcaseGenerator | max_issues_repo_head_hexsha: 443f320e927a606d9d64933b60591c67c83b6630 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: config/variable/string.py | max_forks_repo_name: phirasit/TestcaseGenerator | max_forks_repo_head_hexsha: 443f320e927a606d9d64933b60591c67c83b6630 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from config.variable.char import Char
class String(Char):
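    """Generate a string as a list of characters drawn from the wrapped Char
    generator; the length comes from props['length'] via extract_ref()."""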
def __init__(self, args, props):
super().__init__(args, props)
self.var = Char(args, props)
assert ('length' in props)
def generate(self):
self.var.props = self.props
return [self.var.generate() for _ in range(self.extract_ref(self.props['length']))]
def display(self, data):
return ''.join(map(lambda x: self.var.display(x), data))
avg_line_length: 26.823529 | max_line_length: 91 | alphanum_fraction: 0.629386

hexsha: 3898ca002d106a1462fdc186606af308bc26ce0b | size: 1710 | ext: py | lang: Python
max_stars_repo_path: Nduja/utility/pattern.py | max_stars_repo_name: herrBez/Nduja | max_stars_repo_head_hexsha: 51f93c6a8827ddf8605f88cf062d524b0ca5cebf | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2019-07-12T00:52:39.000Z | max_stars_repo_stars_event_max_datetime: 2020-02-13T17:09:07.000Z
max_issues_repo_path: Nduja/utility/pattern.py | max_issues_repo_name: herrBez/Nduja | max_issues_repo_head_hexsha: 51f93c6a8827ddf8605f88cf062d524b0ca5cebf | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2018-05-04T09:28:37.000Z | max_issues_repo_issues_event_max_datetime: 2019-11-09T13:37:00.000Z
max_forks_repo_path: Nduja/utility/pattern.py | max_forks_repo_name: herrBez/Nduja | max_forks_repo_head_hexsha: 51f93c6a8827ddf8605f88cf062d524b0ca5cebf | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2018-12-04T11:33:31.000Z | max_forks_repo_forks_event_max_datetime: 2021-09-07T20:13:52.000Z
import re
from typing import List, Tuple, Dict
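# Pattern wraps a compiled wallet-address regular expression together with the
# currency's name, group and symbol, and returns (symbol, match) pairs for every
# address found in a piece of text.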
class Pattern:
def __init__(self, format_object: Dict[str, str]) -> None:
self._pattern = re.compile(format_object["wallet_regexp"])
self._name = format_object["name"]
self._group = format_object["group"]
self._symbol = format_object["symbol"]
@property
def pattern(self):
return self._pattern
@pattern.setter
def pattern(self, value):
self._pattern = value
@property
def symbol(self) -> str: return self._symbol
@symbol.setter
def symbol(self, value: str):
self._symbol = value
@property
def name(self) -> str: return self._name
@name.setter
def name(self, value: str):
self._name = value
def __str__(self) -> str:
return self.symbol + " Pattern "
def match(self, content: str) -> List[Tuple[str, str]]:
matches_iterator = self.pattern.finditer(content)
matches = [(self.symbol, x.group()) for x in matches_iterator]
return matches
def match_email(text: str) -> List[str]:
"""Check if inside the text there is a list of emails"""
pattern = re.compile(
"\\b([a-zA-Z0-9_.]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)\\b")
emails = pattern.findall(text)
return emails
def match_personal_website(text: str) -> List[str]:
"""Check if inside the given text there is a list of websites"""
pattern = re.compile("\\b(https?://([^/\\s]+/?)*)\\b")
website_matches = pattern.findall(text)
# Filter out all results that links to license reference
website_matches = [w[0] for w in website_matches if
"license" not in w[0]]
return website_matches
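# Illustrative usage sketch (not part of the original module; the regexp and
# metadata below are made-up placeholders):
#
#   btc = Pattern({"wallet_regexp": r"\b[13][a-km-zA-HJ-NP-Z1-9]{25,34}\b",
#                  "name": "Bitcoin", "group": "btc", "symbol": "BTC"})
#   btc.match("send donations to 1BoatSLRHtKNngkdXEeobR76b53LETtpyT")
#   match_email("report issues to maintainer@example.com")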
avg_line_length: 28.5 | max_line_length: 70 | alphanum_fraction: 0.621637
No community queries yet
The top public SQL queries from the community will appear here once available.