File: tests/users/test_views.py (Python, 9,579 bytes; blob e0473dcab1e2b2649982bf87416c4fae41e639fb)
Repository: amanbansal2709/ctfd @ 941335a5e205ca818ce1758076858b628e4fa05b (Apache-2.0; 1 fork, 2021-12-23)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask import url_for
from tests.helpers import create_ctfd, destroy_ctfd, register_user, \
login_as_user, gen_challenge, gen_file, gen_page
from CTFd.utils import set_config
from CTFd.utils.encoding import hexencode
from freezegun import freeze_time
def test_index():
"""Does the index page return a 200 by default"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get('/')
assert r.status_code == 200
destroy_ctfd(app)
def test_page():
"""Test that users can access pages that are created in the database"""
app = create_ctfd()
with app.app_context():
gen_page(app.db, title="Title", route="this-is-a-route", content="This is some HTML")
with app.test_client() as client:
r = client.get('/this-is-a-route')
assert r.status_code == 200
destroy_ctfd(app)
def test_draft_pages():
"""Test that draft pages can't be seen"""
app = create_ctfd()
with app.app_context():
gen_page(app.db, title="Title", route="this-is-a-route", content="This is some HTML", draft=True)
with app.test_client() as client:
r = client.get('/this-is-a-route')
assert r.status_code == 404
register_user(app)
client = login_as_user(app)
r = client.get('/this-is-a-route')
assert r.status_code == 404
destroy_ctfd(app)
def test_page_requiring_auth():
"""Test that pages properly require authentication"""
app = create_ctfd()
with app.app_context():
gen_page(app.db, title="Title", route="this-is-a-route", content="This is some HTML", auth_required=True)
with app.test_client() as client:
r = client.get('/this-is-a-route')
assert r.status_code == 302
assert r.location == 'http://localhost/login?next=%2Fthis-is-a-route%3F'
register_user(app)
client = login_as_user(app)
r = client.get('/this-is-a-route')
assert r.status_code == 200
destroy_ctfd(app)
def test_not_found():
"""Should return a 404 for pages that are not found"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get('/this-should-404')
assert r.status_code == 404
r = client.post('/this-should-404')
assert r.status_code == 404
destroy_ctfd(app)
def test_themes_handler():
"""Test that the themes handler is working properly"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get('/themes/core/static/css/style.css')
assert r.status_code == 200
r = client.get('/themes/core/static/css/404_NOT_FOUND')
assert r.status_code == 404
r = client.get('/themes/core/static/%2e%2e/%2e%2e/%2e%2e/utils.py')
assert r.status_code == 404
r = client.get('/themes/core/static/%2e%2e%2f%2e%2e%2f%2e%2e%2futils.py')
assert r.status_code == 404
r = client.get('/themes/core/static/..%2f..%2f..%2futils.py')
assert r.status_code == 404
r = client.get('/themes/core/static/../../../utils.py')
assert r.status_code == 404
destroy_ctfd(app)
def test_pages_routing_and_rendering():
"""Test that pages are routing and rendering"""
app = create_ctfd()
with app.app_context():
html = '''##The quick brown fox jumped over the lazy dog'''
route = 'test'
title = 'Test'
gen_page(app.db, title, route, html)
with app.test_client() as client:
r = client.get('/test')
output = r.get_data(as_text=True)
assert "<h2>The quick brown fox jumped over the lazy dog</h2>" in output
destroy_ctfd(app)
def test_user_get_profile():
"""Can a registered user load their private profile (/profile)"""
app = create_ctfd()
with app.app_context():
register_user(app)
client = login_as_user(app)
r = client.get('/profile')
assert r.status_code == 200
destroy_ctfd(app)
def test_user_can_access_files():
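    """Test that challenge file downloads honor challenge visibility and the CTF end time."""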
app = create_ctfd()
with app.app_context():
from CTFd.utils.uploads import rmdir
chal = gen_challenge(app.db)
chal_id = chal.id
path = app.config.get('UPLOAD_FOLDER')
location = os.path.join(path, 'test_file_path', 'test.txt')
directory = os.path.dirname(location)
model_path = os.path.join('test_file_path', 'test.txt')
try:
os.makedirs(directory)
with open(location, 'wb') as obj:
obj.write('testing file load'.encode())
gen_file(app.db, location=model_path, challenge_id=chal_id)
url = url_for('views.files', path=model_path)
# Unauthed user should be able to see challenges if challenges are public
set_config('challenge_visibility', 'public')
with app.test_client() as client:
r = client.get(url)
assert r.status_code == 200
assert r.get_data(as_text=True) == 'testing file load'
# Unauthed user should not be able to see challenges if challenges are private
set_config('challenge_visibility', 'private')
with app.test_client() as client:
r = client.get(url)
assert r.status_code == 403
assert r.get_data(as_text=True) != 'testing file load'
# Authed user should be able to see files if challenges are private
register_user(app)
client = login_as_user(app)
r = client.get(url)
assert r.status_code == 200
assert r.get_data(as_text=True) == 'testing file load'
with freeze_time("2017-10-7"):
set_config('end', '1507262400') # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
for v in ('public', 'private'):
set_config('challenge_visibility', v)
# Unauthed users shouldn't be able to see files if the CTF hasn't started
client = app.test_client()
r = client.get(url)
assert r.status_code == 403
assert r.get_data(as_text=True) != 'testing file load'
# Authed users shouldn't be able to see files if the CTF hasn't started
client = login_as_user(app)
r = client.get(url)
assert r.status_code == 403
assert r.get_data(as_text=True) != 'testing file load'
# Admins should be able to see files if the CTF hasn't started
admin = login_as_user(app, "admin")
r = admin.get(url)
assert r.status_code == 200
assert r.get_data(as_text=True) == 'testing file load'
finally:
rmdir(directory)
destroy_ctfd(app)
def test_user_can_access_files_with_auth_token():
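    """Test that anonymous users can only download files via the tokened URL returned by the challenges API."""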
app = create_ctfd()
with app.app_context():
from CTFd.utils.uploads import rmdir
chal = gen_challenge(app.db)
chal_id = chal.id
path = app.config.get('UPLOAD_FOLDER')
md5hash = hexencode(os.urandom(16)).decode('utf-8')
location = os.path.join(path, md5hash, 'test.txt')
directory = os.path.dirname(location)
model_path = os.path.join(md5hash, 'test.txt')
try:
os.makedirs(directory)
with open(location, 'wb') as obj:
obj.write('testing file load'.encode())
gen_file(app.db, location=model_path, challenge_id=chal_id)
url = url_for('views.files', path=model_path)
register_user(app)
with login_as_user(app) as client:
req = client.get('/api/v1/challenges/1')
data = req.get_json()
file_url = data['data']['files'][0]
with app.test_client() as client:
r = client.get(url)
assert r.status_code == 403
assert r.get_data(as_text=True) != 'testing file load'
r = client.get(url_for('views.files', path=model_path, token="random_token_that_shouldnt_work"))
assert r.status_code == 403
assert r.get_data(as_text=True) != 'testing file load'
r = client.get(file_url)
assert r.status_code == 200
assert r.get_data(as_text=True) == 'testing file load'
# Unauthed users shouldn't be able to see files if the CTF is admins only
set_config('challenge_visibility', 'admins')
r = client.get(file_url)
assert r.status_code == 403
assert r.get_data(as_text=True) != 'testing file load'
set_config('challenge_visibility', 'private')
with freeze_time("2017-10-7"):
set_config('end', '1507262400') # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
# Unauthed users shouldn't be able to see files if the CTF hasn't started
r = client.get(file_url)
assert r.status_code == 403
assert r.get_data(as_text=True) != 'testing file load'
finally:
rmdir(directory)
destroy_ctfd(app)

File: vendor/github.com/tensorflow/tensorflow/tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_v2_test.py (Python, 60,229 bytes; blob be012a87690c24c6d9b7808790393e1aa6d01211)
Repository: owennewo/kfserving @ 89f73c87525b8e06ea799f69f2979c4ad272fcb3 (Apache-2.0; 5 stars 2019-01-13 to 2019-07-07, 13 issues 2020-11-13 to 2022-03-12, 2 forks 2020-10-06 to 2020-12-20)

# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequential_feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as sfc_old
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column_v2 as sfc
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.feature_column.feature_column_v2_test import _TestStateManager
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [2, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 14., 15., 16.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]],],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[2], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 2, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[5., 6., 14., 15., 16.], [2., 3., 14., 15., 16.]],
# feature 1, [a: 0, 0, b: 2, -], [a: 1, -, b: 0, -]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]]],
'expected_sequence_length': [2, 2]},
)
def test_embedding_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
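    """Tests sequence_input_layer with two embedding columns over sequence categorical columns."""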
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size = 3
embedding_dimension_a = 2
embedding_values_a = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
embedding_dimension_b = 3
embedding_values_b = (
(11., 12., 13.), # id 0
(14., 15., 16.), # id 1
(17., 18., 19.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc_old._embedding_column(
categorical_column_a,
dimension=embedding_dimension_a,
initializer=_get_initializer(embedding_dimension_a, embedding_values_a))
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_b = fc_old._embedding_column(
categorical_column_b,
dimension=embedding_dimension_b,
initializer=_get_initializer(embedding_dimension_b, embedding_values_b))
input_layer, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
# Test that columns are reordered alphabetically.
feature_columns=[embedding_column_b, embedding_column_a])
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(
('sequence_input_layer/aaa_embedding/embedding_weights:0',
'sequence_input_layer/bbb_embedding/embedding_weights:0'),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values_a, global_vars[0].eval(session=sess))
self.assertAllEqual(embedding_values_b, global_vars[1].eval(session=sess))
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc_old._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc_old._embedding_column(
categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must be of '
r'type _SequenceCategoricalColumn to use sequence_input_layer\.'):
_, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[embedding_column_a])
def test_shared_embedding_column(self):
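    """Tests sequence_input_layer with shared embedding columns."""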
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [2, 0]
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 0),
dense_shape=(2, 2))
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
expected_input_layer = [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 3., 4.], [0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 5., 6.], [3., 4., 1., 2.]],
]
expected_sequence_length = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
# Test that columns are reordered alphabetically.
shared_embedding_columns = fc.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension,
initializer=_get_initializer(embedding_dimension, embedding_values))
input_layer, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
feature_columns=shared_embedding_columns)
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(
('sequence_input_layer/aaa_bbb_shared_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_shared_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence shared embedding column."""
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc_old._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_old._categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_shared_embedding\. categorical_column must '
r'be of type _SequenceCategoricalColumn to use sequence_input_layer\.'):
_, _ = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b
},
feature_columns=shared_embedding_columns)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [1, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 1, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[0., 0., 1., 0., 1.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [1, 0]
[[1., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[1], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 1, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[0., 0., 1., 0., 2.], [1., 1., 0., 0., 1.]],
# feature 1, [a: 0, 0, b: 1, -], [a: 1, -, b: 0, -]
[[2., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [2, 2]},
)
def test_indicator_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
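    """Tests sequence_input_layer with indicator columns over sequence categorical columns."""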
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size_a = 3
vocabulary_size_b = 2
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size_a)
indicator_column_a = fc_old._indicator_column(categorical_column_a)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size_b)
indicator_column_b = fc_old._indicator_column(categorical_column_b)
input_layer, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
# Test that columns are reordered alphabetically.
feature_columns=[indicator_column_b, indicator_column_a])
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_indicator_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence categorical column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc_old._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc_old._indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must be of '
r'type _SequenceCategoricalColumn to use sequence_input_layer\.'):
_, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[indicator_column_a])
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1]
# example 1, [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected_input_layer': [
[[0.], [1.]],
[[10.], [0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
# feature 0, ids [[20, 3], [5]]
# feature 1, ids [[3], [8]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (20, 3, 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]],
'expected_sequence_length': [2, 2]},
)
def test_numeric_column(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
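    """Tests sequence_input_layer with a 1-D sequence numeric column."""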
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc_old.sequence_numeric_column('aaa')
input_layer, sequence_length = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[numeric_column])
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1., 2., 3., 4., 5., 6., 7.]
# example 1, [10., 11., 12., 13.]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
)
def test_numeric_column_multi_dim(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
"""Tests sequence_input_layer for multi-dimensional numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc_old.sequence_numeric_column('aaa', shape=(2, 2))
input_layer, sequence_length = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[numeric_column])
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected_input_layer, input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
def test_sequence_length_not_equal(self):
"""Tests that an error is raised when sequence lengths are not equal."""
# Input a with sequence_length = [2, 1]
sparse_input_a = sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Input b with sequence_length = [1, 1]
sparse_input_b = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0)),
values=(1., 10.),
dense_shape=(2, 2))
numeric_column_a = sfc_old.sequence_numeric_column('aaa')
numeric_column_b = sfc_old.sequence_numeric_column('bbb')
_, sequence_length = sfc.sequence_input_layer(
features={
'aaa': sparse_input_a,
'bbb': sparse_input_b,
},
feature_columns=[numeric_column_a, numeric_column_b])
with monitored_session.MonitoredSession() as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[Condition x == y did not hold element-wise:\] '
r'\[x \(sequence_input_layer/aaa/sequence_length:0\) = \] \[2 1\] '
r'\[y \(sequence_input_layer/bbb/sequence_length:0\) = \] \[1 1\]'):
sess.run(sequence_length)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_shape': [2, 2, 4]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
                       (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_shape': [2, 2, 4]},
)
def test_static_shape_from_tensors_numeric(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc_old.sequence_numeric_column('aaa', shape=(2, 2))
input_layer, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input},
feature_columns=[numeric_column])
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected_shape': [4, 2, 3]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected_shape': [4, 2, 3]}
)
def test_static_shape_from_tensors_indicator(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=3)
indicator_column = fc_old._indicator_column(categorical_column)
input_layer, _ = sfc.sequence_input_layer(
features={'aaa': sparse_input}, feature_columns=[indicator_column])
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
class ConcatenateContextInputTest(test.TestCase, parameterized.TestCase):
"""Tests the utility fn concatenate_context_input."""
def test_concatenate_context_input(self):
seq_input = ops.convert_to_tensor(np.arange(12).reshape(2, 3, 2))
context_input = ops.convert_to_tensor(np.arange(10).reshape(2, 5))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
input_layer = sfc.concatenate_context_input(context_input, seq_input)
expected = np.array([
[[0, 1, 0, 1, 2, 3, 4], [2, 3, 0, 1, 2, 3, 4], [4, 5, 0, 1, 2, 3, 4]],
[[6, 7, 5, 6, 7, 8, 9], [8, 9, 5, 6, 7, 8, 9], [10, 11, 5, 6, 7, 8, 9]]
], dtype=np.float32)
with monitored_session.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllEqual(expected, output)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_3',
'seq_input_arg': np.arange(100).reshape(10, 10)},
{'testcase_name': 'rank_gt_3',
'seq_input_arg': np.arange(100).reshape(5, 5, 2, 2)}
)
def test_sequence_input_throws_error(self, seq_input_arg):
seq_input = ops.convert_to_tensor(seq_input_arg)
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'sequence_input must have rank 3'):
sfc.concatenate_context_input(context_input, seq_input)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_2',
'context_input_arg': np.arange(100)},
{'testcase_name': 'rank_gt_2',
'context_input_arg': np.arange(100).reshape(5, 5, 4)}
)
def test_context_input_throws_error(self, context_input_arg):
context_input = ops.convert_to_tensor(context_input_arg)
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'context_input must have rank 2'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_seq_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'sequence_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_context_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'context_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
class InputLayerTest(test.TestCase):
"""Tests input_layer with sequence feature columns."""
def test_embedding_column(self):
"""Tests that error is raised for sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc_old._embedding_column(
categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must not be '
r'of type _SequenceCategoricalColumn\.'):
_ = fc_old.input_layer(
features={'aaa': sparse_input},
feature_columns=[embedding_column_a])
def test_indicator_column(self):
"""Tests that error is raised for sequence indicator column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc_old._indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must not be '
r'of type _SequenceCategoricalColumn\.'):
_ = fc_old.input_layer(
features={'aaa': sparse_input},
feature_columns=[indicator_column_a])
def _assert_sparse_tensor_value(test_case, expected, actual):
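  """Asserts that a SparseTensorValue matches the expected indices, shape, dtype, and values."""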
_assert_sparse_tensor_indices_shape(test_case, expected, actual)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
def _assert_sparse_tensor_indices_shape(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
def _get_sequence_dense_tensor(column, features):
return column.get_sequence_dense_tensor(
fc.FeatureTransformationCache(features), None)
def _get_sequence_dense_tensor_state(column, features):
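  """Creates column state with a test state manager before computing the sequence dense tensor."""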
state_manager = _TestStateManager()
column.create_state(state_manager)
return column.get_sequence_dense_tensor(
fc.FeatureTransformationCache(features), state_manager)
def _get_sparse_tensors(column, features):
return column.get_sparse_tensors(
fc.FeatureTransformationCache(features), None)
class SequenceCategoricalColumnWithIdentityTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((1, 2, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': (6, 7, 8),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': (6, 7, 8),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
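    """Tests that identity ids pass through and 2-D inputs are expanded to rank 3."""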
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_identity('aaa', num_buckets=9)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceCategoricalColumnWithHashBucketTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_hash_bucket(
'aaa', hash_bucket_size=10)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_indices_shape(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceCategoricalColumnWithVocabularyFileTest(
test.TestCase, parameterized.TestCase):
def _write_vocab(self, vocab_strings, file_name):
vocab_file = os.path.join(self.get_temp_dir(), file_name)
with open(vocab_file, 'w') as f:
f.write('\n'.join(vocab_strings))
return vocab_file
def setUp(self):
super(SequenceCategoricalColumnWithVocabularyFileTest, self).setUp()
vocab_strings = ['omar', 'stringer', 'marlo']
self._wire_vocabulary_file_name = self._write_vocab(vocab_strings,
'wire_vocabulary.txt')
self._wire_vocabulary_size = 3
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
def test_get_sparse_tensors_dynamic_zero_length(self):
"""Tests _get_sparse_tensors with a dynamic sequence length."""
inputs = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 2)), values=[], dense_shape=(2, 0))
expected = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 3)),
values=np.array((), dtype=np.int64),
dense_shape=(2, 0, 1))
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
input_placeholder_shape = list(inputs.dense_shape)
# Make second dimension (sequence length) dynamic.
input_placeholder_shape[1] = None
input_placeholder = array_ops.sparse_placeholder(
dtypes.string, shape=input_placeholder_shape)
id_weight_pair = _get_sparse_tensors(column, {'aaa': input_placeholder})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
result = id_weight_pair.id_tensor.eval(
session=sess, feed_dict={input_placeholder: inputs})
_assert_sparse_tensor_value(
self, expected, result)
class SequenceCategoricalColumnWithVocabularyListTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
with monitored_session.MonitoredSession() as sess:
_assert_sparse_tensor_value(
self, expected, id_weight_pair.id_tensor.eval(session=sess))
class SequenceEmbeddingColumnTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[7., 11.], [0., 0.]],
# example 1, ids [[0, 1], [2]]
[[2, 3.5], [7., 11.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [[1], [0, 2]]
[[3., 5.], [4., 6.5]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
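    """Tests embedding lookup and zero padding for a sequence embedding column."""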
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup, _ = _get_sequence_dense_tensor_state(
embedding_column, {'aaa': inputs})
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('embedding_weights:0',), tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(expected, embedding_lookup.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=2)
_, sequence_length = _get_sequence_dense_tensor_state(
embedding_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=2)
_, sequence_length = _get_sequence_dense_tensor_state(
embedding_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
class SequenceSharedEmbeddingColumnTest(test.TestCase):
def test_get_sequence_dense_tensor(self):
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 1), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [0, 2]
# example 2, ids [0]
# example 3, ids []
indices=((0, 0), (1, 0), (1, 1), (2, 0)),
values=(1, 0, 2, 0),
dense_shape=(4, 2))
expected_lookups_a = [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]],
]
expected_lookups_b = [
# example 0, ids [1]
[[3., 5.], [0., 0.]],
# example 1, ids [0, 2]
[[1., 2.], [7., 11.]],
# example 2, ids [0]
[[1., 2.], [0., 0.]],
# example 3, ids []
[[0., 0.], [0., 0.]],
]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup_a = _get_sequence_dense_tensor(
shared_embedding_columns[0], {'aaa': sparse_input_a})[0]
embedding_lookup_b = _get_sequence_dense_tensor(
shared_embedding_columns[1], {'bbb': sparse_input_b})[0]
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('aaa_bbb_shared_embedding:0',),
tuple([v.name for v in global_vars]))
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(embedding_values, global_vars[0].eval(session=sess))
self.assertAllEqual(
expected_lookups_a, embedding_lookup_a.eval(session=sess))
self.assertAllEqual(
expected_lookups_b, embedding_lookup_b.eval(session=sess))
def test_sequence_length(self):
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
expected_sequence_length_a = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [0, 2]
# example 1, ids [1]
indices=((0, 0), (0, 1), (1, 0)),
values=(0, 2, 1),
dense_shape=(2, 2))
expected_sequence_length_b = [2, 1]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = _get_sequence_dense_tensor(
shared_embedding_columns[0], {'aaa': sparse_input_a})[1]
sequence_length_b = _get_sequence_dense_tensor(
shared_embedding_columns[1], {'bbb': sparse_input_b})[1]
with monitored_session.MonitoredSession() as sess:
sequence_length_a = sess.run(sequence_length_a)
self.assertAllEqual(expected_sequence_length_a, sequence_length_a)
self.assertEqual(np.int64, sequence_length_a.dtype)
sequence_length_b = sess.run(sequence_length_b)
self.assertAllEqual(expected_sequence_length_b, sequence_length_b)
self.assertEqual(np.int64, sequence_length_b.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length_a = [0, 1, 2, 0, 1, 0]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids []
# example 2, ids []
# example 3, ids []
# example 4, ids [1]
# example 5, ids [0, 1]
indices=((0, 0), (4, 0), (5, 0), (5, 1)),
values=(2, 1, 0, 1),
dense_shape=(6, 2))
expected_sequence_length_b = [1, 0, 0, 0, 1, 2]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = _get_sequence_dense_tensor(
shared_embedding_columns[0], {'aaa': sparse_input_a})[1]
sequence_length_b = _get_sequence_dense_tensor(
shared_embedding_columns[1], {'bbb': sparse_input_b})[1]
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length_a, sequence_length_a.eval(session=sess))
self.assertAllEqual(
expected_sequence_length_b, sequence_length_b.eval(session=sess))
class SequenceIndicatorColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [0, 1]
[[1., 0., 0.], [0., 1., 0.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [1]
[[0., 1., 0.], [0., 0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [2, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 2, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [[0, 1], [2]]
[[1., 1., 0.], [0., 0., 1.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [[1], [2, 2]]
[[0., 1., 0.], [0., 0., 2.]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
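    """Tests multi-hot indicator output and zero padding for a sequence indicator column."""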
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc.indicator_column(categorical_column)
indicator_tensor, _ = _get_sequence_dense_tensor(
indicator_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected, indicator_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc.indicator_column(categorical_column)
_, sequence_length = _get_sequence_dense_tensor(
indicator_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc.indicator_column(categorical_column)
_, sequence_length = _get_sequence_dense_tensor(
indicator_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
def test_defaults(self):
a = sfc.sequence_numeric_column('aaa')
self.assertEqual('aaa', a.key)
self.assertEqual('aaa', a.name)
self.assertEqual((1,), a.shape)
self.assertEqual(0., a.default_value)
self.assertEqual(dtypes.float32, a.dtype)
self.assertIsNone(a.normalizer_fn)
def test_shape_saved_as_tuple(self):
a = sfc.sequence_numeric_column('aaa', shape=[1, 2])
self.assertEqual((1, 2), a.shape)
def test_shape_must_be_positive_integer(self):
with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
sfc.sequence_numeric_column('aaa', shape=[1.0])
with self.assertRaisesRegexp(
ValueError, 'shape dimensions must be greater than 0'):
sfc.sequence_numeric_column('aaa', shape=[0])
def test_dtype_is_convertible_to_float(self):
with self.assertRaisesRegexp(
ValueError, 'dtype must be convertible to float'):
sfc.sequence_numeric_column('aaa', dtype=dtypes.string)
def test_normalizer_fn_must_be_callable(self):
with self.assertRaisesRegexp(TypeError, 'must be a callable'):
sfc.sequence_numeric_column('aaa', normalizer_fn='NotACallable')
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, values [0., 1]
# example 1, [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected': [
[[0.], [1.]],
[[10.], [0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# feature 0, ids [[20, 3], [5]]
# feature 1, ids [[3], [8]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (20, 3, 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]]},
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa')
dense_tensor, _ = _get_sequence_dense_tensor(
numeric_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(expected, dense_tensor.eval(session=sess))
def test_get_sequence_dense_tensor_with_normalizer_fn(self):
def _increment_two(input_sparse_tensor):
return sparse_ops.sparse_add(
input_sparse_tensor,
sparse_tensor.SparseTensor(((0, 0), (1, 1)), (2.0, 2.0), (2, 2))
)
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, values [[0.], [1]]
# example 1, [[10.]]
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Before _increment_two:
# [[0.], [1.]],
# [[10.], [0.]],
# After _increment_two:
# [[2.], [1.]],
# [[10.], [2.]],
expected_dense_tensor = [
[[2.], [1.]],
[[10.], [2.]],
]
numeric_column = sfc.sequence_numeric_column(
'aaa', normalizer_fn=_increment_two)
dense_tensor, _ = _get_sequence_dense_tensor(
numeric_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_dense_tensor, dense_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_dense_tensor': [
[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]],
[[[10., 11.], [12., 13.]], [[0., 0.], [0., 0.]]]]},
{'testcase_name': '3D',
'sparse_input_args': {
'indices': ((0, 0, 0), (0, 0, 2), (0, 0, 4), (0, 0, 6),
(0, 1, 0), (0, 1, 2), (0, 1, 4), (0, 1, 6),
(1, 0, 0), (1, 0, 2), (1, 0, 4), (1, 0, 6)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 8)},
'expected_dense_tensor': [
[[[0., 0.], [1., 0.]], [[2., 0.], [3., 0.]],
[[4., 0.], [5., 0.]], [[6., 0.], [7., 0.]]],
[[[10., 0.], [11., 0.]], [[12., 0.], [13., 0.]],
[[0., 0.], [0., 0.]], [[0., 0.], [0., 0.]]]]},
)
def test_get_dense_tensor_multi_dim(
self, sparse_input_args, expected_dense_tensor):
"""Tests get_sequence_dense_tensor for multi-dim numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
dense_tensor, _ = _get_sequence_dense_tensor(
numeric_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_dense_tensor, dense_tensor.eval(session=sess))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '2D_with_shape',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 1],
'shape': (2,)},
{'testcase_name': '3D_with_shape',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (2,)},
)
def test_sequence_length(self, inputs_args, expected_sequence_length, shape):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=shape)
_, sequence_length = _get_sequence_dense_tensor(
numeric_column, {'aaa': inputs})
with monitored_session.MonitoredSession() as sess:
sequence_length = sess.run(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, values []
# example 1, values [[0.], [1.]]
# example 2, [[2.]]
# example 3, values []
# example 4, [[3.]]
# example 5, values []
indices=((1, 0), (1, 1), (2, 0), (4, 0)),
values=(0., 1., 2., 3.),
dense_shape=(6, 2))
expected_sequence_length = [0, 2, 1, 0, 1, 0]
numeric_column = sfc.sequence_numeric_column('aaa')
_, sequence_length = _get_sequence_dense_tensor(
numeric_column, {'aaa': sparse_input})
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
if __name__ == '__main__':
test.main()
| 39.913188
| 102
| 0.59639
|
0e4607bd23d501288e254267b2c14a8c9e6a9c13
| 2,375
|
py
|
Python
|
menu.py
|
wangzqzero/calculator
|
94bf0c454672b88262ed87d090908a5ed1518fd3
|
[
"MIT"
] | null | null | null |
menu.py
|
wangzqzero/calculator
|
94bf0c454672b88262ed87d090908a5ed1518fd3
|
[
"MIT"
] | null | null | null |
menu.py
|
wangzqzero/calculator
|
94bf0c454672b88262ed87d090908a5ed1518fd3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'menu.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from equation import Ui_Form
from linequ import Ui_LinEquation
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
class Ui_Menu(object):
def openWindow(self):
self.window = QtWidgets.QMainWindow()
self.ui = Ui_Form()
self.ui.setupUi(self.window)
self.window.show()
def openWindowl(self):
self.window = QtWidgets.QMainWindow()
self.ui = Ui_LinEquation()
self.ui.setupUi(self.window)
self.window.show()
def setupUi(self, Menu):
Menu.setObjectName("Menu")
Menu.resize(436, 532)
self.label = QtWidgets.QLabel(Menu)
self.label.setGeometry(QtCore.QRect(110, 70, 181, 41))
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.linequ = QtWidgets.QPushButton(Menu)
self.linequ.setGeometry(QtCore.QRect(60, 180, 311, 61))
self.linequ.setObjectName("linequ")
self.quadequ = QtWidgets.QPushButton(Menu)
self.quadequ.setGeometry(QtCore.QRect(60, 290, 311, 61))
self.quadequ.setObjectName("quadequ")
self.linequ.clicked.connect(self.openWindowl)
self.quadequ.clicked.connect(self.openWindow)
self.retranslateUi(Menu)
QtCore.QMetaObject.connectSlotsByName(Menu)
def retranslateUi(self, Menu):
_translate = QtCore.QCoreApplication.translate
Menu.setWindowTitle(_translate("Menu", "Form"))
self.label.setText(_translate("Menu", "Equation Menu"))
self.linequ.setText(_translate("Menu", "Solve linear equation in two variables"))
self.quadequ.setText(_translate("Menu", "Solve quadratic equation"))
| 37.109375
| 87
| 0.689263
|
efa935820302bf6dbf6104fcf32831a207e1a725
| 84
|
py
|
Python
|
server/admin/blueprints/user/__init__.py
|
Soopro/totoro
|
6be1af50496340ded9879a6450c8208ac9f97e72
|
[
"MIT"
] | null | null | null |
server/admin/blueprints/user/__init__.py
|
Soopro/totoro
|
6be1af50496340ded9879a6450c8208ac9f97e72
|
[
"MIT"
] | null | null | null |
server/admin/blueprints/user/__init__.py
|
Soopro/totoro
|
6be1af50496340ded9879a6450c8208ac9f97e72
|
[
"MIT"
] | 1
|
2019-10-31T06:11:41.000Z
|
2019-10-31T06:11:41.000Z
|
# coding=utf-8
from __future__ import absolute_import
from .views import blueprint
| 16.8
| 38
| 0.821429
|
59140081b11212c6be482c72a78ac9c17012d685
| 429
|
py
|
Python
|
examples/get-guild-data.py
|
FoxNerdSaysMoo/HypixelIO
|
aca8fd6535c0afb2bb733172db2dcbd68590118d
|
[
"MIT"
] | 16
|
2020-10-28T01:49:31.000Z
|
2022-03-13T23:19:31.000Z
|
examples/get-guild-data.py
|
FoxNerdSaysMoo/HypixelIO
|
aca8fd6535c0afb2bb733172db2dcbd68590118d
|
[
"MIT"
] | 20
|
2021-03-17T07:32:14.000Z
|
2022-03-07T02:48:00.000Z
|
examples/get-guild-data.py
|
FoxNerdSaysMoo/HypixelIO
|
aca8fd6535c0afb2bb733172db2dcbd68590118d
|
[
"MIT"
] | 5
|
2020-10-21T13:53:27.000Z
|
2021-09-02T15:47:45.000Z
|
import os
from textwrap import dedent
import hypixelio as hp
# Init the Client
client = hp.Client(api_key=os.environ["HYPIXEL_KEY"])
# Get the guild object
guild = client.get_guild(name="2k")
# Get the essential data
name, ranking, achievements = guild.NAME, guild.LEGACY_RANKING, guild.ACHIEVEMENTS
# Print the data
print(
dedent(f"""
Name: {name}
ranking: {ranking}
achievements: {achievements}
""")
)
| 18.652174
| 82
| 0.710956
|
13e92b5fe7535220488956da90722a2209116546
| 7,344
|
py
|
Python
|
xonsh/parsers/fstring_adaptor.py
|
wendellwt/xonsh
|
300dfc87170002e900c2878aaf3d67c3f8a765d7
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xonsh/parsers/fstring_adaptor.py
|
wendellwt/xonsh
|
300dfc87170002e900c2878aaf3d67c3f8a765d7
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xonsh/parsers/fstring_adaptor.py
|
wendellwt/xonsh
|
300dfc87170002e900c2878aaf3d67c3f8a765d7
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Implements helper class for parsing Xonsh syntax within f-strings."""
import re
from ast import parse as pyparse
from xonsh import ast
from xonsh.lazyasd import lazyobject
from xonsh.platform import PYTHON_VERSION_INFO
@lazyobject
def RE_FSTR_FIELD_WRAPPER():
if PYTHON_VERSION_INFO > (3, 8):
return re.compile(r"(__xonsh__\.eval_fstring_field\((\d+)\))\s*[^=]")
else:
return re.compile(r"(__xonsh__\.eval_fstring_field\((\d+)\))")
if PYTHON_VERSION_INFO > (3, 8):
@lazyobject
def RE_FSTR_SELF_DOC_FIELD_WRAPPER():
return re.compile(r"(__xonsh__\.eval_fstring_field\((\d+)\)\s*)=")
class FStringAdaptor:
"""Helper for parsing Xonsh syntax within f-strings."""
def __init__(self, fstring, prefix, filename=None):
"""Parses an f-string containing special Xonsh syntax and returns
ast.JoinedStr AST node instance representing the input string.
Parameters
----------
fstring : str
The input f-string.
prefix : str
Prefix of the f-string (e.g. "fr").
filename : str, optional
File from which the code was read or any string describing
origin of the code.
"""
self.fstring = fstring
self.prefix = prefix
self.filename = filename
self.fields = {}
self.repl = ""
self.res = None
def _patch_special_syntax(self):
"""Takes an fstring (and its prefix, ie "f") that may contain
xonsh expressions as its field values and substitues them for
a call to __xonsh__.eval_fstring_field as needed.
"""
prelen = len(self.prefix)
quote = self.fstring[prelen]
if self.fstring[prelen + 1] == quote:
quote *= 3
template = self.fstring[prelen + len(quote) : -len(quote)]
while True:
repl = self.prefix + quote + template + quote
try:
res = pyparse(repl)
break
except SyntaxError as e:
# The e.text attribute is expected to contain the failing
# expression, e.g. "($HOME)" for f"{$HOME}" string.
if e.text is None or e.text[0] != "(":
raise
error_expr = e.text[1:-1]
epos = template.find(error_expr)
if epos < 0:
raise
# We can only get here in the case of a handled SyntaxError.
# Patch the last error and start over.
xonsh_field = (error_expr, self.filename if self.filename else None)
field_id = id(xonsh_field)
self.fields[field_id] = xonsh_field
eval_field = f"__xonsh__.eval_fstring_field({field_id})"
template = template[:epos] + eval_field + template[epos + len(error_expr) :]
self.repl = repl
self.res = res.body[0].value
def _unpatch_strings(self):
"""Reverts false-positive field matches within strings."""
reparse = False
for node in ast.walk(self.res):
if isinstance(node, ast.Constant) and isinstance(node.value, str):
value = node.value
elif isinstance(node, ast.Str):
value = node.s
else:
continue
match = RE_FSTR_FIELD_WRAPPER.search(value)
if match is None:
continue
field = self.fields.pop(int(match.group(2)), None)
if field is None:
continue
self.repl = self.repl.replace(match.group(1), field[0], 1)
reparse = True
if reparse:
self.res = pyparse(self.repl).body[0].value
def _unpatch_selfdoc_strings(self):
"""Reverts false-positive matches within Python 3.8 sef-documenting
f-string expressions."""
for node in ast.walk(self.res):
if isinstance(node, ast.Constant) and isinstance(node.value, str):
value = node.value
elif isinstance(node, ast.Str):
value = node.s
else:
continue
match = RE_FSTR_SELF_DOC_FIELD_WRAPPER.search(value)
if match is None:
continue
field = self.fields.get(int(match.group(2)), None)
if field is None:
continue
value = value.replace(match.group(1), field[0], 1)
if isinstance(node, ast.Str):
node.s = value
else:
node.value = value
def _fix_eval_field_params(self):
"""Replace f-string field ID placeholders with the actual field
expressions."""
for node in ast.walk(self.res):
if not (
isinstance(node, ast.Call)
and node.func.value.id == "__xonsh__"
and node.func.attr == "eval_fstring_field"
and len(node.args) > 0
):
continue
if PYTHON_VERSION_INFO > (3, 8):
if isinstance(node.args[0], ast.Constant) and isinstance(
node.args[0].value, int
):
field = self.fields.pop(node.args[0].value, None)
if field is None:
continue
lineno = node.args[0].lineno
col_offset = node.args[0].col_offset
field_node = ast.Tuple(
elts=[
ast.Constant(
value=field[0], lineno=lineno, col_offset=col_offset
),
ast.Constant(
value=field[1], lineno=lineno, col_offset=col_offset
),
],
ctx=ast.Load(),
lineno=lineno,
col_offset=col_offset,
)
node.args[0] = field_node
elif isinstance(node.args[0], ast.Num):
field = self.fields.pop(node.args[0].n, None)
if field is None:
continue
lineno = node.args[0].lineno
col_offset = node.args[0].col_offset
elts = [ast.Str(s=field[0], lineno=lineno, col_offset=col_offset)]
if field[1] is not None:
elts.append(
ast.Str(s=field[1], lineno=lineno, col_offset=col_offset)
)
else:
elts.append(
ast.NameConstant(
value=None, lineno=lineno, col_offset=col_offset
)
)
field_node = ast.Tuple(
elts=elts, ctx=ast.Load(), lineno=lineno, col_offset=col_offset,
)
node.args[0] = field_node
def run(self):
"""Runs the parser. Returns ast.JoinedStr instance."""
self._patch_special_syntax()
self._unpatch_strings()
if PYTHON_VERSION_INFO > (3, 8):
self._unpatch_selfdoc_strings()
self._fix_eval_field_params()
assert len(self.fields) == 0
return self.res
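# Illustrative usage sketch (not part of xonsh). The call below is hypothetical
# and only shows the intent of the class above: "{$HOME}" is not valid Python,
# so _patch_special_syntax rewrites the field into a
# __xonsh__.eval_fstring_field(...) call before the result is returned as an
# ast.JoinedStr node.
#
#   node = FStringAdaptor('f"{$HOME}"', "f", filename="<example>").run()
#   # `node` is an ast.JoinedStr whose formatted value routes the captured
#   # "$HOME" expression through __xonsh__.eval_fstring_field at runtime.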
| 37.279188
| 88
| 0.519199
|
10f7ead58248484a0f768c418ea56836bc8b5fe4
| 6,245
|
py
|
Python
|
ddtrace/contrib/botocore/patch.py
|
KDWSS/dd-trace-py
|
6d859bec403347f7c1e7efd039210908b562741e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ddtrace/contrib/botocore/patch.py
|
KDWSS/dd-trace-py
|
6d859bec403347f7c1e7efd039210908b562741e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 6
|
2021-06-29T14:58:43.000Z
|
2021-12-15T14:14:36.000Z
|
ddtrace/contrib/botocore/patch.py
|
KDWSS/dd-trace-py
|
6d859bec403347f7c1e7efd039210908b562741e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-09-28T06:20:53.000Z
|
2020-09-28T06:20:53.000Z
|
"""
Trace queries to the AWS API made via the botocore client.
"""
# 3p
import base64
import json
import botocore.client
from ddtrace import config
from ddtrace.vendor import wrapt
# project
from ...constants import ANALYTICS_SAMPLE_RATE_KEY
from ...constants import SPAN_MEASURED_KEY
from ...ext import SpanTypes
from ...ext import aws
from ...ext import http
from ...internal.logger import get_logger
from ...pin import Pin
from ...propagation.http import HTTPPropagator
from ...utils import get_argument_value
from ...utils.formats import deep_getattr
from ...utils.formats import get_env
from ...utils.wrappers import unwrap
# Original botocore client class
_Botocore_client = botocore.client.BaseClient
ARGS_NAME = ("action", "params", "path", "verb")
TRACED_ARGS = {"params", "path", "verb"}
log = get_logger(__name__)
# Botocore default settings
config._add(
"botocore",
{
"distributed_tracing": get_env("botocore", "distributed_tracing", default=True),
"invoke_with_legacy_context": get_env("botocore", "invoke_with_legacy_context", default=False),
},
)
def inject_trace_data_to_message_attributes(trace_data, entry):
if "MessageAttributes" not in entry:
entry["MessageAttributes"] = {}
# An Amazon SQS message can contain up to 10 metadata attributes.
if len(entry["MessageAttributes"]) < 10:
entry["MessageAttributes"]["_datadog"] = {"DataType": "String", "StringValue": json.dumps(trace_data)}
else:
log.debug("skipping trace injection, max number (10) of MessageAttributes exceeded")
def inject_trace_to_sqs_batch_message(args, span):
trace_data = {}
HTTPPropagator.inject(span.context, trace_data)
params = args[1]
for entry in params["Entries"]:
inject_trace_data_to_message_attributes(trace_data, entry)
def inject_trace_to_sqs_message(args, span):
trace_data = {}
HTTPPropagator.inject(span.context, trace_data)
params = args[1]
inject_trace_data_to_message_attributes(trace_data, params)
def modify_client_context(client_context_object, trace_headers):
if config.botocore["invoke_with_legacy_context"]:
trace_headers = {"_datadog": trace_headers}
if "custom" in client_context_object:
client_context_object["custom"].update(trace_headers)
else:
client_context_object["custom"] = trace_headers
def inject_trace_to_client_context(args, span):
trace_headers = {}
HTTPPropagator.inject(span.context, trace_headers)
client_context_object = {}
params = args[1]
if "ClientContext" in params:
try:
client_context_json = base64.b64decode(params["ClientContext"]).decode("utf-8")
client_context_object = json.loads(client_context_json)
except Exception:
log.warning("malformed client_context=%s", params["ClientContext"], exc_info=True)
return
modify_client_context(client_context_object, trace_headers)
try:
json_context = json.dumps(client_context_object).encode("utf-8")
except Exception:
log.warning("unable to encode modified client context as json: %s", client_context_object, exc_info=True)
return
params["ClientContext"] = base64.b64encode(json_context).decode("utf-8")
def patch():
if getattr(botocore.client, "_datadog_patch", False):
return
setattr(botocore.client, "_datadog_patch", True)
wrapt.wrap_function_wrapper("botocore.client", "BaseClient._make_api_call", patched_api_call)
Pin(service="aws", app="aws").onto(botocore.client.BaseClient)
def unpatch():
if getattr(botocore.client, "_datadog_patch", False):
setattr(botocore.client, "_datadog_patch", False)
unwrap(botocore.client.BaseClient, "_make_api_call")
def patched_api_call(original_func, instance, args, kwargs):
pin = Pin.get_from(instance)
if not pin or not pin.enabled():
return original_func(*args, **kwargs)
endpoint_name = deep_getattr(instance, "_endpoint._endpoint_prefix")
with pin.tracer.trace(
"{}.command".format(endpoint_name), service="{}.{}".format(pin.service, endpoint_name), span_type=SpanTypes.HTTP
) as span:
span.set_tag(SPAN_MEASURED_KEY)
operation = None
if args:
operation = get_argument_value(args, kwargs, 0, "operation_name")
# DEV: join is the fastest way of concatenating strings that is compatible
# across Python versions (see
# https://stackoverflow.com/questions/1316887/what-is-the-most-efficient-string-concatenation-method-in-python)
span.resource = ".".join((endpoint_name, operation.lower()))
if config.botocore["distributed_tracing"]:
if endpoint_name == "lambda" and operation == "Invoke":
inject_trace_to_client_context(args, span)
if endpoint_name == "sqs" and operation == "SendMessage":
inject_trace_to_sqs_message(args, span)
if endpoint_name == "sqs" and operation == "SendMessageBatch":
inject_trace_to_sqs_batch_message(args, span)
else:
span.resource = endpoint_name
aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS)
region_name = deep_getattr(instance, "meta.region_name")
span._set_str_tag("aws.agent", "botocore")
if operation is not None:
span._set_str_tag("aws.operation", operation)
if region_name is not None:
span._set_str_tag("aws.region", region_name)
result = original_func(*args, **kwargs)
response_meta = result.get("ResponseMetadata")
if response_meta:
if "HTTPStatusCode" in response_meta:
span.set_tag(http.STATUS_CODE, response_meta["HTTPStatusCode"])
if "RetryAttempts" in response_meta:
span.set_tag("retry_attempts", response_meta["RetryAttempts"])
if "RequestId" in response_meta:
span.set_tag("aws.requestid", response_meta["RequestId"])
# set analytics sample rate
span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.botocore.get_analytics_sample_rate())
return result
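# Illustrative usage sketch (not part of ddtrace). Assumes botocore is
# installed and AWS credentials/region are configured; the bucket listing call
# below is hypothetical.
#
#   import botocore.session
#   from ddtrace.contrib.botocore.patch import patch
#
#   patch()                                    # wraps BaseClient._make_api_call
#   session = botocore.session.get_session()
#   s3 = session.create_client("s3", region_name="us-east-1")
#   s3.list_buckets()                          # traced as an "s3.command" span
#                                              # with resource "s3.listbuckets"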
| 35.282486
| 123
| 0.692074
|
aad1f0fbeb3828656409050b9b897c1a3ff5e384
| 4,211
|
py
|
Python
|
foolbox/models/mxnet_gluon.py
|
mkyybx/foolbox
|
00b2dcc5ed30b12f28431e9dabe4d2bbc214d444
|
[
"MIT"
] | 4
|
2021-11-12T04:06:32.000Z
|
2022-01-27T09:01:41.000Z
|
foolbox/models/mxnet_gluon.py
|
pige2nd/foolbox
|
2daabba8355afce9dfbec3de8d71dadadcfbd10b
|
[
"MIT"
] | 1
|
2022-02-22T14:00:59.000Z
|
2022-02-25T08:57:29.000Z
|
foolbox/models/mxnet_gluon.py
|
pige2nd/foolbox
|
2daabba8355afce9dfbec3de8d71dadadcfbd10b
|
[
"MIT"
] | 2
|
2020-11-27T00:03:48.000Z
|
2020-11-27T00:08:04.000Z
|
from __future__ import absolute_import
from .base import DifferentiableModel
import numpy as np
class MXNetGluonModel(DifferentiableModel):
"""Creates a :class:`Model` instance from an existing `MXNet Gluon` Block.
Parameters
----------
block : `mxnet.gluon.Block`
The Gluon Block representing the model to be run.
ctx : `mxnet.context.Context`
The device, e.g. mxnet.cpu() or mxnet.gpu().
num_classes : int
The number of classes.
bounds : tuple
Tuple of lower and upper bound for the pixel values, usually
(0, 1) or (0, 255).
channel_axis : int
The index of the axis that represents color channels.
preprocessing: 2-element tuple with floats or numpy arrays
Elementwise preprocessing of the input; we first subtract the first
element of preprocessing from the input and then divide the input by
the second element.
"""
def __init__(
self,
block,
bounds,
num_classes,
ctx=None,
channel_axis=1,
preprocessing=(0, 1)):
import mxnet as mx
self._num_classes = num_classes
if ctx is None:
ctx = mx.cpu()
super(MXNetGluonModel, self).__init__(
bounds=bounds,
channel_axis=channel_axis,
preprocessing=preprocessing)
self._device = ctx
self._block = block
def num_classes(self):
return self._num_classes
def forward(self, inputs):
import mxnet as mx
inputs, _ = self._process_input(inputs)
data_array = mx.nd.array(inputs, ctx=self._device)
data_array.attach_grad()
with mx.autograd.record(train_mode=False):
L = self._block(data_array)
return L.asnumpy()
def forward_and_gradient_one(self, x, label):
import mxnet as mx
x, dpdx = self._process_input(x)
label = mx.nd.array([label], ctx=self._device)
data_array = mx.nd.array(x[np.newaxis], ctx=self._device)
data_array.attach_grad()
with mx.autograd.record(train_mode=False):
logits = self._block(data_array)
loss = mx.nd.softmax_cross_entropy(logits, label)
loss.backward(train_mode=False)
predictions = np.squeeze(logits.asnumpy(), axis=0)
gradient = np.squeeze(data_array.grad.asnumpy(), axis=0)
gradient = self._process_gradient(dpdx, gradient)
return predictions, gradient
def gradient(self, inputs, labels):
import mxnet as mx
inputs, dpdx = self._process_input(inputs)
inputs = mx.nd.array(inputs, ctx=self._device)
labels = mx.nd.array(labels, ctx=self._device)
inputs.attach_grad()
with mx.autograd.record(train_mode=False):
logits = self._block(inputs)
loss = mx.nd.softmax_cross_entropy(logits, labels)
loss.backward(train_mode=False)
gradient = inputs.grad.asnumpy()
gradient = self._process_gradient(dpdx, gradient)
return gradient
def _loss_fn(self, x, label):
import mxnet as mx
x, _ = self._process_input(x)
label = mx.nd.array([label], ctx=self._device)
data_array = mx.nd.array(x[np.newaxis], ctx=self._device)
data_array.attach_grad()
with mx.autograd.record(train_mode=False):
logits = self._block(data_array)
loss = mx.nd.softmax_cross_entropy(logits, label)
loss.backward(train_mode=False)
return loss.asnumpy()
def backward(self, gradient, inputs):
# lazy import
import mxnet as mx
assert gradient.ndim == 2
inputs, dpdx = self._process_input(inputs)
inputs = mx.nd.array(inputs, ctx=self._device)
gradient = mx.nd.array(gradient, ctx=self._device)
inputs.attach_grad()
with mx.autograd.record(train_mode=False):
logits = self._block(inputs)
assert gradient.shape == logits.shape
logits.backward(gradient, train_mode=False)
gradient = inputs.grad.asnumpy()
gradient = self._process_gradient(dpdx, gradient)
return gradient
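# Illustrative usage sketch (not part of foolbox). `net` stands for any trained
# mxnet.gluon.Block and `images`/`labels` for NumPy arrays (images in NCHW
# layout); all names below are hypothetical.
#
#   import mxnet as mx
#   model = MXNetGluonModel(net, bounds=(0, 1), num_classes=10,
#                           ctx=mx.cpu(), preprocessing=(0.5, 0.5))
#   logits = model.forward(images)          # shape (batch, num_classes)
#   grads = model.gradient(images, labels)  # same shape as `images`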
| 34.516393
| 78
| 0.627642
|
8b0e5a1f263db58638424bb478a4a8134c61a03c
| 952
|
py
|
Python
|
dummy_settings.py
|
AnselmC/connection-tester
|
9a53b11e54be982d883c2db69613376a4b15aa7a
|
[
"MIT"
] | null | null | null |
dummy_settings.py
|
AnselmC/connection-tester
|
9a53b11e54be982d883c2db69613376a4b15aa7a
|
[
"MIT"
] | null | null | null |
dummy_settings.py
|
AnselmC/connection-tester
|
9a53b11e54be982d883c2db69613376a4b15aa7a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
sender = 'your.address@host.com' # Your address
password = '#s0oPerSTr0nGpä$$\\/\\/ord' # Your password
server_name = 'mail.host.com' # Your mail server address
server_port = 587 # Your mail server port
ISP = 'customer_service@isp.com' # Your ISP customer service address
contract_no = '1234' # The ID of your contract
subject = 'Notification concerning low bandwidth'
body = 'Dear customer support,\n' + \
'This is an automated message informing you about ' + \
'underperforming bandwidth.\nMy advertised up- and ' + \
'download speeds for contract {contract_no} are {up} Mbps and ' +\
'{down} Mbps, respectively.\n' + \
'However, {runs} speed tests conducted between {start} and {end} ' + \
'showed average speeds of {avg_up} Mbps and {avg_down} Mbps, ' + \
'respectively. Attached you can find a time sequence.\n' + \
'Please resolve this issue in a timely manner.'
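# Illustrative sketch (not part of the original settings file): the
# {placeholders} above are presumably filled in elsewhere via str.format.
# The numbers and timestamps below are hypothetical.
def _example_fill_body():
    return body.format(contract_no=contract_no, up=100, down=40, runs=24,
                       start='2021-05-01 00:00', end='2021-05-02 00:00',
                       avg_up=80.5, avg_down=21.3)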
| 52.888889
| 74
| 0.683824
|
825806276de0276aaa7cf46bb0f739e225fa3f54
| 26,056
|
py
|
Python
|
osx/devkit/plug-ins/scripted/swissArmyManip.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | 10
|
2018-03-30T16:09:02.000Z
|
2021-12-07T07:29:19.000Z
|
osx/devkit/plug-ins/scripted/swissArmyManip.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | null | null | null |
osx/devkit/plug-ins/scripted/swissArmyManip.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | 9
|
2018-06-02T09:18:49.000Z
|
2021-12-20T09:24:35.000Z
|
#-
# ==========================================================================
# Copyright (C) 1995 - 2006 Autodesk, Inc. and/or its licensors. All
# rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its
# licensors, which is protected by U.S. and Canadian federal copyright
# law and by international treaties.
#
# The Data is provided for use exclusively by You. You have the right
# to use, modify, and incorporate this Data into other products for
# purposes authorized by the Autodesk software license agreement,
# without fee.
#
# The copyright notices in the Software and this entire statement,
# including the above license grant, this restriction and the
# following disclaimer, must be included in all copies of the
# Software, in whole or in part, and all derivative works of
# the Software, unless such copies or derivative works are solely
# in the form of machine-executable object code generated by a
# source language processor.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
# AUTODESK DOES NOT MAKE AND HEREBY DISCLAIMS ANY EXPRESS OR IMPLIED
# WARRANTIES INCLUDING, BUT NOT LIMITED TO, THE WARRANTIES OF
# NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
# PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE, OR
# TRADE PRACTICE. IN NO EVENT WILL AUTODESK AND/OR ITS LICENSORS
# BE LIABLE FOR ANY LOST REVENUES, DATA, OR PROFITS, OR SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES, EVEN IF AUTODESK
# AND/OR ITS LICENSORS HAS BEEN ADVISED OF THE POSSIBILITY
# OR PROBABILITY OF SUCH DAMAGES.
#
# ==========================================================================
#+
#
# Autodesk Script File
# MODIFY THIS AT YOUR OWN RISK
#
# Creation Date: 27 September 2006
#
# swissArmyManip.py
#
# This plug-in is an example of a user-defined manipulator,
# which is composed of a variety of the base manipulators:
# - MFnCircleSweepManip
# - MFnDirectionManip
# - MFnDiscManip
# - MFnDistanceManip
# - MFnFreePointTriadManip
# - MFnStateManip
# - MFnToggleManip
# - MFnRotateManip
# - MFnScaleManip
#
# To use this plug-in:
#
# import maya.cmds as cmds
# cmds.createNode("spSwissArmyLocator")
#
# click on the showManipTool
#
import maya.OpenMaya as OpenMaya
import maya.OpenMayaUI as OpenMayaUI
import maya.OpenMayaRender as OpenMayaRender
import maya.OpenMayaMPx as OpenMayaMPx
import math,sys
glRenderer = OpenMayaRender.MHardwareRenderer.theRenderer()
glFT = glRenderer.glFunctionTable()
kSwissArmyLocatorName = "spSwissArmyLocator"
kSwissArmyLocatorId = OpenMaya.MTypeId(0x87006)
kSwissArmyLocatorManipName = "spSwissArmyLocatorManip"
kSwissArmyLocatorManipId = OpenMaya.MTypeId(0x87007)
delta1 = 0.01
delta2 = 0.02
delta3 = 0.03
delta4 = 0.04
# Locator Data
centre = [ [ 0.10, 0.0, 0.10 ],
[ 0.10, 0.0, -0.10 ],
[ -0.10, 0.0, -0.10 ],
[ -0.10, 0.0, 0.10 ],
[ 0.10, 0.0, 0.10 ] ]
state1 = [ [ 1.00, 0.0, 1.00 ],
[ 1.00, 0.0, 0.50 ],
[ 0.50, 0.0, 0.50 ],
[ 0.50, 0.0, 1.00 ],
[ 1.00, 0.0, 1.00 ] ]
state2 = [ [ 1.00, 0.0, -1.00 ],
[ 1.00, 0.0, -0.50 ],
[ 0.50, 0.0, -0.50 ],
[ 0.50, 0.0, -1.00 ],
[ 1.00, 0.0, -1.00 ] ]
state3 = [ [ -1.00, 0.0, -1.00 ],
[ -1.00, 0.0, -0.50 ],
[ -0.50, 0.0, -0.50 ],
[ -0.50, 0.0, -1.00 ],
[ -1.00, 0.0, -1.00 ] ]
state4 = [ [ -1.00, 0.0, 1.00 ],
[ -1.00, 0.0, 0.50 ],
[ -0.50, 0.0, 0.50 ],
[ -0.50, 0.0, 1.00 ],
[ -1.00, 0.0, 1.00 ] ]
arrow1 = [ [ 0.00, 0.0, 1.00 ],
[ 0.10, 0.0, 0.20 ],
[ -0.10, 0.0, 0.20 ],
[ 0.00, 0.0, 1.00 ] ]
arrow2 = [ [ 1.00, 0.0, 0.00 ],
[ 0.20, 0.0, 0.10 ],
[ 0.20, 0.0, -0.10 ],
[ 1.00, 0.0, 0.00 ] ]
arrow3 = [ [ 0.00, 0.0, -1.00 ],
[ 0.10, 0.0, -0.20 ],
[ -0.10, 0.0, -0.20 ],
[ 0.00, 0.0, -1.00 ] ]
arrow4 = [ [ -1.00, 0.0, 0.00 ],
[ -0.20, 0.0, 0.10 ],
[ -0.20, 0.0, -0.10 ],
[ -1.00, 0.0, 0.00 ] ]
perimeter=[ [ 1.10, 0.0, 1.10 ],
[ 1.10, 0.0, -1.10 ],
[ -1.10, 0.0, -1.10 ],
[ -1.10, 0.0, 1.10 ],
[ 1.10, 0.0, 1.10 ] ]
kCentreCount = 5
kState1Count = 5
kState2Count = 5
kState3Count = 5
kState4Count = 5
kArrow1Count = 4
kArrow2Count = 4
kArrow3Count = 4
kArrow4Count = 4
kPerimeterCount = 5
########################################################################
########################################################################
class swissArmyLocatorManip(OpenMayaMPx.MPxManipContainer):
def __init__(self):
OpenMayaMPx.MPxManipContainer.__init__(self)
self.fCircleSweepManip = OpenMaya.MDagPath()
self.fDirectionManip = OpenMaya.MDagPath()
self.fDiscManip = OpenMaya.MDagPath()
self.fDistanceManip = OpenMaya.MDagPath()
self.fFreePointTriadManip = OpenMaya.MDagPath()
self.fStateManip = OpenMaya.MDagPath()
self.fToggleManip = OpenMaya.MDagPath()
self.fRotateManip = OpenMaya.MDagPath()
self.fScaleManip = OpenMaya.MDagPath()
self.fNodePath = OpenMaya.MDagPath()
def createChildren(self):
# FreePointTriadManip
self.fFreePointTriadManip = self.addFreePointTriadManip("freePointTriadManip", "point")
freePointTriadManipFn = OpenMayaUI.MFnFreePointTriadManip(self.fFreePointTriadManip)
# DirectionManip
self.fDirectionManip = self.addDirectionManip("directionManip", "direction")
directionManipFn = OpenMayaUI.MFnDirectionManip(self.fDirectionManip)
# ToggleManip
self.fToggleManip = self.addToggleManip("toggleManip", "toggle")
toggleManipFn = OpenMayaUI.MFnToggleManip(self.fToggleManip)
# StateManip
self.fStateManip = self.addStateManip("stateManip", "state")
stateManipFn = OpenMayaUI.MFnStateManip(self.fStateManip)
# DiscManip
self.fDiscManip = self.addDiscManip("discManip", "angle")
discManipFn = OpenMayaUI.MFnDiscManip(self.fDiscManip)
# CircleSweepManip
self.fCircleSweepManip = self.addCircleSweepManip("circleSweepManip", "angle")
circleSweepManipFn = OpenMayaUI.MFnCircleSweepManip(self.fCircleSweepManip)
circleSweepManipFn.setCenterPoint(OpenMaya.MPoint(0, 0, 0))
circleSweepManipFn.setNormal(OpenMaya.MVector(0, 1, 0))
circleSweepManipFn.setRadius(2.0)
circleSweepManipFn.setDrawAsArc(True)
# DistanceManip
self.fDistanceManip = self.addDistanceManip("distanceManip", "distance")
distanceManipFn = OpenMayaUI.MFnDistanceManip(self.fDistanceManip)
distanceManipFn.setStartPoint(OpenMaya.MPoint(0, 0, 0))
distanceManipFn.setDirection(OpenMaya.MVector(0, 1, 0))
# RotateManip
self.fRotateManip = self.addRotateManip("RotateManip", "rotation")
rotateManipFn = OpenMayaUI.MFnRotateManip(self.fRotateManip)
# ScaleManip
self.fScaleManip = self.addScaleManip("scaleManip", "scale")
scaleManipFn = OpenMayaUI.MFnScaleManip(self.fScaleManip)
def connectToDependNode(self, node):
# Get the DAG path
dagNodeFn = OpenMaya.MFnDagNode(node)
dagNodeFn.getPath(self.fNodePath)
parentNode = dagNodeFn.parent(0)
parentNodeFn = OpenMaya.MFnDagNode(parentNode)
# Connect the plugs
nodeFn = OpenMaya.MFnDependencyNode()
nodeFn.setObject(node)
# FreePointTriadManip
freePointTriadManipFn = OpenMayaUI.MFnFreePointTriadManip(self.fFreePointTriadManip)
try:
translationPlug = parentNodeFn.findPlug("t")
freePointTriadManipFn.connectToPointPlug(translationPlug)
except:
pass
# DirectionManip
directionManipFn = OpenMayaUI.MFnDirectionManip()
directionManipFn.setObject(self.fDirectionManip)
try:
directionPlug = nodeFn.findPlug("arrow2Direction")
directionManipFn.connectToDirectionPlug(directionPlug)
startPointIndex = directionManipFn.startPointIndex()
self.addPlugToManipConversion(startPointIndex)
except:
pass
# DistanceManip
distanceManipFn = OpenMayaUI.MFnDistanceManip()
distanceManipFn.setObject(self.fDistanceManip)
try:
sizePlug = nodeFn.findPlug("size")
distanceManipFn.connectToDistancePlug(sizePlug)
startPointIndex = distanceManipFn.startPointIndex()
self.addPlugToManipConversion(startPointIndex)
except:
pass
# CircleSweepManip
circleSweepManipFn = OpenMayaUI.MFnCircleSweepManip(self.fCircleSweepManip)
try:
arrow1AnglePlug = nodeFn.findPlug("arrow1Angle")
circleSweepManipFn.connectToAnglePlug(arrow1AnglePlug)
centerIndex = circleSweepManipFn.centerIndex()
self.addPlugToManipConversion(centerIndex)
except:
pass
# DiscManip
discManipFn = OpenMayaUI.MFnDiscManip(self.fDiscManip)
try:
arrow3AnglePlug = nodeFn.findPlug("arrow3Angle")
discManipFn.connectToAnglePlug(arrow3AnglePlug)
centerIndex = discManipFn.centerIndex()
self.addPlugToManipConversion(centerIndex)
except:
pass
# StateManip
stateManipFn = OpenMayaUI.MFnStateManip(self.fStateManip)
try:
statePlug = nodeFn.findPlug("state")
stateManipFn.connectToStatePlug(statePlug)
positionIndex = stateManipFn.positionIndex()
self.addPlugToManipConversion(positionIndex)
except:
pass
# ToggleManip
toggleManipFn = OpenMayaUI.MFnToggleManip(self.fToggleManip)
try:
togglePlug = nodeFn.findPlug("toggle")
toggleManipFn.connectToTogglePlug(togglePlug)
startPointIndex = toggleManipFn.startPointIndex()
self.addPlugToManipConversion(startPointIndex)
except:
pass
# Determine the transform node for the locator
transformPath = OpenMaya.MDagPath(self.fNodePath)
transformPath.pop()
transformNode = OpenMaya.MFnTransform(transformPath)
# RotateManip
rotateManipFn = OpenMayaUI.MFnRotateManip(self.fRotateManip)
try:
rotatePlug = transformNode.findPlug("rotate")
rotateManipFn.connectToRotationPlug(rotatePlug)
rotateManipFn.displayWithNode(node)
except:
pass
# ScaleManip
scaleManipFn = OpenMayaUI.MFnScaleManip(self.fScaleManip)
try:
scalePlug = transformNode.findPlug("scale")
scaleManipFn.connectToScalePlug(scalePlug)
scaleManipFn.displayWithNode(node)
except:
pass
self.finishAddingManips()
OpenMayaMPx.MPxManipContainer.connectToDependNode(self, node)
def draw(self, view, path, style, status):
OpenMayaMPx.MPxManipContainer.draw(self, view, path, style, status)
view.beginGL()
textPos = OpenMaya.MPoint(self.nodeTranslation())
view.drawText("Swiss Army Manipulator", textPos, OpenMayaUI.M3dView.kLeft)
view.endGL()
def plugToManipConversion(self, theIndex):
numData = OpenMaya.MFnNumericData()
numDataObj = numData.create(OpenMaya.MFnNumericData.k3Float)
vec = self.nodeTranslation()
numData.setData3Float(vec.x, vec.y, vec.z)
manipData = OpenMayaUI.MManipData(numDataObj)
return manipData
def nodeTranslation(self):
dagFn = OpenMaya.MFnDagNode(self.fNodePath)
path = OpenMaya.MDagPath()
dagFn.getPath(path)
path.pop() # pop from the shape to the transform
transformFn = OpenMaya.MFnTransform(path)
return transformFn.getTranslation(OpenMaya.MSpace.kWorld)
########################################################################
########################################################################
class swissArmyLocator(OpenMayaMPx.MPxLocatorNode):
aSize = OpenMaya.MObject() # The size of the locator
aPoint = OpenMaya.MObject()
aPointX = OpenMaya.MObject()
aPointY = OpenMaya.MObject()
aPointZ = OpenMaya.MObject()
aArrow1Angle = OpenMaya.MObject()
aArrow2Direction = OpenMaya.MObject()
aArrow2DirectionX = OpenMaya.MObject()
aArrow2DirectionY = OpenMaya.MObject()
aArrow2DirectionZ = OpenMaya.MObject()
aArrow3Angle = OpenMaya.MObject()
aArrow4Distance = OpenMaya.MObject()
aState = OpenMaya.MObject()
aToggle = OpenMaya.MObject()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
def compute(self, plug, data):
return OpenMaya.kUnknownParameter
def draw(self, view, path, style, status):
# Get the size
thisNode = self.thisMObject()
plug = OpenMaya.MPlug(thisNode, swissArmyLocator.aSize)
sizeVal = plug.asMDistance()
arrow1AnglePlug = OpenMaya.MPlug(thisNode, swissArmyLocator.aArrow1Angle)
arrow1Angle = arrow1AnglePlug.asMAngle()
angle1 = -arrow1Angle.asRadians() - 3.1415927/2.0
arrow3AnglePlug = OpenMaya.MPlug(thisNode, swissArmyLocator.aArrow3Angle)
arrow3Angle = arrow3AnglePlug.asMAngle()
angle3 = arrow3Angle.asRadians()
statePlug = OpenMaya.MPlug(thisNode, swissArmyLocator.aState)
state = statePlug.asInt()
togglePlug = OpenMaya.MPlug(thisNode, swissArmyLocator.aToggle)
toggle = togglePlug.asBool()
directionXPlug = OpenMaya.MPlug(thisNode, swissArmyLocator.aArrow2DirectionX)
directionYPlug = OpenMaya.MPlug(thisNode, swissArmyLocator.aArrow2DirectionY)
directionZPlug = OpenMaya.MPlug(thisNode, swissArmyLocator.aArrow2DirectionZ)
dirX = directionXPlug.asDouble()
dirY = directionYPlug.asDouble()
dirZ = directionZPlug.asDouble()
angle2 = math.atan2(dirZ, dirX)
angle2 += 3.1415927
multiplier = sizeVal.asCentimeters()
view.beginGL()
if ((style == OpenMayaUI.M3dView.kFlatShaded) or
(style == OpenMayaUI.M3dView.kGouraudShaded)):
# Push the color settings
glFT.glPushAttrib(OpenMayaRender.MGL_CURRENT_BIT)
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(13, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(13, OpenMayaUI.M3dView.kDormantColors)
if (toggle):
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(15, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(15, OpenMayaUI.M3dView.kDormantColors)
glFT.glBegin(OpenMayaRender.MGL_TRIANGLE_FAN)
last = kCentreCount - 1
for i in range(last):
glFT.glVertex3f(centre[i][0] * multiplier,
centre[i][1] * multiplier,
centre[i][2] * multiplier)
glFT.glEnd()
if (state == 0):
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(19, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(19, OpenMayaUI.M3dView.kDormantColors)
glFT.glBegin(OpenMayaRender.MGL_TRIANGLE_FAN)
last = kState1Count - 1
for i in range(last):
glFT.glVertex3f(state1[i][0] * multiplier,
state1[i][1] * multiplier,
state1[i][2] * multiplier)
glFT.glEnd()
if (state == 1):
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(21, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(21, OpenMayaUI.M3dView.kDormantColors)
glFT.glBegin(OpenMayaRender.MGL_TRIANGLE_FAN)
last = kState2Count - 1
for i in range(last):
glFT.glVertex3f(state2[i][0] * multiplier,
state2[i][1] * multiplier,
state2[i][2] * multiplier)
glFT.glEnd()
if (state == 2):
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(18, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(18, OpenMayaUI.M3dView.kDormantColors)
glFT.glBegin(OpenMayaRender.MGL_TRIANGLE_FAN)
last = kState3Count - 1
for i in range(last):
glFT.glVertex3f(state3[i][0] * multiplier,
state3[i][1] * multiplier,
state3[i][2] * multiplier)
glFT.glEnd()
if (state == 3):
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(17, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(17, OpenMayaUI.M3dView.kDormantColors)
glFT.glBegin(OpenMayaRender.MGL_TRIANGLE_FAN)
last = kState4Count - 1
for i in range(last):
glFT.glVertex3f(state4[i][0] * multiplier,
state4[i][1] * multiplier,
state4[i][2] * multiplier)
glFT.glEnd()
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(12, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(12, OpenMayaUI.M3dView.kDormantColors)
glFT.glBegin(OpenMayaRender.MGL_TRIANGLE_FAN)
last = kArrow1Count - 1
for i in range(last):
glFT.glVertex3f((-arrow1[i][0] * multiplier * math.cos(angle1) - arrow1[i][2] * multiplier * math.sin(angle1)),
(arrow1[i][1] * multiplier + delta1),
(arrow1[i][2] * multiplier * math.cos(angle1) - arrow1[i][0] * multiplier * math.sin(angle1)))
glFT.glEnd()
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(16, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(16, OpenMayaUI.M3dView.kDormantColors)
glFT.glBegin(OpenMayaRender.MGL_TRIANGLE_FAN)
last = kArrow2Count - 1
for i in range(last):
glFT.glVertex3f((-arrow2[i][0] * multiplier * math.cos(angle2) - arrow2[i][2] * multiplier * math.sin(angle2)),
(arrow2[i][1] * multiplier + delta2),
(arrow2[i][2] * multiplier * math.cos(angle2) - arrow2[i][0] * multiplier * math.sin(angle2)))
glFT.glEnd()
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(13, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(13, OpenMayaUI.M3dView.kDormantColors)
glFT.glBegin(OpenMayaRender.MGL_TRIANGLE_FAN)
last = kArrow3Count - 1
for i in range(last):
glFT.glVertex3f((-arrow3[i][0] * multiplier * math.cos(angle3) - arrow3[i][2] * multiplier * math.sin(angle3)),
(arrow3[i][1] * multiplier + delta3),
(arrow3[i][2] * multiplier * math.cos(angle3) - arrow3[i][0] * multiplier * math.sin(angle3)))
glFT.glEnd()
if (status == OpenMayaUI.M3dView.kActive):
view.setDrawColor(5, OpenMayaUI.M3dView.kActiveColors)
else:
view.setDrawColor(5, OpenMayaUI.M3dView.kDormantColors)
glFT.glBegin(OpenMayaRender.MGL_TRIANGLE_FAN)
last = kArrow4Count - 1
for i in range(last):
glFT.glVertex3f((arrow4[i][0] * multiplier),
(arrow4[i][1] * multiplier + delta4),
(arrow4[i][2] * multiplier))
glFT.glEnd()
glFT.glPopAttrib()
# Draw the outline of the locator
glFT.glBegin(OpenMayaRender.MGL_LINES)
if toggle:
last = kCentreCount - 1
for i in range(last):
glFT.glVertex3f(centre[i][0] * multiplier,
centre[i][1] * multiplier,
centre[i][2] * multiplier)
glFT.glVertex3f(centre[i+1][0] * multiplier,
centre[i+1][1] * multiplier,
centre[i+1][2] * multiplier)
if (state == 0):
last = kState1Count - 1
for i in range(last):
glFT.glVertex3f(state1[i][0] * multiplier,
state1[i][1] * multiplier,
state1[i][2] * multiplier)
glFT.glVertex3f(state1[i+1][0] * multiplier,
state1[i+1][1] * multiplier,
state1[i+1][2] * multiplier)
if (state == 1):
last = kState2Count - 1
for i in range(last):
glFT.glVertex3f(state2[i][0] * multiplier,
state2[i][1] * multiplier,
state2[i][2] * multiplier)
glFT.glVertex3f(state2[i+1][0] * multiplier,
state2[i+1][1] * multiplier,
state2[i+1][2] * multiplier)
if (state == 2):
last = kState3Count - 1
for i in range(last):
glFT.glVertex3f(state3[i][0] * multiplier,
state3[i][1] * multiplier,
state3[i][2] * multiplier)
glFT.glVertex3f(state3[i+1][0] * multiplier,
state3[i+1][1] * multiplier,
state3[i+1][2] * multiplier)
if (state == 3):
last = kState4Count - 1
for i in range(last):
glFT.glVertex3f(state4[i][0] * multiplier,
state4[i][1] * multiplier,
state4[i][2] * multiplier)
glFT.glVertex3f(state4[i+1][0] * multiplier,
state4[i+1][1] * multiplier,
state4[i+1][2] * multiplier)
last = kArrow1Count - 1
for i in range(last):
glFT.glVertex3f((-arrow1[i][0] * multiplier * math.cos(angle1) - arrow1[i][2] * multiplier * math.sin(angle1)),
(arrow1[i][1] * multiplier + delta1),
(arrow1[i][2] * multiplier * math.cos(angle1) - arrow1[i][0] * multiplier * math.sin(angle1)))
glFT.glVertex3f((-arrow1[i+1][0] * multiplier * math.cos(angle1) - arrow1[i+1][2] * multiplier * math.sin(angle1)),
(arrow1[i+1][1] * multiplier + delta1),
(arrow1[i+1][2] * multiplier * math.cos(angle1) - arrow1[i+1][0] * multiplier * math.sin(angle1)))
last = kArrow2Count - 1
for i in range(last):
glFT.glVertex3f((-arrow2[i][0] * multiplier * math.cos(angle2) - arrow2[i][2] * multiplier * math.sin(angle2)),
(arrow2[i][1] * multiplier + delta2),
(arrow2[i][2] * multiplier * math.cos(angle2) - arrow2[i][0] * multiplier * math.sin(angle2)))
glFT.glVertex3f((-arrow2[i+1][0] * multiplier * math.cos(angle2) - arrow2[i+1][2] * multiplier * math.sin(angle2)),
(arrow2[i+1][1] * multiplier + delta2),
(arrow2[i+1][2] * multiplier * math.cos(angle2) - arrow2[i+1][0] * multiplier * math.sin(angle2)))
last = kArrow3Count - 1
for i in range(last):
glFT.glVertex3f((-arrow3[i][0] * multiplier * math.cos(angle3) - arrow3[i][2] * multiplier * math.sin(angle3)),
(arrow3[i][1] * multiplier + delta3),
(arrow3[i][2] * multiplier * math.cos(angle3) - arrow3[i][0] * multiplier * math.sin(angle3)))
glFT.glVertex3f((-arrow3[i+1][0] * multiplier * math.cos(angle3) - arrow3[i+1][2] * multiplier * math.sin(angle3)),
(arrow3[i+1][1] * multiplier + delta3),
(arrow3[i+1][2] * multiplier * math.cos(angle3) - arrow3[i+1][0] * multiplier * math.sin(angle3)))
last = kArrow4Count - 1
for i in range(last):
glFT.glVertex3f((arrow4[i][0] * multiplier),
(arrow4[i][1] * multiplier + delta4),
(arrow4[i][2] * multiplier))
glFT.glVertex3f((arrow4[i+1][0] * multiplier),
(arrow4[i+1][1] * multiplier + delta4),
(arrow4[i+1][2] * multiplier))
last = kPerimeterCount - 1
for i in range(last):
glFT.glVertex3f(perimeter[i][0] * multiplier,
perimeter[i][1] * multiplier,
perimeter[i][2] * multiplier)
glFT.glVertex3f(perimeter[i+1][0] * multiplier,
perimeter[i+1][1] * multiplier,
perimeter[i+1][2] * multiplier)
glFT.glEnd()
view.endGL()
def isBounded(self):
return True
def boundingBox(self):
thisNode = self.thisMObject()
plug = OpenMaya.MPlug(thisNode, swissArmyLocator.aSize)
sizeVal = plug.asMDistance()
multiplier = sizeVal.asCentimeters()
corner1 = OpenMaya.MPoint(-1.1, 0.0, -1.1)
corner2 = OpenMaya.MPoint(1.1, 0.0, 1.1)
corner1 = corner1 * multiplier
corner2 = corner2 * multiplier
return OpenMaya.MBoundingBox(corner1, corner2)
########################################################################
########################################################################
def locatorCreator():
return OpenMayaMPx.asMPxPtr(swissArmyLocator())
def locatorInit():
unitFn = OpenMaya.MFnUnitAttribute()
numericFn = OpenMaya.MFnNumericAttribute()
# aSize
swissArmyLocator.aSize = unitFn.create("size", "sz", OpenMaya.MFnUnitAttribute.kDistance, 10.0)
unitFn.setStorable(True)
unitFn.setWritable(True)
# aPoint
swissArmyLocator.aPointX = numericFn.create("pointX", "ptx", OpenMaya.MFnNumericData.kDouble, 0.0)
swissArmyLocator.aPointY = numericFn.create("pointY", "pty", OpenMaya.MFnNumericData.kDouble, 0.0)
swissArmyLocator.aPointZ = numericFn.create("pointZ", "ptz", OpenMaya.MFnNumericData.kDouble, 0.0)
swissArmyLocator.aPoint = numericFn.create("point", "pt", swissArmyLocator.aPointX, swissArmyLocator.aPointY, swissArmyLocator.aPointZ)
# aArrow1Angle
swissArmyLocator.aArrow1Angle = unitFn.create("arrow1Angle", "a1a", OpenMaya.MFnUnitAttribute.kAngle, 0.0)
# aArrow2Direction
swissArmyLocator.aArrow2DirectionX = numericFn.create("arrow2DirectionX", "a2x", OpenMaya.MFnNumericData.kDouble, 1.0)
swissArmyLocator.aArrow2DirectionY = numericFn.create("arrow2DirectionY", "a2y", OpenMaya.MFnNumericData.kDouble, 0.0)
swissArmyLocator.aArrow2DirectionZ = numericFn.create("arrow2DirectionZ", "a2z", OpenMaya.MFnNumericData.kDouble, 0.0)
swissArmyLocator.aArrow2Direction = numericFn.create("arrow2Direction", "dir", swissArmyLocator.aArrow2DirectionX, swissArmyLocator.aArrow2DirectionY, swissArmyLocator.aArrow2DirectionZ)
# aArrow3Angle
swissArmyLocator.aArrow3Angle = unitFn.create("arrow3Angle", "a3a", OpenMaya.MFnUnitAttribute.kAngle, 0.0)
# aArrow4Distance
swissArmyLocator.aArrow4Distance = unitFn.create("arrow2Distance", "dis", OpenMaya.MFnUnitAttribute.kDistance, 0.0)
# aState
swissArmyLocator.aState = numericFn.create("state", "s", OpenMaya.MFnNumericData.kLong, 0)
# aToggle
swissArmyLocator.aToggle = numericFn.create("toggle", "t", OpenMaya.MFnNumericData.kBoolean, False)
swissArmyLocator.addAttribute(swissArmyLocator.aPoint)
swissArmyLocator.addAttribute(swissArmyLocator.aArrow1Angle)
swissArmyLocator.addAttribute(swissArmyLocator.aArrow2Direction)
swissArmyLocator.addAttribute(swissArmyLocator.aArrow3Angle)
swissArmyLocator.addAttribute(swissArmyLocator.aArrow4Distance)
swissArmyLocator.addAttribute(swissArmyLocator.aState)
swissArmyLocator.addAttribute(swissArmyLocator.aToggle)
swissArmyLocator.addAttribute(swissArmyLocator.aSize)
OpenMayaMPx.MPxManipContainer.addToManipConnectTable(kSwissArmyLocatorId)
def locatorManipCreator():
return OpenMayaMPx.asMPxPtr(swissArmyLocatorManip())
def locatorManipInit():
OpenMayaMPx.MPxManipContainer.initialize()
# initialize the script plug-in
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject, "Autodesk", "1.0", "Any")
try:
mplugin.registerNode(kSwissArmyLocatorName,
kSwissArmyLocatorId,
locatorCreator,
locatorInit,
OpenMayaMPx.MPxNode.kLocatorNode)
except:
print "Failed to register context command: %s" % kSwissArmyLocatorName
raise
try:
mplugin.registerNode(kSwissArmyLocatorManipName,
kSwissArmyLocatorManipId,
locatorManipCreator,
locatorManipInit,
OpenMayaMPx.MPxNode.kManipContainer)
except:
print "Failed to register node: %s" % kSwissArmyLocatorManipName
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode(kSwissArmyLocatorId)
except:
print "Failed to deregister context command: %s" % kSwissArmyLocatorName
raise
try:
mplugin.deregisterNode(kSwissArmyLocatorManipId)
except:
print "Failed to deregister node: %s" % kSwissArmyLocatorManipName
raise
| 34.194226
| 187
| 0.691971
|
3e83a9952365e9e3d2e29c25c0ebf78b14e2c8f7
| 3,004
|
py
|
Python
|
examples/gym_test.py
|
tawnkramer/donkey_gym
|
4ea670491eaef66178a1ffe3d672c7d4344c51bf
|
[
"MIT"
] | 31
|
2018-10-20T22:00:53.000Z
|
2019-08-07T12:24:28.000Z
|
examples/gym_test.py
|
tawnkramer/donkey_gym
|
4ea670491eaef66178a1ffe3d672c7d4344c51bf
|
[
"MIT"
] | 4
|
2018-12-18T23:09:51.000Z
|
2019-08-08T20:40:05.000Z
|
examples/gym_test.py
|
tawnkramer/donkey_gym
|
4ea670491eaef66178a1ffe3d672c7d4344c51bf
|
[
"MIT"
] | 17
|
2018-11-04T20:36:02.000Z
|
2019-08-07T15:25:00.000Z
|
"""
file: gym_test.py
author: Tawn Kramer
date: 20 October 2018
notes: This will do a basic test of the gym_donkeycar environment by
submitting random input for 3 episodes.
"""
import argparse
import gym
import gym_donkeycar # noqa: F401
NUM_EPISODES = 3
MAX_TIME_STEPS = 1000
def test_track(env_name, conf):
env = gym.make(env_name, conf=conf)
# make sure you have no track loaded
exit_scene(env)
simulate(env)
# exit the scene and close the env
exit_scene(env)
env.close()
def select_action(env):
return env.action_space.sample() # taking random action from the action_space
def simulate(env):
for _ in range(NUM_EPISODES):
# Reset the environment
obv = env.reset()
for _ in range(MAX_TIME_STEPS):
# Select an action
action = select_action(env)
# execute the action
obv, reward, done, info = env.step(action)
if done:
print("done w episode.", info)
break
def exit_scene(env):
env.viewer.exit_scene()
if __name__ == "__main__":
# Initialize the donkey environment
# where env_name one of:
env_list = [
"donkey-warehouse-v0",
"donkey-generated-roads-v0",
"donkey-avc-sparkfun-v0",
"donkey-generated-track-v0",
"donkey-roboracingleague-track-v0",
"donkey-waveshare-v0",
"donkey-minimonaco-track-v0",
"donkey-warren-track-v0",
"donkey-thunderhill-track-v0",
"donkey-circuit-launch-track-v0",
]
parser = argparse.ArgumentParser(description="gym_test")
parser.add_argument(
"--sim",
type=str,
default="sim_path",
help="path to unity simulator. maybe be left at default if you would like to start the sim on your own.",
)
parser.add_argument("--host", type=str, default="127.0.0.1", help="host to use for tcp")
parser.add_argument("--port", type=int, default=9091, help="port to use for tcp")
parser.add_argument(
"--env_name", type=str, default="all", help="name of donkey sim environment", choices=env_list + ["all"]
)
args = parser.parse_args()
conf = {
"exe_path": args.sim,
"host": args.host,
"port": args.port,
"body_style": "donkey",
"body_rgb": (128, 128, 128),
"car_name": "me",
"font_size": 100,
"start_delay": 1,
"max_cte": 5,
"lidar_config": {
"deg_per_sweep_inc": 2.0,
"deg_ang_down": 0.0,
"deg_ang_delta": -1.0,
"num_sweeps_levels": 1,
"max_range": 50.0,
"noise": 0.4,
"offset_x": 0.0,
"offset_y": 0.5,
"offset_z": 0.5,
"rot_x": 0.0,
},
}
if args.env_name == "all":
for env_name in env_list:
test_track(env_name, conf)
else:
test_track(args.env_name, conf)
print("test finished")
| 24.422764
| 113
| 0.579228
|
8fed8a29a6f33975bada7e22c9c246ed7976923f
| 4,292
|
py
|
Python
|
bquadform_utils.py
|
coinstudent2048/BQuadForm
|
d09b55b4f4d5004eaac41c5b19219c89f7ca441c
|
[
"MIT"
] | null | null | null |
bquadform_utils.py
|
coinstudent2048/BQuadForm
|
d09b55b4f4d5004eaac41c5b19219c89f7ca441c
|
[
"MIT"
] | null | null | null |
bquadform_utils.py
|
coinstudent2048/BQuadForm
|
d09b55b4f4d5004eaac41c5b19219c89f7ca441c
|
[
"MIT"
] | null | null | null |
# Miscellaneous utilities for bquadform.py
#
# Use this code only for prototyping
# integer square root (floor)
# source: https://stackoverflow.com/a/53983683
def isqrt(n):
if not isinstance(n, int):
raise TypeError("input is not integer")
if n > 0:
x = 1 << (n.bit_length() + 1 >> 1)
while True:
y = (x + n // x) >> 1
if y >= x:
return x
x = y
elif n == 0:
return 0
else:
raise ValueError("input is negative")
# # integer square root (ceiling)
# def isqrt_ceil(n):
# if n == 0:
# return 0
# else:
# return 1 + isqrt(n - 1)
# Euclidean division: always ensures that
# 0 <= r < |b| regardless of sign of divisor
def divmod_euclid(a, b):
q, r = divmod(a, b) # divmod uses floor division
if r < 0:
q += 1
r -= b
return (q, r)
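# Illustrative sketch (not part of the original module): unlike Python's
# floor-division divmod, the Euclidean remainder is always in [0, |b|).
def _divmod_euclid_examples():
    assert divmod(7, -2) == (-4, -1)           # floor division: remainder < 0
    assert divmod_euclid(7, -2) == (-3, 1)     # Euclidean: 0 <= r < |b|
    assert divmod_euclid(-7, 2) == (-4, 1)
    assert divmod_euclid(7, 2) == (3, 1)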
# extended Euclidean algorithm (assumes a >= 0 & b >= 0)
# reference: Algorithm 1.3.6 (p.16) of Cohen -
# "A Course in Computational Algebraic Number theory" (GTM 138)
def ext_euclid(a, b, u = 1, v1 = 0):
# [Initialize]
d = a
# v to be computed in ext_euclid_front()
if b == 0:
return (u, d)
v3 = b
# [Finished?]
while v3 != 0:
# [Euclidean step]
q, t3 = divmod(d, v3)
t1 = u - q * v1
u = v1
d = v3
v1 = t1
v3 = t3
# [Finished?] cont. moved to ext_euclid_front()
return (u, d)
# extended partial Euclidean algorithm
# reference: Sub-algorithm PARTEUCL(a, b) (p. 248) of Cohen -
# "A Course in Computational Algebraic Number theory" (GTM 138)
def part_euclid(d, v3, v, v2, L):
# [Initialize]
z = 0
# [Finished?]
while abs(v3) > L:
# [Euclidean step]
q, t3 = divmod_euclid(d, v3)
t2 = v - q * v2
v = v2
d = v3
v2 = t2
v3 = t3
z += 1
# [Finished?] cont. moved to main functions
return (v, d, v2, v3, z)
# most significant digit of a, and the value of b in same place
# in base M (assumes a >= b, a >= 0, and b >= 0)
def same_msd(a, b, M):
while a >= M:
a //= M
b //= M
return a, b
# Lehmer extended (assumes a >= b, a >= 0, and b >= 0)
# reference: Algorithm 1.3.7 (p. 17) of Cohen -
# "A Course in Computational Algebraic Number theory" (GTM 138)
# my comment: for some reason, this is slower?!
def lehmer(a, b, M):
# [Initialize]
u = 1
v1 = 0
# [Finished?]
while abs(b) >= M:
a_hat, b_hat = same_msd(a, b, M)
A = 1
B = 0
C = 0
D = 1
# [Test quotient]
while not (b_hat + C == 0 or b_hat + D == 0):
q = (a_hat + A) // (b_hat + C)
if q != ((a_hat + B) // (b_hat + D)):
break
# [Euclidean step]
T = A - q * C
A = C
C = T
T = B - q * D
B = D
D = T
T = a_hat - q * b_hat
a_hat = b_hat
b_hat = T
# [Multi-precision step]
if B == 0:
q, t = divmod(a, b)
a = b
b = t
t = u - q * v1
u = v1
v1 = t
else:
t = A * a + B * b
r = C * a + D * b
a = t
b = r
t = A * u + B * v1
r = C * u + D * v1
u = t
v1 = r
return a, b, u, v1
# "frontend" for extended Euclidean algorithm
def ext_euclid_front(a, b, use_lehmer = True, M = 1 << 32):
# init: the algorithms assume that a >= 0 & b >= 0
orig_a = a
orig_b = b
if orig_a < 0:
a = -a
if orig_b < 0:
b = -b
# execute algorithms
if use_lehmer and a < b:
at = a
bt = b
b, a, u, v1 = lehmer(b, a, M)
u, d = ext_euclid(b, a, u, v1)
v = u
u = (d - bt * v) // at
elif use_lehmer:
at = a
bt = b
a, b, u, v1 = lehmer(a, b, M)
u, d = ext_euclid(a, b, u, v1)
v = (d - at * u) // bt
else:
u, d = ext_euclid(a, b)
v = (d - a * u) // b
# final: check sign of orig a & b
if orig_a < 0:
a = -a
u = -u
if b < 0:
b = -b
v = -v
return (u, v, d)
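# Illustrative sketch (not part of the original module): the returned Bezout
# coefficients satisfy u*a + v*b == d == gcd(a, b), with or without Lehmer.
def _ext_euclid_front_examples():
    u, v, d = ext_euclid_front(240, 46)
    assert d == 2 and u * 240 + v * 46 == d
    u, v, d = ext_euclid_front(240, 46, use_lehmer=False)
    assert d == 2 and u * 240 + v * 46 == d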
| 25.39645
| 63
| 0.45247
|
53f8d098ce583370597b4e7ca96c05dc04ffcf03
| 117
|
py
|
Python
|
tests/unit/core/test_urls.py
|
etienne86/oc_p13_team_spirit
|
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
|
[
"MIT"
] | null | null | null |
tests/unit/core/test_urls.py
|
etienne86/oc_p13_team_spirit
|
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
|
[
"MIT"
] | null | null | null |
tests/unit/core/test_urls.py
|
etienne86/oc_p13_team_spirit
|
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
|
[
"MIT"
] | null | null | null |
"""
This module contains the unit tests related to
the urls in app ``core``.
"""
# from django.test import TestCase
| 16.714286
| 46
| 0.709402
|
ad14326992bd79ed996bbca8c3a7ca903cdaffc8
| 27,594
|
py
|
Python
|
databroker/headersource/core.py
|
danielballan/databroker
|
6eeafd63d1ecf691a06acd8d15a2ea27d381b8db
|
[
"BSD-3-Clause"
] | null | null | null |
databroker/headersource/core.py
|
danielballan/databroker
|
6eeafd63d1ecf691a06acd8d15a2ea27d381b8db
|
[
"BSD-3-Clause"
] | 8
|
2016-11-08T18:19:15.000Z
|
2017-04-06T13:13:55.000Z
|
databroker/headersource/core.py
|
stuartcampbell/databroker
|
b8cefd1a982a697bb679d8a2c1743751a42007d8
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import six
import warnings
import logging
import numpy as np
from ..utils import (apply_to_dict_recursively, sanitize_np,
format_time as _format_time)
logger = logging.getLogger(__name__)
# singletons defined as they are defined in pymongo
ASCENDING = 1
DESCENDING = -1
def _format_regex(d):
for k, v in six.iteritems(d):
if k == '$regex':
            # format regex for mongoquery
d[k] = '/{0}/'.format(v)
else:
# recurse if v is a dict
if hasattr(v, 'items'):
_format_regex(v)
class NoRunStop(Exception):
pass
class NoRunStart(Exception):
pass
class NoEventDescriptors(Exception):
pass
def doc_or_uid_to_uid(doc_or_uid):
"""Given Document or uid return the uid
Parameters
----------
doc_or_uid : dict or str
If str, then assume uid and pass through, if not, return
the 'uid' field
Returns
-------
uid : str
A string version of the uid of the given document
"""
if not isinstance(doc_or_uid, six.string_types):
doc_or_uid = doc_or_uid['uid']
return doc_or_uid
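# Illustrative behavior:
#   doc_or_uid_to_uid('abc') == 'abc'
#   doc_or_uid_to_uid({'uid': 'abc', 'time': 0}) == 'abc'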
def _cache_run_start(run_start, run_start_cache):
"""Cache a RunStart document
Parameters
----------
run_start : dict
raw pymongo dictionary. This is expected to have
an entry `_id` with the ObjectId used by mongo.
run_start_cache : dict
Dict[str, Document]
Returns
-------
run_start : dict
Document instance for this RunStart document.
The ObjectId has been stripped.
"""
run_start = dict(run_start)
# TODO actually do this de-reference for documents that have it
    # There is no known actual usage of this field and it is not being
    # created going forward
run_start.pop('beamline_config_id', None)
# get the mongo ObjectID
oid = run_start.pop('_id', None)
run_start_cache[run_start['uid']] = run_start
run_start_cache[oid] = run_start
return run_start
def _cache_run_stop(run_stop, run_stop_cache):
"""Cache a RunStop document
Parameters
----------
run_stop : dict
raw pymongo dictionary. This is expected to have
an entry `_id` with the ObjectId used by mongo.
run_stop_cache : dict
Dict[str, Document]
Returns
-------
run_stop : dict
Document instance for this RunStop document.
The ObjectId (if it exists) has been stripped.
"""
run_stop = dict(run_stop)
# pop off the ObjectId of this document
oid = run_stop.pop('_id', None)
try:
run_stop['run_start']
except KeyError:
run_stop['run_start'] = run_stop.pop('run_start_id')
run_stop_cache[run_stop['uid']] = run_stop
    # also cache against the ObjectId, if one was present
if oid is not None:
run_stop_cache[oid] = run_stop
return run_stop
def _cache_descriptor(descriptor, descriptor_cache):
    """De-reference and cache an EventDescriptor document
The de-referenced Document is cached against the
ObjectId and the uid -> ObjectID mapping is stored.
Parameters
----------
descriptor : dict
raw pymongo dictionary. This is expected to have
an entry `_id` with the ObjectId used by mongo.
Returns
-------
descriptor : dict
Document instance for this EventDescriptor document.
The ObjectId has been stripped.
"""
descriptor = dict(descriptor)
# pop the ObjectID
oid = descriptor.pop('_id', None)
try:
descriptor['run_start']
except KeyError:
descriptor['run_start'] = descriptor.pop('run_start_id')
    descriptor_cache[descriptor['uid']] = descriptor
    if oid is not None:
        descriptor_cache[oid] = descriptor
return descriptor
def run_start_given_uid(uid, run_start_col, run_start_cache):
"""Given a uid, return the RunStart document
Parameters
----------
uid : str
The uid
run_start_col : pymongo.Collection
The collection to search for documents
run_start_cache : MutableMapping
Mutable mapping to serve as a local cache
Returns
-------
run_start : dict
The RunStart document.
"""
try:
return run_start_cache[uid]
except KeyError:
pass
run_start = run_start_col.find_one({'uid': uid})
if run_start is None:
raise NoRunStart("No runstart with uid {!r}".format(uid))
return _cache_run_start(run_start, run_start_cache)
def run_stop_given_uid(uid, run_stop_col, run_stop_cache):
"""Given a uid, return the RunStop document
Parameters
----------
uid : str
The uid
run_stop_col : pymongo.Collection
The collection to search for documents
run_stop_cache : MutableMapping
Mutable mapping to serve as a local cache
Returns
-------
run_stop : dict
The RunStop document fully de-referenced
"""
try:
return run_stop_cache[uid]
except KeyError:
pass
# get the raw run_stop
run_stop = run_stop_col.find_one({'uid': uid})
return _cache_run_stop(run_stop, run_stop_cache)
def descriptor_given_uid(uid, descriptor_col, descriptor_cache):
"""Given a uid, return the EventDescriptor document
Parameters
----------
uid : str
The uid
descriptor_col : pymongo.Collection
The collection to search for documents
descriptor_cache : MutableMapping
Mutable mapping to serve as a local cache
Returns
-------
descriptor : dict
The EventDescriptor document fully de-referenced
"""
try:
return descriptor_cache[uid]
except KeyError:
pass
descriptor = descriptor_col.find_one({'uid': uid})
return _cache_descriptor(descriptor, descriptor_cache)
def stop_by_start(run_start, run_stop_col, run_stop_cache):
"""Given a RunStart return it's RunStop
Raises if no RunStop exists.
Parameters
----------
run_start : dict or str
The RunStart to get the RunStop for. Can be either
a Document/dict with a 'uid' key or a uid string
Returns
-------
run_stop : dict
The RunStop document
Raises
------
NoRunStop
If no RunStop document exists for the given RunStart
"""
run_start_uid = doc_or_uid_to_uid(run_start)
run_stop = run_stop_col.find_one({'run_start': run_start_uid})
if run_stop is None:
raise NoRunStop("No run stop exists for {!r}".format(run_start))
return _cache_run_stop(run_stop, run_stop_cache)
def descriptors_by_start(run_start, descriptor_col, descriptor_cache):
"""Given a RunStart return a list of it's descriptors
Raises if no EventDescriptors exist.
Parameters
----------
run_start : dict or str
The RunStart to get the EventDescriptors for. Can be either
a Document/dict with a 'uid' key or a uid string
descriptor_col
A collection we can search against
descriptor_cache : dict
Dict[str, Document]
Returns
-------
event_descriptors : list
A list of EventDescriptor documents
Raises
------
NoEventDescriptors
If no EventDescriptor documents exist for the given RunStart
"""
# normalize the input and get the run_start oid
run_start_uid = doc_or_uid_to_uid(run_start)
# query the database for any event descriptors which
# refer to the given run_start
descriptors = descriptor_col.find({'run_start': run_start_uid})
# loop over the found documents, cache, and dereference
rets = [_cache_descriptor(descriptor, descriptor_cache)
for descriptor in descriptors]
# if nothing found, raise
if not rets:
        raise NoEventDescriptors("No EventDescriptors exist "
"for {!r}".format(run_start))
# return the list of event descriptors
return rets
def get_events_generator(descriptor, event_col, descriptor_col,
descriptor_cache, run_start_col,
run_start_cache, convert_arrays=True):
"""A generator which yields all events from the event stream
Parameters
----------
descriptor : dict or str
The EventDescriptor to get the Events for. Can be either
a Document/dict with a 'uid' key or a uid string
event_col
Collection we can search for events given descriptor in.
descriptor_col
Collection we can search for descriptors given a uid
descriptor_cache : dict
Dict[str, Document]
convert_arrays: boolean, optional
convert 'array' type to numpy.ndarray; True by default
Yields
------
event : dict
All events for the given EventDescriptor from oldest to
newest
"""
descriptor_uid = doc_or_uid_to_uid(descriptor)
descriptor = descriptor_given_uid(descriptor_uid, descriptor_col,
descriptor_cache)
col = event_col
ev_cur = col.find({'descriptor': descriptor_uid},
sort=[('time', ASCENDING)])
data_keys = descriptor['data_keys']
external_keys = [k for k in data_keys if 'external' in data_keys[k]]
for ev in ev_cur:
# ditch the ObjectID
ev.pop('_id', None)
ev['descriptor'] = descriptor_uid
for k, v in ev['data'].items():
_dk = data_keys[k]
# convert any arrays stored directly in mds into ndarray
if convert_arrays:
if _dk['dtype'] == 'array' and not _dk.get('external', False):
ev['data'][k] = np.asarray(ev['data'][k])
# note which keys refer to dereferences (external) data
ev['filled'] = {k: False for k in external_keys}
yield ev
def _transpose(in_data, keys, field):
"""Turn a list of dicts into dict of lists
Parameters
----------
in_data : list
A list of dicts which contain at least one dict.
All of the inner dicts must have at least the keys
in `keys`
keys : list
The list of keys to extract
field : str
The field in the outer dict to use
Returns
-------
transpose : dict
The transpose of the data
"""
out = {k: [None] * len(in_data) for k in keys}
for j, ev in enumerate(in_data):
dd = ev[field]
for k in keys:
out[k][j] = dd[k]
return out
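# Illustrative example (hypothetical events):
#   _transpose([{'data': {'x': 1}}, {'data': {'x': 2}}], ['x'], 'data')
#   returns {'x': [1, 2]}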
def get_events_table(descriptor, event_col, descriptor_col,
descriptor_cache, run_start_col, run_start_cache):
"""All event data as tables
Parameters
----------
descriptor : dict or str
The EventDescriptor to get the Events for. Can be either
a Document/dict with a 'uid' key or a uid string
event_col
Collection we can search for events given descriptor in.
descriptor_col
Collection we can search for descriptors given a uid
descriptor_cache : dict
Dict[str, Document]
Returns
-------
descriptor : dict
EventDescriptor document
data_table : dict
dict of lists of the transposed data
seq_nums : list
The sequence number of each event.
times : list
The time of each event.
uids : list
The uid of each event.
timestamps_table : dict
The timestamps of each of the measurements as dict of lists. Same
keys as `data_table`.
"""
desc_uid = doc_or_uid_to_uid(descriptor)
descriptor = descriptor_given_uid(desc_uid, descriptor_col,
descriptor_cache)
# this will get more complicated once transpose caching layer is in place
all_events = list(get_events_generator(desc_uid, event_col,
descriptor_col,
descriptor_cache,
run_start_col,
run_start_cache))
# get event sequence numbers
seq_nums = [ev['seq_num'] for ev in all_events]
# get event times
times = [ev['time'] for ev in all_events]
# get uids
uids = [ev['uid'] for ev in all_events]
keys = list(descriptor['data_keys'])
# get data values
data_table = _transpose(all_events, keys, 'data')
# get timestamps
timestamps_table = _transpose(all_events, keys, 'timestamps')
# return the whole lot
return descriptor, data_table, seq_nums, times, uids, timestamps_table
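# Typical call pattern (sketch; the collections and caches come from the
# caller's database setup, and 'motor' is a hypothetical data key):
#   desc, data, seq_nums, times, uids, ts = get_events_table(
#       descriptor, event_col, descriptor_col, descriptor_cache,
#       run_start_col, run_start_cache)
#   positions = data['motor']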
# database INSERTION ###################################################
def insert_run_start(run_start_col, run_start_cache,
time, uid, **kwargs):
"""Insert a RunStart document into the database.
Parameters
----------
run_start_col
Collection to insert the start document into
run_start_cache : dict
Dict[str, Document]
time : float
The date/time as found at the client side when the run is started
uid : str
Globally unique id string provided to metadatastore
**kwargs
additional optional or custom fields
Returns
-------
run_start : str
uid of the inserted document. Use `run_start_given_uid` to get
the full document.
"""
if 'custom' in kwargs:
warnings.warn("custom kwarg is deprecated")
custom = kwargs.pop('custom')
if any(k in kwargs for k in custom):
raise TypeError("duplicate keys in kwargs and custom")
kwargs.update(custom)
col = run_start_col
run_start = dict(time=time, uid=uid, **copy.deepcopy(kwargs))
apply_to_dict_recursively(run_start, sanitize_np)
col.insert_one(run_start)
_cache_run_start(run_start, run_start_cache)
logger.debug('Inserted RunStart with uid %s', run_start['uid'])
return uid
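# Example call (sketch; `start_col` and `start_cache` are supplied by the
# caller, and the extra keyword fields are purely illustrative):
#   import time, uuid
#   insert_run_start(start_col, start_cache, time=time.time(),
#                    uid=str(uuid.uuid4()), scan_id=1, owner='demo')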
def insert_run_stop(run_stop_col, run_stop_cache,
run_start, time, uid, exit_status, reason=None,
**kwargs):
"""Insert RunStop document into database
Parameters
----------
run_stop_col
Collection to insert the start document into
run_stop_cache : dict
Dict[str, Document]
run_start : dict or str
The RunStart to insert the RunStop for. Can be either
a Document/dict with a 'uid' key or a uid string
time : float
The date/time as found at the client side
uid : str
Globally unique id string provided to metadatastore
    exit_status : {'success', 'abort', 'fail'}
        indicates the reason the run stopped
reason : str, optional
more detailed exit status (stack trace, user remark, etc.)
Returns
-------
run_stop : str
uid of inserted Document
Raises
------
RuntimeError
Only one RunStop per RunStart, raises if you try to insert a second
"""
if 'custom' in kwargs:
warnings.warn("custom kwarg is deprecated")
custom = kwargs.pop('custom')
if any(k in kwargs for k in custom):
raise TypeError("duplicate keys in kwargs and custom")
kwargs.update(custom)
run_start_uid = doc_or_uid_to_uid(run_start)
try:
stop_by_start(run_start_uid,
run_stop_col, run_stop_cache)
except NoRunStop:
pass
else:
raise RuntimeError("Runstop already exits for {!r}".format(run_start))
col = run_stop_col
run_stop = dict(run_start=run_start_uid, time=time, uid=uid,
exit_status=exit_status, **copy.deepcopy(kwargs))
apply_to_dict_recursively(run_stop, sanitize_np)
if reason is not None and reason != '':
run_stop['reason'] = reason
col.insert_one(run_stop)
_cache_run_stop(run_stop, run_stop_cache)
logger.debug("Inserted RunStop with uid %s referencing RunStart "
" with uid %s", run_stop['uid'], run_start_uid)
return uid
def insert_descriptor(descriptor_col,
descriptor_cache, run_start, data_keys, time, uid,
**kwargs):
"""Insert an EventDescriptor document in to database.
Parameters
----------
descriptor_col
Collection to insert the start document into
descriptor_cache : dict
Dict[str, Document]
run_start : dict or str
The RunStart to insert a Descriptor for. Can be either
a Document/dict with a 'uid' key or a uid string
data_keys : dict
        Describes the keys that the data dictionary of an event
        will contain.  No key name may include '.'.  See
`DataKey` odm template for schema.
time : float
The date/time as found at the client side when an event
descriptor is created.
uid : str
Globally unique id string provided to metadatastore
Returns
-------
descriptor : str
uid of inserted Document
"""
if 'custom' in kwargs:
warnings.warn("custom kwarg is deprecated")
custom = kwargs.pop('custom')
if any(k in kwargs for k in custom):
raise TypeError("duplicate keys in kwargs and custom")
kwargs.update(custom)
for k in data_keys:
if '.' in k:
raise ValueError("Key names can not contain '.' (period).")
data_keys = {k: dict(v) for k, v in data_keys.items()}
run_start_uid = doc_or_uid_to_uid(run_start)
col = descriptor_col
descriptor = dict(run_start=run_start_uid, data_keys=data_keys,
time=time, uid=uid, **copy.deepcopy(kwargs))
apply_to_dict_recursively(descriptor, sanitize_np)
# TODO validation
col.insert_one(descriptor)
descriptor = _cache_descriptor(descriptor, descriptor_cache)
logger.debug("Inserted EventDescriptor with uid %s referencing "
"RunStart with uid %s", descriptor['uid'], run_start_uid)
return uid
insert_event_descriptor = insert_descriptor
def insert_event(event_col, descriptor, time, seq_num, data, timestamps, uid,
validate, filled):
"""Create an event in metadatastore database backend
.. warning
This does not validate that the keys in `data` and `timestamps`
match the data keys in `descriptor`.
Parameters
----------
event_col
Collection to insert the Event into.
descriptor : dict or str
The Descriptor to insert event for. Can be either
a Document/dict with a 'uid' key or a uid string
time : float
The date/time as found at the client side when an event is
created.
seq_num : int
Unique sequence number for the event. Provides order of an event in
the group of events
data : dict
Dictionary of measured values (or external references)
timestamps : dict
Dictionary of measured timestamps for each values, having the
same keys as `data` above
uid : str
Globally unique id string provided to metadatastore
validate : boolean
Check that data and timestamps have the same keys.
filled : dict
Dictionary of `False` or datum_ids. Keys are a subset of the keys in
`data` and `timestamps` above.
"""
if validate:
raise NotImplementedError("insert event validation not written yet")
# convert data to storage format
# make sure we really have a uid
descriptor_uid = doc_or_uid_to_uid(descriptor)
col = event_col
data = dict(data)
apply_to_dict_recursively(data, sanitize_np)
timestamps = dict(timestamps)
apply_to_dict_recursively(timestamps, sanitize_np)
# Replace any filled data with the datum_id stashed in 'filled'.
for k, v in six.iteritems(filled):
if v:
data[k] = v
event = dict(descriptor=descriptor_uid, uid=uid,
data=data, timestamps=timestamps, time=time,
seq_num=int(seq_num))
col.insert_one(event)
logger.debug("Inserted Event with uid %s referencing "
"EventDescriptor with uid %s", event['uid'],
descriptor_uid)
return uid
BAD_KEYS_FMT = """Event documents are malformed, the keys on 'data' and
'timestamps do not match:\n data: {}\ntimestamps:{}"""
def bulk_insert_events(event_col, descriptor, events, validate):
"""Bulk insert many events
Parameters
----------
event_col
The collection to insert the Events into
descriptor : dict or str
The Descriptor to insert event for. Can be either
a Document/dict with a 'uid' key or a uid string
events : iterable
iterable of dicts matching the bs.Event schema
validate : bool
If it should be checked that each pair of data/timestamps
dicts has identical keys
Returns
-------
ret : dict
dictionary of details about the insertion
"""
descriptor_uid = str(doc_or_uid_to_uid(descriptor))
def event_factory():
for ev in events:
# check keys, this could be expensive
if validate:
if ev['data'].keys() != ev['timestamps'].keys():
raise ValueError(
BAD_KEYS_FMT.format(ev['data'].keys(),
ev['timestamps'].keys()))
data = dict(ev['data'])
# Replace any filled data with the datum_id stashed in 'filled'.
for k, v in six.iteritems(ev.get('filled', {})):
if v:
data[k] = v
apply_to_dict_recursively(data, sanitize_np)
ts = dict(ev['timestamps'])
apply_to_dict_recursively(ts, sanitize_np)
ev_out = dict(descriptor=descriptor_uid,
uid=str(ev['uid']),
data=data, timestamps=ts,
time=ev['time'],
seq_num=int(ev['seq_num']))
yield ev_out
return event_col.insert(event_factory())
# DATABASE RETRIEVAL ##########################################################
def find_run_starts(run_start_col, run_start_cache, tz, **kwargs):
"""Given search criteria, locate RunStart Documents.
Parameters
----------
start_time : time-like, optional
time-like representation of the earliest time that a RunStart
was created. Valid options are:
- timestamps --> time.time()
- '2015'
- '2015-01'
- '2015-01-30'
- '2015-03-30 03:00:00'
- datetime.datetime.now()
stop_time : time-like, optional
timestamp of the latest time that a RunStart was created. See
docs for `start_time` for examples.
beamline_id : str, optional
String identifier for a specific beamline
project : str, optional
Project name
owner : str, optional
The username of the logged-in user when the scan was performed
scan_id : int, optional
Integer scan identifier
Returns
-------
rs_objects : iterable of dicts
Examples
--------
>>> find_run_starts(scan_id=123)
>>> find_run_starts(owner='arkilic')
    >>> find_run_starts(start_time=1421176750.514707, stop_time=time.time())
>>> find_run_starts(owner='arkilic', start_time=1421176750.514707,
... stop_time=time.time())
"""
# now try rest of formatting
_format_time(kwargs, tz)
_format_regex(kwargs)
rs_objects = run_start_col.find(kwargs,
sort=[('time', DESCENDING)])
for rs in rs_objects:
yield _cache_run_start(rs, run_start_cache)
def find_run_stops(stop_col, stop_cache, tz,
run_start=None, **kwargs):
"""Given search criteria, locate RunStop Documents.
Parameters
----------
run_start : dict or str, optional
The RunStart document or uid to get the corresponding run end for
start_time : time-like, optional
time-like representation of the earliest time that a RunStop
was created. Valid options are:
- timestamps --> time.time()
- '2015'
- '2015-01'
- '2015-01-30'
- '2015-03-30 03:00:00'
- datetime.datetime.now()
stop_time : time-like, optional
timestamp of the latest time that a RunStop was created. See
docs for `start_time` for examples.
exit_status : {'success', 'fail', 'abort'}, optional
provides information regarding the run success.
reason : str, optional
Long-form description of why the run was terminated.
uid : str, optional
Globally unique id string provided to metadatastore
Yields
------
run_stop : dict
The requested RunStop documents
"""
# if trying to find by run_start, there can be only one
# normalize the input and get the run_start oid
if run_start:
run_start_uid = doc_or_uid_to_uid(run_start)
kwargs['run_start'] = run_start_uid
_format_time(kwargs, tz)
col = stop_col
run_stop = col.find(kwargs, sort=[('time', ASCENDING)])
for rs in run_stop:
yield _cache_run_stop(rs, stop_cache)
def find_descriptors(descriptor_col, descriptor_cache,
tz,
run_start=None, **kwargs):
"""Given search criteria, locate EventDescriptor Documents.
Parameters
----------
run_start : dict or str, optional
The RunStart document or uid to get the corresponding run end for
start_time : time-like, optional
time-like representation of the earliest time that an EventDescriptor
was created. Valid options are:
- timestamps --> time.time()
- '2015'
- '2015-01'
- '2015-01-30'
- '2015-03-30 03:00:00'
- datetime.datetime.now()
stop_time : time-like, optional
timestamp of the latest time that an EventDescriptor was created. See
docs for `start_time` for examples.
uid : str, optional
Globally unique id string provided to metadatastore
Yields
    ------
descriptor : dict
The requested EventDescriptor
"""
if run_start:
run_start_uid = doc_or_uid_to_uid(run_start)
kwargs['run_start'] = run_start_uid
_format_time(kwargs, tz)
col = descriptor_col
event_descriptor_objects = col.find(kwargs,
sort=[('time', ASCENDING)])
for event_descriptor in event_descriptor_objects:
yield _cache_descriptor(event_descriptor, descriptor_cache)
def find_last(start_col, start_cache, num):
"""Locate the last `num` RunStart Documents
Parameters
----------
    num : integer
        number of RunStart documents to return
Yields
------
run_start : dict
The requested RunStart documents
"""
col = start_col
gen = col.find({}, sort=[('time', DESCENDING)])
for _ in range(num):
yield _cache_run_start(next(gen), start_cache)
| 29.41791
| 79
| 0.623179
|
3690a4ffe0d0e575e1a53c2715a7137520191697
| 25,475
|
py
|
Python
|
venv/lib/python3.7/site-packages/cvxpy/tests/test_dgp2dcp.py
|
JWThacker/Airbnb_project
|
f804495512f0f924d3048f788ed33ab230b4e02a
|
[
"MIT"
] | 3,285
|
2015-01-03T04:02:29.000Z
|
2021-04-19T14:51:29.000Z
|
venv/lib/python3.7/site-packages/cvxpy/tests/test_dgp2dcp.py
|
JWThacker/Airbnb_project
|
f804495512f0f924d3048f788ed33ab230b4e02a
|
[
"MIT"
] | 1,138
|
2015-01-01T19:40:14.000Z
|
2021-04-18T23:37:31.000Z
|
cvxpy/tests/test_dgp2dcp.py
|
phschiele/cvxpy
|
a43aed7447b87f6d0fbc6f71ae5c7b84183f3369
|
[
"ECL-2.0",
"Apache-2.0"
] | 765
|
2015-01-02T19:29:39.000Z
|
2021-04-20T00:50:43.000Z
|
import numpy as np
import cvxpy
import cvxpy.error as error
import cvxpy.reductions.dgp2dcp.atom_canonicalizers as dgp_atom_canon
from cvxpy.atoms.affine.add_expr import AddExpression
from cvxpy.reductions import solution
from cvxpy.settings import SOLVER_ERROR
from cvxpy.tests.base_test import BaseTest
SOLVER = cvxpy.ECOS
class TestDgp2Dcp(BaseTest):
def test_unconstrained_monomial(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
prod = x * y
dgp = cvxpy.Problem(cvxpy.Minimize(prod), [])
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
self.assertIsInstance(dcp.objective.expr, AddExpression)
self.assertEqual(len(dcp.objective.expr.args), 2)
self.assertIsInstance(dcp.objective.expr.args[0], cvxpy.Variable)
self.assertIsInstance(dcp.objective.expr.args[1], cvxpy.Variable)
opt = dcp.solve(SOLVER)
# dcp is solved in log-space, so it is unbounded below
# (since the OPT for dgp is 0 + epsilon).
self.assertEqual(opt, -float("inf"))
self.assertEqual(dcp.status, "unbounded")
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
self.assertAlmostEqual(dgp.value, 0.0)
self.assertEqual(dgp.status, "unbounded")
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, 0.0)
self.assertEqual(dgp.status, "unbounded")
dgp = cvxpy.Problem(cvxpy.Maximize(prod), [])
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
self.assertEqual(dcp.solve(SOLVER), float("inf"))
self.assertEqual(dcp.status, "unbounded")
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
self.assertEqual(dgp.value, float("inf"))
self.assertEqual(dgp.status, "unbounded")
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, float("inf"))
self.assertEqual(dgp.status, "unbounded")
def test_basic_equality_constraint(self) -> None:
x = cvxpy.Variable(pos=True)
dgp = cvxpy.Problem(cvxpy.Minimize(x), [x == 1.0])
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
self.assertIsInstance(dcp.objective.expr, cvxpy.Variable)
opt = dcp.solve(SOLVER)
self.assertAlmostEqual(opt, 0.0)
self.assertAlmostEqual(dcp.variables()[0].value, 0.0)
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
self.assertAlmostEqual(dgp.value, 1.0)
self.assertAlmostEqual(x.value, 1.0)
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, 1.0)
self.assertAlmostEqual(x.value, 1.0)
def test_basic_gp(self) -> None:
x, y, z = cvxpy.Variable((3,), pos=True)
constraints = [2*x*y + 2*x*z + 2*y*z <= 1.0, x >= 2*y]
problem = cvxpy.Problem(cvxpy.Minimize(1/(x*y*z)), constraints)
problem.solve(SOLVER, gp=True)
self.assertAlmostEqual(15.59, problem.value, places=2)
def test_maximum(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
prod1 = x * y**0.5
prod2 = 3.0 * x * y**0.5
obj = cvxpy.Minimize(cvxpy.maximum(prod1, prod2))
constr = [x == 1.0, y == 4.0]
dgp = cvxpy.Problem(obj, constr)
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
dcp.solve(SOLVER)
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
self.assertAlmostEqual(dgp.value, 6.0)
self.assertAlmostEqual(x.value, 1.0)
self.assertAlmostEqual(y.value, 4.0)
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, 6.0, places=4)
self.assertAlmostEqual(x.value, 1.0)
def test_prod(self) -> None:
X = np.arange(12).reshape((4, 3))
np.testing.assert_almost_equal(np.prod(X), cvxpy.prod(X).value)
np.testing.assert_almost_equal(
np.prod(X, axis=0), cvxpy.prod(X, axis=0).value)
np.testing.assert_almost_equal(
np.prod(X, axis=1), cvxpy.prod(X, axis=1).value)
np.testing.assert_almost_equal(
np.prod(X, axis=0, keepdims=True),
cvxpy.prod(X, axis=0, keepdims=True).value)
np.testing.assert_almost_equal(
np.prod(X, axis=1, keepdims=True),
cvxpy.prod(X, axis=1, keepdims=True).value)
prod = cvxpy.prod(X)
X_canon, _ = dgp_atom_canon.prod_canon(prod, prod.args)
np.testing.assert_almost_equal(np.sum(X), X_canon.value)
prod = cvxpy.prod(X, axis=0)
X_canon, _ = dgp_atom_canon.prod_canon(prod, prod.args)
np.testing.assert_almost_equal(np.sum(X, axis=0), X_canon.value)
prod = cvxpy.prod(X, axis=1)
X_canon, _ = dgp_atom_canon.prod_canon(prod, prod.args)
np.testing.assert_almost_equal(np.sum(X, axis=1), X_canon.value)
prod = cvxpy.prod(X, axis=0, keepdims=True)
X_canon, _ = dgp_atom_canon.prod_canon(prod, prod.args)
np.testing.assert_almost_equal(
np.sum(X, axis=0, keepdims=True), X_canon.value)
prod = cvxpy.prod(X, axis=1, keepdims=True)
X_canon, _ = dgp_atom_canon.prod_canon(prod, prod.args)
np.testing.assert_almost_equal(
np.sum(X, axis=1, keepdims=True), X_canon.value)
X = np.arange(12)
np.testing.assert_almost_equal(np.prod(X), cvxpy.prod(X).value)
np.testing.assert_almost_equal(np.prod(X, keepdims=True),
cvxpy.prod(X, keepdims=True).value)
prod = cvxpy.prod(X)
X_canon, _ = dgp_atom_canon.prod_canon(prod, prod.args)
np.testing.assert_almost_equal(np.sum(X), X_canon.value)
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
posy1 = x * y**0.5 + 3.0 * x * y**0.5
posy2 = x * y**0.5 + 3.0 * x ** 2 * y**0.5
self.assertTrue(cvxpy.prod([posy1, posy2]).is_log_log_convex())
self.assertFalse(cvxpy.prod([posy1, posy2]).is_log_log_concave())
self.assertFalse(cvxpy.prod([posy1, 1/posy1]).is_dgp())
m = x * y**0.5
self.assertTrue(cvxpy.prod([m, m]).is_log_log_affine())
self.assertTrue(cvxpy.prod([m, 1/posy1]).is_log_log_concave())
self.assertFalse(cvxpy.prod([m, 1/posy1]).is_log_log_convex())
def test_max(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
prod1 = x * y**0.5
prod2 = 3.0 * x * y**0.5
obj = cvxpy.Minimize(cvxpy.max(cvxpy.hstack([prod1, prod2])))
constr = [x == 1.0, y == 4.0]
dgp = cvxpy.Problem(obj, constr)
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
dcp.solve(SOLVER)
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
self.assertAlmostEqual(dgp.value, 6.0)
self.assertAlmostEqual(x.value, 1.0)
self.assertAlmostEqual(y.value, 4.0)
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, 6.0, places=4)
self.assertAlmostEqual(x.value, 1.0)
def test_minimum(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
prod1 = x * y**0.5
prod2 = 3.0 * x * y**0.5
posy = prod1 + prod2
obj = cvxpy.Maximize(cvxpy.minimum(prod1, prod2, 1/posy))
constr = [x == 1.0, y == 4.0]
dgp = cvxpy.Problem(obj, constr)
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, 1.0 / (2.0 + 6.0))
self.assertAlmostEqual(x.value, 1.0)
self.assertAlmostEqual(y.value, 4.0)
def test_min(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
prod1 = x * y**0.5
prod2 = 3.0 * x * y**0.5
posy = prod1 + prod2
obj = cvxpy.Maximize(cvxpy.min(cvxpy.hstack([prod1, prod2, 1/posy])))
constr = [x == 1.0, y == 4.0]
dgp = cvxpy.Problem(obj, constr)
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, 1.0 / (2.0 + 6.0), places=4)
self.assertAlmostEqual(x.value, 1.0)
self.assertAlmostEqual(y.value, 4.0)
def test_sum_largest(self) -> None:
self.skipTest("Enable test once sum_largest is implemented.")
x = cvxpy.Variable((4,), pos=True)
obj = cvxpy.Minimize(cvxpy.sum_largest(x, 3))
constr = [x[0] * x[1] * x[2] * x[3] >= 16]
dgp = cvxpy.Problem(obj, constr)
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
dcp.solve(SOLVER)
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
opt = 6.0
self.assertAlmostEqual(dgp.value, opt)
self.assertAlmostEqual((x[0] * x[1] * x[2] * x[3]).value, 16,
places=2)
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, opt)
self.assertAlmostEqual((x[0] * x[1] * x[2] * x[3]).value, 16,
places=2)
# An unbounded problem.
x = cvxpy.Variable((4,), pos=True)
y = cvxpy.Variable(pos=True)
obj = cvxpy.Minimize(cvxpy.sum_largest(x, 3) * y)
constr = [x[0] * x[1] * x[2] * x[3] >= 16]
dgp = cvxpy.Problem(obj, constr)
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
opt = dcp.solve(SOLVER)
self.assertEqual(dcp.value, -float("inf"))
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
self.assertAlmostEqual(dgp.value, 0.0)
self.assertAlmostEqual(dgp.status, "unbounded")
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, 0.0)
self.assertAlmostEqual(dgp.status, "unbounded")
# Another unbounded problem.
x = cvxpy.Variable(2, pos=True)
obj = cvxpy.Minimize(cvxpy.sum_largest(x, 1))
dgp = cvxpy.Problem(obj, [])
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
opt = dcp.solve(SOLVER)
self.assertEqual(dcp.value, -float("inf"))
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
self.assertAlmostEqual(dgp.value, 0.0)
self.assertAlmostEqual(dgp.status, "unbounded")
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, 0.0)
self.assertAlmostEqual(dgp.status, "unbounded")
# Composition with posynomials.
x = cvxpy.Variable((4,), pos=True)
obj = cvxpy.Minimize(cvxpy.sum_largest(
cvxpy.hstack([3 * x[0]**0.5 * x[1]**0.5,
x[0] * x[1] + 0.5 * x[1] * x[3]**3, x[2]]), 2))
constr = [x[0] * x[1] >= 16]
dgp = cvxpy.Problem(obj, constr)
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
dcp.solve(SOLVER)
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
# opt = 3 * sqrt(4) * sqrt(4) + (4 * 4 + 0.5 * 4 * epsilon) = 28
opt = 28.0
self.assertAlmostEqual(dgp.value, opt, places=2)
self.assertAlmostEqual((x[0] * x[1]).value, 16.0, places=2)
self.assertAlmostEqual(x[3].value, 0.0, places=2)
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertAlmostEqual(dgp.value, opt, places=2)
self.assertAlmostEqual((x[0] * x[1]).value, 16.0, places=2)
self.assertAlmostEqual(x[3].value, 0.0, places=2)
def test_div(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
p = cvxpy.Problem(cvxpy.Minimize(x * y),
[y/3 <= x, y >= 1])
self.assertAlmostEqual(p.solve(SOLVER, gp=True), 1.0 / 3.0)
self.assertAlmostEqual(y.value, 1.0)
self.assertAlmostEqual(x.value, 1.0 / 3.0)
def test_geo_mean(self) -> None:
x = cvxpy.Variable(3, pos=True)
p = [1, 2, 0.5]
geo_mean = cvxpy.geo_mean(x, p)
dgp = cvxpy.Problem(cvxpy.Minimize(geo_mean), [])
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
dcp.solve(SOLVER)
self.assertEqual(dcp.value, -float("inf"))
dgp.unpack(dgp2dcp.retrieve(dcp.solution))
self.assertEqual(dgp.value, 0.0)
self.assertEqual(dgp.status, "unbounded")
dgp._clear_solution()
dgp.solve(SOLVER, gp=True)
self.assertEqual(dgp.value, 0.0)
self.assertEqual(dgp.status, "unbounded")
def test_solving_non_dgp_problem_raises_error(self) -> None:
problem = cvxpy.Problem(cvxpy.Minimize(-1.0 * cvxpy.Variable()), [])
with self.assertRaisesRegex(error.DGPError,
r"Problem does not follow DGP "
"rules(?s)*.*However, the problem does follow DCP rules.*"):
problem.solve(SOLVER, gp=True)
problem.solve(SOLVER)
self.assertEqual(problem.status, "unbounded")
self.assertEqual(problem.value, -float("inf"))
def test_solving_non_dcp_problem_raises_error(self) -> None:
problem = cvxpy.Problem(
cvxpy.Minimize(cvxpy.Variable(pos=True) * cvxpy.Variable(pos=True)),
)
with self.assertRaisesRegex(error.DCPError,
r"Problem does not follow DCP "
"rules(?s)*.*However, the problem does follow DGP rules.*"):
problem.solve(SOLVER)
problem.solve(SOLVER, gp=True)
self.assertEqual(problem.status, "unbounded")
self.assertAlmostEqual(problem.value, 0.0)
def test_solving_non_dcp_problems_raises_detailed_error(self) -> None:
x = cvxpy.Variable(3)
problem = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum(x) - cvxpy.sum_squares(x)))
with self.assertRaisesRegex(error.DCPError, r"The objective is not DCP"):
problem.solve(SOLVER)
x = cvxpy.Variable(name='x')
problem = cvxpy.Problem(cvxpy.Minimize(x), [x * x <= 5])
with self.assertRaisesRegex(error.DCPError, r"The following constraints are not DCP"):
problem.solve(SOLVER)
def test_add_canon(self) -> None:
X = cvxpy.Constant(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
Y = cvxpy.Constant(np.array([[2.0, 3.0, 4.0], [5.0, 6.0, 7.0]]))
Z = X + Y
canon_matrix, constraints = dgp_atom_canon.add_canon(Z, Z.args)
self.assertEqual(len(constraints), 0)
self.assertEqual(canon_matrix.shape, Z.shape)
expected = np.log(np.exp(X.value) + np.exp(Y.value))
np.testing.assert_almost_equal(expected, canon_matrix.value)
# Test promotion
X = cvxpy.Constant(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
y = cvxpy.Constant(2.0)
Z = X + y
canon_matrix, constraints = dgp_atom_canon.add_canon(Z, Z.args)
self.assertEqual(len(constraints), 0)
self.assertEqual(canon_matrix.shape, Z.shape)
expected = np.log(np.exp(X.value) + np.exp(y.value))
np.testing.assert_almost_equal(expected, canon_matrix.value)
def test_matmul_canon(self) -> None:
X = cvxpy.Constant(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
Y = cvxpy.Constant(np.array([[1.0], [2.0], [3.0]]))
Z = cvxpy.matmul(X, Y)
canon_matrix, constraints = dgp_atom_canon.mulexpression_canon(
Z, Z.args)
self.assertEqual(len(constraints), 0)
self.assertEqual(canon_matrix.shape, (2, 1))
first_entry = np.log(np.exp(2.0) + np.exp(4.0) + np.exp(6.0))
second_entry = np.log(np.exp(5.0) + np.exp(7.0) + np.exp(9.0))
self.assertAlmostEqual(first_entry, canon_matrix[0, 0].value)
self.assertAlmostEqual(second_entry, canon_matrix[1, 0].value)
def test_trace_canon(self) -> None:
X = cvxpy.Constant(np.array([[1.0, 5.0], [9.0, 14.0]]))
Y = cvxpy.trace(X)
canon, constraints = dgp_atom_canon.trace_canon(Y, Y.args)
self.assertEqual(len(constraints), 0)
self.assertTrue(canon.is_scalar())
expected = np.log(np.exp(1.0) + np.exp(14.0))
self.assertAlmostEqual(expected, canon.value)
def test_one_minus_pos(self) -> None:
x = cvxpy.Variable(pos=True)
obj = cvxpy.Maximize(x)
constr = [cvxpy.one_minus_pos(x) >= 0.4]
problem = cvxpy.Problem(obj, constr)
problem.solve(SOLVER, gp=True)
self.assertAlmostEqual(problem.value, 0.6)
self.assertAlmostEqual(x.value, 0.6)
def test_qp_solver_not_allowed(self) -> None:
x = cvxpy.Variable(pos=True)
problem = cvxpy.Problem(cvxpy.Minimize(x))
error_msg = ("When `gp=True`, `solver` must be a conic solver "
"(received 'OSQP'); try calling `solve()` with "
"`solver=cvxpy.ECOS`.")
with self.assertRaises(error.SolverError) as err:
problem.solve(solver="OSQP", gp=True)
self.assertEqual(error_msg, str(err))
def test_paper_example_sum_largest(self) -> None:
self.skipTest("Enable test once sum_largest is implemented.")
x = cvxpy.Variable((4,), pos=True)
x0, x1, x2, x3 = (x[0], x[1], x[2], x[3])
obj = cvxpy.Minimize(cvxpy.sum_largest(
cvxpy.hstack([
3 * x0**0.5 * x1**0.5,
x0 * x1 + 0.5 * x1 * x3**3,
x2]), 2))
constr = [x0 * x1 * x2 >= 16]
p = cvxpy.Problem(obj, constr)
# smoke test.
p.solve(SOLVER, gp=True)
def test_paper_example_one_minus_pos(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
obj = cvxpy.Minimize(x * y)
constr = [(y * cvxpy.one_minus_pos(x / y)) ** 2 >= 1, x >= y/3]
problem = cvxpy.Problem(obj, constr)
# smoke test.
problem.solve(SOLVER, gp=True)
def test_paper_example_eye_minus_inv(self) -> None:
X = cvxpy.Variable((2, 2), pos=True)
obj = cvxpy.Minimize(cvxpy.trace(cvxpy.eye_minus_inv(X)))
constr = [cvxpy.geo_mean(cvxpy.diag(X)) == 0.1,
cvxpy.geo_mean(cvxpy.hstack([X[0, 1], X[1, 0]])) == 0.1]
problem = cvxpy.Problem(obj, constr)
problem.solve(gp=True, solver="ECOS")
np.testing.assert_almost_equal(X.value, 0.1*np.ones((2, 2)), decimal=3)
self.assertAlmostEqual(problem.value, 2.25)
def test_simpler_eye_minus_inv(self) -> None:
X = cvxpy.Variable((2, 2), pos=True)
obj = cvxpy.Minimize(cvxpy.trace(cvxpy.eye_minus_inv(X)))
constr = [cvxpy.diag(X) == 0.1,
cvxpy.hstack([X[0, 1], X[1, 0]]) == 0.1]
problem = cvxpy.Problem(obj, constr)
problem.solve(gp=True, solver="ECOS")
np.testing.assert_almost_equal(X.value, 0.1*np.ones((2, 2)), decimal=3)
self.assertAlmostEqual(problem.value, 2.25)
def test_paper_example_exp_log(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
obj = cvxpy.Minimize(x * y)
constr = [cvxpy.exp(y/x) <= cvxpy.log(y)]
problem = cvxpy.Problem(obj, constr)
# smoke test.
problem.solve(SOLVER, gp=True)
def test_pf_matrix_completion(self) -> None:
X = cvxpy.Variable((3, 3), pos=True)
obj = cvxpy.Minimize(cvxpy.pf_eigenvalue(X))
known_indices = tuple(zip(*[[0, 0], [0, 2], [1, 1], [2, 0], [2, 1]]))
constr = [
X[known_indices] == [1.0, 1.9, 0.8, 3.2, 5.9],
X[0, 1] * X[1, 0] * X[1, 2] * X[2, 2] == 1.0,
]
problem = cvxpy.Problem(obj, constr)
# smoke test.
problem.solve(SOLVER, gp=True)
def test_rank_one_nmf(self) -> None:
X = cvxpy.Variable((3, 3), pos=True)
x = cvxpy.Variable((3,), pos=True)
y = cvxpy.Variable((3,), pos=True)
xy = cvxpy.vstack([x[0] * y, x[1] * y, x[2] * y])
R = cvxpy.maximum(
cvxpy.multiply(X, (xy) ** (-1.0)),
cvxpy.multiply(X ** (-1.0), xy))
objective = cvxpy.sum(R)
constraints = [
X[0, 0] == 1.0,
X[0, 2] == 1.9,
X[1, 1] == 0.8,
X[2, 0] == 3.2,
X[2, 1] == 5.9,
x[0] * x[1] * x[2] == 1.0,
]
# smoke test.
prob = cvxpy.Problem(cvxpy.Minimize(objective), constraints)
prob.solve(SOLVER, gp=True)
def test_documentation_prob(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
z = cvxpy.Variable(pos=True)
objective_fn = x * y * z
constraints = [
4 * x * y * z + 2 * x * z <= 10, x <= 2*y, y <= 2*x, z >= 1]
problem = cvxpy.Problem(cvxpy.Maximize(objective_fn), constraints)
# Smoke test.
problem.solve(SOLVER, gp=True)
def test_solver_error(self) -> None:
x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
prod = x * y
dgp = cvxpy.Problem(cvxpy.Minimize(prod), [])
dgp2dcp = cvxpy.reductions.Dgp2Dcp()
_, inverse_data = dgp2dcp.apply(dgp)
soln = solution.Solution(SOLVER_ERROR, None, {}, {}, {})
dgp_soln = dgp2dcp.invert(soln, inverse_data)
self.assertEqual(dgp_soln.status, SOLVER_ERROR)
def test_sum_scalar(self) -> None:
w = cvxpy.Variable(pos=True)
h = cvxpy.Variable(pos=True)
problem = cvxpy.Problem(cvxpy.Minimize(h),
[w*h >= 10, cvxpy.sum(w) <= 5])
problem.solve(SOLVER, gp=True)
np.testing.assert_almost_equal(problem.value, 2)
np.testing.assert_almost_equal(h.value, 2)
np.testing.assert_almost_equal(w.value, 5)
def test_sum_vector(self) -> None:
w = cvxpy.Variable(2, pos=True)
h = cvxpy.Variable(2, pos=True)
problem = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum(h)),
[cvxpy.multiply(w, h) >= 10,
cvxpy.sum(w) <= 10])
problem.solve(SOLVER, gp=True)
np.testing.assert_almost_equal(problem.value, 4)
np.testing.assert_almost_equal(h.value, np.array([2, 2]))
np.testing.assert_almost_equal(w.value, np.array([5, 5]))
def test_sum_squares_vector(self) -> None:
w = cvxpy.Variable(2, pos=True)
h = cvxpy.Variable(2, pos=True)
problem = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum_squares(h)),
[cvxpy.multiply(w, h) >= 10,
cvxpy.sum(w) <= 10])
problem.solve(SOLVER, gp=True)
np.testing.assert_almost_equal(problem.value, 8)
np.testing.assert_almost_equal(h.value, np.array([2, 2]))
np.testing.assert_almost_equal(w.value, np.array([5, 5]))
def test_sum_matrix(self) -> None:
w = cvxpy.Variable((2, 2), pos=True)
h = cvxpy.Variable((2, 2), pos=True)
problem = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum(h)),
[cvxpy.multiply(w, h) >= 10,
cvxpy.sum(w) <= 20])
problem.solve(SOLVER, gp=True)
np.testing.assert_almost_equal(problem.value, 8)
np.testing.assert_almost_equal(h.value, np.array([[2, 2], [2, 2]]))
np.testing.assert_almost_equal(w.value, np.array([[5, 5], [5, 5]]))
def test_trace(self) -> None:
w = cvxpy.Variable((1, 1), pos=True)
h = cvxpy.Variable(pos=True)
problem = cvxpy.Problem(cvxpy.Minimize(h),
[w*h >= 10, cvxpy.trace(w) <= 5])
problem.solve(SOLVER, gp=True)
np.testing.assert_almost_equal(problem.value, 2)
np.testing.assert_almost_equal(h.value, 2)
np.testing.assert_almost_equal(w.value, np.array([[5]]))
def test_parameter(self) -> None:
param = cvxpy.Parameter(pos=True)
param.value = 1.0
dgp = cvxpy.Problem(cvxpy.Minimize(param), [])
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
self.assertAlmostEqual(dcp.parameters()[0].value, np.log(param.value))
x = cvxpy.Variable(pos=True)
problem = cvxpy.Problem(cvxpy.Minimize(x), [x == param])
problem.solve(SOLVER, gp=True)
self.assertAlmostEqual(problem.value, 1.0)
param.value = 2.0
problem.solve(SOLVER, gp=True)
self.assertAlmostEqual(problem.value, 2.0)
def test_parameter_name(self) -> None:
param = cvxpy.Parameter(pos=True, name='alpha')
param.value = 1.0
dgp = cvxpy.Problem(cvxpy.Minimize(param), [])
dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
dcp = dgp2dcp.reduce()
self.assertAlmostEqual(dcp.parameters()[0].name(), 'alpha')
def test_gmatmul(self) -> None:
x = cvxpy.Variable(2, pos=True)
A = np.array([[-5., 2.], [1., -3.]])
b = np.array([3, 2])
expr = cvxpy.gmatmul(A, x)
x.value = b
self.assertItemsAlmostEqual(expr.value, [3**-5*2**2, 3./8])
A_par = cvxpy.Parameter((2, 2), value=A)
self.assertItemsAlmostEqual(cvxpy.gmatmul(A_par, x).value,
[3**-5*2**2, 3./8])
x.value = None
prob = cvxpy.Problem(cvxpy.Minimize(1.0), [expr == b])
prob.solve(solver=SOLVER, gp=True)
sltn = np.exp(np.linalg.solve(A, np.log(b)))
self.assertItemsAlmostEqual(x.value, sltn)
| 41.557912
| 96
| 0.585162
|
8e8ee4d7cfe615c10af3b26de2069b96a6dd30a9
| 255
|
py
|
Python
|
2_intermediate/chapter11/practice/product.py
|
code4tomorrow/Python
|
035b6f5d8fd635a16caaff78bcd3f582663dadc3
|
[
"MIT"
] | 4
|
2021-03-01T00:32:45.000Z
|
2021-05-21T22:01:52.000Z
|
2_intermediate/chapter11/practice/product.py
|
code4tomorrow/Python
|
035b6f5d8fd635a16caaff78bcd3f582663dadc3
|
[
"MIT"
] | 29
|
2020-09-12T22:56:04.000Z
|
2021-09-25T17:08:42.000Z
|
2_intermediate/chapter11/practice/product.py
|
code4tomorrow/Python
|
035b6f5d8fd635a16caaff78bcd3f582663dadc3
|
[
"MIT"
] | 7
|
2021-02-25T01:50:55.000Z
|
2022-02-28T00:00:42.000Z
|
"""
Product
Write a function that takes a list
of numbers as input and returns
the product of all the numbers in
the list.
Use it to print the products of the
following sets of numbers:
-1, 5, 3, 2, 8
2.5, 3, 0
4, 3, 7, 10
"""
# Insert your code here.
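# One possible solution (an illustrative sketch; other approaches work too):
def product(numbers):
    """Return the product of all the numbers in the list."""
    result = 1
    for number in numbers:
        result *= number
    return result


print(product([-1, 5, 3, 2, 8]))  # -240
print(product([2.5, 3, 0]))       # 0.0
print(product([4, 3, 7, 10]))     # 840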
| 15
| 35
| 0.694118
|
6ee0540c30a8ab28e31bdbd93a4001cd443d508e
| 4,084
|
py
|
Python
|
Experiments/Tensorflow/Math/linear_algebra.py
|
merang/Deep-Learning-Experiments
|
c53b7ded52631996e560b33cdf30ce915b18d079
|
[
"MIT"
] | 994
|
2017-01-17T11:56:51.000Z
|
2022-03-22T11:51:40.000Z
|
Experiments/Tensorflow/Math/linear_algebra.py
|
akiljames83/Deep-Learning-Experiments
|
8048b91f382667e9b43078460fb792b369f8af49
|
[
"MIT"
] | 20
|
2017-06-01T01:30:16.000Z
|
2021-06-11T17:27:51.000Z
|
Experiments/Tensorflow/Math/linear_algebra.py
|
akiljames83/Deep-Learning-Experiments
|
8048b91f382667e9b43078460fb792b369f8af49
|
[
"MIT"
] | 789
|
2017-02-16T08:53:14.000Z
|
2022-03-27T14:33:39.000Z
|
'''
Linear Algebra on TensorFlow
Author: Rowel Atienza
Project: https://github.com/roatienza/Deep-Learning-Experiments
'''
# On command line: python linear_algebra.py
# Prerequisite: tensorflow (see tensorflow.org)
from __future__ import print_function
import tensorflow as tf
import numpy as np
# Square matrix A of rank 2
A = tf.constant([ [1.,2.], [3.,4.] ])
# 2x2 Square, Diagonal, Symmetric matrix B
B = tf.diag([5.,6.])
# 2x2 Square matrix
C = tf.constant([ [1.,2.], [2.,4.] ])
# 2x1 vector with all elements equal to 1
x = tf.ones([2,1])
# 2x1 vector with all elements equal to 2.0
b = tf.fill([2,1], 2.)
# 2x1 vector
y = tf.constant([ [-1.], [1.] ])
# run within a session and print
with tf.Session() as session:
print("Tensorflow version: " + tf.__version__)
tf.global_variables_initializer().run()
print("A = ")
print(A.eval())
print("B = ")
print(B.eval())
print("C = ")
print(C.eval())
print("x = ")
print(x.eval())
print("b = ")
print(b.eval())
print("y = ")
print(y.eval())
# Tensor multiplication
print("Ax = ")
print(tf.matmul(A, x).eval())
# Tensor addition
print("A + B =")
print(tf.add(A, B).eval())
print("A + b =")
print(tf.add(A, b).eval())
    # Rank of A and C; number of indices needed to identify each element
print("tensorRank(A) = ")
print(tf.rank(A).eval())
print("tensorRank(C) = ")
print(tf.rank(C).eval())
# Matrix rank
print("rank(A) = ")
print(np.linalg.matrix_rank(A.eval()))
print("rank(C) = ")
print(np.linalg.matrix_rank(C.eval()))
# Transpose
print("tran(A) = ")
print(tf.matrix_transpose(A).eval())
print("tran(B) = ")
print(tf.matrix_transpose(B).eval())
# Inverse
print("inv(A) = ")
print(tf.matrix_inverse(A).eval())
    # The inverse of the diagonal matrix B has the reciprocals of B's diagonal elements on its diagonal
print("inv(B) = ")
print(tf.matrix_inverse(B).eval())
print("inv(C) = ") # since C has rank 1, this will cause error
try:
print(tf.matrix_inverse(C).eval())
except:
print("C is not invertible")
# Product of a matrix and its inverse is an identity (non-singular)
print("A*inv(A) = Eye(2)")
print( tf.matmul(A,tf.matrix_inverse(A)).eval() )
# Element-wise multiplication
print("elem(A)*elem(B) = ")
print(tf.multiply(A,B).eval())
# Element-wise addition
print("elem(A)+elem(B) = ")
print(tf.add(A,B).eval())
# Dot product
print("x dot b")
print(tf.matmul(x,b,transpose_a=True).eval())
# Identity matrix of same shape as A
print("eye(A) = ")
I = tf.eye(A.get_shape().as_list()[0],A.get_shape().as_list()[1])
print(I.eval())
# Multiply eye(A) and A = A
print("eye(A)*A = A = ")
print(tf.matmul(I,A).eval())
print("A * eye(A) = A = ")
print(tf.matmul(A, I).eval())
# l1, l2, Frobenius norm
print("l1(x) = ")
print(tf.reduce_sum(tf.abs(x)).eval())
print("l2(x) = ")
print(tf.sqrt(tf.reduce_sum(tf.square(x))).eval())
print("Frobenius(A) = ")
print(tf.sqrt(tf.reduce_sum(tf.square(A))).eval())
print("Numpy l2(x) =")
print(np.linalg.norm(x.eval(session=tf.Session())))
print("Numpy Forbenius(A) =")
print(np.linalg.norm(A.eval(session=tf.Session())))
# Can you write the L(inf) ?
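    # One possible answer (illustrative): the L(inf) norm is the largest
    # absolute entry, e.g. tf.reduce_max(tf.abs(x)).eval()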
# Orthogonal vectors; How do you make x and y orthonormal?
print("x dot y")
print(tf.matmul(x,y,transpose_a=True).eval())
# Eigenvalues and eigenvectors
print("Numpy Eigenvalues of (A)=")
e, v = np.linalg.eig(A.eval())
print(e)
print("Numpy Eigenvectors of (A)=")
print(v)
# Frobenius norm is equal to the trace of A*tran(A)
print("Frobenius(A) = Tr(A*tran(A) = ")
print(tf.sqrt(tf.trace(tf.matmul(A,tf.transpose(A)))).eval())
# Determinant of A is the product of its eigenvalues
print("det(A)=")
print(tf.matrix_determinant(A).eval())
# Determinant from eigenvalues
print("det(A) as product of eigenvalues")
print(tf.reduce_prod(e).eval())
| 26.012739
| 87
| 0.602106
|
9c80fb41f32cf725238d88b1d24fb246edb8d6dd
| 3,037
|
py
|
Python
|
src/site_main/settings.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | 2
|
2021-01-05T02:55:57.000Z
|
2021-04-16T15:49:08.000Z
|
src/site_main/settings.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | null | null | null |
src/site_main/settings.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | 1
|
2021-01-05T08:12:38.000Z
|
2021-01-05T08:12:38.000Z
|
"""
Django settings for site_main project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from config import paths
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.join(paths.SRC, "query_main")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'upzey=x#dgt5yzq*wz&30h)j2=^j%djo79&xu4pn5@(7^!c%c+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
SECURE_SSL_REDIRECT = False
# Application definition
INSTALLED_APPS = ['query_main.apps.PollsConfig', 'django.contrib.admin',
'django.contrib.auth', 'django.contrib.contenttypes',
'django.contrib.sessions', 'django.contrib.messages',
'django.contrib.staticfiles',]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'site_main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'site_main.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| 28.92381
| 91
| 0.709582
|
786882e0f5b985cbeffca0b595f3f54275f0d144
| 5,286
|
py
|
Python
|
fiware_api_blueprint_renderer/src/drafter_postprocessing/data_structures.py
|
Lenijas/test-travisci
|
e83fafe9d46319c9eaf9938e00c49b52454b66df
|
[
"BSD-3-Clause"
] | 1
|
2016-11-10T01:04:52.000Z
|
2016-11-10T01:04:52.000Z
|
fiware_api_blueprint_renderer/src/drafter_postprocessing/data_structures.py
|
Lenijas/test-travisci
|
e83fafe9d46319c9eaf9938e00c49b52454b66df
|
[
"BSD-3-Clause"
] | null | null | null |
fiware_api_blueprint_renderer/src/drafter_postprocessing/data_structures.py
|
Lenijas/test-travisci
|
e83fafe9d46319c9eaf9938e00c49b52454b66df
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import deque
from ..apib_extra_parse_utils import parse_property_member_declaration
from ..apib_extra_parse_utils import get_indentation
def parser_json_data_structures(json_content):
"""Retrieves data structures definition from JSON file and writes them in an easier to access format"""
if len(json_content['content']) > 0:
json_content['data_structures'] = parse_defined_data_structures(json_content['content'][0])
else:
json_content['data_structures'] = {}
# Add resource level defined data structures
structures_from_resources = get_data_structures_from_resources(json_content)
json_content['data_structures'].update(structures_from_resources)
def get_data_structures_from_resources(json_content):
"""Retrieve data structures defined in named resources.
Arguments:
json_content -- JSON object where resources will be analysed
"""
data_structures = {}
for resource_group in json_content["resourceGroups"]:
for resource in resource_group["resources"]:
if resource["name"] == "": continue
for content in resource["content"]:
if content["element"] == "dataStructure":
attributes = get_data_structure_properties_from_json(content["sections"])
data_structures[resource["name"]] = {"attributes": attributes, "is_common_payload": False}
return data_structures
def get_data_structure_properties_from_json(data_structure_content):
"""Extract simpler representation of properties from drafter JSON representation.
Arguments:
data_structure_content -- JSON content section of "dataStructures" element or nested property
"""
attributes = []
for membertype in data_structure_content:
if "content" not in membertype: return attributes
for property_ in membertype["content"]:
attribute = {}
attribute['name'] = property_['content']['name']['literal']
attribute['required'] = 'required' in property_['content']['valueDefinition']['typeDefinition']['attributes']
attribute['type'] = \
property_['content']['valueDefinition']['typeDefinition']['typeSpecification']['name']
attribute['description'] = property_['content']['description']
try:
values_string = property_['content']['valueDefinition']['values'][0]['literal']
attribute['values'] = [e.strip(" ") for e in values_string.split(',')]
except IndexError as error:
attribute['values'] = []
attribute['subproperties'] = get_data_structure_properties_from_json(property_['content']["sections"])
attributes.append(attribute)
return attributes
def parse_defined_data_structures(data):
"""Retrieves data structures definition from JSON fragment and gives them back as Python dict"""
data_structure_dict = {}
try:
if data["content"][0]["sections"][0]["class"] != u'blockDescription':
raise ValueError('Unexpected section received.')
except:
return data_structure_dict
for content in data["content"]:
data_structure = {}
data_structure_definition = []
if content["sections"]!=[]:
data_structure_content = content["sections"][0]["content"]
parse_defined_data_structure_properties(data_structure_definition, deque(data_structure_content.split('\n')))
data_structure_name = content["name"]["literal"]
data_structure["attributes"] = data_structure_definition
data_structure["is_common_payload"] = True
data_structure_dict[data_structure_name] = data_structure
return data_structure_dict
def parse_defined_data_structure_properties(properties_list, remaining_property_lines):
"""Parses the properties definitions of a given data structure given its body
Arguments:
properties_list - List where we'll insert new properties to
remaining_property_lines - Property definition lines pending to be processed
"""
last_member_indentation = -1
while len(remaining_property_lines) > 0:
property_member_declaration = remaining_property_lines[0]
if property_member_declaration != '':
# Retrieve the indentation of the current property definition.
current_member_indentation = get_indentation(property_member_declaration)
if last_member_indentation == -1:
last_member_indentation = current_member_indentation
# Process the new property as a child, parent or uncle of the last
# one processed according to their relative line indentations.
if current_member_indentation == last_member_indentation:
parsed_attribute_definition = parse_property_member_declaration(property_member_declaration)
remaining_property_lines.popleft()
properties_list.append(parsed_attribute_definition)
elif current_member_indentation > last_member_indentation:
parse_defined_data_structure_properties(parsed_attribute_definition['subproperties'], remaining_property_lines)
else:
return
else:
remaining_property_lines.popleft()
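# A minimal, standalone sketch of the indentation-driven recursion used by
# parse_defined_data_structure_properties() above; parse_block() and the sample
# lines below are illustrative only and not part of this module's API.
def parse_block(lines, indent=0):
    """Toy parser: lines of '<spaces><name>' become a list of nested dicts."""
    items = []
    while lines:
        line = lines[0]
        depth = len(line) - len(line.lstrip(' '))
        if depth == indent:
            lines.popleft()
            items.append({'name': line.strip(), 'subproperties': []})
        elif depth > indent:
            # deeper indentation: subproperties of the attribute parsed just before
            items[-1]['subproperties'] = parse_block(lines, depth)
        else:
            break  # shallower indentation: hand control back to the caller
    return items
if __name__ == '__main__':
    sample = deque(['+ id (string)', '  + value (number)', '+ name (string)'])
    print(parse_block(sample))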
| 40.976744
| 127
| 0.699962
|
40046f19043d30c1480b948c5c578edff30a2386
| 15,444
|
py
|
Python
|
src/evaluation_metrics/segmentation_metrics.py
|
LucasFidon/fetal-brain-segmentation-partial-supervision-miccai21
|
69506cbed21c7d04946020e0d09246610c8da6d4
|
[
"BSD-3-Clause"
] | 1
|
2021-12-17T06:25:26.000Z
|
2021-12-17T06:25:26.000Z
|
src/evaluation_metrics/segmentation_metrics.py
|
LucasFidon/fetal-brain-segmentation-partial-supervision-miccai21
|
69506cbed21c7d04946020e0d09246610c8da6d4
|
[
"BSD-3-Clause"
] | null | null | null |
src/evaluation_metrics/segmentation_metrics.py
|
LucasFidon/fetal-brain-segmentation-partial-supervision-miccai21
|
69506cbed21c7d04946020e0d09246610c8da6d4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
@brief Evaluation metrics for segmentation applications.
@author Lucas Fidon (lucas.fidon@kcl.ac.uk)
@date 30 Oct 2019.
"""
import numpy as np
from scipy import ndimage
from evaluation_metrics import lookup_tables
def _binarize(seg, fg_class=1):
"""
    Binarize a segmentation with label 1 for pixels/voxels of the foreground class
    and label 0 for pixels/voxels of all other classes.
:param seg: int numpy array.
:param fg_class: int; class in seg corresponding to the foreground.
:return: binary segmentation corresponding to seg for the foreground class fg_class.
"""
bin_seg = np.zeros_like(seg, dtype=bool)
bin_seg[seg == fg_class] = True
return bin_seg
# Basic metrics
def true_positives(seg_pred, seg_gt):
"""
Number of True Positives
for the predicted segmentation seg_pred
and the ground-truth segmentation seg_gt.
:param seg_pred: numpy bool array.
:param seg_gt: numpy bool array.
:return: int; number of true positives.
"""
    assert seg_pred.dtype == np.bool_, "seg_pred should be of type bool, " \
                                       "found %s instead." % seg_pred.dtype
    assert seg_gt.dtype == np.bool_, "seg_gt should be of type bool, " \
                                     "found %s instead." % seg_gt.dtype
num_tp = np.sum(seg_pred * seg_gt)
return num_tp
def false_positives(seg_pred, seg_gt):
"""
Number of False Positives
for the predicted segmentation seg_pred
and the ground-truth segmentation seg_gt.
:param seg_pred: numpy bool array.
:param seg_gt: numpy bool array.
:return: int; number of false positives.
"""
    assert seg_pred.dtype == np.bool_, "seg_pred should be of type bool, " \
                                       "found %s instead." % seg_pred.dtype
    assert seg_gt.dtype == np.bool_, "seg_gt should be of type bool, " \
                                     "found %s instead." % seg_gt.dtype
num_fp = np.sum(seg_pred * (1 - seg_gt))
return num_fp
def false_negatives(seg_pred, seg_gt):
"""
Number of False Negatives
for the predicted segmentation seg_pred
and the ground-truth segmentation seg_gt.
:param seg_pred: numpy bool array.
:param seg_gt: numpy bool array.
:return: int; number of false negatives.
"""
    assert seg_pred.dtype == np.bool_, "seg_pred should be of type bool, " \
                                       "found %s instead." % seg_pred.dtype
    assert seg_gt.dtype == np.bool_, "seg_gt should be of type bool, " \
                                     "found %s instead." % seg_gt.dtype
num_fn = np.sum((1 - seg_pred) * seg_gt)
return num_fn
def true_negatives(seg_pred, seg_gt):
"""
Number of True Negatives
for the predicted segmentation seg_pred
and the ground-truth segmentation seg_gt.
:param seg_pred: numpy bool array.
:param seg_gt: numpy bool array.
:return: int; number of true negatives.
"""
    assert seg_pred.dtype == np.bool_, "seg_pred should be of type bool, " \
                                       "found %s instead." % seg_pred.dtype
    assert seg_gt.dtype == np.bool_, "seg_gt should be of type bool, " \
                                     "found %s instead." % seg_gt.dtype
num_tn = np.sum((1 - seg_pred) * (1 - seg_gt))
return num_tn
# Dice scores and variants
def dice_score(seg_1, seg_2, fg_class=1):
"""
Compute the Dice score for class fg_class
between the segmentations seg_1 and seg_2.
For explanation about the formula used to compute the Dice score coefficient,
see for example:
"Generalised Wasserstein Dice Score for Imbalanced Multi-class Segmentation
using Holistic Convolutional Networks", L. Fidon et al, BrainLes 2017.
:param seg_1: numpy int array.
:param seg_2: numpy int array.
:param fg_class: int.
:return: float; Dice score value.
"""
assert seg_1.shape == seg_2.shape, "seg_1 and seg_2 must have the same shape " \
"to compute their dice score."
# binarize the segmentations
bin_seg_1 = _binarize(seg_1, fg_class=fg_class)
bin_seg_2 = _binarize(seg_2, fg_class=fg_class)
# compute the Dice score value
tp = true_positives(bin_seg_1, bin_seg_2)
fp = false_positives(bin_seg_1, bin_seg_2)
fn = false_negatives(bin_seg_1, bin_seg_2)
if tp + fp + fn == 0: # empty foreground for seg_1 and seg_2
dice_val = 1.
else:
dice_val = 2. * tp / (2. * tp + fp + fn)
return dice_val
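# A quick, self-contained check of the Dice formula above on toy 1-D arrays;
# the arrays are made up and only serve to illustrate the TP/FP/FN counting.
def _dice_toy_example():
    seg_pred = np.array([0, 1, 1, 0, 1])
    seg_gt = np.array([0, 1, 0, 0, 1])
    # TP=2, FP=1, FN=0  ->  Dice = 2*2 / (2*2 + 1 + 0) = 0.8
    return dice_score(seg_pred, seg_gt, fg_class=1)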
def mean_dice_score(seg_1, seg_2, labels_list=[0, 1]):
"""
Compute the mean of the Dice scores for the labels in labels_list
between the segmentations seg_1 and seg_2.
:param seg_1: numpy int array.
:param seg_2: numpy int array.
:param labels_list: int list.
:return:
"""
assert len(labels_list) > 0, "the list of labels to consider for the mean dice score" \
"must contain at least one label"
dice_values = []
for l in labels_list:
dice = dice_score(seg_1, seg_2, fg_class=l)
dice_values.append(dice)
mean_dice = np.mean(dice_values)
return mean_dice
# Jaccard index and variants
def jaccard(seg_1, seg_2, fg_class=1):
"""
Compute the Jaccard for class fg_class
between the segmentations seg_1 and seg_2.
:param seg_1: numpy int array.
:param seg_2: numpy int array.
:param fg_class: int.
:return: float; Jaccard value.
"""
    assert seg_1.shape == seg_2.shape, "seg_1 and seg_2 must have the same shape " \
                                       "to compute their Jaccard index"
# binarize the segmentations
bin_seg_1 = _binarize(seg_1, fg_class=fg_class)
bin_seg_2 = _binarize(seg_2, fg_class=fg_class)
# compute the Jaccard index value
tp = true_positives(bin_seg_1, bin_seg_2)
fp = false_positives(bin_seg_1, bin_seg_2)
fn = false_negatives(bin_seg_1, bin_seg_2)
if tp + fp + fn == 0: # empty foreground for seg_1 and seg_2
jaccard = 1.
else:
jaccard = tp / (tp + fp + fn)
return jaccard
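# For the same pair of masks, Dice and Jaccard are linked by Dice = 2J / (1 + J);
# a small sanity check on the same toy arrays as in _dice_toy_example() above.
def _jaccard_vs_dice_toy_example():
    seg_pred = np.array([0, 1, 1, 0, 1])
    seg_gt = np.array([0, 1, 0, 0, 1])
    j = jaccard(seg_pred, seg_gt, fg_class=1)     # 2/3
    d = dice_score(seg_pred, seg_gt, fg_class=1)  # 0.8
    assert abs(d - 2. * j / (1. + j)) < 1e-12
    return j, d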
# Surface distances
def haussdorff_distance(mask_gt, mask_pred, fg_class,
percentile=100, spacing_mm=[0.8, 0.8, 0.8]):
bin_mask_gt = np.squeeze(_binarize(mask_gt, fg_class=fg_class))
bin_mask_pred = np.squeeze(_binarize(mask_pred, fg_class=fg_class))
surface_distances = compute_surface_distances(
bin_mask_gt, bin_mask_pred, spacing_mm)
haussdorff_dist_value = compute_robust_hausdorff(surface_distances, percentile)
return haussdorff_dist_value
def compute_surface_distances(mask_gt, mask_pred, spacing_mm):
"""
Compute closest distances from all surface points to the other surface.
Finds all surface elements "surfels" in the ground truth mask `mask_gt` and
the predicted mask `mask_pred`, computes their area in mm^2 and the distance
to the closest point on the other surface. It returns two sorted lists of
distances together with the corresponding surfel areas. If one of the masks
is empty, the corresponding lists are empty and all distances in the other
list are `inf`.
:param mask_gt: 3-dim Numpy array of type bool. The ground truth mask.
:param mask_pred: 3-dim Numpy array of type bool. The predicted mask.
:param spacing_mm: 3-element list-like structure. Voxel spacing in x0, x1 and x2
direction.
:return: A dict with:
"distances_gt_to_pred": 1-dim numpy array of type float. The distances in mm
from all ground truth surface elements to the predicted surface,
sorted from smallest to largest.
"distances_pred_to_gt": 1-dim numpy array of type float. The distances in mm
from all predicted surface elements to the ground truth surface,
sorted from smallest to largest.
"surfel_areas_gt": 1-dim numpy array of type float. The area in mm^2 of
the ground truth surface elements in the same order as
distances_gt_to_pred
"surfel_areas_pred": 1-dim numpy array of type float. The area in mm^2 of
the predicted surface elements in the same order as
distances_pred_to_gt
"""
# compute the area for all 256 possible surface elements
# (given a 2x2x2 neighbourhood) according to the spacing_mm
neighbour_code_to_surface_area = np.zeros([256])
for code in range(256):
normals = np.array(lookup_tables.neighbour_code_to_normals[code])
sum_area = 0
for normal_idx in range(normals.shape[0]):
# normal vector
n = np.zeros([3])
n[0] = normals[normal_idx, 0] * spacing_mm[1] * spacing_mm[2]
n[1] = normals[normal_idx, 1] * spacing_mm[0] * spacing_mm[2]
n[2] = normals[normal_idx, 2] * spacing_mm[0] * spacing_mm[1]
area = np.linalg.norm(n)
sum_area += area
neighbour_code_to_surface_area[code] = sum_area
# compute the bounding box of the masks to trim
# the volume to the smallest possible processing subvolume
mask_all = mask_gt | mask_pred
bbox_min = np.zeros(3, np.int64)
bbox_max = np.zeros(3, np.int64)
# max projection to the x0-axis
proj_0 = np.max(np.max(mask_all, axis=2), axis=1)
idx_nonzero_0 = np.nonzero(proj_0)[0]
if len(idx_nonzero_0) == 0: # pylint: disable=g-explicit-length-test
return {"distances_gt_to_pred": np.array([]),
"distances_pred_to_gt": np.array([]),
"surfel_areas_gt": np.array([]),
"surfel_areas_pred": np.array([])}
bbox_min[0] = np.min(idx_nonzero_0)
bbox_max[0] = np.max(idx_nonzero_0)
# max projection to the x1-axis
proj_1 = np.max(np.max(mask_all, axis=2), axis=0)
idx_nonzero_1 = np.nonzero(proj_1)[0]
bbox_min[1] = np.min(idx_nonzero_1)
bbox_max[1] = np.max(idx_nonzero_1)
# max projection to the x2-axis
proj_2 = np.max(np.max(mask_all, axis=1), axis=0)
idx_nonzero_2 = np.nonzero(proj_2)[0]
bbox_min[2] = np.min(idx_nonzero_2)
bbox_max[2] = np.max(idx_nonzero_2)
# crop the processing subvolume.
# we need to zeropad the cropped region with 1 voxel at the lower,
# the right and the back side. This is required to obtain the "full"
# convolution result with the 2x2x2 kernel
cropmask_gt = np.zeros((bbox_max - bbox_min)+2, np.uint8)
cropmask_pred = np.zeros((bbox_max - bbox_min)+2, np.uint8)
cropmask_gt[0:-1, 0:-1, 0:-1] = mask_gt[bbox_min[0]:bbox_max[0]+1,
bbox_min[1]:bbox_max[1]+1,
bbox_min[2]:bbox_max[2]+1]
cropmask_pred[0:-1, 0:-1, 0:-1] = mask_pred[bbox_min[0]:bbox_max[0]+1,
bbox_min[1]:bbox_max[1]+1,
bbox_min[2]:bbox_max[2]+1]
# compute the neighbour code (local binary pattern) for each voxel
# the resulting arrays are spacially shifted by minus half a voxel in each
# axis.
# i.e. the points are located at the corners of the original voxels
kernel = np.array([[[128, 64],
[32, 16]],
[[8, 4],
[2, 1]]])
neighbour_code_map_gt = ndimage.filters.correlate(
cropmask_gt.astype(np.uint8), kernel, mode="constant", cval=0)
neighbour_code_map_pred = ndimage.filters.correlate(
cropmask_pred.astype(np.uint8), kernel, mode="constant", cval=0)
# create masks with the surface voxels
borders_gt = ((neighbour_code_map_gt != 0) & (neighbour_code_map_gt != 255))
borders_pred = ((neighbour_code_map_pred != 0) &
(neighbour_code_map_pred != 255))
# compute the distance transform (closest distance of each voxel to the
# surface voxels)
if borders_gt.any():
distmap_gt = ndimage.morphology.distance_transform_edt(
~borders_gt, sampling=spacing_mm)
else:
distmap_gt = np.Inf * np.ones(borders_gt.shape)
if borders_pred.any():
distmap_pred = ndimage.morphology.distance_transform_edt(
~borders_pred, sampling=spacing_mm)
else:
distmap_pred = np.Inf * np.ones(borders_pred.shape)
# compute the area of each surface element
surface_area_map_gt = neighbour_code_to_surface_area[neighbour_code_map_gt]
surface_area_map_pred = neighbour_code_to_surface_area[
neighbour_code_map_pred]
# create a list of all surface elements with distance and area
distances_gt_to_pred = distmap_pred[borders_gt]
distances_pred_to_gt = distmap_gt[borders_pred]
surfel_areas_gt = surface_area_map_gt[borders_gt]
surfel_areas_pred = surface_area_map_pred[borders_pred]
# sort them by distance
if distances_gt_to_pred.shape != (0,):
sorted_surfels_gt = np.array(
sorted(zip(distances_gt_to_pred, surfel_areas_gt)))
distances_gt_to_pred = sorted_surfels_gt[:, 0]
surfel_areas_gt = sorted_surfels_gt[:, 1]
if distances_pred_to_gt.shape != (0,):
sorted_surfels_pred = np.array(
sorted(zip(distances_pred_to_gt, surfel_areas_pred)))
distances_pred_to_gt = sorted_surfels_pred[:, 0]
surfel_areas_pred = sorted_surfels_pred[:, 1]
return {"distances_gt_to_pred": distances_gt_to_pred,
"distances_pred_to_gt": distances_pred_to_gt,
"surfel_areas_gt": surfel_areas_gt,
"surfel_areas_pred": surfel_areas_pred}
def compute_robust_hausdorff(surface_distances, percent):
"""
Computes the robust Hausdorff distance.
Computes the robust Hausdorff distance. "Robust", because it uses the
`percent` percentile of the distances instead of the maximum distance. The
percentage is computed by correctly taking the area of each surface element
into account.
Based on
https://github.com/deepmind/surface-distance/blob/master/surface_distance/metrics.py
:param surface_distances: dict with "distances_gt_to_pred", "distances_pred_to_gt"
"surfel_areas_gt", "surfel_areas_pred" created by
compute_surface_distances()
:param percent: a float value between 0 and 100.
:return: a float value. The robust Hausdorff distance in mm.
"""
distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
surfel_areas_gt = surface_distances["surfel_areas_gt"]
surfel_areas_pred = surface_distances["surfel_areas_pred"]
if len(distances_gt_to_pred) > 0: # pylint: disable=g-explicit-length-test
surfel_areas_cum_gt = np.cumsum(surfel_areas_gt) / np.sum(surfel_areas_gt)
idx = np.searchsorted(surfel_areas_cum_gt, percent/100.0)
perc_distance_gt_to_pred = distances_gt_to_pred[
min(idx, len(distances_gt_to_pred)-1)]
else:
perc_distance_gt_to_pred = np.Inf
if len(distances_pred_to_gt) > 0: # pylint: disable=g-explicit-length-test
surfel_areas_cum_pred = (np.cumsum(surfel_areas_pred) /
np.sum(surfel_areas_pred))
idx = np.searchsorted(surfel_areas_cum_pred, percent/100.0)
perc_distance_pred_to_gt = distances_pred_to_gt[
min(idx, len(distances_pred_to_gt)-1)]
else:
perc_distance_pred_to_gt = np.Inf
return max(perc_distance_gt_to_pred, perc_distance_pred_to_gt)
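# Standalone illustration of the area-weighted percentile used above: distances
# are sorted ascending and each carries a surface-element area; all the numbers
# below are made up.
def _robust_percentile_toy_example(percent=95):
    distances = np.array([0., 1., 2., 5.])
    areas = np.array([1., 1., 1., 1.])
    cum_area = np.cumsum(areas) / np.sum(areas)       # [0.25, 0.5, 0.75, 1.0]
    idx = np.searchsorted(cum_area, percent / 100.0)  # 3 for percent=95
    return distances[min(idx, len(distances) - 1)]    # 5.0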
| 41.516129
| 91
| 0.662652
|
04e7758438803fbd94e3bf7cf14273a9330f086d
| 1,382
|
py
|
Python
|
model-optimizer/extensions/front/caffe/ctcgreedydecoder_ext.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/front/caffe/ctcgreedydecoder_ext.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 19
|
2021-03-26T08:11:00.000Z
|
2022-02-21T13:06:26.000Z
|
model-optimizer/extensions/front/caffe/ctcgreedydecoder_ext.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 1
|
2021-07-28T17:30:46.000Z
|
2021-07-28T17:30:46.000Z
|
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from extensions.ops.ctc_greedy_decoder import CTCGreedyDecoderOp
from mo.front.caffe.collect_attributes import merge_attrs
from mo.front.common.extractors.utils import layout_attrs
from mo.front.extractor import FrontExtractorOp
class CTCGreedyDecoderFrontExtractor(FrontExtractorOp):
op = 'CTCGreedyDecoder'
enabled = True
@classmethod
def extract(cls, node):
proto_layer = node.pb
param = proto_layer.ctc_decoder_param
update_attrs = {
            'ctc_merge_repeated': int(param.ctc_merge_repeated)
}
mapping_rule = merge_attrs(param, update_attrs)
mapping_rule.update(layout_attrs())
# update the attributes of the node
CTCGreedyDecoderOp.update_node_stat(node, mapping_rule)
return cls.enabled
| 32.904762
| 73
| 0.745297
|
3ed4ff79ff4a85080a6fb8aa70c12b087955073f
| 666
|
py
|
Python
|
carflux/src/agent.py
|
Deepak-r2dl/carflux
|
53e14a32897b9615a0b0d07bb99e5ebc9ce069f8
|
[
"MIT"
] | null | null | null |
carflux/src/agent.py
|
Deepak-r2dl/carflux
|
53e14a32897b9615a0b0d07bb99e5ebc9ce069f8
|
[
"MIT"
] | null | null | null |
carflux/src/agent.py
|
Deepak-r2dl/carflux
|
53e14a32897b9615a0b0d07bb99e5ebc9ce069f8
|
[
"MIT"
] | null | null | null |
"""
The agent class for Pacman
"""
from abc import ABC, abstractmethod
class Agent(ABC):
# Probably a bit risque.
def __init__(self,**kwargs):
for k, v in kwargs.items():
setattr(self,k,v)
super().__init__()
@abstractmethod
    # Observe world. Encode neighbourhood filter if need be
def perceive(self):
pass
@abstractmethod
# update internal states, optional
def update_state(self):
pass
@abstractmethod
# react to world settings
def react(self):
pass
@abstractmethod
# communicate observable state,
def communicate(self):
pass
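# A minimal, hypothetical concrete agent showing how the contract above is meant
# to be filled in; RandomWalker and its attributes are illustrative only.
import random
class RandomWalker(Agent):
    def perceive(self):
        self.last_obs = random.random()
    def update_state(self):
        step = 1 if self.last_obs > 0.5 else -1
        self.position = getattr(self, 'position', 0) + step
    def react(self):
        return self.position
    def communicate(self):
        return {'position': self.position}
if __name__ == '__main__':
    walker = RandomWalker(position=0)  # kwargs become attributes via Agent.__init__
    walker.perceive()
    walker.update_state()
    print(walker.communicate())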
| 20.181818
| 58
| 0.605105
|
6396492d3df51ca8c247a8b5a93f435c1bceef25
| 7,646
|
py
|
Python
|
thermo/database.py
|
RoryKurek/thermo
|
985279467faa028234ab422a19b69385e5100149
|
[
"MIT"
] | 380
|
2016-07-04T09:45:20.000Z
|
2022-03-20T18:09:45.000Z
|
thermo/database.py
|
RoryKurek/thermo
|
985279467faa028234ab422a19b69385e5100149
|
[
"MIT"
] | 104
|
2016-07-10T20:47:12.000Z
|
2022-03-22T20:43:39.000Z
|
thermo/database.py
|
RoryKurek/thermo
|
985279467faa028234ab422a19b69385e5100149
|
[
"MIT"
] | 96
|
2016-07-05T20:54:05.000Z
|
2022-02-23T03:06:02.000Z
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2018, 2019 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
__all__ = ['']
import os
import marshal
from chemicals.utils import log, exp
from chemicals.utils import mixing_simple, none_and_length_check, Vm_to_rho
from fluids.constants import N_A, k
from thermo.utils import TDependentProperty, MixtureProperty
from thermo.chemical import ChemicalConstants
folder = os.path.join(os.path.dirname(__file__), 'Misc')
def loadChemicalConstants(data, rows=True):
'''Accepts either a marshal-style list-of-lists with fixed indexes, or
takes in the json-style dict-of-dicts-of-dicts.
Returns a dictionary of ChemicalConstants indexed by their CASs.
'''
loaded_chemicals = {}
# Question: What if every database is a per-datasource one
# And I fit all methods to apolynom to within their range?
# Then we have one additional database which holds the best data source.
# That way, all coefficients are precisely sourced.
def add_chemical(kwargs):
# TODO: remove to skip a function call
constants = ChemicalConstants(**kwargs)
loaded_chemicals[constants.CAS] = constants
if rows:
for row in data:
kwargs = dict(CAS=row[0], Tc=row[1], Pc=row[2], Vc=row[3], omega=row[4], Tb=row[5],
Tm=row[6], Tt=row[7], Pt=row[8], Hfus=row[9], Hsub=row[10], Hf=row[11],
dipole=row[12],
HeatCapacityGas=row[13], HeatCapacityLiquid=row[14],
HeatCapacitySolid=row[15],
ThermalConductivityLiquid=row[16], ThermalConductivityGas=row[17],
ViscosityLiquid=row[18], ViscosityGas=row[19],
EnthalpyVaporization=row[20], VaporPressure=row[21], VolumeLiquid=row[22],
SublimationPressure=row[23], EnthalpySublimation=row[24],
VolumeSolid=row[25], VolumeSupercriticalLiquid=row[26])
add_chemical(kwargs)
else:
for CAS, item in data.items():
kwargs= dict(CAS=CAS, Tc=item['Tc']['value'],
Pc=item['Pc']['value'],
Vc=item['Vc']['value'],
omega=item['omega']['value'],
Tb=item['Tb']['value'],
Tm=item['Tm']['value'],
Tt=item['Tt']['value'],
Pt=item['Pt']['value'],
Hfus=item['Hfus']['value'],
Hsub=item['Hsub']['value'],
Hf=item['Hf']['value'],
dipole=item['dipole']['value'])
for prop_key, store in marshal_properties:
try:
prop_data = item[prop_key]
Tmin, Tmax = prop_data['Tmin'], prop_data['Tmax']
coefficients = prop_data['coefficients']
if 'Tc' in prop_data:
kwargs[prop_key] = (Tmin, Tmax, prop_data['Tc'], coefficients)
else:
kwargs[prop_key] = (Tmin, Tmax, coefficients)
except KeyError:
pass
# Tmin, Tmax, coefficients = None, None, None
# kwargs[prop_key] = (Tmin, Tmax, coefficients)
add_chemical(kwargs)
return loaded_chemicals
def load_json_data(json_path):
f = open(json_path, 'r')
import json
full_data = json.load(f)
f.close()
return full_data
def marshal_json_data(full_data, path):
marshal_rows = []
for CAS, data in full_data.items():
row = [CAS]
row.append(data['Tc']['value'])
row.append(data['Pc']['value'])
row.append(data['Vc']['value'])
row.append(data['omega']['value'])
row.append(data['Tb']['value'])
row.append(data['Tm']['value'])
row.append(data['Tt']['value'])
row.append(data['Pt']['value'])
row.append(data['Hfus']['value'])
row.append(data['Hsub']['value'])
row.append(data['Hf']['value'])
row.append(data['dipole']['value'])
for prop_key, store in marshal_properties:
try:
prop_data = data[prop_key]
Tmin, Tmax = prop_data['Tmin'], prop_data['Tmax']
coefficients = prop_data['coefficients']
                if 'Tc' in prop_data:
                    Tc = prop_data['Tc']
                    prop_row = (Tmin, Tmax, Tc, coefficients)
                else:
                    prop_row = (Tmin, Tmax, coefficients)
            except KeyError:
                prop_row = (None, None, None)
            # append the per-property tuple without clobbering the chemical's row
            row.append(prop_row)
marshal_rows.append(row)
f = open(path, 'wb')
marshal.dump(marshal_rows, f, 2)
f.close()
return marshal_rows
marshal_properties = [('HeatCapacityGas', True),
('HeatCapacityLiquid', True),
('HeatCapacitySolid', True),
('ThermalConductivityLiquid', True),
('ThermalConductivityGas', True),
('ViscosityLiquid', True),
('ViscosityGas', True),
('EnthalpyVaporization', True),
('VaporPressure', True),
('VolumeLiquid', True),
('SublimationPressure', True),
('EnthalpySublimation', True),
('VolumeSolid', True),
('VolumeSupercriticalLiquid', True),
]
json_path = os.path.join(folder, 'constants dump.json')
binary_path = os.path.join(folder, 'binary dump.marshal')
skip = not os.path.exists(json_path)
loaded_chemicals = {}
if not skip:
from_json = True
if os.path.exists(binary_path):
# get the changed dates for each file and only load from binary if
# the binary file is newer
json_mtime = os.path.getmtime(json_path)
binary_mtime = os.path.getmtime(binary_path)
if binary_mtime > json_mtime and os.path.getsize(binary_path) > 10000:
from_json = False
full_data = {}
marshal_rows = []
if from_json:
full_data = load_json_data(json_path)
loaded_chemicals = loadChemicalConstants(full_data, rows=False)
marshal_data = from_json
if marshal_data:
try:
marshal_rows = marshal_json_data(full_data, binary_path)
except:
pass
if not from_json:
marshal_rows = marshal.load(open(binary_path, 'rb'))
loaded_chemicals = loadChemicalConstants(marshal_rows, rows=True)
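# A standalone sketch of the caching pattern used above (reload the marshal dump
# only while it is newer than the JSON it came from); the file names passed in
# are hypothetical.
def _load_with_binary_cache(json_file, marshal_file):
    import json
    use_binary = (os.path.exists(marshal_file)
                  and os.path.getmtime(marshal_file) > os.path.getmtime(json_file))
    if use_binary:
        with open(marshal_file, 'rb') as f:
            return marshal.load(f)
    with open(json_file, 'r') as f:
        data = json.load(f)
    with open(marshal_file, 'wb') as f:
        marshal.dump(data, f, 2)
    return data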
| 36.583732
| 99
| 0.598483
|
f344ff84a369fd623b708fe522a11d03759e7d3c
| 4,390
|
py
|
Python
|
hood/settings.py
|
NIelsen-Mudaki/neighbourhood
|
12e7a38188e00c1cbc7810745eda4d9d205ae0e1
|
[
"Unlicense"
] | null | null | null |
hood/settings.py
|
NIelsen-Mudaki/neighbourhood
|
12e7a38188e00c1cbc7810745eda4d9d205ae0e1
|
[
"Unlicense"
] | null | null | null |
hood/settings.py
|
NIelsen-Mudaki/neighbourhood
|
12e7a38188e00c1cbc7810745eda4d9d205ae0e1
|
[
"Unlicense"
] | null | null | null |
"""
Django settings for hood project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: don't run with debug turned on in production!
# SECRET_KEY, DEBUG and ALLOWED_HOSTS are already read from the environment via
# python-decouple at the top of this file; redefining them here with hard-coded
# values would silently override that configuration.
# Application definition
INSTALLED_APPS = [
'neighbourhood',
'bootstrap3',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hood.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hood.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES is configured above based on MODE (dev vs production); defining it
# again here with hard-coded credentials would silently override that setup.
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
django_heroku.settings(locals())
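# For reference, the values consumed above via python-decouple typically come
# from a .env file next to manage.py (or from real environment variables); the
# keys mirror the config() calls in this file and the values are placeholders:
#   MODE=dev
#   SECRET_KEY=replace-me
#   DEBUG=True
#   ALLOWED_HOSTS=localhost,127.0.0.1
#   DB_NAME=hood
#   DB_USER=moringa
#   DB_PASSWORD=replace-me
#   DB_HOST=127.0.0.1
# config('DEBUG', default=False, cast=bool) then yields a real boolean and
# Csv() splits ALLOWED_HOSTS into ['localhost', '127.0.0.1'].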
| 26.287425
| 91
| 0.682916
|
0862370125d1625c58336c2d62a4a57deb52bbe0
| 1,234
|
py
|
Python
|
examples/blogprj/apps/blog/models.py
|
pimentech/django-mongoforms
|
6220e91e05d73a26e495460f98667e23dc16c5f6
|
[
"BSD-3-Clause"
] | 1
|
2017-07-27T05:44:47.000Z
|
2017-07-27T05:44:47.000Z
|
examples/blogprj/apps/blog/models.py
|
pimentech/django-mongoforms
|
6220e91e05d73a26e495460f98667e23dc16c5f6
|
[
"BSD-3-Clause"
] | null | null | null |
examples/blogprj/apps/blog/models.py
|
pimentech/django-mongoforms
|
6220e91e05d73a26e495460f98667e23dc16c5f6
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from django.template.defaultfilters import slugify
from django.core.urlresolvers import reverse
from mongoengine import *
class BlogPost(Document):
published = BooleanField(default=False)
author = StringField(required=True)
title = StringField(required=True)
slug = StringField()
content = StringField(required=True)
datetime_added = DateTimeField(default=datetime.datetime.now)
    def save(self, *args, **kwargs):
if self.slug is None:
slug = slugify(self.title)
new_slug = slug
c = 1
while True:
try:
BlogPost.objects.get(slug=new_slug)
except BlogPost.DoesNotExist:
break
else:
c += 1
new_slug = '%s-%s' % (slug, c)
self.slug = new_slug
        return super(BlogPost, self).save(*args, **kwargs)
def get_absolute_url(self):
#return u'%s/' % self.slug
return reverse('apps.blog.views.show', kwargs={'slug': self.slug})
@queryset_manager
def published_posts(doc_cls, queryset):
return queryset(published=True)
meta = {
'ordering': ['-datetime_added']
}
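# Hypothetical usage of the model above; it assumes a reachable MongoDB and a
# mongoengine connection, and the post contents are made up.
if __name__ == '__main__':
    connect('blogdb')
    BlogPost(author='alice', title='Hello World', content='first post').save()
    clash = BlogPost(author='bob', title='Hello World', content='second post')
    clash.save()
    print(clash.slug)  # 'hello-world-2': save() bumps a counter until the slug is free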
| 28.697674
| 74
| 0.579417
|
a35cf75d191f9eddfc81c4eb7b7eeedcabc1ef3e
| 13,034
|
py
|
Python
|
tools/tools/env/tools/Python27/Lib/site-packages/serial/urlhandler/protocol_socket.py
|
John-J-smith/myRTT
|
7b206d3984f3b70f825a0b9ec87750c153c2c0f1
|
[
"Apache-2.0"
] | 1
|
2020-11-25T20:09:59.000Z
|
2020-11-25T20:09:59.000Z
|
serial/urlhandler/protocol_socket.py
|
gregkoul/pyserial
|
1ef8648ff3c4b4aeaeb3962ea8d1076a1e90ae74
|
[
"BSD-3-Clause"
] | null | null | null |
serial/urlhandler/protocol_socket.py
|
gregkoul/pyserial
|
1ef8648ff3c4b4aeaeb3962ea8d1076a1e90ae74
|
[
"BSD-3-Clause"
] | 2
|
2019-02-14T08:13:33.000Z
|
2019-04-23T21:47:48.000Z
|
#! python
#
# This module implements a simple socket based client.
# It does not support changing any port parameters and will silently ignore any
# requests to do so.
#
# The purpose of this module is that applications using pySerial can connect to
# TCP/IP to serial port converters that do not support RFC 2217.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
#
# URL format: socket://<host>:<port>[/option[/option...]]
# options:
# - "debug" print diagnostic messages
import errno
import logging
import select
import socket
import time
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from serial.serialutil import SerialBase, SerialException, to_bytes, \
portNotOpenError, writeTimeoutError, Timeout
# map log level names to constants. used in from_url()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
POLL_TIMEOUT = 5
class Serial(SerialBase):
"""Serial port implementation for plain sockets."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
self.logger = None
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self.is_open:
raise SerialException("Port is already open.")
try:
# timeout is used for write timeout support :/ and to get an initial connection timeout
self._socket = socket.create_connection(self.from_url(self.portstr), timeout=POLL_TIMEOUT)
except Exception as msg:
self._socket = None
raise SerialException("Could not open port {}: {}".format(self.portstr, msg))
# after connecting, switch to non-blocking, we're using select
self._socket.setblocking(False)
# not that there is anything to configure...
self._reconfigure_port()
# all things set up get, now a clean start
self.is_open = True
if not self._dsrdtr:
self._update_dtr_state()
if not self._rtscts:
self._update_rts_state()
self.reset_input_buffer()
self.reset_output_buffer()
def _reconfigure_port(self):
"""\
Set communication parameters on opened port. For the socket://
protocol all settings are ignored!
"""
if self._socket is None:
raise SerialException("Can only operate on open ports")
if self.logger:
self.logger.info('ignored port configuration change')
def close(self):
"""Close port"""
if self.is_open:
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except:
# ignore errors.
pass
self._socket = None
self.is_open = False
# in case of quick reconnects, give the server some time
time.sleep(0.3)
def from_url(self, url):
"""extract host and port from an URL string"""
parts = urlparse.urlsplit(url)
if parts.scheme != "socket":
raise SerialException(
'expected a string in the form '
'"socket://<host>:<port>[?logging={debug|info|warning|error}]": '
'not starting with socket:// ({!r})'.format(parts.scheme))
try:
# process options now, directly altering self
for option, values in urlparse.parse_qs(parts.query, True).items():
if option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.socket')
self.logger.setLevel(LOGGER_LEVELS[values[0]])
self.logger.debug('enabled logging')
else:
raise ValueError('unknown option: {!r}'.format(option))
if not 0 <= parts.port < 65536:
raise ValueError("port not in range 0...65535")
except ValueError as e:
raise SerialException(
'expected a string in the form '
'"socket://<host>:<port>[?logging={debug|info|warning|error}]": {}'.format(e))
return (parts.hostname, parts.port)
# - - - - - - - - - - - - - - - - - - - - - - - -
@property
def in_waiting(self):
"""Return the number of bytes currently in the input buffer."""
if not self.is_open:
raise portNotOpenError
# Poll the socket to see if it is ready for reading.
# If ready, at least one byte will be to read.
lr, lw, lx = select.select([self._socket], [], [], 0)
return len(lr)
# select based implementation, similar to posix, but only using socket API
# to be portable, additionally handle socket timeout which is used to
# emulate write timeouts
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self.is_open:
raise portNotOpenError
read = bytearray()
timeout = Timeout(self._timeout)
while len(read) < size:
try:
ready, _, _ = select.select([self._socket], [], [], timeout.time_left())
# If select was used with a timeout, and the timeout occurs, it
# returns with empty lists -> thus abort read operation.
# For timeout == 0 (non-blocking operation) also abort when
# there is nothing to read.
if not ready:
break # timeout
buf = self._socket.recv(size - len(read))
# read should always return some data as select reported it was
# ready to read when we get to this point, unless it is EOF
if not buf:
raise SerialException('socket disconnected')
read.extend(buf)
except OSError as e:
# this is for Python 3.x where select.error is a subclass of
# OSError ignore EAGAIN errors. all other errors are shown
if e.errno != errno.EAGAIN:
raise SerialException('read failed: {}'.format(e))
except (select.error, socket.error) as e:
# this is for Python 2.x
# ignore EAGAIN errors. all other errors are shown
# see also http://www.python.org/dev/peps/pep-3151/#select
if e[0] != errno.EAGAIN:
raise SerialException('read failed: {}'.format(e))
if timeout.expired():
break
return bytes(read)
def write(self, data):
"""\
Output the given byte string over the serial port. Can block if the
connection is blocked. May raise SerialException if the connection is
closed.
"""
if not self.is_open:
raise portNotOpenError
d = to_bytes(data)
tx_len = length = len(d)
timeout = Timeout(self._write_timeout)
while tx_len > 0:
try:
n = self._socket.send(d)
if timeout.is_non_blocking:
# Zero timeout indicates non-blocking - simply return the
# number of bytes of data actually written
return n
elif not timeout.is_infinite:
# when timeout is set, use select to wait for being ready
# with the time left as timeout
if timeout.expired():
raise writeTimeoutError
_, ready, _ = select.select([], [self._socket], [], timeout.time_left())
if not ready:
raise writeTimeoutError
else:
assert timeout.time_left() is None
# wait for write operation
_, ready, _ = select.select([], [self._socket], [], None)
if not ready:
raise SerialException('write failed (select)')
d = d[n:]
tx_len -= n
except SerialException:
raise
except OSError as v:
if v.errno != errno.EAGAIN:
raise SerialException('write failed: {}'.format(v))
# still calculate and check timeout
if timeout.expired():
raise writeTimeoutError
return length - len(d)
def reset_input_buffer(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.is_open:
raise portNotOpenError
# just use recv to remove input, while there is some
ready = True
while ready:
ready, _, _ = select.select([self._socket], [], [], 0)
try:
self._socket.recv(4096)
except OSError as e:
# this is for Python 3.x where select.error is a subclass of
# OSError ignore EAGAIN errors. all other errors are shown
if e.errno != errno.EAGAIN:
raise SerialException('reset_input_buffer failed: {}'.format(e))
except (select.error, socket.error) as e:
# this is for Python 2.x
# ignore EAGAIN errors. all other errors are shown
# see also http://www.python.org/dev/peps/pep-3151/#select
if e[0] != errno.EAGAIN:
raise SerialException('reset_input_buffer failed: {}'.format(e))
def reset_output_buffer(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('ignored reset_output_buffer')
def send_break(self, duration=0.25):
"""\
Send break condition. Timed, returns to idle state after given
duration.
"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('ignored send_break({!r})'.format(duration))
def _update_break_state(self):
"""Set break: Controls TXD. When active, to transmitting is
possible."""
if self.logger:
self.logger.info('ignored _update_break_state({!r})'.format(self._break_state))
def _update_rts_state(self):
"""Set terminal status line: Request To Send"""
if self.logger:
self.logger.info('ignored _update_rts_state({!r})'.format(self._rts_state))
def _update_dtr_state(self):
"""Set terminal status line: Data Terminal Ready"""
if self.logger:
self.logger.info('ignored _update_dtr_state({!r})'.format(self._dtr_state))
@property
def cts(self):
"""Read terminal status line: Clear To Send"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for cts')
return True
@property
def dsr(self):
"""Read terminal status line: Data Set Ready"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for dsr')
return True
@property
def ri(self):
"""Read terminal status line: Ring Indicator"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for ri')
return False
@property
def cd(self):
"""Read terminal status line: Carrier Detect"""
if not self.is_open:
raise portNotOpenError
if self.logger:
            self.logger.info('returning dummy for cd')
return True
# - - - platform specific - - -
# works on Linux and probably all the other POSIX systems
def fileno(self):
"""Get the file handle of the underlying socket for use with select"""
return self._socket.fileno()
#
# simple client test
if __name__ == '__main__':
import sys
s = Serial('socket://localhost:7000')
sys.stdout.write('{}\n'.format(s))
sys.stdout.write("write...\n")
s.write(b"hello\n")
s.flush()
sys.stdout.write("read: {}\n".format(s.read(5)))
s.close()
| 37.56196
| 102
| 0.569204
|
414027c6232843f2d07aa824dbc600b01a1be8f9
| 1,190
|
py
|
Python
|
00_PythonPrimer/pythonprimer.py
|
caspar/PhysicsLab
|
1b4d45d9e915a84ecb80a39498850463bbc2d3be
|
[
"MIT"
] | 1
|
2016-05-08T19:42:20.000Z
|
2016-05-08T19:42:20.000Z
|
00_PythonPrimer/pythonprimer.py
|
caspar/PhysicsLab
|
1b4d45d9e915a84ecb80a39498850463bbc2d3be
|
[
"MIT"
] | null | null | null |
00_PythonPrimer/pythonprimer.py
|
caspar/PhysicsLab
|
1b4d45d9e915a84ecb80a39498850463bbc2d3be
|
[
"MIT"
] | null | null | null |
#Lab 0
#coding=utf-8
#Author Caspar Lant
import numpy as np;
import matplotlib.pyplot as plt;
# load csv
DATA = "SampleData-1.csv";
measurement, temperature, pressure, uncertainty, error = np.loadtxt(DATA, skiprows=5, unpack=True, delimiter=',');
# plot data
# plt.xlabel("Temperature ($^\circ$C)");
# plt.ylabel("Pressure (lb/in$ ^2$)");
# with error bars
plt.errorbar(temperature, pressure, error, linestyle = 'None', marker='d', mfc='yellow', mec='r', ms=20, mew=1, ecolor = "k");
plt.show();
#####################
# coupled pendulums #
#####################
A = 0.1
w1 = 2 * np.pi * 5
w2 = 2 * np.pi * 5.2
theta_a1 = []
theta_b1 = []
theta_a2 = []
theta_b2 = []
times = [];
for t in range (0,400):
theta_a1.append(A * np.cos(w1 * t / 200) + A * np.cos(w2 * t / 200));
theta_b1.append(A * np.cos(w1 * t / 200) - A * np.cos(w2 * t / 200));
theta_a2.append(2 * A * np.cos((w2 - w1) / 2 * t / 200) * np.cos((w2 + w1) / 2 * t / 200));
theta_b2.append(2 * A * np.sin((w2 - w1) / 2 * t / 200) * np.sin((w2 + w1) / 2 * t / 200));
times.append(t)
plt.plot(times, theta_a1);
plt.plot(times, theta_b1);
plt.plot(times, theta_a2);
plt.plot(times, theta_b2);
plt.show();
| 26.444444
| 126
| 0.582353
|
8619a4378c2a4f736f26de61a365992dfca06c8f
| 15,414
|
py
|
Python
|
Apps/phthreatminer/threatminer.py
|
mattsayar-splunk/phantom-apps
|
b719b78ded609ae3cbd62d7d2cc317db1a613d3b
|
[
"Apache-2.0"
] | 74
|
2019-10-22T02:00:53.000Z
|
2022-03-15T12:56:13.000Z
|
Apps/phthreatminer/threatminer.py
|
mattsayar-splunk/phantom-apps
|
b719b78ded609ae3cbd62d7d2cc317db1a613d3b
|
[
"Apache-2.0"
] | 375
|
2019-10-22T20:53:50.000Z
|
2021-11-09T21:28:43.000Z
|
Apps/phthreatminer/threatminer.py
|
mattsayar-splunk/phantom-apps
|
b719b78ded609ae3cbd62d7d2cc317db1a613d3b
|
[
"Apache-2.0"
] | 175
|
2019-10-23T15:30:42.000Z
|
2021-11-05T21:33:31.000Z
|
# Copyright (c) 2019 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
# Threat Miner Class
import requests
import logging
import json
import time
# Establish Logging.
logging.basicConfig()
logger = logging.getLogger('ThreatMiner')
class threatMiner():
def __init__(
self,
# Replace the base url to the url that you need
base_url='https://api.threatminer.org/v2/',
prettyPrint=False
):
"""
Threat Miner Python Wrapper.
Available Functions
- test_connect Provides a method to test connectivity
- get_domain This function performs lookups against
domains depending on the function
- get_ip This function performs lookups against
IPs depending on the function
- get_sample This function performs lookups against
hashes depending on the functions
- get_imphash This function performs lookups against
imphashes depending on the functions
- get_ssdeep This function performs lookups against
ssdeep depending on the functions
- get_ssl This function performs lookups against
ssl depending on the functions
- get_email This function performs lookups against
email depending on the functions
- get_av This function performs lookups against
AV depending on the functions
Usage:
            s = threatMiner()
s.function_name(valid_variables)
"""
# Create Requests Session
self.session = requests.session()
# Create Base URL variable to allow for updates in the future
self.base_url = base_url
# Create Pretty Print variable
self.prettyPrint = prettyPrint
# Create endpoint
endpoint = '{}domain.php?q=vwrm.com&rt=1'.format(self.base_url)
# Initiate Ping to Threat Miner Endpoint
self.ping = self.session.get(endpoint)
# Request failed returning false and logging an error
if self.ping.status_code != 200:
logger.error(
"Error connecting to Threat Miner, error message: {}".format(
self.ping.text))
def logger_out(self, level, function_name, format_var):
if level == "warning":
            message = ("{}: Error with query to threatMiner, "
                       "error message: {}".format(function_name, format_var))
return logger.warning(message)
def parse_output(self, input):
# If prettyPrint set to False
if self.prettyPrint is False:
return json.dumps(input)
# If prettyPrint set to True
elif self.prettyPrint is True:
print json.dumps(input, indent=4)
def test_connect(self):
"""
Function: Test ping to Threat Miner API
Usage:
s = threatMiner()
s.test_connect()
"""
endpoint = '{}domain.php?q=vwrm.com&rt=1'.format(self.base_url)
# Make connection to the ping endpoint
r = self.session.get(endpoint)
# If the request is successful
if r.status_code == 200:
# Specify Output as JSON
return True
# Request failed returning false and logging an error
else:
self.logger_out("warning", "test_connect", r.text)
return False
def get_domain(self, domain, function):
"""
Function: This function performs lookups against
domains depending on the function
:param function: Required - These are the functions
that threat miner provide for domain lookups
Functions
1 - WHOIS
2 - Passive DNS
3 - Example Query URI
4 - Related Samples (hash only)
5 - Subdomains
6 - Report tagging
Usage:
s = threatMiner()
s.get_domain("vwrm.com", 1)
"""
# URL that we are querying
endpoint = '{}/domain.php?q={}&rt={}'.format(
self.base_url, domain, function)
# Create a request
r = self.session.get(endpoint)
# Sleep to ensure throttling
time.sleep(7)
# If the request is successful
if r.status_code == 200:
if int(r.json()['status_code']) == 200:
output = r.json()
return self.parse_output(output)
else:
status_message = r.json()['status_message']
self.logger_out("warning", "get_domain", status_message)
return False
# Request failed returning false and logging an error
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_domain", status_message)
return False
def get_ip(self, ip, function):
"""
Function: This function performs lookups
against IPs depending on the function
:param function: Required - These are the functions
that threat miner provide for ip lookups
Functions
1 - WHOIS
2 - Passive DNS
3 - URIs
4 - Related Samples (hash only)
5 - SSL Certificates (hash only)
6 - Report tagging
Usage:
s = threatMiner()
s.get_ip("216.58.213.110", 1)
"""
# URL that we are querying
endpoint = '{}/host.php?q={}&rt={}'.format(self.base_url, ip, function)
# Create a request
r = self.session.get(endpoint)
# Sleep to ensure throttling
time.sleep(7)
# If the request is successful
if r.status_code == 200:
if int(r.json()['status_code']) == 200:
output = r.json()
return self.parse_output(output)
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_ip", status_message)
return False
# Request failed returning false and logging an error
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_ip", status_message)
return False
def get_sample(self, sample, function):
"""
Function: This function performs lookups against
hashes depending on the functions
:param function: Required - These are the functions that
threat miner provide for hash lookups
Functions
1 - Metadata
2 - HTTP Traffic
3 - Hosts (domains and IPs)
4 - Mutants
5 - Registry Keys
6 - AV Detections
7 - Report tagging
Usage:
s = threatMiner()
s.get_sample("e6ff1bf0821f00384cdd25efb9b1cc09", 1)
"""
# URL that we are querying
endpoint = '{}/sample.php?q={}&rt={}'.format(
self.base_url, sample, function)
# Create a request
r = self.session.get(endpoint)
# Sleep to ensure throttling
time.sleep(7)
# If the request is successful
if r.status_code == 200:
if int(r.json()['status_code']) == 200:
output = r.json()
return self.parse_output(output)
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_sample", status_message)
return False
# Request failed returning false and logging an error
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_sample", status_message)
return False
def get_imphash(self, imphash, function):
"""
Function: This function performs lookups against
imphashes depending on the functions
:param function: Required - These are the functions that
threat miner provide for imphashes lookups
Functions
1 - Samples
2 - Report tagging
Usage:
s = threatMiner()
s.get_imphash("1f4f257947c1b713ca7f9bc25f914039", 1)
"""
# URL that we are querying
endpoint = '{}/imphash.php?q={}&rt={}'.format(
self.base_url, imphash, function)
# Create a request
r = self.session.get(endpoint)
# Sleep to ensure throttling
time.sleep(7)
        # If the request is successful
if r.status_code == 200:
if int(r.json()['status_code']) == 200:
output = r.json()
return self.parse_output(output)
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_imphash", status_message)
return False
else:
status_message = r.json()['status_message']
self.logger_out("warning", "get_imphash", status_message)
return False
def get_ssdeep(self, ssdeep, function):
"""
Function: This function performs lookups against
ssdeep depending on the functions
:param function: Required - These are the functions that
threat miner provide for ssdeep lookups
Functions
1 - Samples
2 - Report tagging
Usage:
s = threatMiner()
s.get_ssdeep("
1536:TJsNrChuG2K6IVOTjWko8a9P6W3OEHBQc4w4:TJs0oG2KSTj3o8a9PFeEHn4l", 1)
"""
# URL that we are querying
endpoint = '{}/ssdeep.php?q={}&rt={}'.format(
self.base_url, ssdeep, function)
# Create a request
r = self.session.get(endpoint)
# Sleep to ensure throttling
time.sleep(7)
# If the request is successful
if r.status_code == 200:
if int(r.json()['status_code']) == 200:
output = r.json()
return self.parse_output(output)
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_ssdeep", status_message)
return False
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_ssdeep", status_message)
return False
def get_ssl(self, ssl, function):
"""
Function: This function performs lookups against
ssl depending on the functions
:param function: Required - These are the functions that
threat miner provide for ssl lookups
Functions
1 - Hosts
2 - Report tagging
Usage:
s = threatMiner()
s.get_ssl("42a8d5b3a867a59a79f44ffadd61460780fe58f2", 1)
"""
# URL that we are querying
endpoint = '{}/ssl.php?q={}&rt={}'.format(self.base_url, ssl, function)
# Create a request
r = self.session.get(endpoint)
# Sleep to ensure throttling
time.sleep(7)
# If the request is successful
if r.status_code == 200:
if int(r.json()['status_code']) == 200:
output = r.json()
return self.parse_output(output)
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_ssl", status_message)
return False
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_ssl", status_message)
return False
def get_email(self, email, function):
"""
Function: This function performs lookups against
email depending on the functions
:param function: Required - These are the functions that
threat miner provide for email lookups
Functions
1 - Domains
Usage:
s = threatMiner()
s.get_email("7bf5721bfa009479c33f3c3cf4ea5392200f030e", 1)
"""
# URL that we are querying
endpoint = '{}/email.php?q={}&rt={}'.format(
self.base_url, email, function)
# Create a request
r = self.session.get(endpoint)
# Sleep to ensure throttling
time.sleep(7)
# If the request is successful
if r.status_code == 200:
if int(r.json()['status_code']) == 200:
output = r.json()
return self.parse_output(output)
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_email", status_message)
return False
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_email", status_message)
return False
def get_av(self, av, function):
"""
Function: This function performs lookups against
AV depending on the functions
:param function: Required - These are the functions that
threat miner provide for AV lookups
Functions
1 - Samples
2 - Report tagging
Usage:
s = threatMiner()
s.get_av("Trojan.Enfal", 1)
"""
# URL that we are querying
endpoint = '{}/av.php?q={}&rt={}'.format(self.base_url, av, function)
# Create a request
r = self.session.get(endpoint)
# Sleep to ensure throttling
time.sleep(7)
# If the request is successful
if r.status_code == 200:
if int(r.json()['status_code']) == 200:
output = r.json()
return self.parse_output(output)
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_av", status_message)
return False
else:
# Write a warning to the console
status_message = r.json()['status_message']
self.logger_out("warning", "get_av", status_message)
return False
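# Hypothetical usage of the wrapper above: the indicators come from the method
# docstrings, network access is required, and each call sleeps ~7 seconds to
# stay under the ThreatMiner rate limit.
if __name__ == '__main__':
    tm = threatMiner(prettyPrint=False)
    if tm.test_connect():
        whois_json = tm.get_domain("vwrm.com", 1)     # rt=1 -> WHOIS
        passive_dns = tm.get_ip("216.58.213.110", 2)  # rt=2 -> passive DNS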
| 35.846512
| 79
| 0.542494
|
d273febec5691a9757874476ddec831c480f3597
| 16,511
|
py
|
Python
|
yolo3/model.py
|
tantao258/keras-yolo3
|
cf5222e419903fc6b9e2388a6fff65bc3e001e07
|
[
"MIT"
] | null | null | null |
yolo3/model.py
|
tantao258/keras-yolo3
|
cf5222e419903fc6b9e2388a6fff65bc3e001e07
|
[
"MIT"
] | null | null | null |
yolo3/model.py
|
tantao258/keras-yolo3
|
cf5222e419903fc6b9e2388a6fff65bc3e001e07
|
[
"MIT"
] | null | null | null |
"""YOLO_v3 Model Defined in Keras."""
from functools import wraps
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from yolo3.utils import compose
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
"""Wrapper to set Darknet parameters for Convolution2D."""
darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
darknet_conv_kwargs.update(kwargs)
return Conv2D(*args, **darknet_conv_kwargs)
def DarknetConv2D_BN_Leaky(*args, **kwargs):
"""Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
def resblock_body(x, num_filters, num_blocks):
'''A series of resblocks starting with a downsampling Convolution2D'''
# Darknet uses left and top padding instead of 'same' mode
x = ZeroPadding2D(((1,0),(1,0)))(x)
x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)
for i in range(num_blocks):
y = compose(
DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),
DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)
x = Add()([x,y])
return x
def darknet_body(x):
"""
Darknet body having 52 Convolution2D layers
"""
x = DarknetConv2D_BN_Leaky(32, (3,3))(x)
x = resblock_body(x, 64, 1)
x = resblock_body(x, 128, 2)
x = resblock_body(x, 256, 8)
x = resblock_body(x, 512, 8)
x = resblock_body(x, 1024, 4)
return x
def make_last_layers(x, num_filters, out_filters):
'''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
y = compose(
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D(out_filters, (1,1)))(x)
return x, y
def yolo_body(inputs, num_anchors, num_classes):
"""
Create YOLO_V3 model CNN body in Keras.
"""
darknet = Model(inputs, darknet_body(inputs))
x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))
x = compose(
DarknetConv2D_BN_Leaky(256, (1,1)),
UpSampling2D(2))(x)
x = Concatenate()([x,darknet.layers[152].output])
x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))
x = compose(
DarknetConv2D_BN_Leaky(128, (1,1)),
UpSampling2D(2))(x)
x = Concatenate()([x,darknet.layers[92].output])
x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))
return Model(inputs, [y1,y2,y3])
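# --- Illustrative usage (hedged sketch, not part of the original file) ---
# Shows how the full-size body might be instantiated; the 416x416 input size,
# 3 anchors per scale and 80 classes are assumptions chosen for illustration.
#
#   from keras.layers import Input
#   image_input = Input(shape=(416, 416, 3))
#   model = yolo_body(image_input, num_anchors=3, num_classes=80)
#   # model.output is [y1, y2, y3] on 13x13, 26x26 and 52x52 grids,
#   # each with 3 * (80 + 5) = 255 channels.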
def tiny_yolo_body(inputs, num_anchors, num_classes):
'''Create Tiny YOLO_v3 model CNN body in keras.'''
x1 = compose(
DarknetConv2D_BN_Leaky(16, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(32, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(64, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(128, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(256, (3,3)))(inputs)
x2 = compose(
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(512, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),
DarknetConv2D_BN_Leaky(1024, (3,3)),
DarknetConv2D_BN_Leaky(256, (1,1)))(x1)
y1 = compose(
DarknetConv2D_BN_Leaky(512, (3,3)),
DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)
x2 = compose(
DarknetConv2D_BN_Leaky(128, (1,1)),
UpSampling2D(2))(x2)
y2 = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(256, (3,3)),
DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])
return Model(inputs, [y1,y2])
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
"""Convert final layer features to bounding box parameters."""
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
feats = K.reshape(
feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
# Adjust predictions to each spatial grid point and anchor size.
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
if calc_loss:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
'''Get corrected boxes'''
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = K.cast(input_shape, K.dtype(box_yx))
image_shape = K.cast(image_shape, K.dtype(box_yx))
new_shape = K.round(image_shape * K.min(input_shape/image_shape))
offset = (input_shape-new_shape)/2./input_shape
scale = input_shape/new_shape
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = K.concatenate([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
])
# Scale boxes back to original image shape.
boxes *= K.concatenate([image_shape, image_shape])
return boxes
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
'''Process Conv layer output'''
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,
anchors, num_classes, input_shape)
boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
boxes = K.reshape(boxes, [-1, 4])
box_scores = box_confidence * box_class_probs
box_scores = K.reshape(box_scores, [-1, num_classes])
return boxes, box_scores
def yolo_eval(yolo_outputs,
anchors,
num_classes,
image_shape,
max_boxes=20,
score_threshold=.6,
iou_threshold=.5):
"""Evaluate YOLO model on given input and return filtered boxes."""
num_layers = len(yolo_outputs)
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting
input_shape = K.shape(yolo_outputs[0])[1:3] * 32
boxes = []
box_scores = []
for l in range(num_layers):
_boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
boxes.append(_boxes)
box_scores.append(_box_scores)
boxes = K.concatenate(boxes, axis=0)
box_scores = K.concatenate(box_scores, axis=0)
mask = box_scores >= score_threshold
max_boxes_tensor = K.constant(max_boxes, dtype='int32')
boxes_ = []
scores_ = []
classes_ = []
for c in range(num_classes):
# TODO: use keras backend instead of tf.
class_boxes = tf.boolean_mask(boxes, mask[:, c])
class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
nms_index = tf.image.non_max_suppression(
class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
class_boxes = K.gather(class_boxes, nms_index)
class_box_scores = K.gather(class_box_scores, nms_index)
classes = K.ones_like(class_box_scores, 'int32') * c
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
boxes_ = K.concatenate(boxes_, axis=0)
scores_ = K.concatenate(scores_, axis=0)
classes_ = K.concatenate(classes_, axis=0)
return boxes_, scores_, classes_
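# --- Illustrative usage (hedged sketch, not part of the original file) ---
# One way to wire yolo_eval to a trained model; `trained_model` and `anchors`
# (a numpy array of shape (9, 2)) are assumptions supplied by the caller.
#
#   from keras import backend as K
#   input_image_shape = K.placeholder(shape=(2,))  # original (height, width)
#   boxes, scores, classes = yolo_eval(trained_model.output, anchors,
#                                      num_classes=80,
#                                      image_shape=input_image_shape,
#                                      score_threshold=0.3, iou_threshold=0.45)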
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
'''Preprocess true boxes to training input format
Parameters
----------
true_boxes: array, shape=(m, T, 5)
Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
input_shape: array-like, hw, multiples of 32
anchors: array, shape=(N, 2), wh
num_classes: integer
Returns
-------
y_true: list of array, shape like yolo_outputs, xywh are relative values
'''
assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'
num_layers = len(anchors)//3 # default setting
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(input_shape, dtype='int32')
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]
m = true_boxes.shape[0]
grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),
dtype='float32') for l in range(num_layers)]
# Expand dim to apply broadcasting.
anchors = np.expand_dims(anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
valid_mask = boxes_wh[..., 0]>0
for b in range(m):
# Discard zero rows.
wh = boxes_wh[b, valid_mask[b]]
if len(wh)==0: continue
# Expand dim to apply broadcasting.
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
# Find best anchor for each true box
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
for l in range(num_layers):
if n in anchor_mask[l]:
i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(n)
c = true_boxes[b,t, 4].astype('int32')
y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5+c] = 1
return y_true
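# --- Illustrative usage (hedged sketch, not part of the original file) ---
# Example values are assumptions: one image (m=1) with a single class-0 box,
# a 416x416 input and a (9, 2) anchor array loaded elsewhere as `anchors`.
#
#   import numpy as np
#   true_boxes = np.array([[[100, 120, 200, 260, 0]]], dtype='float32')
#   y_true = preprocess_true_boxes(true_boxes, input_shape=(416, 416),
#                                  anchors=anchors, num_classes=80)
#   # y_true holds arrays of shape (1, 13, 13, 3, 85), (1, 26, 26, 3, 85)
#   # and (1, 52, 52, 3, 85).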
def box_iou(b1, b2):
'''Return iou tensor
Parameters
----------
b1: tensor, shape=(i1,...,iN, 4), xywh
b2: tensor, shape=(j, 4), xywh
Returns
-------
iou: tensor, shape=(i1,...,iN, j)
'''
# Expand dim to apply broadcasting.
b1 = K.expand_dims(b1, -2)
b1_xy = b1[..., :2]
b1_wh = b1[..., 2:4]
b1_wh_half = b1_wh/2.
b1_mins = b1_xy - b1_wh_half
b1_maxes = b1_xy + b1_wh_half
# Expand dim to apply broadcasting.
b2 = K.expand_dims(b2, 0)
b2_xy = b2[..., :2]
b2_wh = b2[..., 2:4]
b2_wh_half = b2_wh/2.
b2_mins = b2_xy - b2_wh_half
b2_maxes = b2_xy + b2_wh_half
intersect_mins = K.maximum(b1_mins, b2_mins)
intersect_maxes = K.minimum(b1_maxes, b2_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b1_wh[..., 0] * b1_wh[..., 1]
b2_area = b2_wh[..., 0] * b2_wh[..., 1]
iou = intersect_area / (b1_area + b2_area - intersect_area)
return iou
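# --- Worked example (hedged, not part of the original file) ---
# For b1 = (x=0.5, y=0.5, w=1, h=1) and b2 = (x=0.5, y=0.5, w=0.5, h=0.5),
# b2 lies entirely inside b1, so the intersection area is 0.25, the union is
# 1.0 + 0.25 - 0.25 = 1.0, and the returned IoU is 0.25.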
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
'''Return yolo_loss tensor
Parameters
----------
yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
y_true: list of array, the output of preprocess_true_boxes
anchors: array, shape=(N, 2), wh
num_classes: integer
ignore_thresh: float, the IoU threshold below which a predicted box is ignored in the object confidence loss
Returns
-------
loss: tensor, shape=(1,)
'''
num_layers = len(anchors)//3 # default setting
yolo_outputs = args[:num_layers]
y_true = args[num_layers:]
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]
loss = 0
m = K.shape(yolo_outputs[0])[0] # batch size, tensor
mf = K.cast(m, K.dtype(yolo_outputs[0]))
for l in range(num_layers):
object_mask = y_true[l][..., 4:5]
true_class_probs = y_true[l][..., 5:]
grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
pred_box = K.concatenate([pred_xy, pred_wh])
# Darknet raw box to calculate loss.
raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid
raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]
# Find ignore mask, iterate over each of batch.
ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
object_mask_bool = K.cast(object_mask, 'bool')
def loop_body(b, ignore_mask):
true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
iou = box_iou(pred_box[b], true_box)
best_iou = K.max(iou, axis=-1)
ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
return b+1, ignore_mask
_, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])
ignore_mask = ignore_mask.stack()
ignore_mask = K.expand_dims(ignore_mask, -1)
# K.binary_crossentropy is helpful to avoid exp overflow.
xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)
wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])
confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \
(1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask
class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)
xy_loss = K.sum(xy_loss) / mf
wh_loss = K.sum(wh_loss) / mf
confidence_loss = K.sum(confidence_loss) / mf
class_loss = K.sum(class_loss) / mf
loss += xy_loss + wh_loss + confidence_loss + class_loss
if print_loss:
loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')
return loss
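# --- Illustrative wiring (hedged sketch, not part of the original file) ---
# One possible way to attach this loss during training is through a Lambda
# layer fed with the three prediction tensors followed by the three y_true
# tensors; `model` and `anchors` (shape (9, 2)) are assumptions, and the
# shapes below assume a 416x416 input, 3 anchors per scale and 80 classes.
#
#   from keras.layers import Input, Lambda
#   y_true_inputs = [Input(shape=(416 // s, 416 // s, 3, 85)) for s in (32, 16, 8)]
#   model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
#                       arguments={'anchors': anchors, 'num_classes': 80,
#                                  'ignore_thresh': 0.5})(model.output + y_true_inputs)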
| 39.594724
| 126
| 0.627582
|
da8931fcedd8fcaeee4024f2d348487d0795b706
| 17,380
|
py
|
Python
|
geoprisma/migrations/0001_initial.py
|
groupe-conseil-nutshimit-nippour/django-geoprisma
|
4732fdb8a0684eb4d7fd50aa43e11b454ee71d08
|
[
"BSD-3-Clause"
] | null | null | null |
geoprisma/migrations/0001_initial.py
|
groupe-conseil-nutshimit-nippour/django-geoprisma
|
4732fdb8a0684eb4d7fd50aa43e11b454ee71d08
|
[
"BSD-3-Clause"
] | 5
|
2020-02-12T00:23:17.000Z
|
2021-12-13T19:46:33.000Z
|
geoprisma/migrations/0001_initial.py
|
groupe-conseil-nutshimit-nippour/django-geoprisma
|
4732fdb8a0684eb4d7fd50aa43e11b454ee71d08
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessFilter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('commentaire', models.TextField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AccessFilterOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('value', models.TextField(null=True, blank=True)),
('accessfilter', models.ForeignKey(to='geoprisma.AccessFilter')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('template', models.CharField(default=b'', max_length=255)),
('commentaire', models.TextField(null=True, blank=True)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ApplicationType',
fields=[
('id', models.IntegerField(unique=True, serialize=False, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('activated', models.BooleanField(default=True)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ApplicationWidget',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.IntegerField(null=True, blank=True)),
('application', models.ForeignKey(to='geoprisma.Application')),
],
options={
'ordering': ('order',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Datastore',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('layers', models.CharField(max_length=255, null=True)),
('commentaire', models.TextField(null=True, blank=True)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DatastoreOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('value', models.TextField(null=True, blank=True)),
('datastore', models.ForeignKey(to='geoprisma.Datastore')),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DefaultLayerOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('value', models.TextField(null=True, blank=True)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Field',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('title', models.CharField(max_length=255, blank=True)),
('key', models.CharField(max_length=255, null=True, blank=True)),
('domain', models.CharField(max_length=255, null=True, blank=True)),
('commentaire', models.TextField(null=True, blank=True)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FieldOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('value', models.TextField(null=True, blank=True)),
('field', models.ForeignKey(to='geoprisma.Field')),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InitialView',
fields=[
('id_initial_view', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=100)),
('description', models.CharField(max_length=100)),
('geom', django.contrib.gis.db.models.fields.GeometryField(srid=32187)),
('sort_index', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MapContext',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('commentaire', models.TextField(null=True, blank=True)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MapContextOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('value', models.TextField(null=True, blank=True)),
('mapContext', models.ForeignKey(to='geoprisma.MapContext', db_column=b'mapcontext_id')),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MapContextResource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.IntegerField(null=True, blank=True)),
('mapContext', models.ForeignKey(to='geoprisma.MapContext', db_column=b'mapcontext_id')),
],
options={
'ordering': ('order',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Resource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('acl_name', models.CharField(max_length=255, null=True, db_column=b'acl_name', blank=True)),
('key', models.CharField(max_length=255, null=True, blank=True)),
('domain', models.CharField(max_length=255, null=True, blank=True)),
('slug', models.SlugField(max_length=255, unique=True, null=True)),
('display_name', models.CharField(max_length=255, null=True, blank=True)),
('display_name_fr', models.CharField(max_length=255, null=True, blank=True)),
('commentaire', models.TextField()),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ResourceAccessfilter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('accessfilter', models.ForeignKey(to='geoprisma.AccessFilter')),
('resource', models.ForeignKey(to='geoprisma.Resource')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ResourceField',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.IntegerField(null=True, blank=True)),
('field', models.ForeignKey(to='geoprisma.Field')),
('resource', models.ForeignKey(to='geoprisma.Resource')),
],
options={
'ordering': ('order',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ResourceOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('value', models.TextField(null=True, blank=True)),
('key', models.CharField(max_length=255, null=True, blank=True)),
('domain', models.CharField(max_length=255, null=True, blank=True)),
('resource', models.ForeignKey(to='geoprisma.Resource')),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('source', models.CharField(max_length=1024)),
('slug', models.SlugField(max_length=255, unique=True, null=True)),
('commentaire', models.TextField(null=True, blank=True)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ServiceOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('value', models.TextField(null=True, blank=True)),
('service', models.ForeignKey(to='geoprisma.Service')),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ServiceType',
fields=[
('id', models.IntegerField(unique=True, serialize=False, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('activated', models.BooleanField(default=True)),
('priority', models.IntegerField(null=True, blank=True)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Session',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('commentaire', models.TextField(null=True, blank=True)),
('application', models.ForeignKey(to='geoprisma.Application')),
('mapContext', models.ForeignKey(to='geoprisma.MapContext', db_column=b'mapcontext_id')),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Widget',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('commentaire', models.TextField()),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WidgetOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('value', models.TextField(null=True, blank=True)),
('order', models.IntegerField(null=True, blank=True)),
('widget', models.ForeignKey(to='geoprisma.Widget')),
],
options={
'ordering': ('order',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WidgetType',
fields=[
('id', models.IntegerField(unique=True, serialize=False, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('activated', models.BooleanField(default=True)),
('classname', models.CharField(default=b'geoprisma.core.widgets.widgetbase.WidgetBase', max_length=255)),
('action', models.CharField(default=b'read', max_length=255)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.AddField(
model_name='widget',
name='type',
field=models.ForeignKey(to='geoprisma.WidgetType'),
preserve_default=True,
),
migrations.AddField(
model_name='service',
name='type',
field=models.ForeignKey(to='geoprisma.ServiceType'),
preserve_default=True,
),
migrations.AddField(
model_name='resource',
name='accessfilters',
field=models.ManyToManyField(to='geoprisma.AccessFilter', null=True, through='geoprisma.ResourceAccessfilter', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='resource',
name='datastores',
field=models.ManyToManyField(to='geoprisma.Datastore', db_table=b'geoprisma_resourcedatastore'),
preserve_default=True,
),
migrations.AddField(
model_name='resource',
name='fields',
field=models.ManyToManyField(to='geoprisma.Field', through='geoprisma.ResourceField'),
preserve_default=True,
),
migrations.AddField(
model_name='mapcontextresource',
name='resource',
field=models.ForeignKey(to='geoprisma.Resource'),
preserve_default=True,
),
migrations.AddField(
model_name='mapcontext',
name='resources',
field=models.ManyToManyField(to='geoprisma.Resource', through='geoprisma.MapContextResource'),
preserve_default=True,
),
migrations.AddField(
model_name='initialview',
name='id_session',
field=models.ForeignKey(to='geoprisma.Session'),
preserve_default=True,
),
migrations.AddField(
model_name='defaultlayeroption',
name='servicetype',
field=models.ForeignKey(to='geoprisma.ServiceType'),
preserve_default=True,
),
migrations.AddField(
model_name='datastore',
name='service',
field=models.ForeignKey(to='geoprisma.Service'),
preserve_default=True,
),
migrations.AddField(
model_name='applicationwidget',
name='widget',
field=models.ForeignKey(to='geoprisma.Widget'),
preserve_default=True,
),
migrations.AddField(
model_name='application',
name='type',
field=models.ForeignKey(to='geoprisma.ApplicationType'),
preserve_default=True,
),
migrations.AddField(
model_name='application',
name='widgets',
field=models.ManyToManyField(to='geoprisma.Widget', through='geoprisma.ApplicationWidget'),
preserve_default=True,
),
]
| 41.380952
| 135
| 0.521979
|
e1d66504ed6e3cf6f58939c3bb4e5af129e6eafc
| 5,561
|
py
|
Python
|
technix/de.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | 1
|
2021-03-05T07:44:05.000Z
|
2021-03-05T07:44:05.000Z
|
technix/de.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | 3
|
2017-06-04T03:01:31.000Z
|
2017-08-04T04:04:37.000Z
|
technix/de.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | null | null | null |
from __future__ import print_function, division
import os
import sys
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
from utils.lib import O
from utils.stats import Statistics
import time
from technix.tech_utils import Point, seed
from utils import plotter
from technix import info
def default():
"""
Default settings.
:return:
"""
return O(
gens=50,
candidates=20,
f=0.75,
cr=0.3,
seed=1,
binary=True,
dominates="bdom", # bdom or cdom
cdom_delta=0.01,
mutate="binary", # binary or random
early_termination=True,
verbose=True
)
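# --- Illustrative usage (hedged sketch, not part of the original file) ---
# Any of the defaults above can be overridden with keyword arguments when the
# optimizer is built; `model` and `Mutator` here are assumptions standing in
# for a concrete model instance and mutator class.
#
#   de = DE(model, Mutator, gens=100, candidates=30, f=0.5, cr=0.4, verbose=False)
#   stats = de.run()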
class DE(O):
def __init__(self, model, mutator, **settings):
"""
Initialize a DE optimizer
:param model: Model to be optimized
:param settings: Settings for the optimizer
"""
O.__init__(self)
self.model = model
if self.model.get_max_size() < 50:
raise Exception("Cannot run DE since the number of possible decisions is less than 50")
self.settings = default().update(**settings)
self.settings.candidates = int(min(self.settings.candidates, 0.5 * self.model.get_max_size() / self.settings.gens))
self.mutator = mutator(self.model, cr=self.settings.cr, f=self.settings.f)
seed(self.settings.seed)
if self.settings.dominates == "bdom":
self.dominates = self.bdom
else:
# TODO: Insert cdom
self.dominates = self.bdom
self.global_set = set()
self.max_size = None
def bdom(self, obj1, obj2):
"""
Binary Domination
:param obj1: Objective 1
:param obj2: Objective 2
:return: True if objective 1 dominates objective 2, False otherwise
"""
at_least = False
for i in self.model.objectives.keys():
a, b = obj1[i], obj2[i]
if self.model.objectives[i].direction.better(a, b):
at_least = True
elif a == b:
continue
else:
return False
return at_least
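# --- Worked example (hedged, not part of the original file) ---
# With two minimization objectives, say cost and risk, obj1 = {cost: 1, risk: 2}
# binary-dominates obj2 = {cost: 2, risk: 2}: it is strictly better on cost and
# no worse on risk, so bdom returns True. If obj2 were strictly better on either
# objective, bdom would return False.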
def populate(self, size):
self.max_size = self.model.get_max_size() if self.max_size is None else self.max_size
if size > self.max_size:
size = self.max_size
population = set()
while len(population) < size:
point = Point(self.model.generate())
if point not in population:
population.add(point)
self.global_set.add(point)
return list(population)
def mutate(self, point, population):
"""
Mutate point against the population
:param point: Point to be mutated
:param population: Population to refer
:return: Mutated point
"""
# TODO: Implement DE binary mutation
if self.settings.mutate == "random":
return self.mutator.mutate_random(point, population)
elif self.settings.mutate == "binary":
return self.mutator.mutate_binary(point, population)
else:
raise Exception("Invalid mutation setting %s" % self.settings.mutate)
def run(self):
"""
DE runner
:return:
"""
# settings = self.settings
self.print("Optimizing using DE ... ")
stat = Statistics()
start = time.time()
self.model.initialize()
population = self.populate(self.settings.candidates)
[point.evaluate(self.model) for point in population]
stat.insert(population)
for i in range(self.settings.gens):
self.print("Generation : %d ... " % (i + 1))
clones = set(population[:])
for point in population:
original_obj = point.evaluate(self.model)
mutant = self.mutate(point, population)
mutated_obj = mutant.evaluate(self.model)
if self.dominates(mutated_obj, original_obj) and (mutant not in self.global_set):
clones.remove(point)
clones.add(mutant)
self.global_set.add(mutant)
population = list(clones)
stat.insert(population)
stat.runtime = time.time() - start
return stat
def print(self, message):
if self.settings.verbose:
print(message)
def _pareto_quirk_test(model_name, **settings):
print("# %s" % model_name)
from language.parser import Parser
from language.mutator import Mutator
mdl = Parser.from_file("models/quirk/%s.str" % model_name)
obj_ids = mdl.objectives.keys()
de = DE(mdl, Mutator, **settings)
stat = de.run()
gens_obj_start = stat.get_objectives(0, obj_ids)
gens_obj_end = stat.get_objectives(-1, obj_ids)
plotter.plot_pareto([gens_obj_start, gens_obj_end], ['red', 'green'], ['x', 'o'],
['first', 'last'], obj_ids[0], obj_ids[1], 'Pareto Front',
'results/pareto/%s_pareto.png' % model_name)
evtpi_index = 0
direction = mdl.objectives[obj_ids[evtpi_index]].direction
samples = stat.get_objective_samples(-1, obj_ids[evtpi_index])
info.save_info(samples, mdl.get_parameters(), direction,
"results/models/%s/info_%s.md" % (model_name, mdl.objectives[obj_ids[evtpi_index]].name))
def _pareto_xomo_test():
from models.xomo.xomo import Model
from models.xomo.mutator import Mutator
mdl = Model()
obj_ids = mdl.objectives.keys()
de = DE(mdl, Mutator)
stat = de.run()
gens_obj_start = stat.get_objectives(0, obj_ids)
gens_obj_end = stat.get_objectives(-1, obj_ids)
plotter.plot_pareto([gens_obj_start, gens_obj_end], ['red', 'green'], ['x', 'o'],
['first', 'last'], obj_ids[0], obj_ids[1], 'Pareto Front',
'results/pareto/%s_pareto.png' % mdl.name)
if __name__ == "__main__":
# _pareto_xomo_test()
# _pareto_quirk_test("SAS", candidates=10, gens=50)
# _pareto_quirk_test("AOWS")
_pareto_quirk_test("ECS")
| 30.723757
| 119
| 0.655458
|
538eba307a72a7b359bf008d7e76bf9ff168a42f
| 296
|
py
|
Python
|
Exe10_valores_listas_intercalados.py
|
lucaslk122/Exercicios-com-lista
|
3e614a865f93afa2ff6a32f8da04abb0c0716cdc
|
[
"MIT"
] | null | null | null |
Exe10_valores_listas_intercalados.py
|
lucaslk122/Exercicios-com-lista
|
3e614a865f93afa2ff6a32f8da04abb0c0716cdc
|
[
"MIT"
] | null | null | null |
Exe10_valores_listas_intercalados.py
|
lucaslk122/Exercicios-com-lista
|
3e614a865f93afa2ff6a32f8da04abb0c0716cdc
|
[
"MIT"
] | null | null | null |
lista1 = []
lista2 = []
lista = []
for i in range(10):
lista1.append(int(input("Enter a value for the first list: ")))
lista2.append(int(input("Enter a value for the second list: ")))
lista.append(lista1[i])
lista.append(lista2[i])
print(lista1)
print(lista2)
print(lista)
| 22.769231
| 72
| 0.672297
|
ace1d0808da54be32089b17b4052b53fb8f1572f
| 32,295
|
py
|
Python
|
plugins/modules/oci_mysql_db_system_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_mysql_db_system_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_mysql_db_system_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_mysql_db_system_facts
short_description: Fetches details about one or multiple DbSystem resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple DbSystem resources in Oracle Cloud Infrastructure
- Get a list of DB Systems in the specified compartment.
The default sort order is by timeUpdated, descending.
- If I(db_system_id) is specified, the details of a single DbSystem will be returned.
version_added: "2.9"
author: Oracle (@oracle)
options:
db_system_id:
description:
- The DB System L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to get a specific db_system.
type: str
aliases: ["id"]
compartment_id:
description:
- The compartment L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to list multiple db_systems.
type: str
is_analytics_cluster_attached:
description:
- If true, return only DB Systems with an Analytics Cluster attached, if false
return only DB Systems with no Analytics Cluster attached. If not
present, return all DB Systems.
type: bool
display_name:
description:
- A filter to return only the resource matching the given display name exactly.
type: str
aliases: ["name"]
lifecycle_state:
description:
- DbSystem Lifecycle State
type: str
choices:
- "CREATING"
- "ACTIVE"
- "INACTIVE"
- "UPDATING"
- "DELETING"
- "DELETED"
- "FAILED"
configuration_id:
description:
- The requested Configuration instance.
type: str
is_up_to_date:
description:
- Filter instances if they are using the latest revision of the
Configuration they are associated with.
type: bool
sort_by:
description:
- The field to sort by. Only one sort order may be provided. Time fields are default ordered as descending. Display name is default ordered as
ascending.
type: str
choices:
- "displayName"
- "timeCreated"
sort_order:
description:
- The sort order to use (ASC or DESC).
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List db_systems
oci_mysql_db_system_facts:
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
- name: Get a specific db_system
oci_mysql_db_system_facts:
db_system_id: ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
db_systems:
description:
- List of DbSystem resources
returned: on success
type: complex
contains:
id:
description:
- The OCID of the DB System.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
display_name:
description:
- The user-friendly name for the DB System. It does not have to be unique.
returned: on success
type: string
sample: display_name_example
description:
description:
- User-provided data about the DB System.
returned: on success
type: string
sample: description_example
compartment_id:
description:
- The OCID of the compartment the DB System belongs in.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
subnet_id:
description:
- The OCID of the subnet the DB System is associated with.
returned: on success
type: string
sample: ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx
is_analytics_cluster_attached:
description:
- If the DB System has an Analytics Cluster attached.
returned: on success
type: bool
sample: true
analytics_cluster:
description:
- ""
returned: on success
type: complex
contains:
shape_name:
description:
- "The shape determines resources to allocate to the Analytics
Cluster nodes - CPU cores, memory."
returned: on success
type: string
sample: shape_name_example
cluster_size:
description:
- The number of analytics-processing compute instances, of the
specified shape, in the Analytics Cluster.
returned: on success
type: int
sample: 56
lifecycle_state:
description:
- The current state of the MySQL Analytics Cluster.
returned: on success
type: string
sample: lifecycle_state_example
time_created:
description:
- The date and time the Analytics Cluster was created, as described by L(RFC 3339,https://tools.ietf.org/rfc/rfc3339).
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_updated:
description:
- The time the Analytics Cluster was last updated, as described by L(RFC 3339,https://tools.ietf.org/rfc/rfc3339).
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
availability_domain:
description:
- The Availability Domain where the primary DB System should be located.
returned: on success
type: string
sample: Uocm:PHX-AD-1
fault_domain:
description:
- The name of the Fault Domain the DB System is located in.
returned: on success
type: string
sample: fault_domain_example
shape_name:
description:
- "The shape of the primary instances of the DB System. The shape
determines resources allocated to a DB System - CPU cores
and memory for VM shapes; CPU cores, memory and storage for non-VM
(or bare metal) shapes. To get a list of shapes, use (the
L(ListShapes,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/mysql/20181021/ShapeSummary/ListShapes) operation."
returned: on success
type: string
sample: shape_name_example
mysql_version:
description:
- Name of the MySQL Version in use for the DB System.
returned: on success
type: string
sample: mysql_version_example
backup_policy:
description:
- ""
returned: on success
type: complex
contains:
is_enabled:
description:
- If automated backups are enabled or disabled.
returned: on success
type: bool
sample: true
window_start_time:
description:
- The start of a 30-minute window of time in which daily, automated backups occur.
- "This should be in the format of the \\"Time\\" portion of an RFC3339-formatted timestamp. Any second or sub-second time data will be
truncated to zero."
- At some point in the window, the system may incur a brief service disruption as the backup is performed.
- "If not defined, a window is selected from the following Region-based time-spans:
- eu-frankfurt-1: 20:00 - 04:00 UTC
- us-ashburn-1: 03:00 - 11:00 UTC
- uk-london-1: 06:00 - 14:00 UTC
- ap-tokyo-1: 13:00 - 21:00
- us-phoenix-1: 06:00 - 14:00"
returned: on success
type: string
sample: window_start_time_example
retention_in_days:
description:
- The number of days automated backups are retained.
returned: on success
type: int
sample: 56
freeform_tags:
description:
- Simple key-value pair applied without any predefined name, type or scope. Exists for cross-compatibility only.
- Tags defined here will be copied verbatim as tags on the Backup resource created by this BackupPolicy.
- "Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Usage of predefined tag keys. These predefined keys are scoped to namespaces.
- Tags defined here will be copied verbatim as tags on the Backup resource created by this BackupPolicy.
- "Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
source:
description:
- ""
returned: on success
type: complex
contains:
source_type:
description:
- The specific source identifier.
returned: on success
type: string
sample: NONE
backup_id:
description:
- The OCID of the backup to be used as the source for the new DB System.
returned: on success
type: string
sample: ocid1.backup.oc1..xxxxxxEXAMPLExxxxxx
configuration_id:
description:
- The OCID of the Configuration to be used for Instances in this DB System.
returned: on success
type: string
sample: ocid1.configuration.oc1..xxxxxxEXAMPLExxxxxx
data_storage_size_in_gbs:
description:
- Initial size of the data volume in GiBs that will be created and attached.
returned: on success
type: int
sample: 56
hostname_label:
description:
- "The hostname for the primary endpoint of the DB System. Used for DNS.
The value is the hostname portion of the primary private IP's fully qualified domain name (FQDN)
(for example, \\"dbsystem-1\\" in FQDN \\"dbsystem-1.subnet123.vcn1.oraclevcn.com\\").
Must be unique across all VNICs in the subnet and comply with RFC 952 and RFC 1123."
returned: on success
type: string
sample: hostname_label_example
ip_address:
description:
- "The IP address the DB System is configured to listen on. A private
IP address of the primary endpoint of the DB System. Must be an
available IP address within the subnet's CIDR. This will be a
\\"dotted-quad\\" style IPv4 address."
returned: on success
type: string
sample: ip_address_example
port:
description:
- The port for primary endpoint of the DB System to listen on.
returned: on success
type: int
sample: 56
port_x:
description:
- The network port on which X Plugin listens for TCP/IP connections. This is the X Plugin equivalent of port.
returned: on success
type: int
sample: 56
endpoints:
description:
- The network endpoints available for this DB System.
returned: on success
type: complex
contains:
hostname:
description:
- The network address of the DB System.
returned: on success
type: string
sample: hostname_example
ip_address:
description:
- The IP address the DB System is configured to listen on.
returned: on success
type: string
sample: ip_address_example
port:
description:
- The port the MySQL instance listens on.
returned: on success
type: int
sample: 56
port_x:
description:
- The network port where to connect to use this endpoint using the X protocol.
returned: on success
type: int
sample: 56
modes:
description:
- The access modes from the client that this endpoint supports.
returned: on success
type: list
sample: []
status:
description:
- The state of the endpoints, as far as it can be seen from the DB System.
There may be some inconsistency with the actual state of the MySQL service.
returned: on success
type: string
sample: ACTIVE
status_details:
description:
- Additional information about the current endpoint status.
returned: on success
type: string
sample: status_details_example
channels:
description:
- A list with a summary of all the Channels attached to the DB System.
returned: on success
type: complex
contains:
id:
description:
- The OCID of the Channel.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
compartment_id:
description:
- The OCID of the compartment.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
is_enabled:
description:
- Whether the Channel has been enabled by the user.
returned: on success
type: bool
sample: true
source:
description:
- ""
returned: on success
type: complex
contains:
source_type:
description:
- The specific source identifier.
returned: on success
type: string
sample: MYSQL
hostname:
description:
- The network address of the MySQL instance.
returned: on success
type: string
sample: hostname_example
port:
description:
- The port the source MySQL instance listens on.
returned: on success
type: int
sample: 56
username:
description:
- The name of the replication user on the source MySQL instance.
The username has a maximum length of 96 characters. For more information,
please see the L(MySQL documentation,https://dev.mysql.com/doc/refman/8.0/en/change-master-to.html)
returned: on success
type: string
sample: username_example
ssl_mode:
description:
- The SSL mode of the Channel.
returned: on success
type: string
sample: VERIFY_IDENTITY
ssl_ca_certificate:
description:
- ""
returned: on success
type: complex
contains:
certificate_type:
description:
- The type of CA certificate.
returned: on success
type: string
sample: PEM
contents:
description:
- The string containing the CA certificate in PEM format.
returned: on success
type: string
sample: contents_example
target:
description:
- ""
returned: on success
type: complex
contains:
target_type:
description:
- The specific target identifier.
returned: on success
type: string
sample: DBSYSTEM
db_system_id:
description:
- The OCID of the source DB System.
returned: on success
type: string
sample: ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx
channel_name:
description:
- The case-insensitive name that identifies the replication channel. Channel names
must follow the rules defined for L(MySQL identifiers,https://dev.mysql.com/doc/refman/8.0/en/identifiers.html).
The names of non-Deleted Channels must be unique for each DB System.
returned: on success
type: string
sample: channel_name_example
applier_username:
description:
- The username for the replication applier of the target MySQL DB System.
returned: on success
type: string
sample: applier_username_example
lifecycle_state:
description:
- The state of the Channel.
returned: on success
type: string
sample: lifecycle_state_example
lifecycle_details:
description:
- A message describing the state of the Channel.
returned: on success
type: string
sample: lifecycle_details_example
display_name:
description:
- The user-friendly name for the Channel. It does not have to be unique.
returned: on success
type: string
sample: display_name_example
time_created:
description:
- The date and time the Channel was created, as described by L(RFC 3339,https://tools.ietf.org/rfc/rfc3339).
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_updated:
description:
- The time the Channel was last updated, as described by L(RFC 3339,https://tools.ietf.org/rfc/rfc3339).
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
freeform_tags:
description:
- "Simple key-value pair applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Usage of predefined tag keys. These predefined keys are scoped to namespaces.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
lifecycle_state:
description:
- The current state of the DB System.
returned: on success
type: string
sample: CREATING
lifecycle_details:
description:
- Additional information about the current lifecycleState.
returned: on success
type: string
sample: lifecycle_details_example
maintenance:
description:
- ""
returned: on success
type: complex
contains:
window_start_time:
description:
- The start time of the maintenance window.
- "This string is of the format: \\"{day-of-week} {time-of-day}\\"."
- "\\"{day-of-week}\\" is a case-insensitive string like \\"mon\\", \\"tue\\", &c."
- "\\"{time-of-day}\\" is the \\"Time\\" portion of an RFC3339-formatted timestamp. Any second or sub-second time data will be truncated
to zero."
returned: on success
type: string
sample: window_start_time_example
time_created:
description:
- The date and time the DB System was created.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_updated:
description:
- The time the DB System was last updated.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"description": "description_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx",
"is_analytics_cluster_attached": true,
"analytics_cluster": {
"shape_name": "shape_name_example",
"cluster_size": 56,
"lifecycle_state": "lifecycle_state_example",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00"
},
"availability_domain": "Uocm:PHX-AD-1",
"fault_domain": "fault_domain_example",
"shape_name": "shape_name_example",
"mysql_version": "mysql_version_example",
"backup_policy": {
"is_enabled": true,
"window_start_time": "window_start_time_example",
"retention_in_days": 56,
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
},
"source": {
"source_type": "NONE",
"backup_id": "ocid1.backup.oc1..xxxxxxEXAMPLExxxxxx"
},
"configuration_id": "ocid1.configuration.oc1..xxxxxxEXAMPLExxxxxx",
"data_storage_size_in_gbs": 56,
"hostname_label": "hostname_label_example",
"ip_address": "ip_address_example",
"port": 56,
"port_x": 56,
"endpoints": [{
"hostname": "hostname_example",
"ip_address": "ip_address_example",
"port": 56,
"port_x": 56,
"modes": [],
"status": "ACTIVE",
"status_details": "status_details_example"
}],
"channels": [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"is_enabled": true,
"source": {
"source_type": "MYSQL",
"hostname": "hostname_example",
"port": 56,
"username": "username_example",
"ssl_mode": "VERIFY_IDENTITY",
"ssl_ca_certificate": {
"certificate_type": "PEM",
"contents": "contents_example"
}
},
"target": {
"target_type": "DBSYSTEM",
"db_system_id": "ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx",
"channel_name": "channel_name_example",
"applier_username": "applier_username_example"
},
"lifecycle_state": "lifecycle_state_example",
"lifecycle_details": "lifecycle_details_example",
"display_name": "display_name_example",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
}],
"lifecycle_state": "CREATING",
"lifecycle_details": "lifecycle_details_example",
"maintenance": {
"window_start_time": "window_start_time_example"
},
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.mysql import DbSystemClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class MysqlDbSystemFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"db_system_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_db_system,
db_system_id=self.module.params.get("db_system_id"),
)
def list_resources(self):
optional_list_method_params = [
"is_analytics_cluster_attached",
"db_system_id",
"display_name",
"lifecycle_state",
"configuration_id",
"is_up_to_date",
"sort_by",
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_db_systems,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
MysqlDbSystemFactsHelperCustom = get_custom_class("MysqlDbSystemFactsHelperCustom")
class ResourceFactsHelper(MysqlDbSystemFactsHelperCustom, MysqlDbSystemFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
db_system_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
is_analytics_cluster_attached=dict(type="bool"),
display_name=dict(aliases=["name"], type="str"),
lifecycle_state=dict(
type="str",
choices=[
"CREATING",
"ACTIVE",
"INACTIVE",
"UPDATING",
"DELETING",
"DELETED",
"FAILED",
],
),
configuration_id=dict(type="str"),
is_up_to_date=dict(type="bool"),
sort_by=dict(type="str", choices=["displayName", "timeCreated"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="db_system",
service_client_class=DbSystemClient,
namespace="mysql",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(db_systems=result)
if __name__ == "__main__":
main()
| 41.245211
| 160
| 0.507168
|
ca4996994e1f60cce2245ca1aa2bdda3a183044b
| 738
|
py
|
Python
|
tools/distrib/python/grpcio_tools/grpc_version.py
|
wjbbupt/grpc
|
75f71aa4177f65de34b5d2674d83552f28bc0a07
|
[
"Apache-2.0"
] | 1
|
2021-03-20T03:21:57.000Z
|
2021-03-20T03:21:57.000Z
|
tools/distrib/python/grpcio_tools/grpc_version.py
|
wjbbupt/grpc
|
75f71aa4177f65de34b5d2674d83552f28bc0a07
|
[
"Apache-2.0"
] | null | null | null |
tools/distrib/python/grpcio_tools/grpc_version.py
|
wjbbupt/grpc
|
75f71aa4177f65de34b5d2674d83552f28bc0a07
|
[
"Apache-2.0"
] | 1
|
2021-05-21T14:51:45.000Z
|
2021-05-21T14:51:45.000Z
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
VERSION = '1.37.0.dev0'
PROTOBUF_VERSION = '3.15.2'
| 38.842105
| 106
| 0.761518
|
665ec765dace39dd418d89982c5453e67dc6398f
| 4,044
|
py
|
Python
|
printing/src/printing_manager/console.py
|
hivesolutions/colony_plugins
|
cfd8fb2ac58037e01002966704b8a642feb37895
|
[
"Apache-1.1"
] | 1
|
2016-10-30T09:51:06.000Z
|
2016-10-30T09:51:06.000Z
|
printing/src/printing_manager/console.py
|
hivesolutions/colony_plugins
|
cfd8fb2ac58037e01002966704b8a642feb37895
|
[
"Apache-1.1"
] | 1
|
2015-12-29T18:51:07.000Z
|
2015-12-29T18:51:07.000Z
|
printing/src/printing_manager/console.py
|
hivesolutions/colony_plugins
|
cfd8fb2ac58037e01002966704b8a642feb37895
|
[
"Apache-1.1"
] | 1
|
2018-01-26T12:54:13.000Z
|
2018-01-26T12:54:13.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Colony Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Colony Framework.
#
# Hive Colony Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Colony Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Colony Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import colony
CONSOLE_EXTENSION_NAME = "printing"
""" The console extension name """
class ConsolePrintingManager(colony.System):
"""
The console printing manager class, responsible
for the handling of the printing commands.
"""
def __init__(self, plugin):
colony.System.__init__(self, plugin)
self.commands_map = self.__generate_commands_map()
def get_console_extension_name(self):
return CONSOLE_EXTENSION_NAME
def get_commands_map(self):
return self.commands_map
def process_print_test(
self,
arguments,
arguments_map,
output_method,
console_context
):
printing_manager = self.plugin.system
printing_manager.print_test()
def process_print_test_image(
self,
arguments,
arguments_map,
output_method,
console_context
):
printing_manager = self.plugin.system
printing_manager.print_test_image()
def process_print_printing_language(
self,
arguments,
arguments_map,
output_method,
console_context,
file_path
):
        # retrieves the provided file path value and reads its contents,
        # then closes the file; these contents are the ones that are going
        # to be used for the printing process of the file
file_path = arguments_map.get("file_path", None)
file = open(file_path, "r")
try: contents = file.read()
finally: file.close()
# retrieves the reference to the printing manager instance
# and runs the printing process for the provided contents
printing_manager = self.plugin.system
printing_manager.print_printing_language(contents)
def __generate_commands_map(self):
return {
"print_test" : {
"handler" : self.process_print_test,
"description" : "prints a test page"
},
"print_image" : {
"handler" : self.process_print_test_image,
"description" : "prints a test page with an image"
},
"print_language" : {
"handler" : self.process_print_test_image,
"description" : "prints the page described in the file of the given file path",
"arguments" : [
{
"name" : "file_path",
"description" : "path to the file name to be printed",
"values" : str,
"mandatory" : False
}
]
}
}
| 32.352
| 96
| 0.607072
|
2835ebc0d4fcde493b00e4b7706c9269f8f1b8a6
| 2,162
|
py
|
Python
|
Login/app.py
|
alexis51151/FlaskEntrance
|
d0942467fffecf02e9da3cd49679a16545f24587
|
[
"Apache-2.0"
] | null | null | null |
Login/app.py
|
alexis51151/FlaskEntrance
|
d0942467fffecf02e9da3cd49679a16545f24587
|
[
"Apache-2.0"
] | null | null | null |
Login/app.py
|
alexis51151/FlaskEntrance
|
d0942467fffecf02e9da3cd49679a16545f24587
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template, flash, redirect, url_for
from flask_login import LoginManager, login_required, login_user # authentication
from forms import LoginForm, RegistrationForm # secure login form
from flask_debugtoolbar import DebugToolbarExtension # for debug
# Imports from project files
from models import User, db
login_manager = LoginManager()
toolbar = DebugToolbarExtension()
def create_app():
# create a Flask app
app = Flask(__name__)
app.config['SECRET_KEY'] = 'amazing-secret-key'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
app.config['SQLALCHEMY_ECHO'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# init user authentication system
login_manager.init_app(app)
login_manager.login_view = 'login'
# init debug toolbar
toolbar.init_app(app)
# init the database
db.init_app(app)
with app.app_context():
db.create_all()
return app
app = create_app()
@login_manager.user_loader
def user_loader(user_id):
return User.query.get(user_id)
"""All the routes are listed below."""
"""Login page"""
@app.route('/', methods=['GET', 'POST'])
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and user.check_password(form.password.data):
user.authenticated = True
login_user(user)
flash('Logged in successfully.')
return redirect("google.com")
return render_template('login.html', form=form)
"""Registration page"""
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data)
user.set_password(form.password.data)
print(user)
db.session.add(user)
db.session.commit()
flash('Congratulations, you are now a registered user!')
return redirect(url_for('login'))
return render_template('register.html', form=form)
if __name__ == '__main__':
app.run()
| 26.365854
| 82
| 0.679926
|
574f420acc03054be825f5636ecfea911c2ea9df
| 190
|
py
|
Python
|
pkg/formatters/simple.py
|
zhutao100/markdown_articles_tool
|
9e7476567ac7b5c5cdf2dfd235f0663260d96aca
|
[
"MIT"
] | 41
|
2020-09-22T12:21:24.000Z
|
2022-03-27T06:54:45.000Z
|
pkg/formatters/simple.py
|
zhutao100/markdown_articles_tool
|
9e7476567ac7b5c5cdf2dfd235f0663260d96aca
|
[
"MIT"
] | 11
|
2020-11-10T02:40:08.000Z
|
2022-03-24T16:20:31.000Z
|
pkg/formatters/simple.py
|
zhutao100/markdown_articles_tool
|
9e7476567ac7b5c5cdf2dfd235f0663260d96aca
|
[
"MIT"
] | 9
|
2021-02-20T00:23:06.000Z
|
2022-03-21T11:39:08.000Z
|
"""
Simple formatter.
"""
class SimpleFormatter:
"""
Writes lines, "as is".
"""
format = 'md'
@staticmethod
def write(lines):
return lines.encode('utf8')
| 11.875
| 35
| 0.547368
|
f05c5ea6f7b37e1748e5d6c249a13e1427557056
| 876
|
py
|
Python
|
doc/ioman/server.py
|
nandun/gxp
|
8dd9d396102e254cb4712fe572b64e398a5f069b
|
[
"BSD-3-Clause"
] | 2
|
2020-03-16T11:37:13.000Z
|
2020-05-15T10:10:56.000Z
|
doc/ioman/server.py
|
nandun/gxp
|
8dd9d396102e254cb4712fe572b64e398a5f069b
|
[
"BSD-3-Clause"
] | null | null | null |
doc/ioman/server.py
|
nandun/gxp
|
8dd9d396102e254cb4712fe572b64e398a5f069b
|
[
"BSD-3-Clause"
] | 1
|
2017-05-12T02:42:35.000Z
|
2017-05-12T02:42:35.000Z
|
import socket,sys
sys.path.append("../..")
import ioman
m = ioman.ioman()
ch_sock = m.make_server_sock(socket.AF_INET, socket.SOCK_STREAM, ("",0), 1)
ip,port = ch_sock.getsockname()
print "server listening on %s, please connect to it (perhaps by 'nc localhost %d')" % (port, port)
# wait for connection to come
connected = 0
disconnected = 0
while connected == 0 or disconnected < connected:
print "process_an_event"
ch,ev = m.process_an_event()
print ch,ev
if isinstance(ev, ioman.aevent):
print "got connection. add to watch list"
rch,wch = m.add_sock(ev.new_so)
rch.set_expected(["\n"])
connected += 1
elif isinstance(ev, ioman.revent):
print "got from client (kind=%d) [%s]" % (ev.kind, ev.data)
if ev.kind == ioman.ch_event.EOF:
disconnected += 1
else:
assert 0,ev
| 27.375
| 98
| 0.631279
|
68f6ea239fb4d1dada881ddefe09ea892649d1f5
| 3,886
|
py
|
Python
|
echo360/course.py
|
RenWal/echo360
|
076368f130a7458373d8ec15bff4a0bca8897449
|
[
"MIT"
] | null | null | null |
echo360/course.py
|
RenWal/echo360
|
076368f130a7458373d8ec15bff4a0bca8897449
|
[
"MIT"
] | null | null | null |
echo360/course.py
|
RenWal/echo360
|
076368f130a7458373d8ec15bff4a0bca8897449
|
[
"MIT"
] | null | null | null |
import json
import sys
import selenium
import logging
from echo360.videos import EchoVideos
_LOGGER = logging.getLogger(__name__)
class EchoCourse(object):
def __init__(self, uuid, hostname=None):
self._course_id = ""
self._course_name = ""
self._uuid = uuid
self._videos = None
self._driver = None
if hostname is None:
self._hostname = "https://view.streaming.sydney.edu.au:8443"
else:
self._hostname = hostname
def get_videos(self):
if self._driver is None:
self._blow_up("webdriver not set yet!!!", "")
if not self._videos:
try:
course_data_json = self._get_course_data()
videos_json = course_data_json["section"]["presentations"]["pageContents"]
self._videos = EchoVideos(videos_json, self._driver)
except KeyError as e:
self._blow_up("Unable to parse course videos from JSON (course_data)", e)
except selenium.common.exceptions.NoSuchElementException as e:
self._blow_up("selenium cannot find given elements", e)
return self._videos
@property
def uuid(self):
return self._uuid
@property
def hostname(self):
return self._hostname
@property
def url(self):
return "{}/ess/portal/section/{}".format(self._hostname, self._uuid)
@property
def video_url(self):
return "{}/ess/client/api/sections/{}/section-data.json?pageSize=100".format(self._hostname, self._uuid)
@property
def course_id(self):
if self._course_id == "":
try:
# driver = webdriver.PhantomJS() #TODO Redo this. Maybe use a singleton factory to request the lecho360 driver?s
self.driver.get(self.url) # Initialize to establish the 'anon' cookie that Echo360 sends.
self.driver.get(self.video_url)
course_data_json = self._get_course_data()
self._course_id = course_data_json["section"]["course"]["identifier"]
self._course_name = course_data_json["section"]["course"]["name"]
except KeyError as e:
self._blow_up("Unable to parse course id (e.g. CS473) from JSON (course_data)", e)
if type(self._course_id) != str:
# it's type unicode for python2
return self._course_id.encode('utf-8')
return self._course_id
@property
def course_name(self):
if self._course_name == "":
# trigger getting course_id to get course name as well
self.course_id
if type(self._course_name) != str:
# it's type unicode for python2
return self._course_name.encode('utf-8')
return self._course_name
@property
def driver(self):
if self._driver is None:
self._blow_up("webdriver not set yet!!!", "")
return self._driver
def _get_course_data(self):
try:
self.driver.get(self.video_url)
_LOGGER.debug("Dumping course page at %s: %s",
self.video_url,
self._driver.page_source)
# self.driver.get_screenshot_as_file('./2.png')
# print(dir(self.driver))
# print('ha')
# print(self.driver.page_source)
json_str = self.driver.find_element_by_tag_name("pre").text
print(json_str)
return json.loads(json_str)
except ValueError as e:
self._blow_up("Unable to retrieve JSON (course_data) from url", e)
def set_driver(self, driver):
self._driver = driver
def _blow_up(self, msg, e):
print(msg)
print("Exception: {}".format(str(e)))
sys.exit(1)
| 34.087719
| 128
| 0.585178
|
494b83673231db880457d034807faecfa24ccdaf
| 1,006
|
py
|
Python
|
project/manage.py
|
ardikabs/dnsmanager
|
4d2f302ea9f54fd4d5416328dc46a1c47b573e5b
|
[
"MIT"
] | 1
|
2019-01-15T10:33:10.000Z
|
2019-01-15T10:33:10.000Z
|
project/manage.py
|
ardikabs/dnsmanager
|
4d2f302ea9f54fd4d5416328dc46a1c47b573e5b
|
[
"MIT"
] | null | null | null |
project/manage.py
|
ardikabs/dnsmanager
|
4d2f302ea9f54fd4d5416328dc46a1c47b573e5b
|
[
"MIT"
] | null | null | null |
import os
import unittest
from server import make_server
from server.app import db
from server.main.models import *
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
app = make_server("default")
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command("db", MigrateCommand)
@manager.command
def recreatedb():
db.drop_all()
db.create_all()
db.session.commit()
@manager.command
def test():
"""Runs the unit tests."""
tests = unittest.TestLoader().discover('server/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
@manager.command
def seeding():
RecordTypeModel.seeding()
@manager.command
def run():
app.run(debug=True, host="0.0.0.0", port=5000)
if __name__ == "__main__":
manager.run()
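# Hedged usage illustration (not part of the original file), based on the
# @manager.command functions defined above:
#   python manage.py recreatedb   # drop and recreate all tables
#   python manage.py seeding      # seed the record type table
#   python manage.py test         # run the unit tests under server/tests
#   python manage.py run          # serve the app on 0.0.0.0:5000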
| 22.863636
| 78
| 0.72167
|
7462fb7bf64a8f4181ced6c4fb74cdd978a6d338
| 4,052
|
py
|
Python
|
alipay/aop/api/request/AlipayPcreditHuabeiEnterpriseReimburseSyncRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/request/AlipayPcreditHuabeiEnterpriseReimburseSyncRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/request/AlipayPcreditHuabeiEnterpriseReimburseSyncRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayPcreditHuabeiEnterpriseReimburseSyncModel import AlipayPcreditHuabeiEnterpriseReimburseSyncModel
class AlipayPcreditHuabeiEnterpriseReimburseSyncRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayPcreditHuabeiEnterpriseReimburseSyncModel):
self._biz_content = value
else:
self._biz_content = AlipayPcreditHuabeiEnterpriseReimburseSyncModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.pcredit.huabei.enterprise.reimburse.sync'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 27.944828
| 148
| 0.65153
|
e07435d96097faedd51d0c9c7ebd3d328e0c66fc
| 2,080
|
py
|
Python
|
lib/surface/ai_platform/versions/describe.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/surface/ai_platform/versions/describe.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/ai_platform/versions/describe.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ai-platform versions describe command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.ml_engine import versions_api
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import endpoint_util
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.command_lib.ml_engine import region_util
from googlecloudsdk.command_lib.ml_engine import versions_util
def _AddDescribeArgs(parser):
flags.GetModelName(positional=False, required=True).AddToParser(parser)
flags.GetRegionArg(include_global=True).AddToParser(parser)
flags.VERSION_NAME.AddToParser(parser)
def _Run(args):
region = region_util.GetRegion(args)
with endpoint_util.MlEndpointOverrides(region=region):
client = versions_api.VersionsClient()
return versions_util.Describe(client, args.version, model=args.model)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Describe(base.DescribeCommand):
"""Describe an existing AI Platform version."""
@staticmethod
def Args(parser):
_AddDescribeArgs(parser)
def Run(self, args):
return _Run(args)
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class DescribeBeta(base.DescribeCommand):
"""Describe an existing AI Platform version."""
@staticmethod
def Args(parser):
_AddDescribeArgs(parser)
def Run(self, args):
return _Run(args)
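# Hedged usage illustration (not part of the original source): this surface is
# exposed through gcloud roughly as
#   gcloud ai-platform versions describe VERSION_NAME --model=MODEL_NAME [--region=REGION]
# where VERSION_NAME, MODEL_NAME and REGION are placeholders.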
| 32
| 74
| 0.786058
|
7a72997c3007231b0069cfaf8c558174c8bb46a9
| 396
|
py
|
Python
|
day5/day5a.py
|
lehoczkics/aoc2020
|
43e640694d05ffbb47568254aeba6d2b2a89ab04
|
[
"Unlicense"
] | null | null | null |
day5/day5a.py
|
lehoczkics/aoc2020
|
43e640694d05ffbb47568254aeba6d2b2a89ab04
|
[
"Unlicense"
] | null | null | null |
day5/day5a.py
|
lehoczkics/aoc2020
|
43e640694d05ffbb47568254aeba6d2b2a89ab04
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
import os
mymax = 0
def btd(n):
return int(n,2)
with open("input1") as f:
for line in f:
# print(line)
rowstr = line[0:7]
colstr = line[7:11]
# print(rowstr, "-", colstr)
# print(btd(rowstr), "-", btd(colstr))
seatid = 8*btd(rowstr) + btd(colstr)
        if seatid > mymax:
mymax = seatid
print(mymax)
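# Hedged worked example (not part of the original solution), assuming the input
# lines were already translated to binary digits as btd() expects: the AoC
# sample pass FBFBBFFRLR maps to row "0101100" (44) and column "101" (5),
# giving a seat ID of 8*44 + 5 = 357.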
| 18
| 45
| 0.525253
|
20412dabe1d517197e3583ddb6069196a7f4fdf1
| 3,537
|
py
|
Python
|
ntpclient.py
|
rsmith-nl/scripts
|
4ad489bc637f20f2865c249025b24cb8aad887ca
|
[
"MIT"
] | 25
|
2016-02-24T22:55:30.000Z
|
2022-01-18T08:39:09.000Z
|
ntpclient.py
|
rsmith-nl/scripts
|
4ad489bc637f20f2865c249025b24cb8aad887ca
|
[
"MIT"
] | 4
|
2019-10-10T17:59:31.000Z
|
2020-09-04T08:31:15.000Z
|
ntpclient.py
|
rsmith-nl/scripts
|
4ad489bc637f20f2865c249025b24cb8aad887ca
|
[
"MIT"
] | 11
|
2016-01-09T18:59:21.000Z
|
2020-10-27T07:00:10.000Z
|
#!/usr/bin/env python
# file: ntpclient.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Copyright © 2018 R.F. Smith <rsmith@xs4all.nl>.
# SPDX-License-Identifier: MIT
# Created: 2017-11-16 19:33:50 +0100
# Last modified: 2020-04-01T20:30:30+0200
"""
Simple NTP query program. This program does not strive for high accuracy.
Use this only as a client, never for a time server!
"""
from datetime import datetime
from socket import socket, AF_INET, SOCK_DGRAM
import argparse
import os
import struct
import sys
import time
__version__ = "2020.04.01"
def main():
"""
Entry point for ntpclient.py.
"""
args = setup()
t1 = time.clock_gettime(time.CLOCK_REALTIME)
ntptime = get_ntp_time(args.server)
t4 = time.clock_gettime(time.CLOCK_REALTIME)
# It is not guaranteed that the NTP time is *exactly* in the middle of both
# local times. But it is a reasonable simplification.
roundtrip = round(t4 - t1, 4)
localtime = (t1 + t4) / 2
diff = localtime - ntptime
res = None
if os.geteuid() == 0:
time.clock_settime(time.CLOCK_REALTIME, ntptime)
res = "Time set to NTP time."
localtime = datetime.fromtimestamp(localtime)
ntptime = datetime.fromtimestamp(ntptime)
if not args.quiet:
print(f"Using server {args.server}.")
print(f"NTP call took approximately {roundtrip} s.")
print("Local time value:", localtime.strftime("%a %b %d %H:%M:%S.%f %Y."))
print(
"NTP time value:",
ntptime.strftime("%a %b %d %H:%M:%S.%f %Y."),
"±",
roundtrip / 2,
"s.",
)
print(f"Local time - ntp time: {diff:.6f} s.")
if res:
print(res)
def setup():
"""Process command-line arguments."""
if "NTPSERVER" in os.environ:
defaultserver = os.environ["NTPSERVER"]
else:
defaultserver = "pool.ntp.org"
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-v", "--version", action="version", version=__version__)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
default=False,
help="Suppress output (default: no)",
)
parser.add_argument(
"-s",
"--server",
type=str,
default=defaultserver,
help=f"NTP server to use (default: “{defaultserver}”)",
)
args = parser.parse_args(sys.argv[1:])
return args
# See e.g. # https://www.cisco.com/c/en/us/about/press/internet-protocol-journal/back-issues/table-contents-58/154-ntp.html
# From left to right:
# * No leap second adjustment = 0 (2 bits)
# * protocol version 3 (3 bits)
# * client packet = 3 (3 bits)
# In [1]: hex((0 & 0b11) << 6 | (3 & 0b111) << 3 | (3 & 0b111))
# Out[1]: '0x1b'
_query = b"\x1b" + 47 * b"\0"
def get_ntp_time(host="pool.ntp.org", port=123):
fmt = "!12I"
with socket(AF_INET, SOCK_DGRAM) as s:
s.sendto(_query, (host, port))
msg, address = s.recvfrom(1024)
unpacked = struct.unpack(fmt, msg[0 : struct.calcsize(fmt)])
# Return the average of receive and transmit timestamps.
# Note that 2208988800 is the difference in seconds between the
# UNIX epoch 1970-1-1 and the NTP epoch 1900-1-1.
# See: (datetime.datetime(1970,1,1) - datetime.datetime(1900,1,1)).total_seconds()
t2 = unpacked[8] + float(unpacked[9]) / 2 ** 32 - 2208988800
t3 = unpacked[10] + float(unpacked[11]) / 2 ** 32 - 2208988800
return (t2 + t3) / 2
if __name__ == "__main__":
main()
| 31.300885
| 123
| 0.620865
|
9a9ec65f26b15acbb68a9c5ce12df007438f5ae1
| 1,276
|
py
|
Python
|
Leetcode/src/Linked List/Palindrome_LinkedList.py
|
QuDong/Algorithm4
|
c15c27653d860a1cd90a42cf97f7586ced12b48f
|
[
"MIT"
] | 6
|
2017-07-07T08:10:42.000Z
|
2019-12-25T21:42:40.000Z
|
Leetcode/src/Linked List/Palindrome_LinkedList.py
|
QuDong/Algorithm4
|
c15c27653d860a1cd90a42cf97f7586ced12b48f
|
[
"MIT"
] | null | null | null |
Leetcode/src/Linked List/Palindrome_LinkedList.py
|
QuDong/Algorithm4
|
c15c27653d860a1cd90a42cf97f7586ced12b48f
|
[
"MIT"
] | 1
|
2021-08-22T06:43:47.000Z
|
2021-08-22T06:43:47.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 21 10:20:24 2016
@author: dong.qu
"""
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(2)
head.next.next.next = ListNode(1)
#head.next.next.next.next = ListNode(1)
def printList(head):
temp = head
while temp:
print(temp.val, end=', ')
temp = temp.next
print()
printList(head)
headr = ListNode(1)
headr.next = ListNode(2)
headr.next.next = ListNode(3)
def reverseLinkedList(head):
pre = None
while head:
cur = head
head = head.next
cur.next = pre
pre = cur
return pre
def isPalindrome(head):
    # count the number of nodes
    l = 0
    temp = head
    while temp:
        temp = temp.next
        l += 1
    # skip the first half (and the middle node for odd lengths)
    if l % 2 == 0:
        halfl = l // 2
    else:
        halfl = l // 2 + 1
    pre = None
    node = head
    for i in range(halfl):
        node = node.next
    while node:  # reverse the 2nd half list
        cur = node
        node = node.next
        cur.next = pre
        pre = cur
    # compare the reversed 2nd half against the 1st half
    while pre and head:
        if pre.val != head.val:
            return False
        pre = pre.next
        head = head.next
    return True
print(isPalindrome(head))
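# Hedged trace (not part of the original file): for the 1->2->2->1 list built
# above, halfl is 2, the second half 2->1 is reversed into 1->2, and the
# pairwise comparison against the original head values (1, 2) passes, so the
# call above prints True.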
| 19.044776
| 43
| 0.55094
|
d4294788fdda66c3d99a1e08534ae99be0caf812
| 5,632
|
py
|
Python
|
homeassistant/components/satel_integra/alarm_control_panel.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 23
|
2017-11-15T21:03:53.000Z
|
2021-03-29T21:33:48.000Z
|
homeassistant/components/satel_integra/alarm_control_panel.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 9
|
2022-01-27T06:32:10.000Z
|
2022-03-31T07:07:51.000Z
|
homeassistant/components/satel_integra/alarm_control_panel.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 10
|
2018-01-01T00:12:51.000Z
|
2021-12-21T23:08:05.000Z
|
"""Support for Satel Integra alarm, using ETHM module."""
import asyncio
from collections import OrderedDict
import logging
from satel_integra.satel_integra import AlarmState
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
CONF_ARM_HOME_MODE,
CONF_DEVICE_PARTITIONS,
CONF_ZONE_NAME,
DATA_SATEL,
SIGNAL_PANEL_MESSAGE,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up for Satel Integra alarm panels."""
if not discovery_info:
return
configured_partitions = discovery_info[CONF_DEVICE_PARTITIONS]
controller = hass.data[DATA_SATEL]
devices = []
for partition_num, device_config_data in configured_partitions.items():
zone_name = device_config_data[CONF_ZONE_NAME]
arm_home_mode = device_config_data.get(CONF_ARM_HOME_MODE)
device = SatelIntegraAlarmPanel(
controller, zone_name, arm_home_mode, partition_num
)
devices.append(device)
async_add_entities(devices)
class SatelIntegraAlarmPanel(alarm.AlarmControlPanel):
"""Representation of an AlarmDecoder-based alarm panel."""
def __init__(self, controller, name, arm_home_mode, partition_id):
"""Initialize the alarm panel."""
self._name = name
self._state = None
self._arm_home_mode = arm_home_mode
self._partition_id = partition_id
self._satel = controller
async def async_added_to_hass(self):
"""Update alarm status and register callbacks for future updates."""
_LOGGER.debug("Starts listening for panel messages")
self._update_alarm_status()
async_dispatcher_connect(
self.hass, SIGNAL_PANEL_MESSAGE, self._update_alarm_status
)
@callback
def _update_alarm_status(self):
"""Handle alarm status update."""
state = self._read_alarm_state()
_LOGGER.debug("Got status update, current status: %s", state)
if state != self._state:
self._state = state
self.async_schedule_update_ha_state()
else:
_LOGGER.debug("Ignoring alarm status message, same state")
def _read_alarm_state(self):
"""Read current status of the alarm and translate it into HA status."""
# Default - disarmed:
hass_alarm_status = STATE_ALARM_DISARMED
if not self._satel.connected:
return None
state_map = OrderedDict(
[
(AlarmState.TRIGGERED, STATE_ALARM_TRIGGERED),
(AlarmState.TRIGGERED_FIRE, STATE_ALARM_TRIGGERED),
(AlarmState.ENTRY_TIME, STATE_ALARM_PENDING),
(AlarmState.ARMED_MODE3, STATE_ALARM_ARMED_HOME),
(AlarmState.ARMED_MODE2, STATE_ALARM_ARMED_HOME),
(AlarmState.ARMED_MODE1, STATE_ALARM_ARMED_HOME),
(AlarmState.ARMED_MODE0, STATE_ALARM_ARMED_AWAY),
(AlarmState.EXIT_COUNTDOWN_OVER_10, STATE_ALARM_PENDING),
(AlarmState.EXIT_COUNTDOWN_UNDER_10, STATE_ALARM_PENDING),
]
)
_LOGGER.debug("State map of Satel: %s", self._satel.partition_states)
for satel_state, ha_state in state_map.items():
if (
satel_state in self._satel.partition_states
and self._partition_id in self._satel.partition_states[satel_state]
):
hass_alarm_status = ha_state
break
return hass_alarm_status
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def code_format(self):
"""Return the regex for code format or None if no code is required."""
return alarm.FORMAT_NUMBER
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
if not code:
_LOGGER.debug("Code was empty or None")
return
clear_alarm_necessary = self._state == STATE_ALARM_TRIGGERED
_LOGGER.debug("Disarming, self._state: %s", self._state)
await self._satel.disarm(code, [self._partition_id])
if clear_alarm_necessary:
# Wait 1s before clearing the alarm
await asyncio.sleep(1)
await self._satel.clear_alarm(code, [self._partition_id])
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
_LOGGER.debug("Arming away")
if code:
await self._satel.arm(code, [self._partition_id])
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
_LOGGER.debug("Arming home")
if code:
await self._satel.arm(code, [self._partition_id], self._arm_home_mode)
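# Hedged illustration (not part of the original integration): because the
# state_map in _read_alarm_state() is an OrderedDict and the lookup breaks on
# the first match, a partition that is both TRIGGERED and ARMED_MODE0 resolves
# to STATE_ALARM_TRIGGERED rather than STATE_ALARM_ARMED_AWAY.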
| 32.367816
| 86
| 0.665305
|
387dd2f5f55960e8757d847c6e921a5678e17113
| 2,457
|
py
|
Python
|
tools/deploy/caffe_export.py
|
UU-tracktech/fast-reid
|
8e367315fc3b95d326fc37a9bde7b83f90eaf825
|
[
"Apache-2.0"
] | null | null | null |
tools/deploy/caffe_export.py
|
UU-tracktech/fast-reid
|
8e367315fc3b95d326fc37a9bde7b83f90eaf825
|
[
"Apache-2.0"
] | null | null | null |
tools/deploy/caffe_export.py
|
UU-tracktech/fast-reid
|
8e367315fc3b95d326fc37a9bde7b83f90eaf825
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
import argparse
import logging
import sys
import torch
sys.path.append('.')
import pytorch_to_caffe
from processor.pipeline.reidentification.fastreid.fastreid.config import get_cfg
from processor.pipeline.reidentification.fastreid.fastreid.modeling.meta_arch import build_model
from processor.pipeline.reidentification.fastreid.fastreid.utils.file_io import PathManager
from processor.pipeline.reidentification.fastreid.fastreid.utils.checkpoint import Checkpointer
from processor.pipeline.reidentification.fastreid.fastreid.utils.logger import setup_logger
# import some modules added in project like this below
# sys.path.append("projects/PartialReID")
# from partialreid import *
setup_logger(name='fastreid')
logger = logging.getLogger("fastreid.caffe_export")
def setup_cfg(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Convert Pytorch to Caffe model")
parser.add_argument(
"--config-file",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--name",
default="baseline",
help="name for converted model"
)
parser.add_argument(
"--output",
default='caffe_model',
help='path to save converted caffe model'
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
if __name__ == '__main__':
args = get_parser().parse_args()
cfg = setup_cfg(args)
cfg.defrost()
cfg.MODEL.BACKBONE.PRETRAIN = False
cfg.MODEL.HEADS.POOL_LAYER = "Identity"
cfg.MODEL.BACKBONE.WITH_NL = False
model = build_model(cfg)
Checkpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
logger.info(model)
inputs = torch.randn(1, 3, cfg.INPUT.SIZE_TEST[0], cfg.INPUT.SIZE_TEST[1]).to(torch.device(cfg.MODEL.DEVICE))
PathManager.mkdirs(args.output)
pytorch_to_caffe.trans_net(model, inputs, args.name)
pytorch_to_caffe.save_prototxt(f"{args.output}/{args.name}.prototxt")
pytorch_to_caffe.save_caffemodel(f"{args.output}/{args.name}.caffemodel")
logger.info(f"Export caffe model in {args.output} sucessfully!")
| 28.569767
| 113
| 0.715914
|
353ae9f0ccfd6fb47bd14f6304a4a3ff1c2ea0a4
| 4,849
|
py
|
Python
|
marketdata/history.py
|
Haynie-Research-and-Development/stock-data
|
7bcef34cbee73d66fd222bfd3d562ef0409108c9
|
[
"Apache-2.0"
] | 9
|
2020-12-09T08:31:16.000Z
|
2021-11-28T08:47:49.000Z
|
marketdata/history.py
|
Haynie-Research-and-Development/stock-data
|
7bcef34cbee73d66fd222bfd3d562ef0409108c9
|
[
"Apache-2.0"
] | 1
|
2021-01-21T22:10:17.000Z
|
2021-01-21T22:10:17.000Z
|
marketdata/history.py
|
Haynie-Research-and-Development/stock-data
|
7bcef34cbee73d66fd222bfd3d562ef0409108c9
|
[
"Apache-2.0"
] | 1
|
2020-08-20T20:35:33.000Z
|
2020-08-20T20:35:33.000Z
|
#**********************************************************
#* CATEGORY SOFTWARE
#* GROUP MARKET DATA
#* AUTHOR LANCE HAYNIE <LANCE@HAYNIEMAIL.COM>
#* FILE HISTORY.PY
#**********************************************************
#ETL Stock Market Data
#Copyright 2020 Haynie IPHC, LLC
#Developed by Haynie Research & Development, LLC
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
import urllib.request as urlreq
import json
import pandas as pd
import logging
import requests
from .settings import settings_data
from .database import db,dw
from .functions import numtest
logging.basicConfig(format='%(levelname)s - %(message)s', level=settings_data['global']['loglevel'])
api_base = settings_data['datasources']['IEX']['url']
api_key = settings_data['datasources']['IEX']['key']
session = requests.Session()
def history(uuid,symbol,api_date,sql_date):
logging.debug("Processing historical data for: " + symbol + ".")
cursor = db.cursor()
try:
api = f"{api_base}/stock/{symbol}/chart/date/{api_date}?chartByDay=true&token={api_key}"
#response_data = json.loads(urlreq.urlopen(api).read().decode())
response_data = session.get(api).json()
open = numtest(response_data[0]['open'])
high = numtest(response_data[0]['high'])
low = numtest(response_data[0]['low'])
close = numtest(response_data[0]['close'])
volume = numtest(response_data[0]['volume'])
uOpen = numtest(response_data[0]['uOpen'])
uHigh = numtest(response_data[0]['uHigh'])
uLow = numtest(response_data[0]['uLow'])
uClose = numtest(response_data[0]['uClose'])
uVolume = numtest(response_data[0]['uVolume'])
fOpen = numtest(response_data[0]['fOpen'])
fHigh = numtest(response_data[0]['fHigh'])
fLow = numtest(response_data[0]['fLow'])
fClose = numtest(response_data[0]['fClose'])
fVolume = numtest(response_data[0]['fVolume'])
change = numtest(response_data[0]['change'])
changePercent = numtest(response_data[0]['changePercent'])
try:
sql = f"""
INSERT INTO
daily(
security_id,
date,
open,
high,
low,
close,
volume,
uOpen,
uHigh,
uLow,
uClose,
uVolume,
fOpen,
fHigh,
fLow,
fClose,
fVolume,
`change`,
changePercent)
values(
{uuid},
'{sql_date}',
{open},
{high},
{low},
{close},
{volume},
{uOpen},
{uHigh},
{uLow},
{uClose},
{uVolume},
{fOpen},
{fHigh},
{fLow},
{fClose},
{fVolume},
{change},
{changePercent});
"""
try:
cursor.execute(sql)
db.commit()
except Exception as e:
error = format(str(e))
if error.find("Duplicate entry") != -1:
logging.debug("Data already exists for " + symbol + " on date " + data_date + ".")
else:
logging.error(format(str(e)))
except Exception as e:
logging.error(format(str(e)))
except Exception as e:
logging.error(format(str(e)))
def update(date):
api_date = date.replace('-', '')
sql_date = date + " 00:00:00"
dw_cursor = dw.cursor()
try:
dw_cursor.execute(f"SELECT uuid, symbol FROM security WHERE uuid NOT IN (select security_id from daily where date = '{sql_date}')")
results = dw_cursor.fetchall()
for row in results:
uuid = row[0]
symbol = row[1]
history(uuid, symbol, api_date, sql_date)
except Exception as e:
logging.error(format(str(e)))
sys.exit(1)
dw.close()
| 34.147887
| 139
| 0.518251
|
6a6fcc84ce162db3eebc3425bdd0b25fda9ace54
| 2,760
|
py
|
Python
|
kolibri/core/deviceadmin/management/commands/dbrestore.py
|
jonboiser/kolibri
|
8ea2febc1739ac772007aae4084f0226dfb4ed40
|
[
"MIT"
] | 1
|
2021-03-26T03:44:24.000Z
|
2021-03-26T03:44:24.000Z
|
kolibri/core/deviceadmin/management/commands/dbrestore.py
|
jonboiser/kolibri
|
8ea2febc1739ac772007aae4084f0226dfb4ed40
|
[
"MIT"
] | 5
|
2016-01-22T18:43:44.000Z
|
2019-07-25T20:34:16.000Z
|
kolibri/core/deviceadmin/management/commands/dbrestore.py
|
jonboiser/kolibri
|
8ea2febc1739ac772007aae4084f0226dfb4ed40
|
[
"MIT"
] | 1
|
2019-11-12T14:00:30.000Z
|
2019-11-12T14:00:30.000Z
|
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import kolibri
from django.core.management.base import BaseCommand, CommandError
from kolibri.utils import server
from ...utils import dbrestore, default_backup_folder, search_latest
logger = logging.getLogger(__name__)
class Command(BaseCommand):
output_transaction = True
# @ReservedAssignment
help = (
"Restores a database backup of Kolibri. This is not intended for "
"replication across different devices, but *only* for restoring a "
"single device from a local backup of the database."
)
def add_arguments(self, parser):
parser.add_argument(
'dump_file',
nargs='?',
type=str,
help="Specifies the exact dump file to restore from"
)
parser.add_argument(
'--latest', '-l',
action='store_true',
dest='latest',
help=(
"Automatically detect and restore from latest backup matching "
"the major and minor version (X.Y) of current installation."
)
)
def handle(self, *args, **options):
try:
server.get_status()
self.stderr.write(self.style.ERROR(
"Cannot restore while Kolibri is running, please run:\n"
"\n"
" kolibri stop\n"
))
raise SystemExit()
except server.NotRunning:
# Great, it's not running!
pass
latest = options['latest']
use_backup = options.get("dump_file", None)
if latest == bool(use_backup):
raise CommandError("Either specify a backup file or use --latest")
logger.info("Beginning database restore")
if latest:
search_root = default_backup_folder()
use_backup = None
# Ultimately, we are okay about a backup from a minor release
fallback_version = ".".join(map(str, kolibri.VERSION[:2]))
if os.path.exists(search_root):
use_backup = search_latest(search_root, fallback_version)
if not use_backup:
raise RuntimeError(
"Could not find a database backup for version: {}".format(
fallback_version
)
)
logger.info("Using backup file: {}".format(use_backup))
if not os.path.isfile(use_backup):
raise CommandError("Couldn't find: {}".format(use_backup))
dbrestore(use_backup)
self.stdout.write(self.style.SUCCESS(
"Restored database from: {path}".format(path=use_backup)
))
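# Hedged usage illustration (not part of the original command); the dump path
# is a placeholder:
#   kolibri stop
#   kolibri manage dbrestore --latest
#   kolibri manage dbrestore /path/to/db-backup.dump
# handle() above refuses to run while the Kolibri server is still up.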
| 31.011236
| 79
| 0.582971
|
3e20e973929d9ccbe1ad0adee5301fc494dc6635
| 819
|
py
|
Python
|
profiles_project/urls.py
|
sachmesachi/profiles-rest-api
|
1717488d9accd358bc1521857472046a52368ca8
|
[
"MIT"
] | null | null | null |
profiles_project/urls.py
|
sachmesachi/profiles-rest-api
|
1717488d9accd358bc1521857472046a52368ca8
|
[
"MIT"
] | null | null | null |
profiles_project/urls.py
|
sachmesachi/profiles-rest-api
|
1717488d9accd358bc1521857472046a52368ca8
|
[
"MIT"
] | null | null | null |
"""profiles_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
...
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('profiles_api.urls'))
]
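# Hedged illustration (not part of this project) of the "Class-based views"
# case from the docstring above, using Django's generic TemplateView; the
# 'home/' route and 'home.html' template are hypothetical:
# from django.views.generic import TemplateView
# urlpatterns += [path('home/', TemplateView.as_view(template_name='home.html'), name='home')]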
| 32.76
| 77
| 0.704518
|
21900d4ac92a6dbb8b05635203d2d1925e3a37a1
| 1,355
|
py
|
Python
|
book_init.py
|
Alwaysproblem/COMP9900-proj
|
34f10ca8c18c8e8f26b9ce5b7be39c1e05781597
|
[
"MIT"
] | 3
|
2019-01-22T00:41:20.000Z
|
2022-03-03T15:23:09.000Z
|
book_init.py
|
Alwaysproblem/COMP9900-proj
|
34f10ca8c18c8e8f26b9ce5b7be39c1e05781597
|
[
"MIT"
] | null | null | null |
book_init.py
|
Alwaysproblem/COMP9900-proj
|
34f10ca8c18c8e8f26b9ce5b7be39c1e05781597
|
[
"MIT"
] | null | null | null |
import os, re, sqlite3, uuid
conn = sqlite3.connect('book.db',detect_types=sqlite3.PARSE_DECLTYPES,check_same_thread=False)
def create_table():
conn.execute("drop table if exists booking")
conn.execute('''create table booking (
ID primary key not null,
HouseID char(50),
Img char(50),
Address char(50),
Roomtype char(50),
Price char(50),
userid char(50),
start_time char(50),
end_time char(50));
''')
# if __name__ == '__main__':
# create_table()
cur = conn.cursor()
house_id = house_img = house_address = house_roomtype = house_price = current_user = start_date = end_date = None
# sql = 'select * from booking'
key = '"ID", "HouseID", "Img", "Address", "Roomtype", "Price", "userid", "start_time", "end_time"'
sql = "insert into booking (" + key + ") values ('{}','{}','{}','{}','{}','{}','{}','{}','{}')".format \
(uuid.uuid4(), house_id, house_img, house_address, house_roomtype, house_price, current_user, start_date,
end_date)
print(sql)
cur.execute(sql)
# cur.close()
conn.commit()
sql = 'select * from booking'
cur.execute(sql)
t_list = []
for h_tuple in cur.fetchall():
t_list.append(h_tuple)
print('tlist', t_list)
| 30.795455
| 113
| 0.573432
|
bb30f75fb05170045d8b43de9ca6df17649a432c
| 9,649
|
py
|
Python
|
docs/html/conf.py
|
grimreaper/pip
|
7420629800b10d117d3af3b668dbe99b475fcbc0
|
[
"MIT"
] | 1
|
2019-12-20T05:27:25.000Z
|
2019-12-20T05:27:25.000Z
|
docs/html/conf.py
|
grimreaper/pip
|
7420629800b10d117d3af3b668dbe99b475fcbc0
|
[
"MIT"
] | 7
|
2019-12-27T07:56:50.000Z
|
2022-01-25T03:41:39.000Z
|
docs/html/conf.py
|
grimreaper/pip
|
7420629800b10d117d3af3b668dbe99b475fcbc0
|
[
"MIT"
] | 1
|
2020-02-14T16:53:19.000Z
|
2020-02-14T16:53:19.000Z
|
# -*- coding: utf-8 -*-
#
# pip documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 22 22:08:49 2008
#
# This file is execfile()d with the current directory set to its containing dir
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import os
import re
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
docs_dir = os.path.dirname(os.path.dirname(__file__))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, docs_dir)
# sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# extensions = ['sphinx.ext.autodoc']
extensions = ['sphinx.ext.extlinks', 'pip_sphinxext', 'sphinx.ext.intersphinx']
# intersphinx
intersphinx_cache_limit = 0
intersphinx_mapping = {
'pypug': ('https://packaging.python.org/', None),
'pypa': ('https://www.pypa.io/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pip'
copyright = '2008-2017, PyPA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = release = 'dev'
# Readthedocs seems to install pip as an egg (via setup.py install) which
# is somehow resulting in "import pip" picking up an older copy of pip.
# Rather than trying to force RTD to install pip properly, we'll simply
# read the version direct from the __init__.py file. (Yes, this is
# fragile, but it works...)
pip_init = os.path.join(docs_dir, '..', 'src', 'pip', '__init__.py')
with open(pip_init) as f:
for line in f:
m = re.match(r'__version__ = "(.*)"', line)
if m:
__version__ = m.group(1)
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
break
# We have this here because readthedocs plays tricks sometimes and there seems
# to be a heisenbug, related to the version of pip discovered. This is here to
# help debug that if someone decides to do that in the future.
print(version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_patterns = ['build/']
# The reST default role (used for this markup: `text`) to use for all documents
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
extlinks = {
'issue': ('https://github.com/pypa/pip/issues/%s', '#'),
'pull': ('https://github.com/pypa/pip/pull/%s', 'PR #'),
'pypi': ('https://pypi.org/project/%s/', ''),
}
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "pypa_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'collapsiblesidebar': True,
'externalrefs': True,
'navigation_depth': 3,
'issues_url': 'https://github.com/pypa/pip/issues'
}
# Add any paths that contain custom themes here, relative to this directory.
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = '_static/piplogo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, the Docutils Smart Quotes transform (originally based on
# SmartyPants) will be used to convert characters like quotes and dashes
# to typographically correct entities. The default is True.
smartquotes = True
# This string, for use with Docutils 0.14 or later, customizes the
# SmartQuotes transform. The default of "qDe" converts normal quote
# characters ('"' and "'"), en and em dashes ("--" and "---"), and
# ellipses "...".
# For now, we disable the conversion of dashes so that long options
# like "--find-links" won't render as "-find-links" if included in the
# text in places where monospaced type can't be used. For example, backticks
# can't be used inside roles like :ref:`--no-index <--no-index>` because
# of nesting.
smartquotes_action = "qe"
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'relations.html'],
'index': ['localtoc.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pipdocs'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
(
'index',
'pip.tex',
u'pip Documentation',
u'pip developers',
'manual',
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
# -- Options for Manual Pages -------------------------------------------------
# List of manual pages generated
man_pages = [
(
'index',
'pip',
u'package manager for Python packages',
u'pip developers',
1
)
]
# Here, we crawl the entire man/commands/ directory and list every file with
# appropriate name and details
man_dir = os.path.join(docs_dir, 'man/')
raw_subcommands = glob.glob(os.path.join(man_dir, 'commands/*.rst'))
if not raw_subcommands:
raise FileNotFoundError(
'The individual subcommand manpages could not be found!'
)
for fname in raw_subcommands:
fname_base = fname[len(man_dir):-4]
outname = 'pip-' + fname_base[9:]
description = u'description of {} command'.format(
outname.replace('-', ' ')
)
man_pages.append((fname_base, outname, description, u'pip developers', 1))
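# Hedged worked example (not part of the original conf.py): for a hypothetical
# man/commands/install.rst, fname_base is 'commands/install', outname becomes
# 'pip-install', and the generated description reads
# 'description of pip install command'.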
| 33.044521
| 79
| 0.693025
|
6ee0f1a3a900846a7ac90bf878536619733799a9
| 284
|
py
|
Python
|
basic/MultipleElifs.py
|
tonper19/PythonDemos
|
633a40e282049e511fd965c0afe104e775a2f526
|
[
"MIT"
] | null | null | null |
basic/MultipleElifs.py
|
tonper19/PythonDemos
|
633a40e282049e511fd965c0afe104e775a2f526
|
[
"MIT"
] | null | null | null |
basic/MultipleElifs.py
|
tonper19/PythonDemos
|
633a40e282049e511fd965c0afe104e775a2f526
|
[
"MIT"
] | null | null | null |
color = input("What's your favorite color?")
if color == 'purple':
print('excelent choice!')
elif color == 'teal':
print('not bad!')
elif color == 'seafoam':
print('mediocre')
elif color == 'pure darkness':
print('i like how you think')
else:
print('YOU MONSTER!')
| 25.818182
| 44
| 0.626761
|
8283758e7b4b37af5b61603eddfff0bb9fbd3020
| 754
|
py
|
Python
|
examples/bme280.py
|
kungpfui/python-i2cmod
|
57d9cc8de372aa38526c3503ceec0d8924665c04
|
[
"MIT"
] | null | null | null |
examples/bme280.py
|
kungpfui/python-i2cmod
|
57d9cc8de372aa38526c3503ceec0d8924665c04
|
[
"MIT"
] | null | null | null |
examples/bme280.py
|
kungpfui/python-i2cmod
|
57d9cc8de372aa38526c3503ceec0d8924665c04
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Bosch Sensortec BME280 pressure, temperature and humidity sensor.
`BME280 Datasheet <https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME280_DS001-12.pdf>`
"""
from i2cmod import BME280
def example():
""" Output data to screen """
with BME280(altitude=414.0) as sensor:
print("Chip ID: {:02X}".format(sensor.id))
sensor.update()
print("Pressure: {:.2f} hPa ".format(sensor.pressure))
print("Pressure NN: {:.2f} hPa ".format(sensor.pressure_sea_level))
print("Temperature: {:.2f} C".format(sensor.centigrade))
print("Humidity: {:.2f} %".format(sensor.humidity))
if __name__ == '__main__':
example()
| 29
| 107
| 0.632626
|
122cc54134b401e83f297392b3b4722358f181c9
| 11,203
|
py
|
Python
|
kolibri/core/content/models.py
|
reubenjacob/kolibri
|
028bb2ad63e438c832ff657d37f7b05c3400f2da
|
[
"MIT"
] | null | null | null |
kolibri/core/content/models.py
|
reubenjacob/kolibri
|
028bb2ad63e438c832ff657d37f7b05c3400f2da
|
[
"MIT"
] | 3
|
2016-09-13T15:15:03.000Z
|
2018-10-06T15:54:44.000Z
|
kolibri/core/content/models.py
|
indirectlylit/kolibri
|
d00f070040fec63003c8e7f124ea89bc710a83c4
|
[
"MIT"
] | null | null | null |
"""
These models are used in the databases of content that get imported from Studio.
Any fields added here (and not in base_models.py) are assumed to be locally
calculated cached fields. If a field is intended to be imported from a content
database generated by Studio, it should be added in base_models.py.
*DEVELOPER WARNING regarding updates to these models*
If you modify the schema here, it has implications for the content import pipeline
because we will need to calculate these values during content import (as they will
not be present in the content databases distributed by Studio).
In the case where new fields are added that do not need to be added to an export schema
the generate_schema command should be run like this:
`kolibri manage generate_schema current`
This will just regenerate the current schema for SQLAlchemy, so that we can use SQLAlchemy
to calculate these fields if needed (this can frequently be more efficient than using the
Django ORM for these calculations).
"""
from __future__ import print_function
import os
from gettext import gettext as _
from django.db import connection
from django.db import models
from django.db.models import Min
from django.db.models import Q
from django.db.models import QuerySet
from django.utils.encoding import python_2_unicode_compatible
from le_utils.constants import content_kinds
from le_utils.constants import format_presets
from mptt.managers import TreeManager
from mptt.querysets import TreeQuerySet
from .utils import paths
from kolibri.core.content import base_models
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.core.device.models import ContentCacheKey
from kolibri.core.mixins import FilterByUUIDQuerysetMixin
PRESET_LOOKUP = dict(format_presets.choices)
@python_2_unicode_compatible
class ContentTag(base_models.ContentTag):
def __str__(self):
return self.tag_name
class ContentNodeQueryset(TreeQuerySet, FilterByUUIDQuerysetMixin):
def dedupe_by_content_id(self, use_distinct=True):
# Cannot use distinct if queryset is also going to use annotate,
# so optional use_distinct flag can be used to fallback to a subquery
# remove duplicate content nodes based on content_id
if connection.vendor == "sqlite" or not use_distinct:
if connection.vendor == "postgresql":
# Create a subquery of all contentnodes deduped by content_id
# to avoid calling distinct on an annotated queryset.
deduped_ids = self.model.objects.order_by("content_id").distinct(
"content_id"
)
else:
# adapted from https://code.djangoproject.com/ticket/22696
deduped_ids = (
self.values("content_id")
.annotate(node_id=Min("id"))
.values_list("node_id", flat=True)
)
return self.filter_by_uuids(deduped_ids)
# when using postgres, we can call distinct on a specific column
elif connection.vendor == "postgresql":
return self.order_by("content_id").distinct("content_id")
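    # Illustrative usage (a sketch, not from the upstream file): callers that plan
    # to .annotate() the result pass use_distinct=False to get the subquery
    # fallback instead of DISTINCT ON (SQLite always takes the subquery path), e.g.
    #   ContentNode.objects.all().dedupe_by_content_id(use_distinct=False)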
def filter_by_content_ids(self, content_ids, validate=True):
return self._by_uuids(content_ids, validate, "content_id", True)
def exclude_by_content_ids(self, content_ids, validate=True):
return self._by_uuids(content_ids, validate, "content_id", False)
class ContentNodeManager(
models.Manager.from_queryset(ContentNodeQueryset), TreeManager
):
def get_queryset(self, *args, **kwargs):
"""
Ensures that this manager always returns nodes in tree order.
"""
return (
super(TreeManager, self)
.get_queryset(*args, **kwargs)
.order_by(self.tree_id_attr, self.left_attr)
)
def build_tree_nodes(self, data, target=None, position="last-child"):
"""
vendored from:
https://github.com/django-mptt/django-mptt/blob/fe2b9cc8cfd8f4b764d294747dba2758147712eb/mptt/managers.py#L614
"""
opts = self.model._mptt_meta
if target:
tree_id = target.tree_id
if position in ("left", "right"):
level = getattr(target, opts.level_attr)
if position == "left":
cursor = getattr(target, opts.left_attr)
else:
cursor = getattr(target, opts.right_attr) + 1
else:
level = getattr(target, opts.level_attr) + 1
if position == "first-child":
cursor = getattr(target, opts.left_attr) + 1
else:
cursor = getattr(target, opts.right_attr)
else:
tree_id = self._get_next_tree_id()
cursor = 1
level = 0
stack = []
def treeify(data, cursor=1, level=0):
data = dict(data)
children = data.pop("children", [])
node = self.model(**data)
stack.append(node)
setattr(node, opts.tree_id_attr, tree_id)
setattr(node, opts.level_attr, level)
setattr(node, opts.left_attr, cursor)
for child in children:
cursor = treeify(child, cursor=cursor + 1, level=level + 1)
cursor += 1
setattr(node, opts.right_attr, cursor)
return cursor
treeify(data, cursor=cursor, level=level)
if target:
self._create_space(2 * len(stack), cursor - 1, tree_id)
return stack
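# Hypothetical usage of build_tree_nodes above (a sketch; the field values are
# placeholders and real ContentNode rows require more columns): the method only
# fills in the MPTT bookkeeping fields and returns unsaved instances, so a caller
# would typically persist them in bulk afterwards, e.g.
#   nodes = ContentNode.objects.build_tree_nodes(
#       {"title": "Root topic", "children": [{"title": "Child node"}]}
#   )
#   ContentNode.objects.bulk_create(nodes)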
@python_2_unicode_compatible
class ContentNode(base_models.ContentNode):
"""
The primary object type in a content database. Defines the properties that are shared
across all content types.
It represents videos, exercises, audio, documents, and other 'content items' that
exist as nodes in content channels.
"""
# Fields used only on Kolibri and not imported from a content database
# Total number of coach only resources for this node
num_coach_contents = models.IntegerField(default=0, null=True, blank=True)
# Total number of available resources on the device under this topic - if this is not a topic
# then it is 1 or 0 depending on availability
on_device_resources = models.IntegerField(default=0, null=True, blank=True)
objects = ContentNodeManager()
class Meta:
ordering = ("lft",)
index_together = [
["level", "channel_id", "kind"],
["level", "channel_id", "available"],
]
def __str__(self):
return self.title
def get_descendant_content_ids(self):
"""
Retrieve a queryset of content_ids for non-topic content nodes that are
descendants of this node.
"""
return (
ContentNode.objects.filter(lft__gte=self.lft, lft__lte=self.rght)
.exclude(kind=content_kinds.TOPIC)
.values_list("content_id", flat=True)
)
@python_2_unicode_compatible
class Language(base_models.Language):
def __str__(self):
return self.lang_name or ""
class File(base_models.File):
"""
The second to bottom layer of the contentDB schema, defines the basic building brick for content.
Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
"""
class Meta:
ordering = ["priority"]
class Admin:
pass
def get_extension(self):
return self.local_file.extension
def get_file_size(self):
return self.local_file.file_size
def get_storage_url(self):
return self.local_file.get_storage_url()
def get_preset(self):
"""
Return the preset.
"""
return PRESET_LOOKUP.get(self.preset, _("Unknown format"))
class LocalFileQueryset(models.QuerySet, FilterByUUIDQuerysetMixin):
def delete_unused_files(self):
for file in self.get_unused_files():
try:
os.remove(paths.get_content_storage_file_path(file.get_filename()))
yield True, file
except (IOError, OSError, InvalidStorageFilenameError):
yield False, file
self.get_unused_files().update(available=False)
def get_orphan_files(self):
return self.filter(files__isnull=True)
def delete_orphan_file_objects(self):
return self.filter(files__isnull=True).delete()
def get_unused_files(self):
return self.filter(
~Q(files__contentnode__available=True) | Q(files__isnull=True)
).filter(available=True)
@python_2_unicode_compatible
class LocalFile(base_models.LocalFile):
"""
The bottom layer of the contentDB schema, defines the local state of files on the device storage.
"""
objects = LocalFileQueryset.as_manager()
class Admin:
pass
def __str__(self):
return paths.get_content_file_name(self)
def get_filename(self):
return self.__str__()
def get_storage_url(self):
"""
Return a url for the client side to retrieve the content file.
The same url will also be exposed by the file serializer.
"""
return paths.get_local_content_storage_file_url(self)
def delete_stored_file(self):
"""
Delete the stored file from disk.
"""
deleted = False
try:
os.remove(paths.get_content_storage_file_path(self.get_filename()))
deleted = True
except (IOError, OSError, InvalidStorageFilenameError):
deleted = False
self.available = False
self.save()
return deleted
class AssessmentMetaData(base_models.AssessmentMetaData):
"""
A model to describe additional metadata that characterizes assessment behaviour in Kolibri.
    This model contains additional fields that are only relevant to content nodes that probe a
user's state of knowledge and allow them to practice to Mastery.
ContentNodes with this metadata may also be able to be used within quizzes and exams.
"""
pass
class ChannelMetadataQueryset(QuerySet, FilterByUUIDQuerysetMixin):
pass
@python_2_unicode_compatible
class ChannelMetadata(base_models.ChannelMetadata):
"""
Holds metadata about all existing content databases that exist locally.
"""
# precalculated fields during annotation/migration
published_size = models.BigIntegerField(default=0, null=True, blank=True)
total_resource_count = models.IntegerField(default=0, null=True, blank=True)
included_languages = models.ManyToManyField(
"Language", related_name="channels", verbose_name="languages", blank=True
)
order = models.PositiveIntegerField(default=0, null=True, blank=True)
public = models.NullBooleanField()
objects = ChannelMetadataQueryset.as_manager()
class Admin:
pass
class Meta:
ordering = ["order"]
def __str__(self):
return self.name
def delete_content_tree_and_files(self):
# Use Django ORM to ensure cascading delete:
self.root.delete()
ContentCacheKey.update_cache_key()
| 33.845921
| 118
| 0.668392
|
319971e2b715159eadf6f48530ee23b336f2194e
| 5,648
|
py
|
Python
|
webwhatsapi/objects/message.py
|
uae0786/WhatsApp-Auto-Reply
|
f55c0e376663282cab78f81d01ab24b3cdb4e59d
|
[
"MIT"
] | 3
|
2018-04-11T03:02:30.000Z
|
2018-05-19T13:26:31.000Z
|
webwhatsapi/objects/message.py
|
uae0786/WhatsApp-Auto-Reply
|
f55c0e376663282cab78f81d01ab24b3cdb4e59d
|
[
"MIT"
] | null | null | null |
webwhatsapi/objects/message.py
|
uae0786/WhatsApp-Auto-Reply
|
f55c0e376663282cab78f81d01ab24b3cdb4e59d
|
[
"MIT"
] | 7
|
2018-04-11T09:00:38.000Z
|
2021-01-23T08:58:46.000Z
|
from datetime import datetime
import mimetypes
import os
import pprint
from webwhatsapi.helper import safe_str
pprint = pprint.PrettyPrinter(indent=4).pprint
from webwhatsapi.objects.whatsapp_object import WhatsappObjectWithoutID, driver_needed
from webwhatsapi.objects.contact import Contact
class MessageMetaClass(type):
"""
Message type factory
"""
def __call__(cls, js_obj, driver=None):
"""
Responsible for returning correct Message subtype
:param js_obj: Raw message JS
:return: Instance of appropriate message type
:rtype: MediaMessage | Message | MMSMessage | VCardMessage
"""
if js_obj["isMedia"]:
return type.__call__(MediaMessage, js_obj, driver)
if js_obj["isNotification"]:
return type.__call__(NotificationMessage, js_obj, driver)
if js_obj["isMMS"]:
return type.__call__(MMSMessage, js_obj, driver)
if js_obj["type"] in ["vcard", "multi_vcard"]:
return type.__call__(VCardMessage, js_obj, driver)
return type.__call__(Message, js_obj, driver)
class Message(WhatsappObjectWithoutID):
__metaclass__ = MessageMetaClass
def __init__(self, js_obj, driver=None):
"""
Constructor
:param js_obj: Raw JS message obj
:type js_obj: dict
"""
super(Message, self).__init__(js_obj, driver)
self.sender = False if js_obj["sender"] == False else Contact(js_obj["sender"], driver)
self.timestamp = datetime.fromtimestamp(js_obj["timestamp"])
        if js_obj["content"]:
            self.content = js_obj["content"]
            self.safe_content = safe_str(self.content[0:25]) + '...'
        else:
            self.content = ""
            self.safe_content = ""
self.js_obj = js_obj
def __repr__(self):
return "<Message - from {sender} at {timestamp}: {content}>".format(
sender=safe_str(self.sender.get_safe_name()),
timestamp=self.timestamp,
content=self.safe_content)
class MediaMessage(Message):
def __init__(self, js_obj, driver=None):
super(MediaMessage, self).__init__(js_obj, driver)
self.type = self.js_obj["type"]
self.size = self.js_obj["size"]
self.mime = self.js_obj["mime"]
def save_media(self, path):
extension = mimetypes.guess_extension(self.mime)
filename = "{0}{1}".format(self["__x_filehash"], extension)
with file(os.path.join(path, filename), "wb") as output:
output.write(self.content.decode("base64"))
def __repr__(self):
return "<MediaMessage - {type} from {sender} at {timestamp}>".format(
type=self.type,
sender=safe_str(self.sender.get_safe_name()),
timestamp=self.timestamp
)
class MMSMessage(MediaMessage):
"""
Represents MMS messages
Example of an MMS message: "ptt" (push to talk), voice memo
"""
def __init__(self, js_obj, driver=None):
super(MMSMessage, self).__init__(js_obj, driver)
def __repr__(self):
return "<MMSMessage - {type} from {sender} at {timestamp}>".format(
type=self.type,
sender=safe_str(self.sender.get_safe_name()),
timestamp=self.timestamp
)
class VCardMessage(Message):
def __init__(self, js_obj, driver=None):
super(VCardMessage, self).__init__(js_obj, driver)
self.type = js_obj["type"]
self.contacts = js_obj["content"].encode("ascii", "ignore")
def __repr__(self):
return "<VCardMessage - {type} from {sender} at {timestamp} ({contacts})>".format(
type=self.type,
sender=safe_str(self.sender.get_safe_name()),
timestamp=self.timestamp,
contacts=self.contacts
)
class NotificationMessage(Message):
def __init__(self, js_obj, driver=None):
super(NotificationMessage, self).__init__(js_obj, driver)
self.type = js_obj["type"]
self.subtype = js_obj["subtype"].encode("ascii", "ignore")
if js_obj["recipients"]:
self.recipients = [self.driver.get_contact_from_id(x) for x in js_obj["recipients"]]
def __repr__(self):
readable = {
'call_log':{
'miss': "Missed Call",
},
'e2e_notification':{
'encrypt': "Messages now Encrypted"
},
'gp2':{
'create': "Created group",
'add': "Added to group",
'remove': "Removed from group",
'leave': "Left the group"
}
}
sender = "" if not self.sender else ("from " + str(safe_str(self.sender.get_safe_name())))
return "<NotificationMessage - {type} {recip} {sender} at {timestamp}>".format(
type=readable[self.type][self.subtype],
sender = sender,
timestamp=self.timestamp,
recip="" if not hasattr(self, 'recipients') else "".join([safe_str(x.get_safe_name()) for x in self.recipients]),
)
class MessageGroup(object):
def __init__(self, chat, messages):
"""
Constructor
:param chat: Chat that contains messages
:type chat: chat.Chat
:param messages: List of messages
:type messages: list[Message]
"""
self.chat = chat
self.messages = messages
def __repr__(self):
safe_chat_name = safe_str(self.chat.name)
return "<MessageGroup - {num} {messages} in {chat}>".format(
num=len(self.messages),
messages="message" if len(self.messages) == 1 else "messages",
chat=safe_chat_name)
| 32.837209
| 125
| 0.607649
|
bcc313d30a411be3bcf49632c6a04032b3428f6d
| 4,719
|
py
|
Python
|
landlab/components/stream_power/examples/perturb_sed_flux_dep.py
|
awickert/landlab
|
496de56717a5877db96f354a1b1285bfabe8b56f
|
[
"MIT"
] | 1
|
2015-08-17T19:29:50.000Z
|
2015-08-17T19:29:50.000Z
|
landlab/components/stream_power/examples/perturb_sed_flux_dep.py
|
awickert/landlab
|
496de56717a5877db96f354a1b1285bfabe8b56f
|
[
"MIT"
] | 1
|
2018-04-07T08:24:56.000Z
|
2018-04-07T13:52:03.000Z
|
landlab/components/stream_power/examples/perturb_sed_flux_dep.py
|
awickert/landlab
|
496de56717a5877db96f354a1b1285bfabe8b56f
|
[
"MIT"
] | 2
|
2017-07-03T20:21:13.000Z
|
2018-09-06T23:58:19.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from six.moves import range
from landlab.components.flow_routing import FlowRouter
from landlab.components.stream_power import SedDepEroder
from landlab import ModelParameterDictionary
from landlab.plot import imshow
from landlab.plot.video_out import VideoPlotter
from landlab.plot import channel_profile as prf
from landlab.plot.imshow import imshow_node_grid
from pylab import colorbar, show, plot, loglog, figure, savefig, close, ylim
from landlab import RasterModelGrid
import numpy as np
import pylab
from copy import copy, deepcopy
from time import time
#get the needed properties to build the grid:
input_file = './sed_dep_NMGparams2.txt'
#####remember to change the fixed y-axis dimension in the plots!!
y_max = 200
make_output_plots=True
out_interval=15 #was 15
inputs = ModelParameterDictionary(input_file)
nrows = inputs.read_int('nrows')
ncols = inputs.read_int('ncols')
dx = inputs.read_float('dx')
uplift_rate = inputs.read_float('uplift_rate')
runtime = inputs.read_float('total_time')
dt = inputs.read_float('dt')
nt = int(runtime//dt)
uplift_per_step = uplift_rate * dt
print('uplift per step: ', uplift_per_step)
#check we have a plausible grid
mg = RasterModelGrid(nrows,ncols,dx)
assert mg.number_of_nodes == nrows*ncols
assert mg.node_spacing == dx
# Display a message
print('Running ...')
# instantiate the components:
fr = FlowRouter(mg)
sde = SedDepEroder(mg, input_file)
# don't allow overwriting of these, just in case
try:
x_profiles
except NameError:
x_profiles = []
z_profiles = []
S_profiles = []
A_profiles = []
# plot init conds
if make_output_plots:
mg = fr.route_flow(grid=mg)
pylab.figure('long_profile_anim')
ylim([0, y_max])
prf.analyze_channel_network_and_plot(mg)
savefig('0profile_anim_init.png')
close('long_profile_anim')
(profile_IDs, dists_upstr) = prf.analyze_channel_network_and_plot(mg)
start_node = [profile_IDs[0]]
time_on = time()
#perform the loops:
for i in range(nt):
#print 'loop ', i
mg.at_node['topographic__elevation'][mg.core_nodes] += uplift_per_step
mg = fr.route_flow()
#mg.calc_grad_across_cell_faces(mg.at_node['topographic__elevation'])
#neighbor_slopes = mg.calc_grad_along_node_links(mg.at_node['topographic__elevation'])
#mean_slope = np.mean(np.fabs(neighbor_slopes),axis=1)
#max_slope = np.max(np.fabs(neighbor_slopes),axis=1)
#mg,_,capacity_out = tl.erode(mg,dt,slopes_at_nodes='topographic__steepest_slope')
#mg,_,capacity_out = tl.erode(mg,dt,slopes_at_nodes=max_slope)
mg_copy = deepcopy(mg)
mg,_ = sde.erode(mg,dt)
#print sde.iterations_in_dt
#print 'capacity ', np.amax(capacity_out[mg.core_nodes])
#print 'rel sed ', np.nanmax(sed_in[mg.core_nodes]/capacity_out[mg.core_nodes])
if i%out_interval == 0:
print('loop ', i)
print('max_slope', np.amax(mg.at_node['topographic__steepest_slope'][mg.core_nodes]))
pylab.figure("long_profiles")
profile_IDs = prf.channel_nodes(mg, mg.at_node['topographic__steepest_slope'],
mg.at_node['drainage_area'], mg.at_node['flow__receiver_node'])
dists_upstr = prf.get_distances_upstream(mg, len(mg.at_node['topographic__steepest_slope']),
profile_IDs, mg.at_node['flow__link_to_receiver_node'])
prf.plot_profiles(dists_upstr, profile_IDs, mg.at_node['topographic__elevation'])
if i%out_interval == 0:
x_profiles.append(dists_upstr)
z_profiles.append(mg.at_node['topographic__elevation'][profile_IDs])
S_profiles.append(mg.at_node['topographic__steepest_slope'][profile_IDs])
A_profiles.append(mg.at_node['drainage_area'][profile_IDs])
if make_output_plots:
pylab.figure('long_profile_anim')
#prf.plot_profiles(dists_upstr, profile_IDs, mg.at_node['topographic_elevation'])
                plot(dists_upstr,mg.at_node['topographic__elevation'][profile_IDs])
ylim([0,y_max])
if i==0:
savefig('profile_anim_000'+str(i)+'.png')
elif i<100:
savefig('profile_anim_00'+str(i)+'.png')
elif i<1000:
savefig('profile_anim_0'+str(i)+'.png')
else:
savefig('profile_anim_'+str(i)+'.png')
close('long_profile_anim')
#vid.add_frame(mg, 'topographic__elevation')
print('Completed the simulation. Plotting...')
time_off = time()
#Finalize and plot
elev = mg['node']['topographic__elevation']
#imshow.imshow_node_grid(mg, elev)
print('Done.')
print('Time: ', time_off-time_on)
#pylab.show()
#vid.produce_video()
| 34.698529
| 103
| 0.706718
|
b38b02334bf41174bbfd8ff4ac807e8430936f4f
| 2,574
|
py
|
Python
|
pyActionRec/action_caffe.py
|
zhoutianyi1/caffe_feature
|
88405029404cb9b075f3057ce2f8ef2610a623a9
|
[
"BSD-2-Clause"
] | null | null | null |
pyActionRec/action_caffe.py
|
zhoutianyi1/caffe_feature
|
88405029404cb9b075f3057ce2f8ef2610a623a9
|
[
"BSD-2-Clause"
] | null | null | null |
pyActionRec/action_caffe.py
|
zhoutianyi1/caffe_feature
|
88405029404cb9b075f3057ce2f8ef2610a623a9
|
[
"BSD-2-Clause"
] | null | null | null |
from .config import ANET_CFG
import sys
sys.path.append(ANET_CFG.CAFFE_ROOT+'/python')
import caffe
from caffe.io import oversample
import numpy as np
from utils.io import flow_stack_oversample
import cv2
class CaffeNet(object):
def __init__(self, net_proto, net_weights, device_id, input_size=None):
caffe.set_mode_gpu()
caffe.set_device(device_id)
self._net = caffe.Net(net_proto, net_weights, caffe.TEST)
input_shape = self._net.blobs['data'].data.shape
if input_size is not None:
input_shape = input_shape[:2] + input_size
transformer = caffe.io.Transformer({'data': input_shape})
if self._net.blobs['data'].data.shape[1] == 3:
transformer.set_transpose('data', (2, 0, 1)) # move image channels to outermost dimension
transformer.set_mean('data', np.array([104, 117, 123])) # subtract the dataset-mean value in each channel
else:
pass # non RGB data need not use transformer
self._transformer = transformer
self._sample_shape = self._net.blobs['data'].data.shape
def predict_single_frame(self, frame, score_name, over_sample=True, multiscale=None, frame_size=None):
if frame_size is not None:
frame = [cv2.resize(x, frame_size) for x in frame]
if over_sample:
if multiscale is None:
os_frame = oversample(frame, (self._sample_shape[2], self._sample_shape[3]))
else:
os_frame = []
for scale in multiscale:
resized_frame = [cv2.resize(x, (0,0), fx=1.0/scale, fy=1.0/scale) for x in frame]
os_frame.extend(oversample(resized_frame, (self._sample_shape[2], self._sample_shape[3])))
else:
os_frame = np.array(frame)
data = np.array([self._transformer.preprocess('data', x) for x in os_frame])
self._net.blobs['data'].reshape(*data.shape)
self._net.reshape()
out = self._net.forward(blobs=[score_name,], data=data)
return out[score_name].copy()
def predict_single_flow_stack(self, frame, score_name, over_sample=True):
if over_sample:
os_frame = flow_stack_oversample(frame, (self._sample_shape[2], self._sample_shape[3]))
else:
os_frame = np.array([frame,])
data = os_frame - 128
self._net.blobs['data'].reshape(*data.shape)
self._net.reshape()
out = self._net.forward(blobs=[score_name,], data=data)
return out[score_name].copy()
| 34.32
| 118
| 0.634421
|
b2e21a56382ee21307579eadd5fe8bcd77723a99
| 350
|
py
|
Python
|
assignment3/myapp/migrations/0012_alter_winsmodel_options.py
|
spencerleff/Spence-Tac-Toe
|
f85c29d37b4441055a1c93e729dffab0499a7626
|
[
"MIT"
] | null | null | null |
assignment3/myapp/migrations/0012_alter_winsmodel_options.py
|
spencerleff/Spence-Tac-Toe
|
f85c29d37b4441055a1c93e729dffab0499a7626
|
[
"MIT"
] | null | null | null |
assignment3/myapp/migrations/0012_alter_winsmodel_options.py
|
spencerleff/Spence-Tac-Toe
|
f85c29d37b4441055a1c93e729dffab0499a7626
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0 on 2021-12-09 01:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0011_alter_winsmodel_options'),
]
operations = [
migrations.AlterModelOptions(
name='winsmodel',
options={'ordering': ['-wins']},
),
]
| 19.444444
| 50
| 0.594286
|
f92bda7131e208a4f706610c79d6080eb6f7d565
| 543
|
py
|
Python
|
manage.py
|
ChenSunMac/EZonlineEdu
|
bae3dc82a357e7bc0e60dab1f2f1105343aa752e
|
[
"MIT"
] | null | null | null |
manage.py
|
ChenSunMac/EZonlineEdu
|
bae3dc82a357e7bc0e60dab1f2f1105343aa752e
|
[
"MIT"
] | null | null | null |
manage.py
|
ChenSunMac/EZonlineEdu
|
bae3dc82a357e7bc0e60dab1f2f1105343aa752e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "EZonlineEdu.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.9375
| 75
| 0.688766
|
ea3735ed21cda47b20366ab9a2887e4217d15dc4
| 516
|
py
|
Python
|
osf/migrations/0191_abstractnode_external_registered_date.py
|
gaybro8777/osf.io
|
30408511510a40bc393565817b343ef5fd76ab14
|
[
"Apache-2.0"
] | 628
|
2015-01-15T04:33:22.000Z
|
2022-03-30T06:40:10.000Z
|
osf/migrations/0191_abstractnode_external_registered_date.py
|
gaybro8777/osf.io
|
30408511510a40bc393565817b343ef5fd76ab14
|
[
"Apache-2.0"
] | 4,712
|
2015-01-02T01:41:53.000Z
|
2022-03-30T14:18:40.000Z
|
osf/migrations/0191_abstractnode_external_registered_date.py
|
Johnetordoff/osf.io
|
de10bf249c46cede04c78f7e6f7e352c69e6e6b5
|
[
"Apache-2.0"
] | 371
|
2015-01-12T16:14:08.000Z
|
2022-03-31T18:58:29.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-10-21 18:50
from __future__ import unicode_literals
from django.db import migrations
import osf.utils.fields
class Migration(migrations.Migration):
dependencies = [
('osf', '0189_deleted_field_data'),
]
operations = [
migrations.AddField(
model_name='abstractnode',
name='external_registered_date',
field=osf.utils.fields.NonNaiveDateTimeField(blank=True, null=True),
),
]
| 23.454545
| 80
| 0.651163
|
7c4466dbfc48531f1e2bf47d8d750fd61050323d
| 783
|
py
|
Python
|
semantic_seg/tools/imports.py
|
Megvii-BaseDetection/DisAlign
|
a2fc3500a108cb83e3942293a5675c97ab3a2c6e
|
[
"Apache-2.0"
] | 91
|
2021-03-29T08:58:00.000Z
|
2022-03-30T02:42:29.000Z
|
semantic_seg/tools/imports.py
|
Megvii-BaseDetection/DisAlign
|
a2fc3500a108cb83e3942293a5675c97ab3a2c6e
|
[
"Apache-2.0"
] | 22
|
2021-04-07T02:40:52.000Z
|
2022-03-03T07:53:21.000Z
|
semantic_seg/tools/imports.py
|
Megvii-BaseDetection/DisAlign
|
a2fc3500a108cb83e3942293a5675c97ab3a2c6e
|
[
"Apache-2.0"
] | 8
|
2021-08-02T03:43:32.000Z
|
2022-02-24T09:04:46.000Z
|
# Borrowed from cvpods: https://github.com/Megvii-BaseDetection/cvpods/blob/master/cvpods/utils/imports.py
import importlib
import os.path as osp
def dynamic_import(config_path):
"""
Dynamic import a project.
    Args:
        config_path (str): path to the .py file to import as a module.
    Examples::
        >>> root = "/path/to/your/retinanet/"
        >>> project = root + "retinanet.res50.fpn.coco.800size.1x.mrcnn_sigmoid/config.py"
        >>> cfg = dynamic_import(project).config
"""
spec = importlib.util.spec_from_file_location("", osp.join(config_path))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
| 35.590909
| 106
| 0.678161
|
669bbd49b5ec36c8450e8d23d12d8926e0f08d2c
| 268
|
py
|
Python
|
EJERCICIOS/Ejercicio_4.py
|
DiegoC386/Taller-de-Estrucuras-de-Control-Repeticion
|
874fb29c5a50398290db631b6ad307c9ec228b1e
|
[
"MIT"
] | null | null | null |
EJERCICIOS/Ejercicio_4.py
|
DiegoC386/Taller-de-Estrucuras-de-Control-Repeticion
|
874fb29c5a50398290db631b6ad307c9ec228b1e
|
[
"MIT"
] | null | null | null |
EJERCICIOS/Ejercicio_4.py
|
DiegoC386/Taller-de-Estrucuras-de-Control-Repeticion
|
874fb29c5a50398290db631b6ad307c9ec228b1e
|
[
"MIT"
] | null | null | null |
"""
Compute the twelfth term and the sum of the first twelve
terms of the sequence: 6, 11, 16, 21.
Answer: a12=61, sum=402.
"""
a1=int(input("Enter the first term: "))
a12=int(input("Enter the last term: "))
NumTer=12
Suma=((a1+a12)*NumTer)/2
print(Suma)
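# Worked check of the expected result quoted in the docstring (common
# difference d = 5): a12 = a1 + 11*d = 6 + 11*5 = 61 and
# S12 = (a1 + a12) * 12 / 2 = (6 + 61) * 6 = 402.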
| 24.363636
| 58
| 0.708955
|
8e010105b490b29a6cf81ceaedaad83237dafa3e
| 828
|
py
|
Python
|
xonotic_radio/util.py
|
z/xonotic-radio-service
|
90ca68acbc12739bb634c4d66a2862326c7195d8
|
[
"MIT"
] | 1
|
2021-02-17T20:20:28.000Z
|
2021-02-17T20:20:28.000Z
|
xonotic_radio/util.py
|
z/xonotic-radio-service
|
90ca68acbc12739bb634c4d66a2862326c7195d8
|
[
"MIT"
] | null | null | null |
xonotic_radio/util.py
|
z/xonotic-radio-service
|
90ca68acbc12739bb634c4d66a2862326c7195d8
|
[
"MIT"
] | 1
|
2016-05-05T13:12:30.000Z
|
2016-05-05T13:12:30.000Z
|
import configparser
import time
import sys
import os
def reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed. " %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
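# Illustrative use of reporthook (an assumption, not shown in this module): its
# (count, block_size, total_size) signature matches urllib's download helper, e.g.
#   from urllib.request import urlretrieve
#   urlretrieve(url, local_path, reporthook)   # url/local_path are placeholders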
def read_config(config_file):
if not os.path.isfile(config_file):
raise SystemExit(config_file + ' not found, please create one.')
config = configparser.ConfigParser()
config.read(config_file)
return config['default'], config['endpoints']
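# Sketch of the INI layout read_config expects (section names come from the
# lookups above; the keys shown are placeholders):
#   [default]
#   some_option = some_value
#   [endpoints]
#   station_name = http://example.com/stream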
| 25.090909
| 78
| 0.649758
|
130acbc4b3963e9a1c661c33e08469a5f9448b7c
| 2,393
|
py
|
Python
|
parse.py
|
jbbrokaw/table-of-authorities
|
758d4808403c88c909c24ff308c24be305242ebd
|
[
"MIT"
] | null | null | null |
parse.py
|
jbbrokaw/table-of-authorities
|
758d4808403c88c909c24ff308c24be305242ebd
|
[
"MIT"
] | null | null | null |
parse.py
|
jbbrokaw/table-of-authorities
|
758d4808403c88c909c24ff308c24be305242ebd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import unicode_literals
import re
ALLOWED_SYMBOLS = {'&'}
ABBREVIATIONS = {'Co.',
'U.S.',
'Corp.',
'Inc.',
'Dist.'}
LOWERCASE_WORDS = {'the', 'a', 'an', 'and', 'of'}
CLAUSE_ENDERS = {'.', ';'}
def title_word(word):
if word[0].isupper() and (word[-1] not in CLAUSE_ENDERS):
return True
if word in ALLOWED_SYMBOLS:
return True
if word in LOWERCASE_WORDS:
return True
if word in ABBREVIATIONS:
return True
return False
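# Illustrative behaviour of title_word above (derived from the sets it checks):
# title_word('Smith') and title_word('Co.') return True, while title_word('court')
# and title_word('decided;') return False.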
def find_v_cites(text):
words = text.split()
date_re = re.compile('\d{4}\)')
while 'v.' in words:
v_index = words.index('v.')
begin_index = v_index - 1
while (begin_index >= 0) and title_word(words[begin_index]):
begin_index -= 1
begin_index = min(v_index - 1, begin_index + 1)
end_index = v_index + 1
while (end_index < len(words)) and \
(date_re.search(words[end_index]) is None):
end_index += 1
cite_string = " ".join(words[begin_index:end_index + 1])
if date_re.search(cite_string):
print cite_string
words = words[v_index + 1:]
def find_in_re_cites(text):
words = text.split()
date_re = re.compile('\d{4}\)')
while 're' in words:
re_index = words.index('re')
if (re_index < 1) or (words[re_index - 1] != 'In'):
words = words[re_index + 1:]
continue
end_index = re_index + 1
while (end_index < len(words)) and \
(date_re.search(words[end_index]) is None):
end_index += 1
cite_string = " ".join(words[re_index - 1:end_index + 1])
if date_re.search(cite_string):
print cite_string
words = words[re_index + 1:]
def main():
from sys import argv
from docx import Document
if len(argv) < 2:
print "Usage: python parse.py [file.docx]"
return
filename = argv[1]
file_name_parts = filename.split('.')
if (len(file_name_parts) < 2) or (file_name_parts[1] != 'docx'):
        print "Only .docx files supported currently"
        return
doc = Document(filename)
for paragraph in doc.paragraphs:
find_v_cites(paragraph.text)
find_in_re_cites(paragraph.text)
if __name__ == '__main__':
main()
| 27.825581
| 68
| 0.568742
|
f02e332b3488e9c5bfee9e240621b56afa30e873
| 6,241
|
py
|
Python
|
msvd/train_model.py
|
WingsBrokenAngel/general-professional-learning-model
|
c4b892033b814b99c36f1f33b36df787f715ff14
|
[
"MIT"
] | 39
|
2020-01-03T09:46:53.000Z
|
2022-01-26T14:00:31.000Z
|
msvd/train_model.py
|
WingsBrokenAngel/general-professional-learning-model
|
c4b892033b814b99c36f1f33b36df787f715ff14
|
[
"MIT"
] | 7
|
2020-02-21T09:21:56.000Z
|
2020-10-13T05:59:15.000Z
|
msvd/train_model.py
|
WingsBrokenAngel/general-professional-learning-model
|
c4b892033b814b99c36f1f33b36df787f715ff14
|
[
"MIT"
] | 13
|
2020-01-21T07:54:17.000Z
|
2021-11-27T10:02:34.000Z
|
# -*- coding: utf-8 -*-
# Author: Haoran Chen
# Date: 2019-09-17
import tensorflow as tf
import pickle
import numpy as np
import sys
from pprint import pprint
from collections import defaultdict
import time
sys.path.append('..')
from utils import *
np.random.seed(42)
data_dict = None
model = None
options = None
METRICS = {'Bleu_4': 0., 'CIDEr': 0.,
'METEOR': 0., 'ROUGE_L': 0.}
# METRICS = {'ROUGE_L': 0.}
MAX = {key: 0. for key in METRICS}
min_xe = 1000.
def cal_metrics(sess, phase):
sent_dict, sent_list = defaultdict(list), []
loss_list = []
logits_dict = {'xe': [], 'all': []}
if phase == "train":
ref = data_dict["ref"][0]
idx2cap = {idx: elem for idx, elem in enumerate(ref)}
idx_start, idx_end = 0, 1200
elif phase == "val":
ref = data_dict['ref'][1]
idx2cap = {idx+1200: elem for idx, elem in enumerate(ref)}
idx_start, idx_end = 1200, 1300
elif phase == "test":
ref = data_dict['ref'][2]
idx2cap = {idx+1300: elem for idx, elem in enumerate(ref)}
idx_start, idx_end = 1300, 1970
else:
raise ValueError("The phase should be val or test")
tag_feat = data_dict['tag_feat']
eco_res_feat = data_dict['eco_res_feat']
idx2gts = data_dict['idx2gts']
for idx in range(idx_start, idx_end):
tag, ervid = tag_feat[idx], eco_res_feat[idx]
tag, ervid = np.expand_dims(tag, 0), np.expand_dims(ervid, 0)
gts = idx2gts[idx]
maxlen = max([len(gt) for gt in gts])
gts_mat = np.zeros((maxlen, len(gts)), dtype=np.int32)
for idx2, gt in enumerate(gts):
gts_mat[:len(gt), idx2] = gt
# print('tag shape:', tag.shape, 'evid:', evid.shape, 'rvid:', rvid.shape)
wanted_ops = {
'generated_words': model.generated_words, 'test_loss': model.test_loss,
'xe_logits': model.xe_logits, 'all_logits': model.all_logits}
feed_dict = {
model.word_idx: gts_mat, model.vid_inputs: ervid, model.se_inputs: tag}
# sel_word_idx shape: (batch_size, beam_width, n_steps)
res = sess.run(wanted_ops, feed_dict)
generated_words = res['generated_words']
loss_list.append(res['test_loss'])
logits_dict['xe'].append(res['xe_logits'])
logits_dict['all'].append(res['all_logits'])
for x in np.squeeze(generated_words):
if x == 0:
break
sent_dict[idx].append(data_dict['idx2word'][x])
sent_dict[idx] = [' '.join(sent_dict[idx])]
sent_list.append(sent_dict[idx][0])
scores = score(idx2cap, sent_dict)
print(phase)
pprint(scores)
mean_loss = np.mean(loss_list)
print('average loss:', mean_loss, flush=True)
with open(flags.name+'_%s_output.log'%phase, 'w') as fo:
for sent in sent_list:
fo.write(sent+'\n')
with open(flags.name+'_%s_logits.pkl'%phase, 'wb') as fo:
pickle.dump([logits_dict['xe'], logits_dict['all']], fo, -1)
return scores, mean_loss
def main():
global data_dict, model, options
data_dict = get_data(flags)
options = get_options(data_dict)
model = get_model(options)
# model = get_gru(options)
best_score, save_path = 0., None
with model.graph.as_default():
global_step = tf.train.get_or_create_global_step()
train_op = get_train_op(model, options, global_step)
saver = tf.train.Saver()
config = get_config()
sess = tf.Session(config=config, graph=model.graph)
if flags.test is None:
sess.run(tf.global_variables_initializer())
train_idx1 = np.arange(options.train_size, dtype=np.int32)
train_idx2 = np.arange(options.train_size2, dtype=np.int32)
for idx in range(options.epoch):
start_time = time.perf_counter()
train_loss = []
if idx < options.threshold:
np.random.shuffle(train_idx1)
train_part1(train_idx1, train_op, train_loss,
sess, options, data_dict, model)
else:
np.random.shuffle(train_idx2)
train_part2(train_idx2, train_op, train_loss, sess,
idx, options, data_dict, model)
mean_train_loss = np.mean(train_loss)
print('epoch %d: loss %f.' % (idx, mean_train_loss))
scores, mean_val_loss = cal_metrics(sess, 'val')
# update maximum metrics values
global METRICS, MAX, min_xe
METRICS = {key: max(METRICS[key], scores[key]) for key in METRICS}
overall_score1 = np.mean([scores[key] / METRICS[key] for key in METRICS])
overall_score2 = np.mean([MAX[key] / METRICS[key] for key in METRICS])
if overall_score1 > overall_score2:
MAX = scores
save_path = saver.save(sess, './saves/%s-best.ckpt'%flags.name)
print('Epoch %d: the best model has been saved as %s.'
% (idx, save_path), flush=True)
end_time = time.perf_counter()
print('%d epoch: %.2fs.' % (idx, end_time - start_time))
saver.restore(sess, save_path)
cal_metrics(sess, "train")
cal_metrics(sess, 'test')
else:
saver.restore(sess, flags.test)
cal_metrics(sess, 'train')
cal_metrics(sess, 'val')
cal_metrics(sess, 'test')
sess.close()
if __name__ == "__main__":
tf.app.flags.DEFINE_string('name', '1', 'name of model')
tf.app.flags.DEFINE_string('corpus', None, 'Path to corpus file')
tf.app.flags.DEFINE_string('ecores', None, 'Path to ECO-RES feature files')
tf.app.flags.DEFINE_string('tag', None, 'Path to Tag feature files')
tf.app.flags.DEFINE_string('ref', None, 'Path to reference files')
tf.app.flags.DEFINE_string('test', None, 'Path to the saved parameters')
flags = tf.app.flags.FLAGS
start_time = time.perf_counter()
main()
end_time = time.perf_counter()
print('Total time: %.2fs' % (end_time - start_time))
| 39.251572
| 89
| 0.591412
|
457e20cb169349326f042d6e1bc0cb7e823985d2
| 496
|
py
|
Python
|
qupang/images/migrations/0004_auto_20190222_0010.py
|
kibinlee/qupang
|
3d9529c5079e0fd1b2c02dd5b237d2e784065ee3
|
[
"MIT"
] | null | null | null |
qupang/images/migrations/0004_auto_20190222_0010.py
|
kibinlee/qupang
|
3d9529c5079e0fd1b2c02dd5b237d2e784065ee3
|
[
"MIT"
] | null | null | null |
qupang/images/migrations/0004_auto_20190222_0010.py
|
kibinlee/qupang
|
3d9529c5079e0fd1b2c02dd5b237d2e784065ee3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.13 on 2019-02-21 15:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('images', '0003_auto_20190221_2330'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='Image',
new_name='image',
),
migrations.RenameField(
model_name='like',
old_name='Image',
new_name='image',
),
]
| 20.666667
| 48
| 0.548387
|
f585af08941d1d1545220bda4887eda90f9f1745
| 4,523
|
py
|
Python
|
libs/yolo_io.py
|
yangjjie94/labelSeries
|
620372b7d21e410efb009500fdd6cdc668e92106
|
[
"MIT"
] | 11
|
2018-10-17T08:57:27.000Z
|
2020-08-07T02:43:31.000Z
|
libs/yolo_io.py
|
yangjjie94/labelSeries
|
620372b7d21e410efb009500fdd6cdc668e92106
|
[
"MIT"
] | null | null | null |
libs/yolo_io.py
|
yangjjie94/labelSeries
|
620372b7d21e410efb009500fdd6cdc668e92106
|
[
"MIT"
] | 7
|
2018-08-13T01:51:02.000Z
|
2019-11-27T13:36:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import os
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree
import codecs
TXT_EXT = '.txt'
ENCODE_METHOD = 'utf-8'
class YOLOWriter:
def __init__(self, foldername, filename, imgSize, databaseSrc='Unknown', localImgPath=None):
self.foldername = foldername
self.filename = filename
self.databaseSrc = databaseSrc
self.imgSize = imgSize
self.boxlist = []
self.localImgPath = localImgPath
self.verified = False
def addBndBox(self, xmin, ymin, xmax, ymax, name, difficult):
bndbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
bndbox['name'] = name
bndbox['difficult'] = difficult
self.boxlist.append(bndbox)
def BndBox2YoloLine(self, box, classList=[]):
xmin = box['xmin']
xmax = box['xmax']
ymin = box['ymin']
ymax = box['ymax']
xcen = (xmin + xmax) / 2 / self.imgSize[1]
ycen = (ymin + ymax) / 2 / self.imgSize[0]
w = (xmax - xmin) / self.imgSize[1]
h = (ymax - ymin) / self.imgSize[0]
classIndex = classList.index(box['name'])
return classIndex, xcen, ycen, w, h
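    # Worked example (values assumed for illustration): with imgSize = [100, 200]
    # (height, width) and a box xmin=50, ymin=20, xmax=150, ymax=80, this returns
    # xcen = 100/200 = 0.5, ycen = 50/100 = 0.5, w = 100/200 = 0.5, h = 60/100 = 0.6,
    # i.e. the YOLO line "<classIndex> 0.5 0.5 0.5 0.6".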
def save(self, classList=[], targetFile=None):
out_file = None #Update yolo .txt
out_class_file = None #Update class list .txt
if targetFile is None:
out_file = open(
self.filename + TXT_EXT, 'w', encoding=ENCODE_METHOD)
classesFile = os.path.join(os.path.dirname(os.path.abspath(self.filename)), "classes.txt")
out_class_file = open(classesFile, 'w')
else:
out_file = codecs.open(targetFile, 'w', encoding=ENCODE_METHOD)
classesFile = os.path.join(os.path.dirname(os.path.abspath(targetFile)), "classes.txt")
out_class_file = open(classesFile, 'w')
for box in self.boxlist:
classIndex, xcen, ycen, w, h = self.BndBox2YoloLine(box, classList)
print (classIndex, xcen, ycen, w, h)
out_file.write("%d %.6f %.6f %.6f %.6f\n" % (classIndex, xcen, ycen, w, h))
print (classList)
print (out_class_file)
for c in classList:
out_class_file.write(c+'\n')
out_class_file.close()
out_file.close()
class YoloReader:
def __init__(self, filepath, image, classListPath=None):
# shapes type:
        # [label, [(x1,y1), (x2,y2), (x3,y3), (x4,y4)], color, color, difficult]
self.shapes = []
self.filepath = filepath
if classListPath is None:
dir_path = os.path.dirname(os.path.realpath(self.filepath))
self.classListPath = os.path.join(dir_path, "classes.txt")
else:
self.classListPath = classListPath
print (filepath, self.classListPath)
classesFile = open(self.classListPath, 'r')
self.classes = classesFile.read().strip('\n').split('\n')
print (self.classes)
imgSize = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
self.imgSize = imgSize
self.verified = False
# try:
self.parseYoloFormat()
# except:
# pass
def getShapes(self):
return self.shapes
def addShape(self, label, xmin, ymin, xmax, ymax, difficult):
points = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
self.shapes.append((label, points, None, None, difficult))
def yoloLine2Shape(self, classIndex, xcen, ycen, w, h):
label = self.classes[int(classIndex)]
xmin = max(float(xcen) - float(w) / 2, 0)
xmax = min(float(xcen) + float(w) / 2, 1)
ymin = max(float(ycen) - float(h) / 2, 0)
ymax = min(float(ycen) + float(h) / 2, 1)
xmin = int(self.imgSize[1] * xmin)
xmax = int(self.imgSize[1] * xmax)
ymin = int(self.imgSize[0] * ymin)
ymax = int(self.imgSize[0] * ymax)
return label, xmin, ymin, xmax, ymax
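    # Worked example (illustrative values): with imgSize = [100, 200, 3], the YOLO
    # line "0 0.5 0.5 0.5 0.6" maps back to xmin=50, ymin=20, xmax=150, ymax=80,
    # the inverse of the YOLOWriter example above.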
def parseYoloFormat(self):
bndBoxFile = open(self.filepath, 'r')
for bndBox in bndBoxFile:
classIndex, xcen, ycen, w, h = bndBox.split(' ')
label, xmin, ymin, xmax, ymax = self.yoloLine2Shape(classIndex, xcen, ycen, w, h)
# Caveat: difficult flag is discarded when saved as yolo format.
self.addShape(label, xmin, ymin, xmax, ymax, False)
| 32.078014
| 102
| 0.586115
|
cf295c7634d1e5d3f5fcad4c1172d59b932de150
| 2,136
|
py
|
Python
|
src/sco/code_gen/gen_operations_code.py
|
HARPLab/trajopt
|
40e2260d8f1e4d0a6a7a8997927bd65e5f36c3a4
|
[
"BSD-2-Clause"
] | 250
|
2015-01-13T04:38:59.000Z
|
2022-03-09T15:52:54.000Z
|
src/sco/code_gen/gen_operations_code.py
|
HARPLab/trajopt
|
40e2260d8f1e4d0a6a7a8997927bd65e5f36c3a4
|
[
"BSD-2-Clause"
] | 31
|
2015-08-19T13:14:56.000Z
|
2022-03-22T08:08:26.000Z
|
src/sco/code_gen/gen_operations_code.py
|
HARPLab/trajopt
|
40e2260d8f1e4d0a6a7a8997927bd65e5f36c3a4
|
[
"BSD-2-Clause"
] | 118
|
2015-01-08T16:06:50.000Z
|
2022-03-19T11:44:00.000Z
|
addition_overloads = """
inline AffExpr operator+(const Var& x, double y) {
return exprAdd(AffExpr(x), y);
}
inline AffExpr operator+(const AffExpr& x, double y) {
return exprAdd(x, y);
}
inline QuadExpr operator+(const QuadExpr& x, double y) {
return exprAdd(x, y);
}
inline AffExpr operator+(const Var& x, const Var& y) {
return exprAdd(AffExpr(x), y);
}
inline AffExpr operator+(const AffExpr& x, const Var& y) {
return exprAdd(x, y);
}
inline QuadExpr operator+(const QuadExpr& x, const Var& y) {
return exprAdd(x, y);
}
inline AffExpr operator+(const Var& x, const AffExpr& y) {
return exprAdd(AffExpr(x), y);
}
inline AffExpr operator+(const AffExpr& x, const AffExpr& y) {
return exprAdd(x, y);
}
inline QuadExpr operator+(const QuadExpr& x, const AffExpr& y) {
return exprAdd(x, y);
}
inline QuadExpr operator+(const Var& x, const QuadExpr& y) {
return exprAdd(QuadExpr(x), y);
}
inline QuadExpr operator+(const AffExpr& x, const QuadExpr& y) {
return exprAdd(QuadExpr(x), y);
}
inline QuadExpr operator+(const QuadExpr& x, const QuadExpr& y) {
return exprAdd(x, y);
}
"""
subtraction_overloads = addition_overloads.replace("operator+", "operator-").replace("exprAdd","exprSub")
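# The replace() calls above turn every generated addition overload into the
# matching subtraction overload; for example the first one becomes:
#   inline AffExpr operator-(const Var& x, double y) {
#     return exprSub(AffExpr(x), y);
#   }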
def print_overloads():
print addition_overloads
print subtraction_overloads
addition_funcs = """
inline AffExpr exprAdd(AffExpr a, double b) {
exprInc(a, b);
return a;
}
inline AffExpr exprAdd(AffExpr a, const Var& b) {
exprInc(a, b);
return a;
}
inline AffExpr exprAdd(AffExpr a, const AffExpr& b) {
exprInc(a, b);
return a;
}
inline QuadExpr exprAdd(QuadExpr a, double b) {
exprInc(a, b);
return a;
}
inline QuadExpr exprAdd(QuadExpr a, const Var& b) {
exprInc(a, b);
return a;
}
inline QuadExpr exprAdd(QuadExpr a, const AffExpr& b) {
exprInc(a, b);
return a;
}
inline QuadExpr exprAdd(QuadExpr a, const QuadExpr& b) {
exprInc(a, b);
return a;
}
"""
subtraction_funcs = addition_funcs.replace("Add", "Sub").replace("Inc","Dec")
def print_funcs():
print addition_funcs
print subtraction_funcs
print_overloads()
print "///////////////"
print_funcs()
| 22.967742
| 105
| 0.683989
|
1b7acf468bb0b56d3a499d5c9591f90799cfd99a
| 9,077
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_dvmdb_folder.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_dvmdb_folder.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_dvmdb_folder.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_dvmdb_folder
short_description: no description
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
dvmdb_folder:
description: the top level parameters set
required: false
type: dict
suboptions:
desc:
type: str
description: 'Desc.'
name:
type: str
description: 'Name.'
parent:
type: int
description: 'Parent.'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: no description
fmgr_dvmdb_folder:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
state: <value in [present, absent]>
dvmdb_folder:
desc: <value of string>
name: <value of string>
parent: <value of integer>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/dvmdb/folder',
'/dvmdb/adom/{adom}/folder'
]
perobject_jrpc_urls = [
'/dvmdb/folder/{folder}',
'/dvmdb/adom/{adom}/folder/{folder}'
]
url_params = ['adom']
module_primary_key = 'name'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'dvmdb_folder': {
'required': False,
'type': 'dict',
'revision': {
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'desc': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'name': {
'required': True,
'revision': {
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'parent': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'dvmdb_folder'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 31.3
| 153
| 0.565936
|
6b85754033a054d088d7c35f4c4f0fa91ee23d2e
| 13,195
|
py
|
Python
|
v2.5.7/toontown/estate/BankGUI.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-01T15:46:43.000Z
|
2021-07-23T16:26:48.000Z
|
v2.5.7/toontown/estate/BankGUI.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 1
|
2019-06-29T03:40:05.000Z
|
2021-06-13T01:15:16.000Z
|
v2.5.7/toontown/estate/BankGUI.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-28T21:18:46.000Z
|
2021-02-25T06:37:25.000Z
|
from direct.gui.DirectGui import *
from panda3d.core import *
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from direct.task.Task import Task
class BankGui(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('BankGui')
def __init__(self, doneEvent, allowWithdraw=1):
DirectFrame.__init__(self, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1.33,
1,
1.1), pos=(0,
0,
0))
self.initialiseoptions(BankGui)
self.doneEvent = doneEvent
self.__transactionAmount = 0
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
jarGui = loader.loadModel('phase_3.5/models/gui/jar_gui')
arrowGui = loader.loadModel('phase_3/models/gui/create_a_toon_gui')
bankModel = loader.loadModel('phase_5.5/models/estate/jellybeanBank')
bankModel.setDepthWrite(1)
bankModel.setDepthTest(1)
bankModel.find('**/jellybeans').setDepthWrite(0)
bankModel.find('**/jellybeans').setDepthTest(0)
okImageList = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
cancelImageList = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr'))
arrowImageList = (arrowGui.find('**/CrtATn_R_Arrow_UP'),
arrowGui.find('**/CrtATn_R_Arrow_DN'),
arrowGui.find('**/CrtATn_R_Arrow_RLVR'),
arrowGui.find('**/CrtATn_R_Arrow_UP'))
self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, pos=(-0.2,
0,
-0.4), text=TTLocalizer.BankGuiCancel, text_scale=0.06, text_pos=(0,
-0.1), command=self.__cancel)
self.okButton = DirectButton(parent=self, relief=None, image=okImageList, pos=(0.2,
0,
-0.4), text=TTLocalizer.BankGuiOk, text_scale=0.06, text_pos=(0,
-0.1), command=self.__requestTransaction)
self.jarDisplay = DirectLabel(parent=self, relief=None, pos=(-0.4, 0, 0), scale=0.7, text=str(base.localAvatar.getMoney()), text_scale=0.2, text_fg=(0.95,
0.95,
0,
1), text_shadow=(0,
0,
0,
1), text_pos=(0,
-0.1,
0), image=jarGui.find('**/Jar'), text_font=ToontownGlobals.getSignFont())
self.bankDisplay = DirectLabel(parent=self, relief=None, pos=(0.4, 0, 0), scale=0.9, text=str(base.localAvatar.getBankMoney()), text_scale=0.2, text_fg=(0.95,
0.95,
0,
1), text_shadow=(0,
0,
0,
1), text_pos=(0,
-0.1,
0), geom=bankModel, geom_scale=0.08, geom_pos=(0,
10,
-0.26), geom_hpr=(0,
0,
0), text_font=ToontownGlobals.getSignFont())
self.depositArrow = DirectButton(parent=self, relief=None, image=arrowImageList, image_scale=(1,
1,
1), image3_color=Vec4(0.6, 0.6, 0.6, 0.25), pos=(0.01,
0,
0.15))
self.withdrawArrow = DirectButton(parent=self, relief=None, image=arrowImageList, image_scale=(-1,
1,
1), image3_color=Vec4(0.6, 0.6, 0.6, 0.25), pos=(-0.01,
0,
-0.15))
self.depositArrow.bind(DGG.B1PRESS, self.__depositButtonDown)
self.depositArrow.bind(DGG.B1RELEASE, self.__depositButtonUp)
self.withdrawArrow.bind(DGG.B1PRESS, self.__withdrawButtonDown)
self.withdrawArrow.bind(DGG.B1RELEASE, self.__withdrawButtonUp)
self.accept('bankAsleep', self.__cancel)
self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
self.accept(localAvatar.uniqueName('bankMoneyChange'), self.__bankMoneyChange)
if allowWithdraw:
self.depositArrow.setPos(0.01, 0, 0.15)
self.withdrawArrow.setPos(-0.01, 0, -0.15)
else:
self.depositArrow.setPos(0, 0, 0)
self.withdrawArrow.hide()
buttons.removeNode()
jarGui.removeNode()
arrowGui.removeNode()
self.__updateTransaction(0)
return
def destroy(self):
taskMgr.remove(self.taskName('runCounter'))
self.ignore(localAvatar.uniqueName('moneyChange'))
self.ignore(localAvatar.uniqueName('bankMoneyChange'))
DirectFrame.destroy(self)
def __cancel(self):
messenger.send(self.doneEvent, [0])
def __requestTransaction(self):
messenger.send(self.doneEvent, [self.__transactionAmount])
def __updateTransaction(self, amount):
hitLimit = 0
self.__transactionAmount += amount
jarMoney = base.localAvatar.getMoney()
maxJarMoney = base.localAvatar.getMaxMoney()
bankMoney = base.localAvatar.getBankMoney()
maxBankMoney = base.localAvatar.getMaxBankMoney()
self.__transactionAmount = min(self.__transactionAmount, jarMoney)
self.__transactionAmount = min(self.__transactionAmount, maxBankMoney - bankMoney)
self.__transactionAmount = -min(-self.__transactionAmount, maxJarMoney - jarMoney)
self.__transactionAmount = -min(-self.__transactionAmount, bankMoney)
newJarMoney = jarMoney - self.__transactionAmount
newBankMoney = bankMoney + self.__transactionAmount
if newJarMoney <= 0 or newBankMoney >= maxBankMoney:
self.depositArrow['state'] = DGG.DISABLED
hitLimit = 1
else:
self.depositArrow['state'] = DGG.NORMAL
if newBankMoney <= 0 or newJarMoney >= maxJarMoney:
self.withdrawArrow['state'] = DGG.DISABLED
hitLimit = 1
else:
self.withdrawArrow['state'] = DGG.NORMAL
self.jarDisplay['text'] = str(newJarMoney)
self.bankDisplay['text'] = str(newBankMoney)
return (
hitLimit,
newJarMoney,
newBankMoney,
self.__transactionAmount)
def __runCounter(self, task):
if task.time - task.prevTime < task.delayTime:
return Task.cont
task.delayTime = max(0.05, task.delayTime * 0.75)
task.prevTime = task.time
hitLimit, jar, bank, trans = self.__updateTransaction(task.delta)
if hitLimit:
return Task.done
return Task.cont
def __depositButtonUp(self, event):
messenger.send('wakeup')
taskMgr.remove(self.taskName('runCounter'))
def __depositButtonDown(self, event):
messenger.send('wakeup')
task = Task(self.__runCounter)
task.delayTime = 0.4
task.prevTime = 0.0
task.delta = 1
hitLimit, jar, bank, trans = self.__updateTransaction(task.delta)
if not hitLimit:
taskMgr.add(task, self.taskName('runCounter'))
def __withdrawButtonUp(self, event):
messenger.send('wakeup')
taskMgr.remove(self.taskName('runCounter'))
def __withdrawButtonDown(self, event):
messenger.send('wakeup')
task = Task(self.__runCounter)
task.delayTime = 0.4
task.prevTime = 0.0
task.delta = -1
hitLimit, jar, bank, trans = self.__updateTransaction(task.delta)
if not hitLimit:
taskMgr.add(task, self.taskName('runCounter'))
def __moneyChange(self, money):
self.__updateTransaction(0)
def __bankMoneyChange(self, bankMoney):
self.__updateTransaction(0)
| 74.129213
| 301
| 0.363698
|
f21502b44f2b67fb195d72cdb7238766c635f7e7
| 793
|
py
|
Python
|
kentik_synth_client/synth_tests/ip.py
|
kentik/synth_tools
|
6551e9caf049e4592da4c28e23341d99fac08d58
|
[
"Apache-2.0"
] | 2
|
2021-10-20T01:01:21.000Z
|
2022-02-21T22:02:26.000Z
|
kentik_synth_client/synth_tests/ip.py
|
kentik/synth_tools
|
6551e9caf049e4592da4c28e23341d99fac08d58
|
[
"Apache-2.0"
] | null | null | null |
kentik_synth_client/synth_tests/ip.py
|
kentik/synth_tools
|
6551e9caf049e4592da4c28e23341d99fac08d58
|
[
"Apache-2.0"
] | 1
|
2021-11-02T01:46:41.000Z
|
2021-11-02T01:46:41.000Z
|
from dataclasses import dataclass, field
from typing import List, Type, TypeVar
from kentik_synth_client.types import *
from .base import PingTraceTest, PingTraceTestSettings, sort_ip_address_list
@dataclass
class IPTestSettings(PingTraceTestSettings):
ip: dict = field(default_factory=dict)
IPTestType = TypeVar("IPTestType", bound="IPTest")
@dataclass
class IPTest(PingTraceTest):
type: TestType = field(init=False, default=TestType.ip)
settings: IPTestSettings = field(default_factory=IPTestSettings)
@classmethod
def create(cls: Type[IPTestType], name: str, targets: List[str], agent_ids: List[str]) -> IPTestType:
return cls(
name=name, settings=IPTestSettings(agentIds=agent_ids, ip=dict(targets=sort_ip_address_list(targets)))
)
| 29.37037
| 114
| 0.752837
|
2528468d38953b37d009f0420915c8593a40d39e
| 11,271
|
py
|
Python
|
Bio/SeqIO/XdnaIO.py
|
gtsueng/biopython
|
4b2adc9f52ae1eda123744a8f4af7c2150505de1
|
[
"BSD-3-Clause"
] | 1
|
2020-11-27T15:46:03.000Z
|
2020-11-27T15:46:03.000Z
|
Bio/SeqIO/XdnaIO.py
|
gtsueng/biopython
|
4b2adc9f52ae1eda123744a8f4af7c2150505de1
|
[
"BSD-3-Clause"
] | null | null | null |
Bio/SeqIO/XdnaIO.py
|
gtsueng/biopython
|
4b2adc9f52ae1eda123744a8f4af7c2150505de1
|
[
"BSD-3-Clause"
] | 1
|
2021-01-07T07:55:09.000Z
|
2021-01-07T07:55:09.000Z
|
# Copyright 2017-2019 Damien Goutte-Gattat. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.SeqIO support for the "xdna" file format.
The Xdna binary format is generated by Christian Marck's DNA Strider program
and also used by Serial Cloner.
"""
from re import match
from struct import pack, unpack
import warnings
from Bio import Alphabet, BiopythonWarning
from Bio.Seq import Seq
from Bio.SeqIO.Interfaces import SequenceWriter
from Bio.SeqFeature import SeqFeature, FeatureLocation, ExactPosition
from Bio.SeqRecord import SeqRecord
_seq_types = {
0: Alphabet.generic_alphabet,
1: Alphabet.generic_dna,
2: Alphabet.generic_dna,
3: Alphabet.generic_rna,
4: Alphabet.generic_protein
}
_seq_topologies = {
0: 'linear',
1: 'circular'
}
def _read(handle, length):
"""Read the specified number of bytes from the given handle."""
data = handle.read(length)
if len(data) < length:
raise ValueError("Cannot read %d bytes from handle" % length)
return data
def _read_pstring(handle):
"""Read a Pascal string.
A Pascal string comprises a single byte giving the length of the string
followed by as many bytes.
"""
length = unpack('>B', _read(handle, 1))[0]
return unpack('%ds' % length, _read(handle, length))[0].decode('ASCII')
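# Illustrative sketch, not part of the original module: with an in-memory
# handle the leading length byte drives the read, e.g.
#     from io import BytesIO
#     _read_pstring(BytesIO(b'\x02AA'))  # -> 'AA' (length byte 0x02, then 2 bytes)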
def _read_pstring_as_integer(handle):
return int(_read_pstring(handle))
def _read_overhang(handle):
"""Read an overhang specification.
An overhang is represented in a XDNA file as:
- a Pascal string containing the text representation of the overhang
length, which also indicates the nature of the overhang:
- a length of zero means no overhang,
- a negative length means a 3' overhang,
- a positive length means a 5' overhang;
- the actual overhang sequence.
Examples:
- 0x01 0x30: no overhang ("0", as a P-string)
- 0x01 0x32 0x41 0x41: 5' AA overhang (P-string "2", then "AA")
- 0x02 0x2D 0x31 0x43: 3' C overhang (P-string "-1", then "C")
Returns a tuple (length, sequence).
"""
length = _read_pstring_as_integer(handle)
if length != 0:
overhang = _read(handle, abs(length))
return (length, overhang)
else:
return (None, None)
def _parse_feature_description(desc, qualifiers):
"""Parse the description field of a Xdna feature.
The 'description' field of a feature sometimes contains several
GenBank-like qualifiers, separated by carriage returns (CR, 0x0D).
"""
# Split the field's value in CR-separated lines, skipping empty lines
for line in [x for x in desc.split('\x0D') if len(x) > 0]:
# Is it a qualifier="value" line?
m = match('^([^=]+)="([^"]+)"?$', line)
if m:
# Store the qualifier as provided
qual, value = m.groups()
qualifiers[qual] = [value]
elif '"' not in line: # Reject ill-formed qualifiers
# Store the entire line as a generic note qualifier
qualifiers['note'] = [line]
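# Illustrative sketch only: for desc = 'gene="xyz"\x0Dsome free text' the loop
# above produces {'gene': ['xyz'], 'note': ['some free text']}; the first line
# matches the qualifier="value" pattern and the second, quote-free line falls
# through to the generic 'note' qualifier.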
def _read_feature(handle, record):
"""Read a single sequence feature."""
name = _read_pstring(handle)
desc = _read_pstring(handle)
type = _read_pstring(handle) or 'misc_feature'
start = _read_pstring_as_integer(handle)
end = _read_pstring_as_integer(handle)
# Feature flags (4 bytes):
# byte 1 is the strand (0: reverse strand, 1: forward strand);
# byte 2 tells whether to display the feature;
# byte 4 tells whether to draw an arrow when displaying the feature;
# meaning of byte 3 is unknown.
(forward, display, arrow) = unpack('>BBxB', _read(handle, 4))
if forward:
strand = 1
else:
strand = -1
start, end = end, start
# The last field is a Pascal string usually containing a
# comma-separated triplet of numbers ranging from 0 to 255.
# I suspect this represents the RGB color to use when displaying
# the feature. Skip it as we have no need for it.
_read_pstring(handle)
# Assemble the feature
# Shift start by -1 as XDNA feature coordinates are 1-based
    # while Biopython uses 0-based counting.
location = FeatureLocation(start - 1, end, strand=strand)
qualifiers = {}
if name:
qualifiers['label'] = [name]
_parse_feature_description(desc, qualifiers)
feature = SeqFeature(location, type=type, qualifiers=qualifiers)
record.features.append(feature)
def XdnaIterator(handle):
"""Parse a Xdna file and return a SeqRecord object.
Note that this is an "iterator" in name only since a Xdna file always
    contains a single sequence.
"""
# Parse fixed-size header and do some rudimentary checks
#
# The "neg_length" value is the length of the part of the sequence
# before the nucleotide considered as the "origin" (nucleotide number 1,
# which in DNA Strider is not always the first nucleotide).
# Biopython's SeqRecord has no such concept of a sequence origin as far
# as I know, so we ignore that value. SerialCloner has no such concept
# either and always generates files with a neg_length of zero.
header = _read(handle, 112)
(version, type, topology, length, neg_length, com_length) = unpack('>BBB25xII60xI12x', header)
if version != 0:
raise ValueError("Unsupported XDNA version")
if type not in _seq_types:
raise ValueError("Unknown sequence type")
# Read actual sequence and comment found in all XDNA files
sequence = _read(handle, length).decode('ASCII')
comment = _read(handle, com_length).decode('ASCII')
# Try to derive a name from the first "word" of the comment
name = comment.split(' ')[0]
# Create record object
record = SeqRecord(Seq(sequence, _seq_types[type]),
description=comment, name=name, id=name)
if topology in _seq_topologies:
record.annotations['topology'] = _seq_topologies[topology]
if len(handle.read(1)) == 1:
# This is an XDNA file with an optional annotation section.
# Skip the overhangs as I don't know how to represent
# them in the SeqRecord model.
_read_overhang(handle) # right-side overhang
_read_overhang(handle) # left-side overhang
# Read the features
num_features = unpack('>B', _read(handle, 1))[0]
while num_features > 0:
_read_feature(handle, record)
num_features -= 1
yield record
class XdnaWriter(SequenceWriter):
"""Write files in the Xdna format."""
def write_file(self, records):
"""Write the specified record to a Xdna file.
Note that the function expects a list of records as per the
SequenceWriter interface, but the list should contain only one
record as the Xdna format is a mono-record format.
"""
if not records:
raise ValueError("Must have one sequence")
if len(records) > 1:
raise ValueError("More than one sequence found")
record = records[0]
self._has_truncated_strings = False
alptype = Alphabet._get_base_alphabet(record.seq.alphabet)
if isinstance(alptype, Alphabet.DNAAlphabet):
seqtype = 1
elif isinstance(alptype, Alphabet.RNAAlphabet):
seqtype = 3
elif isinstance(alptype, Alphabet.ProteinAlphabet):
seqtype = 4
else:
seqtype = 0
if record.annotations.get('topology', 'linear') == 'circular':
topology = 1
else:
topology = 0
# We store the record's id and description in the comment field.
# Make sure to avoid duplicating the id if it is already
# contained in the description.
if record.description.startswith(record.id):
comment = record.description
else:
comment = '{} {}'.format(record.id, record.description)
# Write header
self.handle.write(pack('>BBB25xII60xI11xB',
0, # version
seqtype, topology, len(record),
0, # negative length
len(comment),
255 # end of header
))
# Actual sequence and comment
self.handle.write(str(record.seq).encode('ASCII'))
self.handle.write(comment.encode('ASCII'))
self.handle.write(pack('>B', 0)) # Annotation section marker
self._write_pstring('0') # right-side overhang
        self._write_pstring('0')  # left-side overhang
# Write features
# We must skip features with fuzzy locations as they cannot be
# represented in the Xdna format
features = [f for f in record.features if type(f.location.start) == ExactPosition and type(f.location.end) == ExactPosition]
drop = len(record.features) - len(features)
if drop > 0:
warnings.warn("Dropping {} features with fuzzy locations".format(drop),
BiopythonWarning)
# We also cannot store more than 255 features as the number of
# features is stored on a single byte...
if len(features) > 255:
drop = len(features) - 255
warnings.warn("Too many features, dropping the last {}".format(drop),
BiopythonWarning)
features = features[:255]
self.handle.write(pack('>B', len(features)))
for feature in features:
self._write_pstring(feature.qualifiers.get('label', [''])[0])
description = ''
for qname in feature.qualifiers:
if qname in ('label', 'translation'):
continue
for val in feature.qualifiers[qname]:
if len(description) > 0:
description = description + '\x0D'
description = description + '%s="%s"' % (qname, val)
self._write_pstring(description)
self._write_pstring(feature.type)
start = feature.location.start.position + 1 # 1-based coordinates
end = feature.location.end.position
strand = 1
if feature.location.strand == -1:
start, end = end, start
strand = 0
self._write_pstring(str(start))
self._write_pstring(str(end))
self.handle.write(pack('>BBBB', strand, 1, 0, 1))
self._write_pstring('127,127,127')
if self._has_truncated_strings:
warnings.warn("Some annotations were truncated to 255 characters",
BiopythonWarning)
return 1
def _write_pstring(self, s):
"""Write the given string as a Pascal string."""
if len(s) > 255:
self._has_truncated_strings = True
s = s[:255]
self.handle.write(pack('>B', len(s)))
self.handle.write(s.encode('ASCII'))
| 36.009585
| 132
| 0.62541
|
747cae9b64eb3032e71498dd3dad7880cdc010c4
| 760
|
bzl
|
Python
|
third_party/tf_runtime/workspace.bzl
|
zebrajr/tensorflow
|
d1f3ce8b2bc17e7c885266058dd9a4b74dc8e5e5
|
[
"Apache-2.0"
] | null | null | null |
third_party/tf_runtime/workspace.bzl
|
zebrajr/tensorflow
|
d1f3ce8b2bc17e7c885266058dd9a4b74dc8e5e5
|
[
"Apache-2.0"
] | null | null | null |
third_party/tf_runtime/workspace.bzl
|
zebrajr/tensorflow
|
d1f3ce8b2bc17e7c885266058dd9a4b74dc8e5e5
|
[
"Apache-2.0"
] | null | null | null |
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "0f09e1bfa72855b9f00c28dd95a95f848c42170c"
TFRT_SHA256 = "21923a998212b9b1f3b05b4cf00f18c5e5c866b54fc603ffa2758834df9039b7"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| 36.190476
| 126
| 0.680263
|
bb4cd6e251487ed0805a7f28bfb750bc1f2d0f4a
| 1,488
|
py
|
Python
|
back/disorders/migrations/0003_auto_20200721_2328.py
|
EDario333/idia
|
21cab7057f924c58ec098c27effcee1a8f0dc94e
|
[
"BSD-3-Clause"
] | null | null | null |
back/disorders/migrations/0003_auto_20200721_2328.py
|
EDario333/idia
|
21cab7057f924c58ec098c27effcee1a8f0dc94e
|
[
"BSD-3-Clause"
] | 5
|
2021-03-11T05:33:41.000Z
|
2022-02-27T10:21:50.000Z
|
back/disorders/migrations/0003_auto_20200721_2328.py
|
EDario333/idia
|
21cab7057f924c58ec098c27effcee1a8f0dc94e
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2.13 on 2020-07-21 23:28
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('axis', '0003_auto_20200721_2328'),
('disorders', '0002_auto_20200618_2224'),
]
operations = [
migrations.AddField(
model_name='disorder',
name='axis',
field=models.OneToOneField(default=None, on_delete=django.db.models.deletion.PROTECT, to='axis.Axis', verbose_name='Axis'),
),
migrations.AddField(
model_name='disorder',
name='parent',
field=models.OneToOneField(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='disorders.Disorder', verbose_name='Parent'),
),
migrations.AlterField(
model_name='disorder',
name='created_at',
field=models.TimeField(default=datetime.datetime(2020, 7, 21, 23, 28, 4, 145955), editable=False),
),
migrations.AlterField(
model_name='disorder',
name='created_when',
field=models.DateField(default=datetime.datetime(2020, 7, 21, 23, 28, 4, 146001), editable=False),
),
migrations.AlterField(
model_name='disorder',
name='symptons',
field=models.ManyToManyField(default=None, to='symptons.Sympton', verbose_name='Symptons'),
),
]
| 35.428571
| 169
| 0.617608
|
cda70354a39e1bd9a4dba44f74dd4f6d531d4f18
| 8,578
|
py
|
Python
|
src/dialognlu/models/base_joint_trans.py
|
hyydrra/dialog-nlu
|
1a2b8cd18fcdcc3ed6374b83ec23ebd9a1a6d25e
|
[
"Apache-2.0"
] | null | null | null |
src/dialognlu/models/base_joint_trans.py
|
hyydrra/dialog-nlu
|
1a2b8cd18fcdcc3ed6374b83ec23ebd9a1a6d25e
|
[
"Apache-2.0"
] | null | null | null |
src/dialognlu/models/base_joint_trans.py
|
hyydrra/dialog-nlu
|
1a2b8cd18fcdcc3ed6374b83ec23ebd9a1a6d25e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: mwahdan
"""
from .nlu_model import NLUModel
from .callbacks import F1Metrics
import tensorflow as tf
import numpy as np
import os
import json
class BaseJointTransformerModel(NLUModel):
def __init__(self, config, trans_model=None, is_load=False):
self.slots_num = config.get('slots_num')
self.intents_num = config.get('intents_num')
self.pretrained_model_name_or_path = config.get('pretrained_model_name_or_path')
self.cache_dir = config.get('cache_dir', None)
self.from_pt = config.get('from_pt', False)
self.num_bert_fine_tune_layers = config.get('num_bert_fine_tune_layers', 10)
self.intent_loss_weight = config.get('intent_loss_weight', 1.0)
self.slots_loss_weight = config.get('slots_loss_weight', 3.0)
self.max_length = config.get('max_length')
self.model_params = config
if not is_load:
self.trans_model = trans_model
self.build_model()
self.compile_model()
def compile_model(self):
# Instead of `using categorical_crossentropy`,
# we use `sparse_categorical_crossentropy`, which does expect integer targets.
        optimizer = tf.keras.optimizers.Adam(lr=5e-5)  # was 0.001
losses = {
'slots_tagger': 'sparse_categorical_crossentropy',
'intent_classifier': 'sparse_categorical_crossentropy',
}
loss_weights = {'slots_tagger': self.slots_loss_weight, 'intent_classifier': self.intent_loss_weight}
metrics = {'intent_classifier': 'acc'}
self.model.compile(optimizer=optimizer, loss=losses, loss_weights=loss_weights, metrics=metrics)
self.model.summary()
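    # Hedged illustration, not from the original source: with the sparse loss the
    # targets stay as integer class ids, e.g. intent labels shaped (batch,) such
    # as [2, 0, 5], rather than one-hot rows like [0, 0, 1, 0, ...]; the model
    # outputs keep their (batch, num_classes) softmax shape.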
def build_model(self):
raise NotImplementedError()
def save(self, model_path):
raise NotImplementedError()
@staticmethod
def load(load_folder_path):
raise NotImplementedError()
def fit(self, X, Y, validation_data=None, epochs=5, batch_size=32,
id2label=None):
X["valid_positions"] = self.prepare_valid_positions(X["valid_positions"])
if validation_data is not None:
X_val, Y_val = validation_data
X_val["valid_positions"] = self.prepare_valid_positions(X_val["valid_positions"])
validation_data = (X_val, Y_val)
callbacks = [F1Metrics(id2label, validation_data=validation_data)]
history = self.model.fit(X, Y, validation_data=validation_data,
epochs=epochs, batch_size=batch_size,
callbacks=callbacks)
self.visualize_metric(history.history, 'slots_tagger_loss')
self.visualize_metric(history.history, 'intent_classifier_loss')
self.visualize_metric(history.history, 'loss')
self.visualize_metric(history.history, 'intent_classifier_acc')
def prepare_valid_positions(self, in_valid_positions):
in_valid_positions = np.expand_dims(in_valid_positions, axis=2)
in_valid_positions = np.tile(in_valid_positions, (1, 1, self.slots_num))
return in_valid_positions
def predict_slots_intent(self, x, slots_vectorizer, intent_vectorizer, remove_start_end=True,
include_intent_prob=False):
# print("/models/base_joint_trans.py")
valid_positions = x["valid_positions"]
x["valid_positions"] = self.prepare_valid_positions(valid_positions)
y_slots, y_intent = self.predict(x)
slots = slots_vectorizer.inverse_transform(y_slots, valid_positions)
if remove_start_end:
slots = [x[1:-1] for x in slots]
if not include_intent_prob:
intents = np.array([intent_vectorizer.inverse_transform([np.argmax(i)])[0] for i in y_intent])
else:
intents = np.array([(intent_vectorizer.inverse_transform([np.argmax(i)])[0], round(float(np.max(i)), 4)) for i in y_intent])
return slots, intents
def predict_intent1(self, x, slots_vectorizer, intent_vectorizer, num_intents):
valid_positions = x["valid_positions"]
x["valid_positions"] = self.prepare_valid_positions(valid_positions)
y_slots, y_intent = self.predict(x)
top_intents_indexes = (-y_intent[0]).argsort()[:num_intents]
intents = []
for index in top_intents_indexes:
intents.append([intent_vectorizer.inverse_transform([np.int64(index)])[0], round(float(y_intent[0][index]), 4)])
return intents, intents
def save_to_path(self, model_path, trans_model_name):
self.model_params["class"] = self.__class__.__name__
with open(os.path.join(model_path, 'params.json'), 'w') as json_file:
json.dump(self.model_params, json_file)
self.model.save(os.path.join(model_path, trans_model_name))
@staticmethod
def load_model_by_class(klazz, load_folder_path, trans_model_name):
with open(os.path.join(load_folder_path, 'params.json'), 'r') as json_file:
model_params = json.load(json_file)
new_model = klazz(model_params, trans_model=None, is_load=True)
new_model.model = tf.keras.models.load_model(os.path.join(load_folder_path, trans_model_name))
new_model.compile_model()
return new_model
class TfliteBaseJointTransformerModel:
def __init__(self, config):
self.config = config
self.slots_num = config['slots_num']
self.interpreter = None
def predict_slots_intent(self, x, slots_vectorizer, intent_vectorizer, remove_start_end=True,
include_intent_prob=False):
# x = {k:v[0] for k,v in x.items()}
valid_positions = x["valid_positions"]
x["valid_positions"] = self.prepare_valid_positions(valid_positions)
y_slots, y_intent = self.predict(x)
slots = slots_vectorizer.inverse_transform(y_slots, valid_positions)
if remove_start_end:
slots = [x[1:-1] for x in slots]
if not include_intent_prob:
intents = np.array([intent_vectorizer.inverse_transform([np.argmax(i)])[0] for i in y_intent])
else:
intents = np.array([(intent_vectorizer.inverse_transform([np.argmax(i)])[0], round(float(np.max(i)), 4)) for i in y_intent])
return slots[0], intents[0]
def prepare_valid_positions(self, in_valid_positions):
in_valid_positions = np.expand_dims(in_valid_positions, axis=2)
in_valid_positions = np.tile(in_valid_positions, (1, 1, self.slots_num))
return in_valid_positions
def predict(self, inputs):
raise NotImplementedError()
@staticmethod
def load_model_by_class(clazz, path):
with open(os.path.join(path, 'params.json'), 'r') as json_file:
model_params = json.load(json_file)
new_model = clazz(model_params)
quant_model_file = os.path.join(path, 'model.tflite')
new_model.interpreter = tf.lite.Interpreter(model_path=str(quant_model_file), num_threads=1)
new_model.interpreter.allocate_tensors()
return new_model
class TfliteBaseJointTransformer4inputsModel(TfliteBaseJointTransformerModel):
def __init__(self, config):
super(TfliteBaseJointTransformer4inputsModel, self).__init__(config)
def predict(self, inputs):
self.interpreter.set_tensor(self.interpreter.get_input_details()[0]["index"], inputs.get("input_word_ids").astype(np.int32))
self.interpreter.set_tensor(self.interpreter.get_input_details()[1]["index"], inputs.get("input_mask").astype(np.int32))
self.interpreter.set_tensor(self.interpreter.get_input_details()[2]["index"], inputs.get("input_type_ids").astype(np.int32))
self.interpreter.set_tensor(self.interpreter.get_input_details()[3]["index"], inputs.get("valid_positions").astype(np.float32))
output_index_0 = self.interpreter.get_output_details()[0]["index"]
output_index_1 = self.interpreter.get_output_details()[1]["index"]
self.interpreter.invoke()
intent = self.interpreter.get_tensor(output_index_0)
slots = self.interpreter.get_tensor(output_index_1)
return slots, intent
| 45.147368
| 137
| 0.653183
|
b58767a1e21b363730493436c759a12504261ef2
| 6,986
|
py
|
Python
|
selfdrive/car/car_helpers.py
|
kss1930/1111
|
bb095b9b6055888acd14840b191f5332effdae40
|
[
"MIT"
] | null | null | null |
selfdrive/car/car_helpers.py
|
kss1930/1111
|
bb095b9b6055888acd14840b191f5332effdae40
|
[
"MIT"
] | null | null | null |
selfdrive/car/car_helpers.py
|
kss1930/1111
|
bb095b9b6055888acd14840b191f5332effdae40
|
[
"MIT"
] | null | null | null |
import os
from common.params import Params
from common.basedir import BASEDIR
from selfdrive.version import comma_remote, tested_branch
from selfdrive.car.fingerprints import eliminate_incompatible_cars, all_known_cars
from selfdrive.car.vin import get_vin, VIN_UNKNOWN
from selfdrive.car.fw_versions import get_fw_versions, match_fw_to_car
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
from selfdrive.car import gen_empty_fingerprint
from cereal import car
EventName = car.CarEvent.EventName
def get_startup_event(car_recognized, controller_available, fuzzy_fingerprint):
#if comma_remote and tested_branch:
# event = EventName.startup
#else:
# event = EventName.startupMaster
event = EventName.startup
if not car_recognized:
event = EventName.startupNoCar
elif car_recognized and not controller_available:
event = EventName.startupNoControl
elif car_recognized and fuzzy_fingerprint:
event = EventName.startupFuzzyFingerprint
return event
def get_one_can(logcan):
while True:
can = messaging.recv_one_retry(logcan)
if len(can.can) > 0:
return can
def load_interfaces(brand_names):
ret = {}
for brand_name in brand_names:
path = ('selfdrive.car.%s' % brand_name)
CarInterface = __import__(path + '.interface', fromlist=['CarInterface']).CarInterface
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carstate.py'):
CarState = __import__(path + '.carstate', fromlist=['CarState']).CarState
else:
CarState = None
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carcontroller.py'):
CarController = __import__(path + '.carcontroller', fromlist=['CarController']).CarController
else:
CarController = None
for model_name in brand_names[brand_name]:
ret[model_name] = (CarInterface, CarController, CarState)
return ret
def _get_interface_names():
# read all the folders in selfdrive/car and return a dict where:
  # - keys are all the car names that we have an interface for
  # - values are lists of specific car models for a given car
brand_names = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
try:
brand_name = car_folder.split('/')[-1]
model_names = __import__('selfdrive.car.%s.values' % brand_name, fromlist=['CAR']).CAR
model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
brand_names[brand_name] = model_names
except (ImportError, IOError):
pass
return brand_names
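# Illustrative sketch of the returned mapping (brand and model strings here are
# hypothetical placeholders, not values taken from this repository):
#     {'somebrand': ['SOMEBRAND MODEL A', 'SOMEBRAND MODEL B'], ...}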
# imports from directory selfdrive/car/<name>/
interface_names = _get_interface_names()
interfaces = load_interfaces(interface_names)
def only_toyota_left(candidate_cars):
return all(("TOYOTA" in c or "LEXUS" in c) for c in candidate_cars) and len(candidate_cars) > 0
# **** for use live only ****
def fingerprint(logcan, sendcan):
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
skip_fw_query = os.environ.get('SKIP_FW_QUERY', False)
if not fixed_fingerprint and not skip_fw_query:
    # VIN query only reliably works through OBDII
bus = 1
cached_params = Params().get("CarParamsCache")
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin is not VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin = cached_params.carVin
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
_, vin = get_vin(logcan, sendcan, bus)
car_fw = get_fw_versions(logcan, sendcan, bus)
exact_fw_match, fw_candidates = match_fw_to_car(car_fw)
else:
vin = VIN_UNKNOWN
exact_fw_match, fw_candidates, car_fw = True, set(), []
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_known_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1
frame = 0
frame_fingerprint = 10 # 0.1s
car_fingerprint = None
done = False
while not done:
a = get_one_can(logcan)
for can in a.can:
# need to independently try to fingerprint both bus 0 and 1 to work
# for the combo black_panda and honda_bosch. Ignore extended messages
# and VIN query response.
# Include bus 2 for toyotas to disambiguate cars using camera messages
# (ideally should be done for all cars but we can't for Honda Bosch)
if can.src in range(0, 4):
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
if (can.src == b or (only_toyota_left(candidate_cars[b]) and can.src == 2)) and \
can.address < 0x800 and can.address not in [0x7df, 0x7e0, 0x7e8]:
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
# Toyota needs higher time to fingerprint, since DSU does not broadcast immediately
if only_toyota_left(candidate_cars[b]):
frame_fingerprint = 100 # 1s
if len(candidate_cars[b]) == 1 and frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
# bail if no cars left or we've been waiting for more than 2s
failed = (all(len(cc) == 0 for cc in candidate_cars.values()) and frame > frame_fingerprint) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
exact_match = True
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
exact_match = exact_fw_match
if fixed_fingerprint:
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.warning("fingerprinted %s", car_fingerprint)
return car_fingerprint, finger, vin, car_fw, source, exact_match
def get_car(logcan, sendcan):
candidate, fingerprints, vin, car_fw, source, exact_match = fingerprint(logcan, sendcan)
if candidate is None:
cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
candidate = "mock"
if Params().get("CarModel", encoding="utf8") is not None:
car_name = Params().get("CarModel", encoding="utf8")
car_name = car_name.rstrip('\n')
candidate = car_name
CarInterface, CarController, CarState = interfaces[candidate]
car_params = CarInterface.get_params(candidate, fingerprints, car_fw)
car_params.carVin = vin
car_params.carFw = car_fw
car_params.fingerprintSource = source
car_params.fuzzyFingerprint = not exact_match
return CarInterface(car_params, CarController, CarState), car_params
| 35.825641
| 111
| 0.716576
|
0c723e7839651229b2059f11b1a698c979645367
| 457
|
py
|
Python
|
Python/ex035.py
|
MarcosRibas/Projeto100Exercicios
|
15c16eb0d9c4182d93e4bb83e11acad0728f5ec9
|
[
"MIT"
] | null | null | null |
Python/ex035.py
|
MarcosRibas/Projeto100Exercicios
|
15c16eb0d9c4182d93e4bb83e11acad0728f5ec9
|
[
"MIT"
] | null | null | null |
Python/ex035.py
|
MarcosRibas/Projeto100Exercicios
|
15c16eb0d9c4182d93e4bb83e11acad0728f5ec9
|
[
"MIT"
] | null | null | null |
"""Ex035 Develop a program that reads the lengths of three line segments and tells the user whether or not they can
form a triangle."""
print('Triangle Analyzer')
r1 = float(input('First segment: '))
r2 = float(input('Second segment: '))
r3 = float(input('Third segment: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('The segments above can form a triangle')
else:
    print('The segments above cannot form a triangle')
| 45.7
| 117
| 0.713348
|
6573ae2cf643e1877809b1368bfddb221773239c
| 189
|
py
|
Python
|
test_funcs/torch2pytorch.py
|
ahmedshingaly/sketch2shape
|
128f83d760d215ec7fae35aeb1430512552f2b92
|
[
"MIT"
] | 3
|
2020-04-07T06:54:47.000Z
|
2021-06-30T14:14:36.000Z
|
test_funcs/torch2pytorch.py
|
ahmedshingaly/sketch2shape
|
128f83d760d215ec7fae35aeb1430512552f2b92
|
[
"MIT"
] | 1
|
2020-09-18T01:58:15.000Z
|
2020-09-18T01:58:15.000Z
|
test_funcs/torch2pytorch.py
|
ahmedshingaly/sketch2shape
|
128f83d760d215ec7fae35aeb1430512552f2b92
|
[
"MIT"
] | 1
|
2020-09-18T01:58:47.000Z
|
2020-09-18T01:58:47.000Z
|
import torch
from torch.utils.serialization import load_lua
model_path = r"./models_cpu/"
model_file = "car_G_cpu.pth.t7"
full_path = model_path + model_file
model = load_lua(full_path)
| 21
| 46
| 0.78836
|
a3d81a92715e62dedc56fa54ec582fa484b702e1
| 894
|
py
|
Python
|
tests/processor/unit/test_scale.py
|
yubessy/prepkit
|
5d2732c05288cd2e76d5d3f539210a91b01f5804
|
[
"MIT"
] | 3
|
2018-01-21T07:21:27.000Z
|
2018-01-21T11:22:08.000Z
|
tests/processor/unit/test_scale.py
|
yubessy/prepkit
|
5d2732c05288cd2e76d5d3f539210a91b01f5804
|
[
"MIT"
] | null | null | null |
tests/processor/unit/test_scale.py
|
yubessy/prepkit
|
5d2732c05288cd2e76d5d3f539210a91b01f5804
|
[
"MIT"
] | null | null | null |
from pandas import Series
from pandas.util.testing import assert_series_equal
from prepkit.processor.unit.scale import Scale
from ..._helper import array_float
def test_process():
processor = Scale(minlim=-1, maxlim=1)
target = Series([-2.0, -1.0, 0.0, 1.0, 2.0])
result = processor.process(target)
expected = Series(array_float([-1.0, -1.0, 0.0, 1.0, 1.0]))
assert_series_equal(result, expected)
def test_process_normalize():
processor = Scale(normalize=True)
target = Series([1, 2, 3])
result = processor.process(target)
expected = Series(array_float([0.0, 0.5, 1.0]))
assert_series_equal(result, expected)
def test_process_standardize():
processor = Scale(standardize=True)
target = Series([0, 1, 2])
result = processor.process(target)
expected = Series(array_float([-1.0, 0.0, 1.0]))
assert_series_equal(result, expected)
| 28.83871
| 63
| 0.692394
|
0b4c46064eaf3b0a3d4c6a81dd52354c1c6d6973
| 8,038
|
py
|
Python
|
miprometheus/problems/seq_to_seq/vqa/cog/cog_utils/json_to_img.py
|
vincentalbouy/mi-prometheus
|
99a0c94b0d0f3476fa021213b3246fda0db8b2db
|
[
"Apache-2.0"
] | null | null | null |
miprometheus/problems/seq_to_seq/vqa/cog/cog_utils/json_to_img.py
|
vincentalbouy/mi-prometheus
|
99a0c94b0d0f3476fa021213b3246fda0db8b2db
|
[
"Apache-2.0"
] | null | null | null |
miprometheus/problems/seq_to_seq/vqa/cog/cog_utils/json_to_img.py
|
vincentalbouy/mi-prometheus
|
99a0c94b0d0f3476fa021213b3246fda0db8b2db
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training utility functions."""
from six import string_types
import re
import numpy as np
from miprometheus.problems.seq_to_seq.vqa.cog.cog_utils import stim_generator as sg
from miprometheus.problems.seq_to_seq.vqa.cog.cog_utils import constants as const
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
def convert_to_grid(xy_coord, prefs):
    """Given an x-y coordinate, return the target activity for a grid of neurons.
Args:
xy_coord : numpy 2-D array (batch_size, 2)
prefs: numpy 2-D array (n_out_pnt, 2). x and y preferences.
Returns:
activity: numpy array (batch_size, GRID_SIZE**2)
"""
sigma2 = 0.02 # 2*sigma-squared
activity = np.exp(-((xy_coord[:, 0:1] - prefs[:, 0])**2 +
(xy_coord[:, 1:2] - prefs[:, 1])**2) / sigma2)
activity = (activity.T / np.sum(activity, axis=1)).T
return activity
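# Minimal shape sketch (the GRID_SIZE of 32 is an assumption for this example):
#     convert_to_grid(np.zeros((4, 2)), prefs)   # prefs shaped (1024, 2)
# returns an array of shape (4, 1024); each row is a Gaussian activation over
# the preference grid, peaked at the input coordinate and normalised to sum to 1.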
def tasks_to_rules(tasks):
"""Generate in_rule and seq_length arrays.
Args:
tasks: a list of tg.Task instances or string rules, length is batch_size.
"""
batch_size = len(tasks)
in_rule = np.zeros((const.MAXSEQLENGTH, batch_size), dtype=np.int64)
seq_length = np.zeros((batch_size,), dtype=np.int64)
for i_task, task in enumerate(tasks):
word_list = re.findall(r"[\w']+|[.,!?;]", str(task))
seq_length[i_task] = len(word_list)
for i_word, word in enumerate(word_list):
in_rule[i_word, i_task] = const.INPUTVOCABULARY.index(word)
return in_rule, seq_length
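# Illustrative tokenisation sketch (the rule string is hypothetical): the regex
# above splits a rule into words and punctuation, so str(task) == 'point now .'
# becomes ['point', 'now', '.'], seq_length records 3, and each token is mapped
# to its index in const.INPUTVOCABULARY.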
def set_outputs_from_tasks(n_epoch, tasks, objsets,
out_pnt_xy, out_word,
mask_pnt, mask_word):
j = 0
for epoch_now in range(n_epoch):
for task, objset in zip(tasks, objsets):
target = task(objset, epoch_now)
if target == const.INVALID:
# For invalid target, no loss is used. Everything remains zero.
pass
elif isinstance(target, sg.Loc):
# minimize point loss
out_pnt_xy[j, :] = target.value
mask_pnt[j] = 1.
elif isinstance(target, bool) or isinstance(target, sg.Attribute):
if isinstance(target, bool):
target = 'true' if target else 'false'
else:
target = target.value
# For boolean target, only minimize word loss
out_word[j] = const.OUTPUTVOCABULARY.index(target)
mask_word[j] = 1.
else:
raise TypeError('Unknown target type.')
j += 1
def set_outputs_from_targets(n_epoch, objsets,
out_pnt_xy, out_word,
mask_pnt, mask_word):
j = 0
for epoch_now in range(n_epoch):
for objset in objsets:
target = objset.targets[epoch_now]
if target == 'invalid':
# For invalid target, no loss is used. Everything remains zero.
pass
elif isinstance(target, (list, tuple)):
assert len(target) == 2, "Expected 2-D target. Got " + str(target)
# minimize point loss
out_pnt_xy[j, :] = target
mask_pnt[j] = 1.
elif isinstance(target, string_types):
out_word[j] = const.OUTPUTVOCABULARY.index(target)
mask_word[j] = 1.
else:
raise TypeError('Unknown target type: %s %s' % (type(target), target))
j += 1
def generate_batch(tasks,
n_epoch=30,
img_size=224,
objsets=None,
n_distractor=1,
average_memory_span=2):
"""Generate a batch of trials.
Return numpy arrays to feed the tensorflow placeholders.
Args:
tasks: a list of tg.Task instances, length is batch_size.
n_epoch: int, number of epochs
img_size: int, image size
objsets: None or list of ObjectSet/StaticObjectSet instances
n_distractor: int, number of distractors to add
average_memory_span: int, the average number of epochs by which an object
need to be held in working memory, if needed at all
Returns:
All variables are numpy array of float32
in_imgs: (n_epoch*batch_size, img_size, img_size, 3)
in_rule: (max_seq_length, batch_size) the rule language input, type int32
seq_length: (batch_size,) the length of each task instruction
out_pnt: (n_epoch*batch_size, n_out_pnt)
out_pnt_xy: (n_epoch*batch_size, 2)
out_word: (n_epoch*batch_size, n_out_word)
mask_pnt: (n_epoch*batch_size)
mask_word: (n_epoch*batch_size)
Raises:
TypeError: when target type is incorrect.
"""
batch_size = len(tasks)
if objsets is None:
objsets = list()
for task in tasks:
objsets.append(
task.generate_objset(n_epoch,
n_distractor=n_distractor,
average_memory_span=average_memory_span))
max_objset_epoch = max([objset.n_epoch for objset in objsets])
assert max_objset_epoch == n_epoch, '%d != %d' % (max_objset_epoch, n_epoch)
in_imgs = sg.render(objsets, img_size)
# The rendered images are batch major
in_imgs = np.reshape(in_imgs, [batch_size, n_epoch, img_size, img_size, 3])
# Swap to time major
in_imgs = np.swapaxes(in_imgs, 0, 1)
# Outputs and masks
out_pnt_xy = np.zeros((n_epoch * batch_size, 2), dtype=np.float32)
out_word = np.zeros((n_epoch * batch_size), dtype=np.int64)
mask_pnt = np.zeros((n_epoch * batch_size), dtype=np.float32)
mask_word = np.zeros((n_epoch * batch_size), dtype=np.float32)
if isinstance(objsets[0], sg.StaticObjectSet):
set_outputs_from_targets(n_epoch, objsets,
out_pnt_xy, out_word,
mask_pnt, mask_word)
else:
set_outputs_from_tasks(n_epoch, tasks, objsets,
out_pnt_xy, out_word,
mask_pnt, mask_word)
# Process outputs
out_pnt = convert_to_grid(out_pnt_xy, const.PREFS)
# Generate rule inputs, padded to maximum number of words in a sentence
in_rule, seq_length = tasks_to_rules(tasks)
return (in_imgs, in_rule, seq_length, out_pnt, out_pnt_xy, out_word, mask_pnt,
mask_word)
def static_objsets_from_examples(examples):
"""Returns a list of StaticObjectSet objects.
Args:
examples: an iterable of dictionaries decoded from json examples.
"""
static_objsets = []
for e in examples:
static_objs = [o for multi_epoch_obj in e['objects']
for o in sg.static_objects_from_dict(multi_epoch_obj)]
static_objset = sg.StaticObjectSet(n_epoch=e['epochs'],
static_objects=static_objs,
targets=e['answers'])
static_objsets.append(static_objset)
return static_objsets
def json_to_feeds(json_examples):
if isinstance(json_examples, string_types):
json_examples = [json_examples]
examples = []
families = []
rules = []
for entry in json_examples:
rules.append(entry['question'])
examples.append(entry)
families.append(entry['family'])
epochs = examples[0]['epochs']
static_objsets = static_objsets_from_examples(examples)
values = generate_batch(rules, n_epoch=epochs,
img_size=112, objsets=static_objsets,
# not used when objsets are given
n_distractor=0,
# not used when objsets are given
average_memory_span=0)
values = values + (families,)
return values
| 34.497854
| 83
| 0.643941
|
ecf1c92f5cb87f2bd7fab685a58a363c73a32613
| 241
|
py
|
Python
|
Intermediate/map_function.py
|
BjornChrisnach/Python_6hour_course
|
0949387c2e423ed0ba7914db7c58af2f913bda1c
|
[
"MIT"
] | null | null | null |
Intermediate/map_function.py
|
BjornChrisnach/Python_6hour_course
|
0949387c2e423ed0ba7914db7c58af2f913bda1c
|
[
"MIT"
] | null | null | null |
Intermediate/map_function.py
|
BjornChrisnach/Python_6hour_course
|
0949387c2e423ed0ba7914db7c58af2f913bda1c
|
[
"MIT"
] | null | null | null |
# map function nr3
li = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def func(x):
return x**x
# newList = []
# for x in li:
# newList.append(func(x))
# print(newList)
# print(list(map(func, li)))
print([func(x) for x in li if x % 2 == 0])
| 13.388889
| 42
| 0.53527
|
a57fcea3109f3848bde2b219f47aab5c16c848d0
| 90
|
py
|
Python
|
src/fermulerpy/__init__.py
|
Deepthi2001/fermulerpy
|
5721e2d027598063cb4f2fd3cb5cc79ffd22890b
|
[
"MIT"
] | null | null | null |
src/fermulerpy/__init__.py
|
Deepthi2001/fermulerpy
|
5721e2d027598063cb4f2fd3cb5cc79ffd22890b
|
[
"MIT"
] | null | null | null |
src/fermulerpy/__init__.py
|
Deepthi2001/fermulerpy
|
5721e2d027598063cb4f2fd3cb5cc79ffd22890b
|
[
"MIT"
] | null | null | null |
"""
==========
fermulerpy
==========
Python for Number Theory
"""
__version__ = "0.1.v3"
| 10
| 24
| 0.511111
|
280ccef1a3f87d042a4365f6e7cc00c4c7d7f03a
| 539
|
py
|
Python
|
build.py
|
appimage-conan-community/conan-libappimage
|
8a616c55f921c3c512139c500fd1f20b763584ea
|
[
"MIT"
] | null | null | null |
build.py
|
appimage-conan-community/conan-libappimage
|
8a616c55f921c3c512139c500fd1f20b763584ea
|
[
"MIT"
] | null | null | null |
build.py
|
appimage-conan-community/conan-libappimage
|
8a616c55f921c3c512139c500fd1f20b763584ea
|
[
"MIT"
] | null | null | null |
from cpt.packager import ConanMultiPackager
if __name__ == "__main__":
remotes = [("https://api.bintray.com/conan/bincrafters/public-conan", "yes", "bincrafters"),
("https://api.bintray.com/conan/conan-community/conan", "yes", "conan-community"),
("https://api.bintray.com/conan/appimage-conan-community/public-conan", "yes", "appimage")]
builder = ConanMultiPackager(build_policy="missing", remotes=remotes)
builder.add_common_builds(shared_option_name="libappimage:shared")
builder.run()
| 44.916667
| 106
| 0.697588
|
abfb66b1df32bf1a267c99876d5493ed834d4b3a
| 2,069
|
py
|
Python
|
profiles_api/models.py
|
jacob-crider/profiles-rest-api
|
b153dd72fd8c40008967a9c72d0bccc6892d905d
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
jacob-crider/profiles-rest-api
|
b153dd72fd8c40008967a9c72d0bccc6892d905d
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
jacob-crider/profiles-rest-api
|
b153dd72fd8c40008967a9c72d0bccc6892d905d
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('User must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save a new superuser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return string representation of our user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile status update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""Return the model as a string"""
return self.status_text
| 28.342466
| 64
| 0.679072
|
e13cc0e78dbe71b6276f8f38ebdf62bf15454240
| 6,203
|
py
|
Python
|
nara_wpe/ntt_wpe.py
|
oucxlw/nara_wpe
|
a537e080bc419e5a01e7f83b81e5d8ae058c363c
|
[
"MIT"
] | 344
|
2018-05-03T00:27:46.000Z
|
2022-03-28T02:13:54.000Z
|
nara_wpe/ntt_wpe.py
|
oucxlw/nara_wpe
|
a537e080bc419e5a01e7f83b81e5d8ae058c363c
|
[
"MIT"
] | 47
|
2018-06-27T07:22:53.000Z
|
2022-02-12T01:18:39.000Z
|
nara_wpe/ntt_wpe.py
|
oucxlw/nara_wpe
|
a537e080bc419e5a01e7f83b81e5d8ae058c363c
|
[
"MIT"
] | 135
|
2018-05-24T09:14:58.000Z
|
2022-03-25T02:55:17.000Z
|
from pathlib import Path
from cached_property import cached_property
import tempfile
import numpy as np
import soundfile as sf
import click
from pymatbridge import Matlab
from nara_wpe import project_root
def ntt_wrapper(
y,
taps=10,
delay=3,
iterations=3,
sampling_rate=16000,
path_to_package=project_root / 'cache' / 'wpe_v1.33',
stft_size=512,
stft_shift=128
):
wpe = NTTWrapper(path_to_package)
return wpe(
y=y,
taps=taps,
delay=delay,
iterations=iterations,
sampling_rate=sampling_rate,
stft_size=stft_size,
stft_shift=stft_shift
)
class NTTWrapper:
"""
The WPE package has to be downloaded from
http://www.kecl.ntt.co.jp/icl/signal/wpe/download.html. It is recommended
to store it in the cache directory of Nara-WPE.
"""
def __init__(self, path_to_pkg):
self.path_to_pkg = Path(path_to_pkg)
if not self.path_to_pkg.exists():
raise OSError(
'NTT WPE package does not exist. It has to be downloaded'
'from http://www.kecl.ntt.co.jp/icl/signal/wpe/download.html'
'and stored in the cache directory of Nara-WPE, preferably.'
)
@cached_property
def process(self):
mlab = Matlab()
mlab.start()
return mlab
def cfg(self, channels, sampling_rate, iterations, taps,
stft_size, stft_shift
):
"""
Check settings and set local.m accordingly
"""
cfg = self.path_to_pkg / 'settings' / 'local.m'
lines = []
with cfg.open() as infile:
for line in infile:
if 'num_mic = ' in line and 'num_out' not in line:
if not str(channels) in line:
line = 'num_mic = ' + str(channels) + ";\n"
elif 'fs' in line:
if not str(sampling_rate) in line:
line = 'fs =' + str(sampling_rate) + ";\n"
elif 'channel_setup' in line and 'ssd_param' not in line:
if not str(taps) in line and '%' not in line:
line = "channel_setup = [" + str(taps) + "; ..." + "\n"
elif 'ssd_conf' in line:
if not str(iterations) in line:
line = "ssd_conf = struct('max_iter',"\
+ str(iterations) + ", ...\n"
                elif 'analy_param' in line:
if not str(stft_size) in line:
line = "analy_param = struct('win_size',"\
+ str(stft_size) + ", ..."
elif 'shift_size' in line:
if not str(stft_shift) in line:
line = " 'shift_size',"\
+ str(stft_shift) + ", ..."
elif 'hanning' in line:
if not str(stft_size) in line:
line = " 'win' , hanning("\
+ str(stft_size) + "));"
lines.append(line)
return lines
def __call__(
self,
y,
taps=10,
delay=3,
iterations=3,
sampling_rate=16000,
stft_size=512,
stft_shift=128
):
"""
Args:
y: observation (channels. samples)
delay:
iterations:
taps:
stft_opts: dict contains size, shift
Returns: dereverberated observation (channels, samples)
"""
y = y.transpose(1, 0)
channels = y.shape[1]
cfg_lines = self.cfg(
channels, sampling_rate, iterations, taps, stft_size, stft_shift
)
with tempfile.TemporaryDirectory() as tempdir:
with (Path(tempdir) / 'local.m').open('w') as cfg_file:
for line in cfg_lines:
cfg_file.write(line)
self.process.set_variable("y", y)
self.process.set_variable("cfg", cfg_file.name)
self.process.run_code("addpath('" + str(cfg_file.name) + "');")
self.process.run_code("addpath('" + str(self.path_to_pkg) + "');")
msg = self.process.run_code("y = wpe(y, cfg);")
assert msg['success'] is True, \
f'WPE has failed. {msg["content"]["stdout"]}'
y = self.process.get_variable("y")
return y.transpose(1, 0)
@click.command()
@click.argument(
'files', nargs=-1,
type=click.Path(exists=True),
)
@click.option(
'--path_to_pkg',
default=str(project_root / 'cache' / 'wpe_v1.33'),
help='It is recommended to save the '
'NTT-WPE package in the cache directory.'
)
@click.option(
'--output_dir',
default=str(project_root / 'data' / 'dereverberation_ntt'),
help='Output path.'
)
@click.option(
'--iterations',
default=5,
help='Iterations of WPE'
)
@click.option(
'--taps',
default=10,
help='Number of filter taps of WPE'
)
def main(path_to_pkg, files, output_dir, taps=10, delay=3, iterations=5):
"""
A small command line wrapper around the NTT-WPE matlab file.
http://www.kecl.ntt.co.jp/icl/signal/wpe/
"""
if len(files) > 1:
signal_list = [
sf.read(str(file))[0]
for file in files
]
y = np.stack(signal_list, axis=0)
sampling_rate = sf.read(str(files[0]))[1]
else:
        y, sampling_rate = sf.read(str(files[0]))
wrapper = NTTWrapper(path_to_pkg)
    x = wrapper(y, taps=taps, delay=delay, iterations=iterations,
                sampling_rate=sampling_rate, stft_size=512, stft_shift=128
)
if len(files) > 1:
for i, file in enumerate(files):
sf.write(
str(Path(output_dir) / Path(file).name),
x[i],
samplerate=sampling_rate
)
else:
sf.write(
            str(Path(output_dir) / Path(files[0]).name),
x,
samplerate=sampling_rate
)
if __name__ == '__main__':
main()
| 29.679426
| 79
| 0.516041
|
b218e09ffae6ab9785c80d4788f224506b18aa03
| 1,222
|
py
|
Python
|
meiduo_mall/apps/contents/utils.py
|
MarioKarting/Django_meiduo_project
|
ef06e70b1ddb6709983ebb644452c980afc29000
|
[
"MIT"
] | null | null | null |
meiduo_mall/apps/contents/utils.py
|
MarioKarting/Django_meiduo_project
|
ef06e70b1ddb6709983ebb644452c980afc29000
|
[
"MIT"
] | null | null | null |
meiduo_mall/apps/contents/utils.py
|
MarioKarting/Django_meiduo_project
|
ef06e70b1ddb6709983ebb644452c980afc29000
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python
# _*_ coding:utf-8 _*_
from collections import OrderedDict
# Helper that assembles the data for the three-level goods category query
def get_categories():
    # 1.1 Fetch the channel table data (37 channels, GoodsChannel)
    from apps.goods.models import GoodsChannel
    # channels = GoodsChannel.objects.order_by('group_id', 'sequence')
    channels = GoodsChannel.objects.all()
    # 1.2 Iterate over the 37 channels
    categories = OrderedDict()  # ordered dictionary
    for channel in channels:
        # 1.3 Get the group id from the channel (11 groups)
        group_id = channel.group_id
        # 1.4 If the current group id is not in the dict yet, add it
        if group_id not in categories:
            categories[group_id] = {'channels': [], 'sub_cats': []}
        # 1.5 Get the first-level category via the foreign-key attribute `category`
        cat1 = channel.category
        # 1.6 Assemble the dict data for `channels`
        categories[group_id]['channels'].append({
            'id': cat1.id,
            'name': cat1.name,
            'url': channel.url,
        })
        # 1.7 Walk from the first-level category through .subs to the second and
        # third levels to build the structure the frontend needs
        for cat2 in cat1.subs.all():
            cat2.sub_cats = []
            # second level -> third level
            for cat3 in cat2.subs.all():
                cat2.sub_cats.append(cat3)
            # append the fully assembled cat2 to the corresponding key of the big dict
            categories[group_id]['sub_cats'].append(cat2)
    return categories
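# Illustrative sketch of the returned structure (ids and names are hypothetical):
#     OrderedDict([(1, {'channels': [{'id': 1, 'name': 'Phones', 'url': 'http://...'}],
#                       'sub_cats': [<level-2 category with .sub_cats == [level-3, ...]>]}),
#                  ...])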
| 29.095238
| 70
| 0.587561
|
59382594e5677e91521c794a01f57618296bb724
| 19,227
|
py
|
Python
|
src/sagemaker/fw_utils.py
|
billdoors/sagemaker-python-sdk
|
2df8fb616cc3e28032aae5dccdc93a0c340b6d8b
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/fw_utils.py
|
billdoors/sagemaker-python-sdk
|
2df8fb616cc3e28032aae5dccdc93a0c340b6d8b
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/fw_utils.py
|
billdoors/sagemaker-python-sdk
|
2df8fb616cc3e28032aae5dccdc93a0c340b6d8b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Utility methods used by framework classes"""
from __future__ import absolute_import
import os
import re
import shutil
import tempfile
from collections import namedtuple
import sagemaker.utils
from sagemaker import s3
from sagemaker.utils import get_ecr_image_uri_prefix, ECR_URI_PATTERN
_TAR_SOURCE_FILENAME = "source.tar.gz"
UploadedCode = namedtuple("UserCode", ["s3_prefix", "script_name"])
"""sagemaker.fw_utils.UserCode: An object containing the S3 prefix and script name.
This is for the source code used for the entry point with an ``Estimator``. It can be
instantiated with positional or keyword arguments.
"""
EMPTY_FRAMEWORK_VERSION_WARNING = "No framework_version specified, defaulting to version {}."
LATER_FRAMEWORK_VERSION_WARNING = (
"This is not the latest supported version. "
"If you would like to use version {latest}, "
"please add framework_version={latest} to your constructor."
)
PYTHON_2_DEPRECATION_WARNING = (
    "The Python 2 {framework} images will soon be deprecated and may not be "
    "supported for newer upcoming versions of the {framework} images.\n"
"Please set the argument \"py_version='py3'\" to use the Python 3 {framework} image."
)
EMPTY_FRAMEWORK_VERSION_ERROR = (
"framework_version is required for script mode estimator. "
"Please add framework_version={} to your constructor to avoid this error."
)
UNSUPPORTED_FRAMEWORK_VERSION_ERROR = (
"{} framework does not support version {}. Please use one of the following: {}."
)
VALID_PY_VERSIONS = ["py2", "py3"]
VALID_EIA_FRAMEWORKS = ["tensorflow", "tensorflow-serving", "mxnet", "mxnet-serving"]
VALID_ACCOUNTS_BY_REGION = {"us-gov-west-1": "246785580436", "us-iso-east-1": "744548109606"}
ASIMOV_VALID_ACCOUNTS_BY_REGION = {"us-iso-east-1": "886529160074"}
OPT_IN_ACCOUNTS_BY_REGION = {"ap-east-1": "057415533634", "me-south-1": "724002660598"}
ASIMOV_OPT_IN_ACCOUNTS_BY_REGION = {"ap-east-1": "871362719292", "me-south-1": "217643126080"}
DEFAULT_ACCOUNT = "520713654638"
ASIMOV_PROD_ACCOUNT = "763104351884"
ASIMOV_DEFAULT_ACCOUNT = ASIMOV_PROD_ACCOUNT
MERGED_FRAMEWORKS_REPO_MAP = {
"tensorflow-scriptmode": "tensorflow-training",
"tensorflow-serving": "tensorflow-inference",
"tensorflow-serving-eia": "tensorflow-inference-eia",
"mxnet": "mxnet-training",
"mxnet-serving": "mxnet-inference",
"mxnet-serving-eia": "mxnet-inference-eia",
"pytorch": "pytorch-training",
"pytorch-serving": "pytorch-inference",
}
MERGED_FRAMEWORKS_LOWEST_VERSIONS = {
"tensorflow-scriptmode": {"py3": [1, 13, 1], "py2": [1, 14, 0]},
"tensorflow-serving": [1, 13, 0],
"tensorflow-serving-eia": [1, 14, 0],
"mxnet": {"py3": [1, 4, 1], "py2": [1, 6, 0]},
"mxnet-serving": {"py3": [1, 4, 1], "py2": [1, 6, 0]},
"mxnet-serving-eia": [1, 4, 1],
"pytorch": [1, 2, 0],
"pytorch-serving": [1, 2, 0],
}
def is_version_equal_or_higher(lowest_version, framework_version):
"""Determine whether the ``framework_version`` is equal to or higher than
``lowest_version``
Args:
lowest_version (List[int]): lowest version represented in an integer
list
framework_version (str): framework version string
Returns:
bool: Whether or not ``framework_version`` is equal to or higher than
``lowest_version``
"""
version_list = [int(s) for s in framework_version.split(".")]
return version_list >= lowest_version[0 : len(version_list)]
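# Quick illustrative check (version numbers chosen for this sketch):
#     is_version_equal_or_higher([1, 13, 1], "1.13")    # True:  [1, 13] >= [1, 13]
#     is_version_equal_or_higher([1, 13, 1], "1.12.0")  # False: [1, 12, 0] < [1, 13, 1]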
def _is_dlc_version(framework, framework_version, py_version):
"""Return if the framework's version uses the corresponding DLC image.
Args:
framework (str): The framework name, e.g. "tensorflow-scriptmode"
framework_version (str): The framework version
py_version (str): The Python version, e.g. "py3"
Returns:
bool: Whether or not the framework's version uses the DLC image.
"""
lowest_version_list = MERGED_FRAMEWORKS_LOWEST_VERSIONS.get(framework)
if isinstance(lowest_version_list, dict):
lowest_version_list = lowest_version_list[py_version]
if lowest_version_list:
return is_version_equal_or_higher(lowest_version_list, framework_version)
return False
def _use_dlc_image(region, framework, py_version, framework_version):
"""Return if the DLC image should be used for the given framework,
framework version, Python version, and region.
Args:
region (str): The AWS region.
framework (str): The framework name, e.g. "tensorflow-scriptmode".
py_version (str): The Python version, e.g. "py3".
framework_version (str): The framework version.
Returns:
bool: Whether or not to use the corresponding DLC image.
"""
is_gov_region = region in VALID_ACCOUNTS_BY_REGION
is_dlc_version = _is_dlc_version(framework, framework_version, py_version)
return ((not is_gov_region) or region in ASIMOV_VALID_ACCOUNTS_BY_REGION) and is_dlc_version
def _registry_id(region, framework, py_version, account, framework_version):
"""Return the Amazon ECR registry number (or AWS account ID) for
the given framework, framework version, Python version, and region.
Args:
region (str): The AWS region.
framework (str): The framework name, e.g. "tensorflow-scriptmode".
py_version (str): The Python version, e.g. "py3".
account (str): The AWS account ID to use as a default.
framework_version (str): The framework version.
Returns:
str: The appropriate Amazon ECR registry number. If there is no
specific one for the framework, framework version, Python version,
and region, then ``account`` is returned.
"""
if _use_dlc_image(region, framework, py_version, framework_version):
if region in ASIMOV_OPT_IN_ACCOUNTS_BY_REGION:
return ASIMOV_OPT_IN_ACCOUNTS_BY_REGION.get(region)
if region in ASIMOV_VALID_ACCOUNTS_BY_REGION:
return ASIMOV_VALID_ACCOUNTS_BY_REGION.get(region)
return ASIMOV_DEFAULT_ACCOUNT
if region in OPT_IN_ACCOUNTS_BY_REGION:
return OPT_IN_ACCOUNTS_BY_REGION.get(region)
return VALID_ACCOUNTS_BY_REGION.get(region, account)
def create_image_uri(
region,
framework,
instance_type,
framework_version,
py_version=None,
account=None,
accelerator_type=None,
optimized_families=None,
):
"""Return the ECR URI of an image.
Args:
region (str): AWS region where the image is uploaded.
framework (str): framework used by the image.
instance_type (str): SageMaker instance type. Used to determine device
type (cpu/gpu/family-specific optimized).
framework_version (str): The version of the framework.
py_version (str): Optional. Python version. If specified, should be one
of 'py2' or 'py3'. If not specified, image uri will not include a
python component.
account (str): AWS account that contains the image. (default:
'520713654638')
accelerator_type (str): SageMaker Elastic Inference accelerator type.
optimized_families (str): Instance families for which there exist
specific optimized images.
Returns:
str: The appropriate image URI based on the given parameters.
"""
optimized_families = optimized_families or []
if py_version and py_version not in VALID_PY_VERSIONS:
raise ValueError("invalid py_version argument: {}".format(py_version))
if _accelerator_type_valid_for_framework(
framework=framework,
accelerator_type=accelerator_type,
optimized_families=optimized_families,
):
framework += "-eia"
# Handle account number for specific cases (e.g. GovCloud, opt-in regions, DLC images etc.)
if account is None:
account = _registry_id(
region=region,
framework=framework,
py_version=py_version,
account=DEFAULT_ACCOUNT,
framework_version=framework_version,
)
# Handle Local Mode
if instance_type.startswith("local"):
device_type = "cpu" if instance_type == "local" else "gpu"
elif not instance_type.startswith("ml."):
raise ValueError(
"{} is not a valid SageMaker instance type. See: "
"https://aws.amazon.com/sagemaker/pricing/instance-types/".format(instance_type)
)
else:
family = instance_type.split(".")[1]
# For some frameworks, we have optimized images for specific families, e.g c5 or p3.
# In those cases, we use the family name in the image tag. In other cases, we use
# 'cpu' or 'gpu'.
if family in optimized_families:
device_type = family
elif family[0] in ["g", "p"]:
device_type = "gpu"
else:
device_type = "cpu"
use_dlc_image = _use_dlc_image(region, framework, py_version, framework_version)
if not py_version or (use_dlc_image and framework == "tensorflow-serving-eia"):
tag = "{}-{}".format(framework_version, device_type)
else:
tag = "{}-{}-{}".format(framework_version, device_type, py_version)
if use_dlc_image:
ecr_repo = MERGED_FRAMEWORKS_REPO_MAP[framework]
else:
ecr_repo = "sagemaker-{}".format(framework)
return "{}/{}:{}".format(get_ecr_image_uri_prefix(account, region), ecr_repo, tag)
def _accelerator_type_valid_for_framework(
framework, accelerator_type=None, optimized_families=None
):
"""
Args:
framework:
accelerator_type:
optimized_families:
"""
if accelerator_type is None:
return False
if framework not in VALID_EIA_FRAMEWORKS:
raise ValueError(
"{} is not supported with Amazon Elastic Inference. Currently only "
"Python-based TensorFlow and MXNet are supported.".format(framework)
)
if optimized_families:
raise ValueError("Neo does not support Amazon Elastic Inference.")
if (
not accelerator_type.startswith("ml.eia")
and not accelerator_type == "local_sagemaker_notebook"
):
raise ValueError(
"{} is not a valid SageMaker Elastic Inference accelerator type. "
"See: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html".format(accelerator_type)
)
return True
def validate_source_dir(script, directory):
"""Validate that the source directory exists and it contains the user script
Args:
script (str): Script filename.
directory (str): Directory containing the source file.
Raises:
ValueError: If ``directory`` does not exist, is not a directory, or does
not contain ``script``.
"""
if directory:
if not os.path.isfile(os.path.join(directory, script)):
raise ValueError(
'No file named "{}" was found in directory "{}".'.format(script, directory)
)
return True
def tar_and_upload_dir(
session, bucket, s3_key_prefix, script, directory=None, dependencies=None, kms_key=None
):
"""Package source files and upload a compress tar file to S3. The S3
location will be ``s3://<bucket>/s3_key_prefix/sourcedir.tar.gz``.
If directory is an S3 URI, an UploadedCode object will be returned, but
nothing will be uploaded to S3 (this allows reuse of code already in S3).
If directory is None, the script will be added to the archive at
``./<basename of script>``.
If directory is not None, the (recursive) contents of the directory will
be added to the archive. directory is treated as the base path of the
archive, and the script name is assumed to be a filename or relative path
inside the directory.
Args:
session (boto3.Session): Boto session used to access S3.
bucket (str): S3 bucket to which the compressed file is uploaded.
s3_key_prefix (str): Prefix for the S3 key.
script (str): Script filename or path.
directory (str): Optional. Directory containing the source file. If it
starts with "s3://", no action is taken.
dependencies (List[str]): Optional. A list of paths to directories
(absolute or relative) containing additional libraries that will be
copied into /opt/ml/lib
kms_key (str): Optional. KMS key ID used to upload objects to the bucket
(default: None).
Returns:
sagemaker.fw_utils.UploadedCode: An object with the S3 bucket and key (S3 prefix) and
script name.
"""
if directory and directory.lower().startswith("s3://"):
return UploadedCode(s3_prefix=directory, script_name=os.path.basename(script))
script_name = script if directory else os.path.basename(script)
dependencies = dependencies or []
key = "%s/sourcedir.tar.gz" % s3_key_prefix
tmp = tempfile.mkdtemp()
try:
source_files = _list_files_to_compress(script, directory) + dependencies
tar_file = sagemaker.utils.create_tar_file(
source_files, os.path.join(tmp, _TAR_SOURCE_FILENAME)
)
if kms_key:
extra_args = {"ServerSideEncryption": "aws:kms", "SSEKMSKeyId": kms_key}
else:
extra_args = None
session.resource("s3").Object(bucket, key).upload_file(tar_file, ExtraArgs=extra_args)
finally:
shutil.rmtree(tmp)
return UploadedCode(s3_prefix="s3://%s/%s" % (bucket, key), script_name=script_name)
def _list_files_to_compress(script, directory):
"""
Args:
script:
directory:
"""
if directory is None:
return [script]
basedir = directory if directory else os.path.dirname(script)
return [os.path.join(basedir, name) for name in os.listdir(basedir)]
def framework_name_from_image(image_name):
# noinspection LongLine
"""Extract the framework and Python version from the image name.
Args:
image_name (str): Image URI, which should be one of the following forms:
legacy:
'<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-<fw>-<py_ver>-<device>:<container_version>'
legacy:
'<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-<fw>-<py_ver>-<device>:<fw_version>-<device>-<py_ver>'
current:
'<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-<fw>:<fw_version>-<device>-<py_ver>'
current:
'<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-rl-<fw>:<rl_toolkit><rl_version>-<device>-<py_ver>'
Returns:
tuple: A tuple containing:
str: The framework name
str: The Python version
str: The image tag
str: The script mode component of the image name, if any
"""
sagemaker_pattern = re.compile(ECR_URI_PATTERN)
sagemaker_match = sagemaker_pattern.match(image_name)
if sagemaker_match is None:
return None, None, None, None
# extract framework, python version and image tag
# We must support both the legacy and current image name format.
name_pattern = re.compile(
r"^(?:sagemaker(?:-rl)?-)?(tensorflow|mxnet|chainer|pytorch|scikit-learn|xgboost)(?:-)?(scriptmode|training)?:(.*)-(.*?)-(py2|py3)$" # noqa: E501 # pylint: disable=line-too-long
)
legacy_name_pattern = re.compile(r"^sagemaker-(tensorflow|mxnet)-(py2|py3)-(cpu|gpu):(.*)$")
name_match = name_pattern.match(sagemaker_match.group(9))
legacy_match = legacy_name_pattern.match(sagemaker_match.group(9))
if name_match is not None:
fw, scriptmode, ver, device, py = (
name_match.group(1),
name_match.group(2),
name_match.group(3),
name_match.group(4),
name_match.group(5),
)
return fw, py, "{}-{}-{}".format(ver, device, py), scriptmode
if legacy_match is not None:
return (legacy_match.group(1), legacy_match.group(2), legacy_match.group(4), None)
return None, None, None, None
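# Minimal sketch (hypothetical repo-and-tag string) of how the current-format
# regex above decomposes a name:
#
#   name_pattern.match("sagemaker-mxnet:1.4.0-gpu-py3").groups()
#   # -> ('mxnet', None, '1.4.0', 'gpu', 'py3')
#
# so, assuming the ECR prefix part of the URI matches, the function returns
# ('mxnet', 'py3', '1.4.0-gpu-py3', None).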
def framework_version_from_tag(image_tag):
"""Extract the framework version from the image tag.
Args:
image_tag (str): Image tag, which should take the form
'<framework_version>-<device>-<py_version>'
Returns:
str: The framework version.
"""
tag_pattern = re.compile("^(.*)-(cpu|gpu)-(py2|py3)$")
tag_match = tag_pattern.match(image_tag)
return None if tag_match is None else tag_match.group(1)
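# For example (hypothetical tag): framework_version_from_tag("1.4.0-gpu-py3")
# returns "1.4.0", while a tag that does not match the pattern yields None.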
def parse_s3_url(url):
"""Calls the method with the same name in the s3 module.
:func:~sagemaker.s3.parse_s3_url
Args:
url: A URL, expected with an s3 scheme.
Returns: The return value of s3.parse_s3_url, which is a tuple containing:
str: S3 bucket name
str: S3 key
"""
return s3.parse_s3_url(url)
def model_code_key_prefix(code_location_key_prefix, model_name, image):
"""Returns the s3 key prefix for uploading code during model deployment
The location returned is a potential concatenation of 2 parts
1. code_location_key_prefix if it exists
2. model_name or a name derived from the image
Args:
code_location_key_prefix (str): the s3 key prefix from code_location
model_name (str): the name of the model
image (str): the image from which a default name can be extracted
Returns:
str: the key prefix to be used in uploading code
"""
training_job_name = sagemaker.utils.name_from_image(image)
return "/".join(filter(None, [code_location_key_prefix, model_name or training_job_name]))
def empty_framework_version_warning(default_version, latest_version):
"""
Args:
default_version:
latest_version:
"""
msgs = [EMPTY_FRAMEWORK_VERSION_WARNING.format(default_version)]
if default_version != latest_version:
msgs.append(LATER_FRAMEWORK_VERSION_WARNING.format(latest=latest_version))
return " ".join(msgs)
def get_unsupported_framework_version_error(
framework_name, unsupported_version, supported_versions
):
"""Return error message for unsupported framework version.
This should also return the supported versions for customers.
:param framework_name:
:param unsupported_version:
:param supported_versions:
:return:
"""
return UNSUPPORTED_FRAMEWORK_VERSION_ERROR.format(
framework_name,
unsupported_version,
", ".join('"{}"'.format(version) for version in supported_versions),
)
def python_deprecation_warning(framework):
"""
Args:
framework:
"""
return PYTHON_2_DEPRECATION_WARNING.format(framework=framework)
| 38.14881
| 186
| 0.680449
|
8523ebc612d1396c62ebce87cce9c107133cd6ed
| 3,883
|
py
|
Python
|
projects/steganography/dct.py
|
rossi2018/python-mini-projects
|
a85c140b990ec9d0fd491da5508fe188278032b0
|
[
"MIT"
] | 2
|
2022-01-08T16:59:55.000Z
|
2022-01-08T17:34:28.000Z
|
projects/steganography/dct.py
|
rossi2018/python-mini-projects
|
a85c140b990ec9d0fd491da5508fe188278032b0
|
[
"MIT"
] | 14
|
2022-02-13T10:28:48.000Z
|
2022-03-15T21:11:46.000Z
|
projects/steganography/dct.py
|
rossi2018/python-mini-projects
|
a85c140b990ec9d0fd491da5508fe188278032b0
|
[
"MIT"
] | 2
|
2022-03-09T11:11:57.000Z
|
2022-03-09T16:23:32.000Z
|
#!/usr/bin/env python3
#
# Copyright(C) 2021 wuyaoping
#
# The DCT method is highly robust but has a lower embedding capacity.
import numpy as np
import os.path as osp
import cv2
FLAG = '%'
# Select a pair of locations from the mid-frequency coefficients
LOC_MAX = (4, 1)
LOC_MIN = (3, 2)
# The enforced difference between the MAX and MIN coefficients;
# larger values improve robustness but degrade picture quality.
ALPHA = 1
# Quantizer table
TABLE = np.array([
[16, 11, 10, 16, 24, 40, 51, 61],
[12, 12, 14, 19, 26, 58, 60, 55],
[14, 13, 16, 24, 40, 57, 69, 56],
[14, 17, 22, 29, 51, 87, 80, 62],
[18, 22, 37, 56, 68, 109, 103, 77],
[24, 35, 55, 64, 81, 104, 113, 92],
[49, 64, 78, 87, 103, 121, 120, 101],
[72, 92, 95, 98, 112, 100, 103, 99]
])
def insert(path, txt):
img = cv2.imread(path, cv2.IMREAD_ANYCOLOR)
txt = "{}{}{}".format(len(txt), FLAG, txt)
row, col = img.shape[:2]
max_bytes = (row // 8) * (col // 8) // 8
assert max_bytes >= len(
txt), "Message overflow the capacity:{}".format(max_bytes)
img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
# Only the Y plane is used to store the message; all planes could be used.
y, u, v = cv2.split(img)
y = y.astype(np.float32)
blocks = []
# Quantize blocks
for r_idx in range(0, 8 * (row // 8), 8):
for c_idx in range(0, 8 * (col // 8), 8):
quantized = cv2.dct(y[r_idx: r_idx+8, c_idx: c_idx+8]) / TABLE
blocks.append(quantized)
for idx in range(len(txt)):
encode(blocks[idx*8: (idx+1)*8], txt[idx])
idx = 0
# Restore Y plane
for r_idx in range(0, 8 * (row // 8), 8):
for c_idx in range(0, 8 * (col // 8), 8):
y[r_idx: r_idx+8, c_idx: c_idx+8] = cv2.idct(blocks[idx] * TABLE)
idx += 1
y = y.astype(np.uint8)
img = cv2.cvtColor(cv2.merge((y, u, v)), cv2.COLOR_YUV2BGR)
filename, _ = osp.splitext(path)
# The DCT method preserves the message even through JPEG compression
filename += '_dct_embeded' + '.jpg'
cv2.imwrite(filename, img)
return filename
# Encode a char into the blocks
def encode(blocks, data):
data = ord(data)
for idx in range(len(blocks)):
bit_val = (data >> idx) & 1
max_val = max(blocks[idx][LOC_MAX], blocks[idx][LOC_MIN])
min_val = min(blocks[idx][LOC_MAX], blocks[idx][LOC_MIN])
if max_val - min_val <= ALPHA:
max_val = min_val + ALPHA + 1e-3
if bit_val == 1:
blocks[idx][LOC_MAX] = max_val
blocks[idx][LOC_MIN] = min_val
else:
blocks[idx][LOC_MAX] = min_val
blocks[idx][LOC_MIN] = max_val
# Decode a char from the blocks
def decode(blocks):
val = 0
for idx in range(len(blocks)):
if blocks[idx][LOC_MAX] > blocks[idx][LOC_MIN]:
val |= 1 << idx
return chr(val)
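# Minimal self-check sketch (not part of the original script): the random 8x8
# blocks below are synthetic stand-ins for quantized DCT blocks; each block
# carries one bit of the character.
def _roundtrip_demo(char='A'):
    rng = np.random.default_rng(0)
    blocks = [rng.normal(size=(8, 8)).astype(np.float32) for _ in range(8)]
    encode(blocks, char)
    assert decode(blocks) == char
    return decode(blocks)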
def extract(path):
img = cv2.imread(path, cv2.IMREAD_ANYCOLOR)
row, col = img.shape[:2]
max_bytes = (row // 8) * (col // 8) // 8
img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
y, u, v = cv2.split(img)
y = y.astype(np.float32)
blocks = []
for r_idx in range(0, 8 * (row // 8), 8):
for c_idx in range(0, 8 * (col // 8), 8):
quantized = cv2.dct(y[r_idx: r_idx+8, c_idx: c_idx+8]) / TABLE
blocks.append(quantized)
res = ''
idx = 0
# Extract the length of the message
while idx < max_bytes:
ch = decode(blocks[idx*8: (idx+1)*8])
idx += 1
if ch == FLAG:
break
res += ch
end = int(res) + idx
assert end <= max_bytes, "Input image isn't correct."
res = ''
while idx < end:
res += decode(blocks[idx*8: (idx+1)*8])
idx += 1
return res
if __name__ == '__main__':
data = 'A collection of simple python mini projects to enhance your Python skills.'
res_path = insert('./example.png', data)
res = extract(res_path)
print(res)
| 30.335938
| 87
| 0.567345
|
b0410c053cc907d954e523544f4d6943395d6ad6
| 1,008
|
py
|
Python
|
acq4/analysis/scripts/beamProfiler.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 1
|
2020-06-04T17:04:53.000Z
|
2020-06-04T17:04:53.000Z
|
acq4/analysis/scripts/beamProfiler.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 24
|
2016-09-27T17:25:24.000Z
|
2017-03-02T21:00:11.000Z
|
acq4/analysis/scripts/beamProfiler.py
|
sensapex/acq4
|
9561ba73caff42c609bd02270527858433862ad8
|
[
"MIT"
] | 4
|
2016-10-19T06:39:36.000Z
|
2019-09-30T21:06:45.000Z
|
from __future__ import print_function
from acq4.util import Qt
import acq4.Manager
import acq4.util.imageAnalysis as imageAnalysis
run = True
man = acq4.Manager.getManager()
cam = man.getDevice('Camera')
frames = []
def collect(frame):
global frames
frames.append(frame)
cam.sigNewFrame.connect(collect)
def measure():
if len(frames) == 0:
Qt.QTimer.singleShot(100, measure)
return
global run
if run:
global frames
frame = frames[-1]
frames = []
img = frame.data()
w,h = img.shape
img = img[2*w//5:3*w//5, 2*h//5:3*h//5]  # integer division keeps the slice indices valid on Python 3
w,h = img.shape
fit = imageAnalysis.fitGaussian2D(img, [100, w/2., h/2., w/4., 0])
# convert sigma to full width at 1/e
fit[0][3] *= 2 * 2**0.5
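# (for a Gaussian exp(-x**2 / (2*sigma**2)) the profile falls to 1/e of its
# peak at x = sigma*sqrt(2), so the full width at 1/e is 2*sqrt(2)*sigma)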
print("WIDTH:", fit[0][3] * frame.info()['pixelSize'][0] * 1e6, "um")
print(" fit:", fit)
else:
global frames
frames = []
Qt.QTimer.singleShot(2000, measure)
measure()
| 23.44186
| 77
| 0.574405
|
706c91fda1a1bbe5f4734eda54106940cbb307dd
| 1,027
|
py
|
Python
|
aceapi/events/__init__.py
|
krayzpipes/ACE-1
|
138bf2aecad949f0b72b66519c32893df033de39
|
[
"Apache-2.0"
] | 28
|
2018-08-08T11:57:31.000Z
|
2022-01-12T23:06:18.000Z
|
aceapi/events/__init__.py
|
krayzpipes/ACE-1
|
138bf2aecad949f0b72b66519c32893df033de39
|
[
"Apache-2.0"
] | 108
|
2018-08-08T12:35:06.000Z
|
2019-07-19T22:57:19.000Z
|
aceapi/events/__init__.py
|
krayzpipes/ACE-1
|
138bf2aecad949f0b72b66519c32893df033de39
|
[
"Apache-2.0"
] | 16
|
2018-08-03T18:48:00.000Z
|
2021-11-09T00:35:35.000Z
|
# vim: sw=4:ts=4:et
#
# ACE API event routines
from .. import db, json_result
from flask import Blueprint, request, abort, Response
from saq.database import Event
events_bp = Blueprint('events', __name__, url_prefix='/events')
@events_bp.route('/open', methods=['GET'])
def get_open_events():
open_events = db.session.query(Event).filter_by(status='OPEN')
return json_result([event.json for event in open_events])
@events_bp.route('/<int:event_id>/status', methods=['PUT'])
def update_event_status(event_id):
event = db.session.query(Event).get(event_id)
if not event:
abort(Response("Event ID not found", 404))
status = request.values.get('status', None)
if status:
if status in Event.status.property.columns[0].type.enums:
event.status = status
db.session.commit()
return json_result(event.json)
else:
abort(Response("Invalid event status: {}".format(status), 400))
abort(Response("Must specify event status", 400))
| 29.342857
| 75
| 0.673807
|
5138ef622f4ef1e4d2efc241a66b5c6dcaddb8ac
| 1,288
|
py
|
Python
|
dymos/utils/indexing.py
|
kaushikponnapalli/dymos
|
3fba91d0fc2c0e8460717b1bec80774676287739
|
[
"Apache-2.0"
] | 1
|
2021-07-19T17:03:49.000Z
|
2021-07-19T17:03:49.000Z
|
dymos/utils/indexing.py
|
kaushikponnapalli/dymos
|
3fba91d0fc2c0e8460717b1bec80774676287739
|
[
"Apache-2.0"
] | null | null | null |
dymos/utils/indexing.py
|
kaushikponnapalli/dymos
|
3fba91d0fc2c0e8460717b1bec80774676287739
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
def get_src_indices_by_row(row_idxs, shape, flat=True):
"""
Provide the src_indices when connecting a vectorized variable from an output to an input.
Indices are selected along the first axis of the source, which corresponds to
the node index in Dymos.
Parameters
----------
row_idxs : array_like
The rows/node indices to be connected from the source to the target.
shape : tuple
The shape of the variable at each node (ignores the first dimension).
flat : bool
If True, return the source indices in flat source indices form.
Returns
-------
array_like
If flat, a numpy array of shape `(len(row_idxs),) + shape` where each element is the index
of the source of that element in the source array, in C-order.
"""
if not flat:
raise NotImplementedError('Currently get_src_indices_by_row only returns '
'flat source indices.')
num_src_rows = np.max(row_idxs) + 1
src_shape = (num_src_rows,) + shape
other_idxs = [np.arange(n, dtype=int) for n in shape]
ixgrid = np.ix_(row_idxs, *other_idxs)
a = np.reshape(np.arange(np.prod(src_shape), dtype=int), newshape=src_shape)
src_idxs = a[ixgrid]
return src_idxs
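# Minimal sketch (hypothetical shapes, not part of the original module):
# selecting rows 0 and 2 of a source with per-node shape (2,) yields the flat
# C-order indices of those rows.
if __name__ == '__main__':
    print(get_src_indices_by_row(np.array([0, 2]), shape=(2,)))
    # expected: [[0 1]
    #            [4 5]]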
| 34.810811
| 93
| 0.662267
|
b77939c6e2d24241c10370aabfa623112e5d908e
| 1,614
|
py
|
Python
|
kolibri/core/content/migrations/0014_auto_20181218_1132.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 545
|
2016-01-19T19:26:55.000Z
|
2022-03-20T00:13:04.000Z
|
kolibri/core/content/migrations/0014_auto_20181218_1132.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 8,329
|
2016-01-19T19:32:02.000Z
|
2022-03-31T21:23:12.000Z
|
kolibri/core/content/migrations/0014_auto_20181218_1132.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 493
|
2016-01-19T19:26:48.000Z
|
2022-03-28T14:35:05.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-18 19:32
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("content", "0013_auto_20180919_1142")]
operations = [
migrations.AlterField(
model_name="file",
name="preset",
field=models.CharField(
blank=True,
choices=[
("high_res_video", "High Resolution"),
("low_res_video", "Low Resolution"),
("video_thumbnail", "Thumbnail"),
("video_subtitle", "Subtitle"),
("video_dependency", "Video (dependency)"),
("audio", "Audio"),
("audio_thumbnail", "Thumbnail"),
("document", "Document"),
("epub", "ePub Document"),
("document_thumbnail", "Thumbnail"),
("exercise", "Exercise"),
("exercise_thumbnail", "Thumbnail"),
("exercise_image", "Exercise Image"),
("exercise_graphie", "Exercise Graphie"),
("channel_thumbnail", "Channel Thumbnail"),
("topic_thumbnail", "Thumbnail"),
("html5_zip", "HTML5 Zip"),
("html5_dependency", "HTML5 Dependency (Zip format)"),
("html5_thumbnail", "HTML5 Thumbnail"),
],
max_length=150,
),
)
]
| 36.681818
| 74
| 0.483271
|
7bf731b74d5704e848485eac41fc8f1f6c565b37
| 12,044
|
py
|
Python
|
we-word-embeddings-huth/SemanticModel.py
|
bastivkl/nh2020-curriculum
|
245a72af3f325495448cbf6c0c6baa2499d43d94
|
[
"CC-BY-4.0"
] | 94
|
2020-06-27T19:04:11.000Z
|
2022-03-28T00:44:44.000Z
|
we-word-embeddings-huth/SemanticModel.py
|
bastivkl/nh2020-curriculum
|
245a72af3f325495448cbf6c0c6baa2499d43d94
|
[
"CC-BY-4.0"
] | 13
|
2020-07-23T02:11:40.000Z
|
2020-09-09T21:28:36.000Z
|
we-word-embeddings-huth/SemanticModel.py
|
bastivkl/nh2020-curriculum
|
245a72af3f325495448cbf6c0c6baa2499d43d94
|
[
"CC-BY-4.0"
] | 50
|
2020-07-15T03:37:49.000Z
|
2022-02-27T23:07:14.000Z
|
import tables
import pickle
import numpy as np
import logging
logger = logging.getLogger("SemanticModel")
class SemanticModel(object):
"""This class defines a semantic vector-space model based on HAL or LSA with some
prescribed preprocessing pipeline.
It contains two important variables: vocab and data.
vocab is a 1D list (or array) of words.
data is a 2D array (features by words) of word-feature values.
"""
def __init__(self, data, vocab):
"""Initializes a SemanticModel with the given [data] and [vocab].
"""
self.data = data
self.vocab = vocab
def get_ndim(self):
"""Returns the number of dimensions in this model.
"""
return self.data.shape[0]
ndim = property(get_ndim)
def get_vindex(self):
"""Return {vocab: index} dictionary.
"""
if "_vindex" not in dir(self):
self._vindex = dict([(v,i) for (i,v) in enumerate(self.vocab)])
return self._vindex
vindex = property(get_vindex)
def __getitem__(self, word):
"""Returns the vector corresponding to the given [word].
"""
return self.data[:,self.vindex[word]]
def load_root(self, rootfile, vocab):
"""Load the SVD-generated semantic vector space from [rootfile], assumed to be
an HDF5 file.
"""
roothf = tables.open_file(rootfile)
self.data = roothf.get_node("/R").read()
self.vocab = vocab
roothf.close()
def load_ascii_root(self, rootfile, vocab):
"""Loads the SVD-generated semantic vector space from [rootfile], assumed to be
an ASCII dense matrix output from SDVLIBC.
"""
vtfile = open(rootfile)
nrows, ncols = map(int, vtfile.readline().split())
Vt = np.zeros((nrows,ncols))
nrows_done = 0
for row in vtfile:
Vt[nrows_done,:] = map(float, row.split())
nrows_done += 1
self.data = Vt
self.vocab = vocab
def restrict_by_occurrence(self, min_rank=60, max_rank=60000):
"""Restricts the data to words that have an occurrence rank lower than
[min_rank] and higher than [max_rank].
"""
logger.debug("Restricting words by occurrence..")
nwords = self.data.shape[1]
wordranks = np.argsort(np.argsort(self.data[0,:]))
goodwords = np.nonzero(np.logical_and((nwords-wordranks)>min_rank,
(nwords-wordranks)<max_rank))[0]
self.data = self.data[:,goodwords]
self.vocab = [self.vocab[i] for i in goodwords]
logger.debug("Done restricting words..")
def pca_reduce(self, ndims):
"""Reduces the dimensionality of the vector-space using PCA.
"""
logger.debug("Reducing with PCA to %d dimensions"%ndims)
U,S,Vh = np.linalg.svd(self.data, full_matrices=False)
self.data = np.dot(Vh[:ndims].T, np.diag(S[:ndims])).T
logger.debug("Done with PCA..")
def pca_reduce_multi(self, ndimlist):
"""Reduces the dimensionality of the vector-space using PCA for many
different numbers of dimensions. More efficient than running
pca_reduce many times.
Instead of modifying this object, this function returns a list of new
SemanticModels with the specified numbers of dimensions.
"""
logger.debug("Reducing with PCA to fewer dimensions..")
U,S,Vh = np.linalg.svd(self.data, full_matrices=False)
newmodels = []
for nd in ndimlist:
newmodel = SemanticModel(None, None)
newmodel.vocab = list(self.vocab)
newmodel.data = np.dot(Vh[:nd].T, np.diag(S[:nd])).T
newmodels.append(newmodel)
return newmodels
def save(self, filename):
"""Saves this semantic model at the given filename.
"""
logger.debug("Saving file: %s"%filename)
shf = tables.open_file(filename, mode="w", title="SemanticModel")
shf.create_array("/", "data", self.data)
shf.create_array("/", "vocab", self.vocab)
shf.close()
logger.debug("Done saving file..")
@classmethod
def load(cls, filename):
"""Loads a semantic model from the given filename.
"""
logger.debug("Loading file: %s"%filename)
shf = tables.open_file(filename)
newsm = cls(None, None)
newsm.data = shf.get_node("/data").read()
newsm.vocab = [s.decode('utf-8') for s in shf.get_node("/vocab").read()]
shf.close()
logger.debug("Done loading file..")
return newsm
def copy(self):
"""Returns a copy of this model.
"""
logger.debug("Copying model..")
cp = SemanticModel(self.data.copy(), list(self.vocab))
logger.debug("Done copying model..")
return cp
def project_stims(self, stimwords):
"""Projects the stimuli given in [stimwords], which should be a list of lists
of words, into this feature space. Returns the average feature vector across
all the words in each stimulus.
"""
logger.debug("Projecting stimuli..")
stimlen = len(stimwords)
ndim = self.data.shape[0]
pstim = np.zeros((stimlen, ndim))
vset = set(self.vocab)
for t in range(stimlen):
dropped = 0
for w in stimwords[t]:
if w in vset:
pstim[t] += self[w]
else:
dropped += 1
pstim[t] /= (len(stimwords[t])-dropped)
return pstim
def uniformize(self):
"""Uniformizes each feature.
"""
logger.debug("Uniformizing features..")
R = np.zeros_like(self.data).astype(np.uint32)
for ri in range(self.data.shape[0]):
R[ri] = np.argsort(np.argsort(self.data[ri]))
self.data = R.astype(np.float64)
logger.debug("Done uniformizing...")
def gaussianize(self):
"""Gaussianizes each feature.
"""
logger.debug("Gaussianizing features..")
self.data = gaussianize_mat(self.data.T).T
logger.debug("Done gaussianizing..")
def zscore(self, axis=0):
"""Z-scores either each feature (if axis is 0) or each word (if axis is 1).
If axis is None nothing will be Z-scored.
"""
if axis is None:
logger.debug("Not Z-scoring..")
return
logger.debug("Z-scoring on axis %d"%axis)
if axis==1:
self.data = zscore(self.data.T).T
elif axis==0:
self.data = zscore(self.data)
def rectify(self):
"""Rectifies the features.
"""
self.data = np.vstack([-np.clip(self.data, -np.inf, 0), np.clip(self.data, 0, np.inf)])
def clip(self, sds):
"""Clips feature values more than [sds] standard deviations away from the mean
to that value. Another method for dealing with outliers.
"""
logger.debug("Truncating features to %d SDs.."%sds)
fsds = self.data.std(1)
fms = self.data.mean(1)
newdata = np.zeros(self.data.shape)
for fi in range(self.data.shape[0]):
newdata[fi] = np.clip(self.data[fi],
fms[fi]-sds*fsds[fi],
fms[fi]+sds*fsds[fi])
self.data = newdata
logger.debug("Done truncating..")
def find_words_like_word(self, word, n=10):
"""Finds the [n] words most like the given [word].
"""
return self.find_words_like_vec(self.data[:,self.vocab.index(word)], n)
def find_words_like_vec(self, vec, n=10, corr=True):
"""Finds the [n] words most like the given [vector].
"""
nwords = len(self.vocab)
if corr:
corrs = np.nan_to_num([np.corrcoef(vec, self.data[:,wi])[1,0] for wi in range(nwords)])
scorrs = np.argsort(corrs)
words = list(reversed([(corrs[i], self.vocab[i]) for i in scorrs[-n:]]))
else:
proj = np.nan_to_num(np.dot(vec, self.data))
sproj = np.argsort(proj)
words = list(reversed([(proj[i], self.vocab[i]) for i in sproj[-n:]]))
return words
def find_words_like_vecs(self, vecs, n=10, corr=True, distance_cull=None):
"""Find the `n` words most like each vector in `vecs`.
"""
if corr:
from text.npp import xcorr
vproj = xcorr(vecs, self.data.T)
else:
vproj = np.dot(vecs, self.data)
return np.vstack([self._get_best_words(vp, n, distance_cull) for vp in vproj])
def _get_best_words(self, proj, n=10, distance_cull=None):
"""Find the `n` words corresponding to the highest values in the vector `proj`.
If `distance_cull` is an int, greedily find words with the following algorithm:
1. Initialize the possible set of words with all words.
2. Add the best possible word, w*. Remove w* from the possible set.
3. Remove the `distance_cull` closest neighbors of w* from the possible set.
4. Goto 2.
"""
vocarr = np.array(self.vocab)
if distance_cull is None:
return vocarr[np.argsort(proj)[-n:][::-1]]
elif not isinstance(distance_cull, int):
raise TypeError("distance_cull should be an integer value, not %s" % str(distance_cull))
poss_set = set(self.vocab)
poss_set = np.arange(len(self.vocab))
best_words = []
while len(best_words) < n:
# Find best word in poss_set
best_poss = poss_set[proj[poss_set].argmax()]
# Add word to best_words
best_words.append(self.vocab[best_poss])
# Remove nearby words (by L2-norm..?)
bwdists = ((self.data.T - self.data[:,best_poss])**2).sum(1)
nearest_inds = np.argsort(bwdists)[:distance_cull+1]
poss_set = np.setdiff1d(poss_set, nearest_inds)
return np.array(best_words)
def similarity(self, word1, word2):
"""Returns the correlation between the vectors for [word1] and [word2].
"""
return np.corrcoef(self.data[:,self.vocab.index(word1)], self.data[:,self.vocab.index(word2)])[0,1]
def print_best_worst(self, ii, n=10):
vector = self.data[ii]
sv = np.argsort(self.data[ii])
print ("Best:")
print ("-------------")
for ni in range(1,n+1):
print ("%s: %0.08f"%(np.array(self.vocab)[sv[-ni]], vector[sv[-ni]]))
print ("\nWorst:")
print ("-------------")
for ni in range(n):
print ("%s: %0.08f"%(np.array(self.vocab)[sv[ni]], vector[sv[ni]]))
print ("\n")
def gaussianize(vec):
"""Uses a look-up table to force the values in [vec] to be gaussian."""
import scipy.stats
ranks = np.argsort(np.argsort(vec))
cranks = (ranks+1).astype(float)/(ranks.max()+2)
vals = scipy.stats.norm.isf(1-cranks)
zvals = vals/vals.std()
return zvals
def gaussianize_mat(mat):
"""Gaussianizes each column of [mat]."""
gmat = np.empty(mat.shape)
for ri in range(mat.shape[1]):
gmat[:,ri] = gaussianize(mat[:,ri])
return gmat
def zscore(mat, return_unzvals=False):
"""Z-scores the rows of [mat] by subtracting off the mean and dividing
by the standard deviation.
If [return_unzvals] is True, a matrix will be returned that can be used
to return the z-scored values to their original state.
"""
zmat = np.empty(mat.shape)
unzvals = np.zeros((zmat.shape[0], 2))
for ri in range(mat.shape[0]):
unzvals[ri,0] = np.std(mat[ri,:])
unzvals[ri,1] = np.mean(mat[ri,:])
zmat[ri,:] = (mat[ri,:]-unzvals[ri,1]) / (1e-10+unzvals[ri,0])
if return_unzvals:
return zmat, unzvals
return zmat
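# Minimal self-check sketch (synthetic data, not part of the original module):
# undo the z-scoring with the returned (std, mean) pairs.
if __name__ == "__main__":
    demo = np.arange(12, dtype=float).reshape(3, 4)
    zdemo, unz = zscore(demo, return_unzvals=True)
    assert np.allclose(zdemo * unz[:, :1] + unz[:, 1:], demo, atol=1e-6)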
| 36.831804
| 107
| 0.578877
|
15ef45d6140a90270fa9569c605952a740c7de37
| 73
|
py
|
Python
|
virtual/lib/python3.6/site-packages/grappelli/__init__.py
|
silver230/Instachat
|
a0fce33648855c167259341adc06412c4fa3e9c5
|
[
"Unlicense"
] | null | null | null |
virtual/lib/python3.6/site-packages/grappelli/__init__.py
|
silver230/Instachat
|
a0fce33648855c167259341adc06412c4fa3e9c5
|
[
"Unlicense"
] | null | null | null |
virtual/lib/python3.6/site-packages/grappelli/__init__.py
|
silver230/Instachat
|
a0fce33648855c167259341adc06412c4fa3e9c5
|
[
"Unlicense"
] | null | null | null |
VERSION = '2.12.1'
default_app_config = 'grappelli.apps.GrappelliConfig'
| 24.333333
| 53
| 0.780822
|
2cdd59c1aef0d4f06004df7716ac1837eddef054
| 1,269
|
py
|
Python
|
dataset_scripts/dataset_selector_line.py
|
shpotes/self-driving-car
|
7329e6213c483a7695ab4e97cf16c93ce6d0b25f
|
[
"MIT"
] | 1
|
2019-06-02T22:27:31.000Z
|
2019-06-02T22:27:31.000Z
|
dataset_scripts/dataset_selector_line.py
|
shpotes/self-driving-car
|
7329e6213c483a7695ab4e97cf16c93ce6d0b25f
|
[
"MIT"
] | null | null | null |
dataset_scripts/dataset_selector_line.py
|
shpotes/self-driving-car
|
7329e6213c483a7695ab4e97cf16c93ce6d0b25f
|
[
"MIT"
] | null | null | null |
import sys
from os import listdir
from os.path import isfile, isdir
import numpy as np
import cv2
video = sys.argv[1]
cap = cv2.VideoCapture(video)
kernel = np.ones((10,10),np.uint8)
kernel_black = np.ones((10,10),np.uint8)
sensitivity = 0
lower_white = np.array([0,0,0])
upper_white = np.array([180,255,120])
while(True):
# Capture frame-by-frame
ret, img = cap.read()
height, width, channels = img.shape
img2 = img#[int(height*0):int(height*0.7), int(width*0.2):int(width*0.8)]
hsv = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower_white, upper_white)
res = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_black)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
cv2.imshow('mask',mask)
#cv2.imshow('res',res)
cv2.imshow('image',img)
k = cv2.waitKey(0)
print(k)
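# Note: the key codes below are the typical OpenCV/Linux arrow-key codes
# (81 = left, 82 = up, 83 = right); 32 is the space bar and 113 is 'q'.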
if k==82:
print("forward")
#cv2.imwrite('./sem/'+carpeta[2:-1]+ar,img)
if k==81:
print("left")
#cv2.imwrite('./left_arrow/'+carpeta[2:-1]+ar,img)
if k==83:
print("right")
#cv2.imwrite('./right_arrow/'+carpeta[2:-1]+ar,img)
if k==32:
continue
if k==113:
break
cv2.destroyAllWindows()
# When everything done, release the capture
cap.release()
| 28.840909
| 77
| 0.631994
|
a59cc33e1be8318ebf85dde5c97df8d9674551ae
| 4,337
|
py
|
Python
|
libraries/mosek/9.3/tools/examples/python/qcqo1.py
|
TimDSF/SBSOS_ShapeSegmentation
|
e30495dcf71dc63d1d54f3b73132fcfa75d7647e
|
[
"MIT"
] | null | null | null |
libraries/mosek/9.3/tools/examples/python/qcqo1.py
|
TimDSF/SBSOS_ShapeSegmentation
|
e30495dcf71dc63d1d54f3b73132fcfa75d7647e
|
[
"MIT"
] | null | null | null |
libraries/mosek/9.3/tools/examples/python/qcqo1.py
|
TimDSF/SBSOS_ShapeSegmentation
|
e30495dcf71dc63d1d54f3b73132fcfa75d7647e
|
[
"MIT"
] | 1
|
2022-02-24T02:51:35.000Z
|
2022-02-24T02:51:35.000Z
|
##
# Copyright : Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
# File : qcqo1.py
#
# Purpose : Demonstrates how to solve a small quadratically constrained
# quadratic optimization problem using the MOSEK Python API.
##
import sys
import mosek
# Since the actual value of Infinity is ignored, we define it solely
# for symbolic purposes:
inf = 0.0
# Define a stream printer to grab output from MOSEK
def streamprinter(text):
sys.stdout.write(text)
sys.stdout.flush()
def main():
# Make a MOSEK environment
with mosek.Env() as env:
# Attach a printer to the environment
env.set_Stream(mosek.streamtype.log, streamprinter)
# Create a task
with env.Task(0, 0) as task:
# Attach a printer to the task
task.set_Stream(mosek.streamtype.log, streamprinter)
# Set up and input bounds and linear coefficients
bkc = [mosek.boundkey.lo]
blc = [1.0]
buc = [inf]
bkx = [mosek.boundkey.lo,
mosek.boundkey.lo,
mosek.boundkey.lo]
blx = [0.0, 0.0, 0.0]
bux = [inf, inf, inf]
c = [0.0, -1.0, 0.0]
asub = [[0], [0], [0]]
aval = [[1.0], [1.0], [1.0]]
numvar = len(bkx)
numcon = len(bkc)
NUMANZ = 3
# Append 'numcon' empty constraints.
# The constraints will initially have no bounds.
task.appendcons(numcon)
#Append 'numvar' variables.
# The variables will initially be fixed at zero (x=0).
task.appendvars(numvar)
#Optionally add a constant term to the objective.
task.putcfix(0.0)
for j in range(numvar):
# Set the linear term c_j in the objective.
task.putcj(j, c[j])
# Set the bounds on variable j
# blx[j] <= x_j <= bux[j]
task.putvarbound(j, bkx[j], blx[j], bux[j])
# Input column j of A
task.putacol(j, # Variable (column) index.
# Row index of non-zeros in column j.
asub[j],
aval[j]) # Non-zero Values of column j.
for i in range(numcon):
task.putconbound(i, bkc[i], blc[i], buc[i])
# Set up and input quadratic objective
qsubi = [0, 1, 2, 2]
qsubj = [0, 1, 0, 2]
qval = [2.0, 0.2, -1.0, 2.0]
task.putqobj(qsubi, qsubj, qval)
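# Assuming MOSEK's 1/2 x' Q x convention (consistent with the constraint
# comment below), the lower-triangular terms above add the quadratic
# objective contribution x0^2 + 0.1 x1^2 + x2^2 - x0 x2.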
# The lower triangular part of the Q^0
# matrix in the first constraint is specified.
# This corresponds to adding the term
# - x0^2 - x1^2 - 0.1 x2^2 + 0.2 x0 x2
qsubi = [0, 1, 2, 2]
qsubj = [0, 1, 2, 0]
qval = [-2.0, -2.0, -0.2, 0.2]
# put Q^0 in constraint with index 0.
task.putqconk(0, qsubi, qsubj, qval)
# Input the objective sense (minimize/maximize)
task.putobjsense(mosek.objsense.minimize)
# Optimize the task
task.optimize()
# Print a summary containing information
# about the solution for debugging purposes
task.solutionsummary(mosek.streamtype.msg)
prosta = task.getprosta(mosek.soltype.itr)
solsta = task.getsolsta(mosek.soltype.itr)
# Output a solution
xx = [0.] * numvar
task.getxx(mosek.soltype.itr,
xx)
if solsta == mosek.solsta.optimal:
print("Optimal solution: %s" % xx)
elif solsta == mosek.solsta.dual_infeas_cer:
print("Primal or dual infeasibility.\n")
elif solsta == mosek.solsta.prim_infeas_cer:
print("Primal or dual infeasibility.\n")
elif solsta == mosek.solsta.unknown:
print("Unknown solution status")
else:
print("Other solution status")
# call the main function
try:
main()
except mosek.MosekException as e:
print("ERROR: %s" % str(e.errno))
print("\t%s" % e.msg)
sys.exit(1)
except:
import traceback
traceback.print_exc()
sys.exit(1)
| 31.427536
| 79
| 0.5181
|
a60efbeeacac9571a49618bc4c07a0676fd8261b
| 345
|
py
|
Python
|
stubs/micropython-esp8266-1_11/websocket_helper.py
|
RonaldHiemstra/micropython-stubs
|
d97f879b01f6687baaebef1c7e26a80909c3cff3
|
[
"MIT"
] | 38
|
2020-10-18T21:59:44.000Z
|
2022-03-17T03:03:28.000Z
|
stubs/micropython-esp8266-1_11/websocket_helper.py
|
RonaldHiemstra/micropython-stubs
|
d97f879b01f6687baaebef1c7e26a80909c3cff3
|
[
"MIT"
] | 176
|
2020-10-18T14:31:03.000Z
|
2022-03-30T23:22:39.000Z
|
stubs/micropython-esp8266-1_11/websocket_helper.py
|
RonaldHiemstra/micropython-stubs
|
d97f879b01f6687baaebef1c7e26a80909c3cff3
|
[
"MIT"
] | 6
|
2020-12-28T21:11:12.000Z
|
2022-02-06T04:07:50.000Z
|
"""
Module: 'websocket_helper' on esp8266 v1.11
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.2.0-dev(9422289)', version='v1.11-8-g48dcbbe60 on 2019-05-29', machine='ESP module with ESP8266')
# Stubber: 1.1.0
DEBUG = 0
binascii = None
def client_handshake():
pass
hashlib = None
def server_handshake():
pass
sys = None
| 21.5625
| 155
| 0.689855
|
b2b8ee14a17a822ebb1f3dcae4b4a77efd7d590b
| 12,637
|
py
|
Python
|
discord/ui/select.py
|
brotherelric/deezcord.py
|
f7419bf2c67c2006702cccc4850cd9332bce00c6
|
[
"MIT"
] | null | null | null |
discord/ui/select.py
|
brotherelric/deezcord.py
|
f7419bf2c67c2006702cccc4850cd9332bce00c6
|
[
"MIT"
] | null | null | null |
discord/ui/select.py
|
brotherelric/deezcord.py
|
f7419bf2c67c2006702cccc4850cd9332bce00c6
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Copyright (c) 2021-present 404kuso
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import List, Optional, TYPE_CHECKING, Tuple, TypeVar, Type, Callable, Union
import inspect
import os
from .item import Item, ItemCallbackType
from ..enums import ComponentType
from ..partial_emoji import PartialEmoji
from ..emoji import Emoji
from ..interactions import Interaction
from ..utils import MISSING
from ..components import (
SelectOption,
SelectMenu,
)
__all__ = (
'Select',
'select',
)
if TYPE_CHECKING:
from .view import View
from ..types.components import SelectMenu as SelectMenuPayload
from ..types.interactions import (
ComponentInteractionData,
)
S = TypeVar('S', bound='Select')
V = TypeVar('V', bound='View', covariant=True)
class Select(Item[V]):
"""Represents a UI select menu.
This is usually represented as a drop down menu.
In order to get the selected items that the user has chosen, use :attr:`Select.values`.
.. versionadded:: 2.0
Parameters
------------
custom_id: :class:`str`
The ID of the select menu that gets received during an interaction.
If not given then one is generated for you.
placeholder: Optional[:class:`str`]
The placeholder text that is shown if nothing is selected, if any.
min_values: :class:`int`
The minimum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
max_values: :class:`int`
The maximum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
options: List[:class:`discord.SelectOption`]
A list of options that can be selected in this menu.
disabled: :class:`bool`
Whether the select is disabled or not.
row: Optional[:class:`int`]
The relative row this select menu belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
"""
__item_repr_attributes__: Tuple[str, ...] = (
'placeholder',
'min_values',
'max_values',
'options',
'disabled',
)
def __init__(
self,
*,
custom_id: str = MISSING,
placeholder: Optional[str] = None,
min_values: int = 1,
max_values: int = 1,
options: List[SelectOption] = MISSING,
disabled: bool = False,
row: Optional[int] = None,
) -> None:
super().__init__()
self._selected_values: List[str] = []
self._provided_custom_id = custom_id is not MISSING
custom_id = os.urandom(16).hex() if custom_id is MISSING else custom_id
options = [] if options is MISSING else options
self._underlying = SelectMenu._raw_construct(
custom_id=custom_id,
type=ComponentType.select,
placeholder=placeholder,
min_values=min_values,
max_values=max_values,
options=options,
disabled=disabled,
)
self.row = row
@property
def custom_id(self) -> str:
""":class:`str`: The ID of the select menu that gets received during an interaction."""
return self._underlying.custom_id
@custom_id.setter
def custom_id(self, value: str):
if not isinstance(value, str):
raise TypeError('custom_id must be None or str')
self._underlying.custom_id = value
@property
def placeholder(self) -> Optional[str]:
"""Optional[:class:`str`]: The placeholder text that is shown if nothing is selected, if any."""
return self._underlying.placeholder
@placeholder.setter
def placeholder(self, value: Optional[str]):
if value is not None and not isinstance(value, str):
raise TypeError('placeholder must be None or str')
self._underlying.placeholder = value
@property
def min_values(self) -> int:
""":class:`int`: The minimum number of items that must be chosen for this select menu."""
return self._underlying.min_values
@min_values.setter
def min_values(self, value: int):
self._underlying.min_values = int(value)
@property
def max_values(self) -> int:
""":class:`int`: The maximum number of items that must be chosen for this select menu."""
return self._underlying.max_values
@max_values.setter
def max_values(self, value: int):
self._underlying.max_values = int(value)
@property
def options(self) -> List[SelectOption]:
"""List[:class:`discord.SelectOption`]: A list of options that can be selected in this menu."""
return self._underlying.options
@options.setter
def options(self, value: List[SelectOption]):
if not isinstance(value, list):
raise TypeError('options must be a list of SelectOption')
if not all(isinstance(obj, SelectOption) for obj in value):
raise TypeError('all list items must subclass SelectOption')
self._underlying.options = value
def add_option(
self,
*,
label: str,
value: str = MISSING,
description: Optional[str] = None,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
default: bool = False,
):
"""Adds an option to the select menu.
To append a pre-existing :class:`discord.SelectOption` use the
:meth:`append_option` method instead.
Parameters
-----------
label: :class:`str`
The label of the option. This is displayed to users.
Can only be up to 100 characters.
value: :class:`str`
The value of the option. This is not displayed to users.
If not given, defaults to the label. Can only be up to 100 characters.
description: Optional[:class:`str`]
An additional description of the option, if any.
Can only be up to 100 characters.
emoji: Optional[Union[:class:`str`, :class:`.Emoji`, :class:`.PartialEmoji`]]
The emoji of the option, if available. This can either be a string representing
the custom or unicode emoji or an instance of :class:`.PartialEmoji` or :class:`.Emoji`.
default: :class:`bool`
Whether this option is selected by default.
Raises
-------
ValueError
The number of options exceeds 25.
"""
option = SelectOption(
label=label,
value=value,
description=description,
emoji=emoji,
default=default,
)
self.append_option(option)
def append_option(self, option: SelectOption):
"""Appends an option to the select menu.
Parameters
-----------
option: :class:`discord.SelectOption`
The option to append to the select menu.
Raises
-------
ValueError
The number of options exceeds 25.
"""
if len(self._underlying.options) > 25:
raise ValueError('maximum number of options already provided')
self._underlying.options.append(option)
@property
def disabled(self) -> bool:
""":class:`bool`: Whether the select is disabled or not."""
return self._underlying.disabled
@disabled.setter
def disabled(self, value: bool):
self._underlying.disabled = bool(value)
@property
def values(self) -> List[str]:
"""List[:class:`str`]: A list of values that have been selected by the user."""
return self._selected_values
@property
def width(self) -> int:
return 5
def to_component_dict(self) -> SelectMenuPayload:
return self._underlying.to_dict()
def refresh_component(self, component: SelectMenu) -> None:
self._underlying = component
def refresh_state(self, interaction: Interaction) -> None:
data: ComponentInteractionData = interaction.data # type: ignore
self._selected_values = data.get('values', [])
@classmethod
def from_component(cls: Type[S], component: SelectMenu) -> S:
return cls(
custom_id=component.custom_id,
placeholder=component.placeholder,
min_values=component.min_values,
max_values=component.max_values,
options=component.options,
disabled=component.disabled,
row=None,
)
@property
def type(self) -> ComponentType:
return self._underlying.type
def is_dispatchable(self) -> bool:
return True
def select(
*,
placeholder: Optional[str] = None,
custom_id: str = MISSING,
min_values: int = 1,
max_values: int = 1,
options: List[SelectOption] = MISSING,
disabled: bool = False,
row: Optional[int] = None,
) -> Callable[[ItemCallbackType], ItemCallbackType]:
"""A decorator that attaches a select menu to a component.
The function being decorated should have three parameters, ``self`` representing
the :class:`discord.ui.View`, the :class:`discord.ui.Select` being pressed and
the :class:`discord.Interaction` you receive.
In order to get the selected items that the user has chosen within the callback
use :attr:`Select.values`.
Parameters
------------
placeholder: Optional[:class:`str`]
The placeholder text that is shown if nothing is selected, if any.
custom_id: :class:`str`
The ID of the select menu that gets received during an interaction.
It is recommended not to set this parameter to prevent conflicts.
row: Optional[:class:`int`]
The relative row this select menu belongs to. A Discord component can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
min_values: :class:`int`
The minimum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
max_values: :class:`int`
The maximum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
options: List[:class:`discord.SelectOption`]
A list of options that can be selected in this menu.
disabled: :class:`bool`
Whether the select is disabled or not. Defaults to ``False``.
"""
def decorator(func: ItemCallbackType) -> ItemCallbackType:
if not inspect.iscoroutinefunction(func):
raise TypeError('select function must be a coroutine function')
func.__discord_ui_model_type__ = Select
func.__discord_ui_model_kwargs__ = {
'placeholder': placeholder,
'custom_id': custom_id,
'row': row,
'min_values': min_values,
'max_values': max_values,
'options': options,
'disabled': disabled,
}
return func
return decorator
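# Usage sketch (hypothetical view and option names, not part of this module):
# the decorated callback receives the view, the Select and the Interaction,
# and reads the chosen values from ``select.values``.
#
#   class ColourView(discord.ui.View):
#       @discord.ui.select(placeholder='Pick a colour',
#                          options=[discord.SelectOption(label='Red'),
#                                   discord.SelectOption(label='Blue')])
#       async def colour(self, select, interaction):
#           await interaction.response.send_message(select.values[0])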
| 35.200557
| 104
| 0.64968
|
d313ccef5a2797bd5e255a4ccfc3c31a6defe85b
| 4,355
|
py
|
Python
|
.circleci/regenerate.py
|
fabibo3/pytorch3d
|
36b7656753ae759aed2eb7ffb432b6eca4d42fe2
|
[
"BSD-3-Clause"
] | null | null | null |
.circleci/regenerate.py
|
fabibo3/pytorch3d
|
36b7656753ae759aed2eb7ffb432b6eca4d42fe2
|
[
"BSD-3-Clause"
] | null | null | null |
.circleci/regenerate.py
|
fabibo3/pytorch3d
|
36b7656753ae759aed2eb7ffb432b6eca4d42fe2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This script is adapted from the torchvision one.
"""
import os.path
import jinja2
import yaml
# The CUDA versions which have pytorch conda packages available for linux for each
# version of pytorch.
# Pytorch 1.4 also supports cuda 10.0 but we no longer build for cuda 10.0 at all.
CONDA_CUDA_VERSIONS = {
"1.4": ["cu92", "cu101"],
"1.5.0": ["cu92", "cu101", "cu102"],
"1.5.1": ["cu92", "cu101", "cu102"],
"1.6.0": ["cu92", "cu101", "cu102"],
"1.7.0": ["cu101", "cu102", "cu110"],
"1.7.1": ["cu101", "cu102", "cu110"],
"1.8.0": ["cu101", "cu102", "cu111"],
"1.8.1": ["cu101", "cu102", "cu111"],
"1.9.0": ["cu102", "cu111"],
}
def pytorch_versions_for_python(python_version):
if python_version in ["3.6", "3.7", "3.8"]:
return list(CONDA_CUDA_VERSIONS)
pytorch_without_py39 = ["1.4", "1.5.0", "1.5.1", "1.6.0", "1.7.0"]
return [i for i in CONDA_CUDA_VERSIONS if i not in pytorch_without_py39]
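# For instance (derived from the table above): python_version "3.9" yields
# ["1.7.1", "1.8.0", "1.8.1", "1.9.0"], while "3.6"-"3.8" get every key of
# CONDA_CUDA_VERSIONS.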
def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
w = []
for btype in ["conda"]:
for python_version in ["3.6", "3.7", "3.8", "3.9"]:
for pytorch_version in pytorch_versions_for_python(python_version):
for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
w += workflow_pair(
btype=btype,
python_version=python_version,
pytorch_version=pytorch_version,
cu_version=cu_version,
prefix=prefix,
upload=upload,
filter_branch=filter_branch,
)
return indent(indentation, w)
def workflow_pair(
*,
btype,
python_version,
pytorch_version,
cu_version,
prefix="",
upload=False,
filter_branch,
):
w = []
py = python_version.replace(".", "")
pyt = pytorch_version.replace(".", "")
base_workflow_name = f"{prefix}linux_{btype}_py{py}_{cu_version}_pyt{pyt}"
w.append(
generate_base_workflow(
base_workflow_name=base_workflow_name,
python_version=python_version,
pytorch_version=pytorch_version,
cu_version=cu_version,
btype=btype,
filter_branch=filter_branch,
)
)
if upload:
w.append(
generate_upload_workflow(
base_workflow_name=base_workflow_name,
btype=btype,
cu_version=cu_version,
filter_branch=filter_branch,
)
)
return w
def generate_base_workflow(
*,
base_workflow_name,
python_version,
cu_version,
pytorch_version,
btype,
filter_branch=None,
):
d = {
"name": base_workflow_name,
"python_version": python_version,
"cu_version": cu_version,
"pytorch_version": pytorch_version,
"context": "DOCKERHUB_TOKEN",
}
if filter_branch is not None:
d["filters"] = {"branches": {"only": filter_branch}}
return {f"binary_linux_{btype}": d}
def generate_upload_workflow(*, base_workflow_name, btype, cu_version, filter_branch):
d = {
"name": f"{base_workflow_name}_upload",
"context": "org-member",
"requires": [base_workflow_name],
}
if btype == "wheel":
d["subfolder"] = cu_version + "/"
if filter_branch is not None:
d["filters"] = {"branches": {"only": filter_branch}}
return {f"binary_{btype}_upload": d}
def indent(indentation, data_list):
if len(data_list) == 0:
return ""
return ("\n" + " " * indentation).join(
yaml.dump(data_list, default_flow_style=False).splitlines()
)
if __name__ == "__main__":
d = os.path.dirname(__file__)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(d),
lstrip_blocks=True,
autoescape=False,
keep_trailing_newline=True,
)
with open(os.path.join(d, "config.yml"), "w") as f:
f.write(env.get_template("config.in.yml").render(workflows=workflows))
| 27.389937
| 86
| 0.591963
|