Dataset schema (column name, type, and observed range or cardinality):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
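
The trailing columns are simple per-file statistics derived from the content field. Below is a minimal sketch of how three of them could be recomputed; the exact formulas used to build this table are an assumption here, and basic_file_stats is a hypothetical helper, not part of the dataset tooling.

```python
# Hypothetical sketch: recompute three of the per-file statistic columns
# from a raw `content` string. The exact definitions used when this table
# was generated are assumed, not documented here.
def basic_file_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    alphanum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alphanum / len(content) if content else 0.0,
    }

# Usage on a short two-line snippet:
print(basic_file_stats("A, B = map(int, input().split())\nprint((A - 1) * (B - 1))\n"))
```
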
hexsha: 9afbc58c35485195590c0111ab875fa7190d1ec1 | size: 621 | ext: py | lang: Python
max_stars_repo: kesko_webapp/models.py @ kounelisagis/kesko-food-waste-hackathon (6b66806aeaf4fc72ea96e47f152cd4bbd8b5a43d), licenses ["MIT"], stars: 1, 2019-12-29T16:16:54.000Z to 2019-12-29T16:16:54.000Z
max_issues_repo: kesko_webapp/models.py @ kounelisagis/kesko-food-waste-hackathon (6b66806aeaf4fc72ea96e47f152cd4bbd8b5a43d), licenses ["MIT"], issues: 14, 2019-11-16T18:27:51.000Z to 2022-02-26T20:17:01.000Z
max_forks_repo: kesko_webapp/models.py @ kounelisagis/kesko-food-waste-hackathon (6b66806aeaf4fc72ea96e47f152cd4bbd8b5a43d), licenses ["MIT"], forks: 8, 2019-11-15T20:27:32.000Z to 2020-08-26T16:21:48.000Z
content:
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
pass
class Article(models.Model):
"""
Models available articles
"""
name = models.CharField(max_length=200)
serial_id = models.CharField(max_length=200)
class ArticlePurchase(models.Model):
"""
Models users purchases.
"""
article = models.ForeignKey(Article, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
amount = models.IntegerField()
date = models.DateTimeField()
avg_line_length: 23.884615 | max_line_length: 80 | alphanum_fraction: 0.727858
count_classes: 498 | score_classes: 0.801932 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 80 | score_documentation: 0.128824

hexsha: 9afd4d7170021441a6b8eb952c84d874debdddcf | size: 5,925 | ext: py | lang: Python
max_stars_repo: source/prosumer.py @ gus0k/LEMsim (a008a2d25d1de9d5d07706ebeaaaa402bee97bef), licenses ["Apache-2.0"], stars: null
max_issues_repo: source/prosumer.py @ gus0k/LEMsim (a008a2d25d1de9d5d07706ebeaaaa402bee97bef), licenses ["Apache-2.0"], issues: null
max_forks_repo: source/prosumer.py @ gus0k/LEMsim (a008a2d25d1de9d5d07706ebeaaaa402bee97bef), licenses ["Apache-2.0"], forks: null
content:
"""
"""
Prosumer class, extends the battery controller
"""
import numpy as np
from source.batterycontroller import BatteryController
class Prosumer(BatteryController):
def __init__(self, owner_id, b_max, b_min, eff_c, eff_d, d_max, d_min, price_buy, price_sell, load, expected_price_buy, expected_price_sell, update_type, horizon):
super().__init__(owner_id, b_max, b_min, eff_c, eff_d, d_max, d_min)
self.price_buy = price_buy.copy()
self.horizon = horizon
self.expected_price_buy = expected_price_buy.copy()
self.price_sell = price_sell.copy()
self.expected_price_sell = expected_price_sell.copy()
self.load = load.copy()
self.time = -1
self.incurred_costs = np.zeros(len(load))
self.consumed_energy = np.zeros(len(load))
self.incurred_costs_wm = np.zeros(len(load))
self.consumed_energy_wm = np.zeros(len(load))
self.update_type = update_type
self.pre_market = []
self.post_market = []
## Solve default usage
#xs = self.find_optimal_step(load, price_buy, price_sell, None)
#xs = np.array([x / self.eff_c if x > 0 else x * self.eff_d for x in xs])
#self.profile_only_battery = xs + load #* self.resolution
# self.cost_only_battery = xs[1]
cons = []
for t in range(len(load)):
xs = self.find_optimal_step(load[t:], price_buy[t:], price_sell[t:], None)
self.update_charge(xs[0])
cons.append(xs[0] / eff_c if xs[0] > 0 else xs[0] * eff_d)
self.profile_only_battery = np.array(cons) + load
self.reset()
def estimate_consumption(self):
"""
Estimate the consumption of the next timeslot
Returns:
q: quantity wanted to be traded in the market
p: price at which it would normally be traded
"""
self.time += 1
t = self.time
load = self.load[t:].copy()
pb = self.expected_price_buy[t:].copy()
ps = self.expected_price_sell[t:].copy()
print(pb, ps)
xs = self.find_optimal_step(load, pb, ps, None) # battery usage
# Convert from battery usage to energy seen from outside
q = xs[0] / self.eff_c if xs[0] > 0 else xs[0] * self.eff_d
q += load[0] #* self.resolution
p = pb[0] if q > 0 else ps[0]
self.incurred_costs_wm[t] = q * p
self.consumed_energy_wm[t] = q
#print(q)
self.pre_market.append([pb.copy(), ps.copy(), self.charge, xs[0]])
return q, p
def update_expected_price(self, tq, tp, pb, ps, cost, q):
"""
Depending on the selected strategy, updates the expected
trading price accordingly
Parameters
-----------
tq: traded quantity
tp: traded price
pb: clearing buy price, can be None
ps: clearing sell price, can be None
cost: what was paid
q: the quantity bought or sold
"""
t = self.time
ut = self.update_type
new_sp = self.expected_price_sell[t + 1]
new_bp = self.expected_price_buy[t + 1]
if ut == 'donothing':
pass
elif ut == 'lastcp':
new_sp = ps if ps is not None else self.expected_price_sell[t + 1]
new_bp = pb if pb is not None else self.expected_price_buy[t + 1]
elif ut == 'lastpaid':
tmp = np.abs(cost / q) if not np.allclose(q, 0) else 0
if q < 0:
if tmp > new_bp:
new_sp = new_bp
else:
new_sp = tmp
else:
if tmp < new_sp:
new_bp = new_sp
else:
new_bp = tmp
if self.horizon:
print('aca me rompí')
self.expected_price_buy[t + 1:] = new_bp
self.expected_price_sell[t + 1:] = new_sp
else:
print('entré aca')
self.expected_price_buy[t + 1] = new_bp
self.expected_price_sell[t + 1] = new_sp
def process_market_results(self, traded_quantity, traded_price, pb_, ps_):
"""
Processes the market result and takes the appropriate
action to move forward
Params:
traded_quantity, amount of energy that was finally traded
in the market
traded_price: price at which it was traded.
"""
#print('entre', self.owner_id)
t = self.time
load = self.load[t:].copy()
pb = self.expected_price_buy[t:].copy()
ps = self.expected_price_sell[t:].copy()
pb[0] = self.price_buy[t]
ps[0] = self.price_sell[t]
#print(self.profile_only_battery[t] - self.load[t], traded_quantity)
# Respect the quantity to be traded in the market
commitment = traded_quantity if not np.allclose(traded_quantity, 0, atol=1e-5) else None
#print(commitment)
#print(self.charge)
xs = self.find_optimal_step(load, pb, ps, commitment) # battery usage
xf = xs[0]
self.post_market.append([pb.copy(), ps.copy(), commitment, self.charge, xf])
# Update the battery with the new action
self.update_charge(xf)
q = xf / self.eff_c if xf > 0 else xf * self.eff_d
#print(q, xf)
#print(q, load[0])
q += load[0] #* self.resolution
p = pb[0] if q > 0 else ps[0]
#print(q, '--------\n')
#print(traded_quantity, commitment, q)
print(traded_quantity, traded_price)
print('second part', pb, ps, self.price_buy, self.price_sell)
cost = traded_quantity * traded_price + (q - traded_quantity) * p
if q > 0: print(cost / q)
self.incurred_costs[t] = cost
self.consumed_energy[t] = q
if t < len(self.load) - 1:
self.update_expected_price(traded_quantity, traded_price, pb_, ps_, cost, q)
avg_line_length: 37.738854 | max_line_length: 167 | alphanum_fraction: 0.575021
count_classes: 5,796 | score_classes: 0.977898 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 1,619 | score_documentation: 0.273157

hexsha: 9afd605d71b6ed6dddc10236ff2ea972b58f32f8 | size: 1,630 | ext: py | lang: Python
max_stars_repo: tests/calculations/test_inner_goals_regression.py @ frc1678/server-2021-public (d61e35f8385bf1debc9daaaed40208f6c783ed77), licenses ["MIT"], stars: null
max_issues_repo: tests/calculations/test_inner_goals_regression.py @ frc1678/server-2021-public (d61e35f8385bf1debc9daaaed40208f6c783ed77), licenses ["MIT"], issues: null
max_forks_repo: tests/calculations/test_inner_goals_regression.py @ frc1678/server-2021-public (d61e35f8385bf1debc9daaaed40208f6c783ed77), licenses ["MIT"], forks: null
content:
#!/usr/bin/env python3
# Copyright (c) 2019 FRC Team 1678: Citrus Circuits
import pytest
import numpy as np
import os, sys
current_directory = os.path.dirname(os.path.realpath(__file__))
parent_directory = os.path.dirname(current_directory)
grandparent_directory = os.path.dirname(parent_directory)
sys.path.append(grandparent_directory)
from calculations import inner_goals_regression
def test_dimension_mismatch_error():
b = np.array([[1, 6, 7, 8]]).T
A = np.array([[2, 5, 4], [6, 9, 4], [9, 7, 3]])
with pytest.raises(ValueError):
inner_goals_regression.least_squares(A, b)
def test_singular_matrix_error():
b = np.array([[1, 6, 7, 8]]).T
A = np.array([[2, 5, 4], [6, 9, 4], [0, 0, 0], [0, 0, 0]])
with pytest.raises(ValueError):
inner_goals_regression.least_squares(A, b)
def test_no_cap():
b = np.array([[0, 1, 2]]).T
A = np.array([[0, 0], [1, 0], [1, -1]])
expected_result = np.array([[1], [-1]])
assert (expected_result == inner_goals_regression.least_squares(A, b)).all()
def test_capped():
b = np.array([[0, 1, 2]]).T
A = np.array([[0, 0], [1, 0], [1, -1]])
actual_result = inner_goals_regression.least_squares(A, b, cap_0_to_1=True)
expected_result = np.array([[1], [0]])
assert (abs(actual_result - expected_result) < 0.01).all()
def test_monte_carlo_accuracy():
b = np.array([[16, 78, 10]]).T
A = np.array([[5, 1], [25, 3], [3, 1.001]])
actual_result = inner_goals_regression.least_squares(A, b, cap_0_to_1=True)
expected_result = np.array([[1], [1]])
assert (abs(actual_result - expected_result) < 0.01).all()
avg_line_length: 33.265306 | max_line_length: 80 | alphanum_fraction: 0.648466
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 73 | score_documentation: 0.044785

hexsha: 9afec172d7c5d85ad984f002f65f8f198cc1e65d | size: 13,758 | ext: py | lang: Python
max_stars_repo: trove/tests/unittests/taskmanager/test_galera_clusters.py @ a4913994/openstack_trove (3b550048dd1e5841ad0f3295679e0f0b913a5687), licenses ["Apache-2.0"], stars: 244, 2015-01-01T12:04:44.000Z to 2022-03-25T23:38:39.000Z
max_issues_repo: trove/tests/unittests/taskmanager/test_galera_clusters.py @ a4913994/openstack_trove (3b550048dd1e5841ad0f3295679e0f0b913a5687), licenses ["Apache-2.0"], issues: 6, 2015-08-18T08:19:10.000Z to 2022-03-05T02:32:36.000Z
max_forks_repo: trove/tests/unittests/taskmanager/test_galera_clusters.py @ a4913994/openstack_trove (3b550048dd1e5841ad0f3295679e0f0b913a5687), licenses ["Apache-2.0"], forks: 178, 2015-01-02T15:16:58.000Z to 2022-03-23T03:30:20.000Z
content:
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest.mock import Mock
from unittest.mock import patch
from trove.cluster.models import ClusterTasks as ClusterTaskStatus
from trove.cluster.models import DBCluster
from trove.common.exception import GuestError
from trove.common.strategies.cluster.experimental.galera_common.taskmanager \
import GaleraCommonClusterTasks
from trove.common.strategies.cluster.experimental.galera_common.taskmanager \
import GaleraCommonTaskManagerStrategy
from trove.datastore import models as datastore_models
from trove.instance.models import BaseInstance
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import InstanceTasks
from trove.instance.service_status import ServiceStatuses
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
class GaleraClusterTasksTest(trove_testtools.TestCase):
def setUp(self):
super(GaleraClusterTasksTest, self).setUp()
util.init_db()
self.cluster_id = "1232"
self.cluster_name = "Cluster-1234"
self.tenant_id = "6789"
self.db_cluster = DBCluster(ClusterTaskStatus.NONE,
id=self.cluster_id,
created=str(datetime.date),
updated=str(datetime.date),
name=self.cluster_name,
task_id=ClusterTaskStatus.NONE._code,
tenant_id=self.tenant_id,
datastore_version_id="1",
deleted=False)
self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1",
compute_instance_id="compute-1",
task_id=InstanceTasks.NONE._code,
task_description=InstanceTasks.NONE._db_text,
volume_id="volume-1",
datastore_version_id="1",
cluster_id=self.cluster_id,
type="member")
self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2",
compute_instance_id="compute-2",
task_id=InstanceTasks.NONE._code,
task_description=InstanceTasks.NONE._db_text,
volume_id="volume-2",
datastore_version_id="1",
cluster_id=self.cluster_id,
type="member")
self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="member3",
compute_instance_id="compute-3",
task_id=InstanceTasks.NONE._code,
task_description=InstanceTasks.NONE._db_text,
volume_id="volume-3",
datastore_version_id="1",
cluster_id=self.cluster_id,
type="member")
mock_ds1 = Mock()
mock_ds1.name = 'pxc'
mock_dv1 = Mock()
mock_dv1.name = '7.1'
self.clustertasks = GaleraCommonClusterTasks(
Mock(), self.db_cluster, datastore=mock_ds1,
datastore_version=mock_dv1)
self.cluster_context = {
'replication_user': {
'name': "name",
'password': "password",
},
'cluster_name': self.cluster_name,
'admin_password': "admin_password"
}
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch.object(DBInstance, 'find_by')
@patch.object(InstanceServiceStatus, 'find_by')
@patch('trove.taskmanager.models.LOG')
def test_all_instances_ready_with_server_error(self,
mock_logging, mock_find,
mock_db_find, mock_update):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.NEW
(mock_db_find.return_value.
get_task_status.return_value) = InstanceTasks.BUILDING_ERROR_SERVER
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
mock_update.assert_called_with(self.cluster_id, None)
self.assertFalse(ret_val)
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch.object(DBInstance, 'find_by')
@patch.object(InstanceServiceStatus, 'find_by')
@patch('trove.taskmanager.models.LOG')
def test_all_instances_ready_bad_status(self, mock_logging,
mock_find, mock_db_find,
mock_update):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.FAILED
(mock_db_find.return_value.
get_task_status.return_value) = InstanceTasks.NONE
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
mock_update.assert_called_with(self.cluster_id, None)
self.assertFalse(ret_val)
@patch.object(DBInstance, 'find_by')
@patch.object(InstanceServiceStatus, 'find_by')
def test_all_instances_ready(self, mock_find, mock_db_find):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.INSTANCE_READY
(mock_db_find.return_value.
get_task_status.return_value) = InstanceTasks.NONE
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
self.assertTrue(ret_val)
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch.object(GaleraCommonClusterTasks, '_all_instances_ready',
return_value=False)
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
@patch('trove.common.strategies.cluster.experimental.galera_common.'
'taskmanager.LOG')
def test_create_cluster_instance_not_ready(self, mock_logging, mock_dv,
mock_ds, mock_find_all,
mock_load, mock_ready,
mock_update):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_load.return_value = BaseInstance(Mock(),
self.dbinst1, Mock(),
InstanceServiceStatus(
ServiceStatuses.NEW))
self.clustertasks.create_cluster(Mock(), self.cluster_id)
mock_update.assert_called_with(self.cluster_id)
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch.object(GaleraCommonClusterTasks, 'reset_task')
@patch.object(GaleraCommonClusterTasks, 'get_ip')
@patch.object(GaleraCommonClusterTasks, '_all_instances_ready')
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
@patch('trove.common.strategies.cluster.experimental.galera_common.'
'taskmanager.LOG')
def test_create_cluster_fail(self, mock_logging, mock_dv, mock_ds,
mock_find_all, mock_load, mock_ready, mock_ip,
mock_reset_task, mock_update_status):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_load.return_value = BaseInstance(Mock(),
self.dbinst1, Mock(),
InstanceServiceStatus(
ServiceStatuses.NEW))
mock_ip.return_value = "10.0.0.2"
guest_client = Mock()
guest_client.install_cluster = Mock(side_effect=GuestError("Error"))
with patch.object(GaleraCommonClusterTasks, 'get_guest',
return_value=guest_client):
self.clustertasks.create_cluster(Mock(), self.cluster_id)
mock_update_status.assert_called_with('1232')
mock_reset_task.assert_called_with()
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch('trove.common.strategies.cluster.experimental.galera_common.'
'taskmanager.LOG')
def test_grow_cluster_does_not_exist(self, mock_logging,
mock_update_status):
context = Mock()
bad_cluster_id = '1234'
new_instances = [Mock(), Mock()]
self.clustertasks.grow_cluster(context, bad_cluster_id, new_instances)
mock_update_status.assert_called_with(
'1234',
status=InstanceTasks.GROWING_ERROR)
@patch.object(GaleraCommonClusterTasks, '_check_cluster_for_root')
@patch.object(GaleraCommonClusterTasks, 'reset_task')
@patch.object(GaleraCommonClusterTasks, '_render_cluster_config')
@patch.object(GaleraCommonClusterTasks, 'get_ip')
@patch.object(GaleraCommonClusterTasks, 'get_guest')
@patch.object(GaleraCommonClusterTasks, '_all_instances_ready',
return_value=True)
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
def test_grow_cluster_successs(self, mock_dv, mock_ds, mock_find_all,
mock_load, mock_ready, mock_guest, mock_ip,
mock_render, mock_reset_task,
mock_check_root):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_ip.return_value = "10.0.0.2"
context = Mock()
new_instances = [Mock(), Mock()]
mock_guest.get_cluster_context = Mock(
return_value=self.cluster_context)
mock_guest.reset_admin_password = Mock()
self.clustertasks.grow_cluster(context, self.cluster_id,
new_instances)
mock_reset_task.assert_called_with()
@patch.object(GaleraCommonClusterTasks, 'reset_task')
@patch.object(Instance, 'load')
@patch.object(Instance, 'delete')
@patch.object(DBInstance, 'find_all')
@patch.object(GaleraCommonClusterTasks, 'get_guest')
@patch.object(GaleraCommonClusterTasks, 'get_ip')
@patch.object(GaleraCommonClusterTasks, '_render_cluster_config')
def test_shrink_cluster_success(self, mock_render, mock_ip, mock_guest,
mock_find_all, mock_delete, mock_load,
mock_reset_task):
mock_find_all.return_value.all.return_value = [self.dbinst1]
context = Mock()
remove_instances = [Mock()]
mock_ip.return_value = "10.0.0.2"
mock_guest.get_cluster_context = Mock(
return_value=self.cluster_context)
self.clustertasks.shrink_cluster(context, self.cluster_id,
remove_instances)
mock_reset_task.assert_called_with()
@patch.object(Instance, 'load')
@patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure')
@patch('trove.common.strategies.cluster.experimental.galera_common.'
'taskmanager.LOG')
def test_shrink_cluster_does_not_exist(self, mock_logging,
mock_update_status,
mock_load):
context = Mock()
bad_cluster_id = '1234'
remove_instances = [Mock()]
self.clustertasks.shrink_cluster(context, bad_cluster_id,
remove_instances)
mock_update_status.assert_called_with(
'1234',
status=InstanceTasks.SHRINKING_ERROR)
class GaleraTaskManagerStrategyTest(trove_testtools.TestCase):
def test_task_manager_cluster_tasks_class(self):
strategy = GaleraCommonTaskManagerStrategy()
self.assertFalse(
hasattr(strategy.task_manager_cluster_tasks_class,
'rebuild_cluster'))
self.assertTrue(callable(
strategy.task_manager_cluster_tasks_class.create_cluster))
def test_task_manager_api_class(self):
strategy = GaleraCommonTaskManagerStrategy()
self.assertFalse(hasattr(strategy.task_manager_api_class,
'add_new_node'))
avg_line_length: 50.029091 | max_line_length: 79 | alphanum_fraction: 0.611353
count_classes: 12,232 | score_classes: 0.889083 | count_generators: 0 | score_generators: 0 | count_decorators: 8,669 | score_decorators: 0.630106 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 1,954 | score_documentation: 0.142026

hexsha: 9afeccca8e9baead9183ce3029a46c08b65bc934 | size: 3,814 | ext: py | lang: Python
max_stars_repo: AStyleTest/file-py/locale_enum_i18n.py @ a-w/astyle (8225c7fc9b65162bdd958cabb87eedd9749f1ecd), licenses ["MIT"], stars: null
max_issues_repo: AStyleTest/file-py/locale_enum_i18n.py @ a-w/astyle (8225c7fc9b65162bdd958cabb87eedd9749f1ecd), licenses ["MIT"], issues: null
max_forks_repo: AStyleTest/file-py/locale_enum_i18n.py @ a-w/astyle (8225c7fc9b65162bdd958cabb87eedd9749f1ecd), licenses ["MIT"], forks: null
content:
#! /usr/bin/python
""" Enumerate selected locales and sort by codepage to determine
which languages the locales support.
"""
# to disable the print statement and use the print() function (version 3 format)
from __future__ import print_function
import libastyle # local directory
import locale
import os
import platform
import sys
# -----------------------------------------------------------------------------
def main():
"""Main processing function.
"""
if os.name != "nt":
libastyle.system_exit("This script is for Windows only!")
if platform.python_implementation() == "IronPython":
libastyle.system_exit("IronPython is not currently supported")
libastyle.set_text_color("yellow")
print(libastyle.get_python_version())
languages = (
# "chinese", # returns chinese-simplified
"chinese-simplified",
"chinese-traditional",
"czech",
"danish",
"dutch",
"belgian",
"english",
"finnish",
"french",
"german",
"greek",
"hungarian",
"icelandic",
"italian",
"japanese",
"korean",
"norwegian",
"polish",
"portuguese",
"russian",
"slovak",
"spanish",
"swedish",
"turkish",
)
# build list of locale names
locale_names = []
for language in languages:
# print language
try:
locale.setlocale(locale.LC_ALL, language)
except locale.Error:
print("unsupported locale: " + language)
# print(locale.getlocale(locale.LC_CTYPE))
locale_name = locale.setlocale(locale.LC_ALL, None)
locale_names.append(locale_name)
# sort the list of locale names
# the call changed with version 3
if sys.version_info[0] < 3:
locale_names.sort(sort_compare)
else:
locale_names.sort(key=get_codepage)
# print the list of locale names
prevoius_codepage = 0
total1252 = 0
for locale_name in locale_names:
codepage = get_codepage(locale_name)
if codepage == "1252":
total1252 += 1
if codepage != prevoius_codepage:
if prevoius_codepage == "1252":
print("1252 TOTAL " + str(total1252))
print()
prevoius_codepage = codepage
print(codepage + ' ' + locale_name)
# -----------------------------------------------------------------------------
def sort_compare(locale_name1, locale_name2):
"""Sort comparison function.
Not used by version 3.
"""
# get codepage from the locale
codepage1 = get_codepage(locale_name1)
codepage2 = get_codepage(locale_name2)
# then sort by codepage
if codepage1 < codepage2:
return -1
if codepage1 > codepage2:
return 1
# codepage is equal, sort by name
if locale_name1 < locale_name2:
return -1
return 1
# -----------------------------------------------------------------------------
def get_codepage(locale_name):
"""Extract codepage from the locale name.
"""
# extract codepage
codepage_sep = locale_name.rfind('.')
if codepage_sep == -1:
codepage = "0"
else:
codepage = locale_name[codepage_sep + 1:]
# if less than 4 bytes prefix with a zero
if len(codepage) == 3:
codepage = '0' + codepage
return codepage
# -----------------------------------------------------------------------------
# make the module executable
if __name__ == "__main__":
main()
libastyle.system_exit()
# -----------------------------------------------------------------------------
avg_line_length: 28.893939 | max_line_length: 80 | alphanum_fraction: 0.522811
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 1,589 | score_documentation: 0.416623

hexsha: 9aff6921a655770822f92c25247b7dfa80a21333 | size: 2,521 | ext: py | lang: Python
max_stars_repo: src/Coord_cmd.py @ aembillo/MNWellRecordGui (1683bdde75ff37a17726ce1cd7ba0135988f2992), licenses ["BSD-3-Clause"], stars: null
max_issues_repo: src/Coord_cmd.py @ aembillo/MNWellRecordGui (1683bdde75ff37a17726ce1cd7ba0135988f2992), licenses ["BSD-3-Clause"], issues: null
max_forks_repo: src/Coord_cmd.py @ aembillo/MNWellRecordGui (1683bdde75ff37a17726ce1cd7ba0135988f2992), licenses ["BSD-3-Clause"], forks: null
content:
""" 2015-07-23
Perform coordinate conversions from the command line.
Uses
"""
import argparse
import pyperclip
# p1 = argparse.ArgumentParser()
# p1.add_argument('x')
# print p1.parse_args(['123'])
#
# p2 = argparse.ArgumentParser()
# p2.add_argument('-d', action='store_const',const='dak')
# print p2.parse_args(['-d'])
#
# p3 = argparse.ArgumentParser()
# p3.add_argument('-d', action='store_const',const='dak')
# p3.add_argument('x')
# p3.add_argument('y')
# print p3.parse_args(['-d','1','2'])
#p1.add_argument(
from Coordinate_Transform import DCcoordinate_projector
# #
# # parser = argparse.ArgumentParser()
# # parser.add_argument("coord_1")
# # parser.add_argument("coord_2")
# # args = parser.parse_args()
# # x,y = args.coord_1, args.coord_2
#
def coord_convert():
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dak', action='store_const', const='dak', help="return Dakota County coords on clipboard")
parser.add_argument('-u','--utm', action='store_const', const='utm', help="return UTM NAD 83, Zone 15 coords on clipboard")
parser.add_argument('x')
parser.add_argument('y')
args = parser.parse_args()
print 'args=',args
coordtext = '%s,%s'%( args.x, args.y)
Cprojector = DCcoordinate_projector()
cliptext = Cprojector.handle_unspecified_coords(coordtext)
#print outtext
try:
if args.dak:
cliptext = '%4.2f,%4.2f'%(Cprojector.dakx,Cprojector.daky)
#print 'returning dakx,daky to clipboard "%s"'%cliptext
elif args.utm:
cliptext = '%4.2f,%4.2f'%(Cprojector.utmx,Cprojector.utmy)
#print 'returning utmx,utmy to clipboard "%s"'%cliptext
except:
pass
pyperclip.copy(cliptext)
pyperclip.paste()
return cliptext
def test_parse_args():
import sys
sys.argv = ["prog", '-d', "93.0444", "44.5926"]
rv = coord_convert()
print '>>\n'+ str(rv) +'\n================'
sys.argv = ["prog", '--utm', "93.0444", "44.5926"]
rv = coord_convert()
print '>>\n'+ str(rv) +'\n================'
if __name__ == '__main__':
#test_parse_args()
coord_convert()
'''
ERROR coordinates not recognized or not within Dakota County
"570931,1441"
496475.91,4937695.85
Dakota Co: 570931, 144108
Dakota Co: 570931.0, 144108.0
UTM : 496475.91, 4937695.85
D.d : -93.044399765, 44.592598646
D M.m : -93 2.663986, 44 35.555919
D M S.s : -93 2 39.839", 44 35 33.355"'''
avg_line_length: 28.647727 | max_line_length: 127 | alphanum_fraction: 0.623165
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 1,454 | score_documentation: 0.576755

hexsha: 9aff8c7e14210fed3124a5e6c2fdfe6fc51837d4 | size: 58 | ext: py | lang: Python
max_stars_repo: contest/abc106/A.py @ mola1129/atcoder (1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db), licenses ["MIT"], stars: null
max_issues_repo: contest/abc106/A.py @ mola1129/atcoder (1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db), licenses ["MIT"], issues: null
max_forks_repo: contest/abc106/A.py @ mola1129/atcoder (1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db), licenses ["MIT"], forks: null
content:
A, B = map(int, input().split())
print((A - 1) * (B - 1))
avg_line_length: 19.333333 | max_line_length: 32 | alphanum_fraction: 0.465517
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 0 | score_documentation: 0

hexsha: b1011b3a30f3ce240dd73397c6dc7062b1511e60 | size: 774 | ext: py | lang: Python
max_stars_repo: pythonmisc/string_manipulation.py @ davikawasaki/python-misc-module-library (c66b3e8be09db741c3b62d3a4e4a92ce70e1edb5), licenses ["MIT"], stars: null
max_issues_repo: pythonmisc/string_manipulation.py @ davikawasaki/python-misc-module-library (c66b3e8be09db741c3b62d3a4e4a92ce70e1edb5), licenses ["MIT"], issues: null
max_forks_repo: pythonmisc/string_manipulation.py @ davikawasaki/python-misc-module-library (c66b3e8be09db741c3b62d3a4e4a92ce70e1edb5), licenses ["MIT"], forks: null
content:
#! /usr/bin/env python
# Version: 0.1.1
import re
def convert_ws_header_vb_attributes(df):
output_txt = ""
for key in df.keys():
variant_array = "\'Dim "
i = 0
for word in [w.capitalize().replace('\t', '') for w in str(key).lower().split("(")[0].split(" ")]:
if i == 0:
word = word.lower()
if word != "" and word != "-":
variant_array += word
i += 1
variant_array = variant_array.rstrip()
variant_array += "() As Variant\n"
output_txt += variant_array
return output_txt
def remove_special_characters(text):
_vec = ''.join(re.split(r'[^a-zA-Z]', text)).split()
if len(_vec) == 1:
return _vec[0]
else:
return text
avg_line_length: 24.967742 | max_line_length: 106 | alphanum_fraction: 0.524548
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 96 | score_documentation: 0.124031

hexsha: b101387aab58adbece7fb5e7de6f69fdf986d8dd | size: 6,979 | ext: py | lang: Python
max_stars_repo: ALLCools/clustering/incremental_pca.py @ mukamel-lab/ALLCools (756ef790665c6ce40633873211929ea92bcccc21), licenses ["MIT"], stars: 5, 2019-07-16T17:27:15.000Z to 2022-01-14T19:12:27.000Z
max_issues_repo: ALLCools/clustering/incremental_pca.py @ mukamel-lab/ALLCools (756ef790665c6ce40633873211929ea92bcccc21), licenses ["MIT"], issues: 12, 2019-10-17T19:34:43.000Z to 2022-03-23T16:04:18.000Z
max_forks_repo: ALLCools/clustering/incremental_pca.py @ mukamel-lab/ALLCools (756ef790665c6ce40633873211929ea92bcccc21), licenses ["MIT"], forks: 4, 2019-10-18T23:43:48.000Z to 2022-02-12T04:12:26.000Z
content:
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import IncrementalPCA as _IncrementalPCA
from ..count_matrix.zarr import dataset_to_array
def _normalize_per_cell(matrix, cell_sum):
print('normalize per cell to CPM')
if cell_sum is None:
norm_vec = (matrix.sum(axis=1) + 1) / 1000000
else:
norm_vec = cell_sum / 1000000
norm_vec = norm_vec.values
norm_vec = norm_vec.astype(np.float32)
matrix /= norm_vec[:, None]
return matrix
class IncrementalPCA:
def __init__(self, n_components=100, sparse=False, normalize_per_cell=True, log1p=True, scale=True, **kwargs):
self.pca = _IncrementalPCA(n_components=n_components, **kwargs)
self.sparse = sparse
self.normalize_per_cell = normalize_per_cell
self.log1p = log1p
self.scale = scale
self.scaler = None
self.cell_sum = None
self.use_features = None
self.obs_dim = None
self.var_dim = None
self.load_chunk = None
self._fit = False
return
def fit(self,
ds,
use_cells=None,
use_features=None,
chunk=500000,
cell_sum=None,
var_dim='gene',
obs_dim='cell',
load_chunk=None,
random_shuffle=True):
self.cell_sum = cell_sum
self.use_features = use_features
self.obs_dim = obs_dim
self.var_dim = var_dim
self.load_chunk = chunk if load_chunk is None else load_chunk
# prepare index
cell_index = ds.get_index(obs_dim)
if use_cells is not None:
cell_index = cell_index[cell_index.isin(use_cells)].copy()
# random shuffle to make fitting more stable
if random_shuffle:
cell_order = cell_index.tolist()
np.random.shuffle(cell_order)
cell_order = pd.Index(cell_order)
else:
cell_order = cell_index
# fit by chunks
chunk_stds = []
chunk_means = []
for chunk_start in range(0, cell_order.size, chunk):
print(f'Fitting {chunk_start}-{chunk_start + chunk}')
_chunk_cells = cell_order[chunk_start:chunk_start + chunk]
_chunk_matrix, _chunk_cells, _chunk_genes = dataset_to_array(
ds,
use_cells=_chunk_cells,
use_genes=use_features,
sparse=self.sparse,
obs_dim=obs_dim,
var_dim=var_dim,
chunk=self.load_chunk)
if cell_sum is not None:
_chunk_cell_sum = cell_sum.loc[_chunk_cells]
else:
_chunk_cell_sum = None
_chunk_matrix = _chunk_matrix.astype(np.float32)
# normalize cell counts
if self.normalize_per_cell:
_chunk_matrix = _normalize_per_cell(matrix=_chunk_matrix,
cell_sum=_chunk_cell_sum)
# log transfer
if self.log1p:
print('log1p transform')
_chunk_matrix = np.log1p(_chunk_matrix)
# scale
if self.scale:
print('Scale')
if self.scaler is None:
# assume the chunk is large enough, so only use the first chunk to fit
# e.g., 5,000,000 cells
self.scaler = StandardScaler(with_mean=not self.sparse)
_chunk_matrix = self.scaler.fit_transform(_chunk_matrix)
else:
# transform remaining cells
_chunk_matrix = self.scaler.transform(_chunk_matrix)
# save chunk stats for checking robustness
chunk_stds.append(_chunk_matrix.std(axis=0))
chunk_means.append(_chunk_matrix.mean(axis=0))
# fit IncrementalPCA
print('Fit PCA')
self.pca.partial_fit(_chunk_matrix)
self._fit = True
return
def transform(self, ds, use_cells=None, chunk=100000):
if not self._fit:
raise ValueError('fit first before transform')
cell_index = ds.get_index(self.obs_dim)
if use_cells is not None:
cell_index = cell_index[cell_index.isin(use_cells)].copy()
total_pcs = []
for chunk_start in range(0, cell_index.size, chunk):
print(f'Transforming {chunk_start}-{chunk_start + chunk}')
_chunk_cells = cell_index[chunk_start:chunk_start + chunk]
_chunk_matrix, _chunk_cells, _chunk_genes = dataset_to_array(
ds,
use_cells=_chunk_cells,
use_genes=self.use_features,
sparse=self.sparse,
obs_dim=self.obs_dim,
var_dim=self.var_dim,
chunk=self.load_chunk)
if self.cell_sum is not None:
_chunk_cell_sum = self.cell_sum.loc[_chunk_cells]
else:
_chunk_cell_sum = None
_chunk_matrix = _chunk_matrix.astype(np.float32)
# normalize cell counts
if self.normalize_per_cell:
_chunk_matrix = _normalize_per_cell(matrix=_chunk_matrix,
cell_sum=_chunk_cell_sum)
# log transfer
if self.log1p:
print('log1p transform')
_chunk_matrix = np.log1p(_chunk_matrix)
# scale
if self.scale:
print('Scale')
if self.scaler is None:
# this shouldn't happen in transform
raise ValueError('scale is True, but scaler not exist')
else:
# transform remaining cells
_chunk_matrix = self.scaler.transform(_chunk_matrix)
# transform
print('Transform PCA')
pcs = self.pca.transform(_chunk_matrix)
pcs = pd.DataFrame(pcs, index=_chunk_cells)
total_pcs.append(pcs)
total_pcs = pd.concat(total_pcs)
return total_pcs
def fit_transform(self, ds,
use_cells=None,
use_features=None,
chunk=500000,
cell_sum=None,
var_dim='gene',
obs_dim='cell',
load_chunk=None,
random_shuffle=True):
self.fit(ds,
use_cells=use_cells,
use_features=use_features,
chunk=chunk,
cell_sum=cell_sum,
var_dim=var_dim,
obs_dim=obs_dim,
load_chunk=load_chunk,
random_shuffle=random_shuffle)
total_pcs = self.transform(ds, use_cells=use_cells, chunk=self.load_chunk)
return total_pcs
avg_line_length: 36.160622 | max_line_length: 114 | alphanum_fraction: 0.55796
count_classes: 6,432 | score_classes: 0.921622 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 703 | score_documentation: 0.100731

hexsha: b103007297614b73c2ae8e2e4d5c35bd947a709c | size: 1,051 | ext: py | lang: Python
max_stars_repo: wordcount/views.py @ chinya07/Django-wordcount (57808f922a140b341807a5b5352864cec5728695), licenses ["MIT"], stars: null
max_issues_repo: wordcount/views.py @ chinya07/Django-wordcount (57808f922a140b341807a5b5352864cec5728695), licenses ["MIT"], issues: null
max_forks_repo: wordcount/views.py @ chinya07/Django-wordcount (57808f922a140b341807a5b5352864cec5728695), licenses ["MIT"], forks: null
content:
from django.http import HttpResponse
from django.shortcuts import render
import operator
def home(request):
return render(request, 'home.html')
def count(request):
fulltext1=request.GET['fulltext1']
fulltext2=request.GET['fulltext2']
wordlist1=fulltext1.split(' ')
wordlist2=fulltext2.split(' ')
from difflib import SequenceMatcher
similarity_ratio = SequenceMatcher(None, wordlist1, wordlist2).ratio()
# count=0
# for word in wordlist1:
# if word in wordlist2:
# count+=1
# worddic = {}
#
# for word in wordlist:
# if word in worddic:
# #increase
# worddic[word] += 1
# else:
# # add to worddic
# worddic[word] = 1
#sortedwords=sorted(worddic.items(), key=operator.itemgetter(1), reverse=True)
return render(request, 'count.html', {'fulltext1':fulltext1, 'fulltext2':fulltext2, 'count':similarity_ratio})
def about(request):
return render(request, 'about.html')
avg_line_length: 30.028571 | max_line_length: 115 | alphanum_fraction: 0.617507
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 437 | score_documentation: 0.415794

hexsha: b103796b9eb62b2e02e96ca3c1828f5ebc3886b8 | size: 3,137 | ext: py | lang: Python
max_stars_repo: example/06-modules/modules.py @ iten-engineering/python (97a79973c7727cd881974462db99a99d612b55f9), licenses ["MIT"], stars: null
max_issues_repo: example/06-modules/modules.py @ iten-engineering/python (97a79973c7727cd881974462db99a99d612b55f9), licenses ["MIT"], issues: null
max_forks_repo: example/06-modules/modules.py @ iten-engineering/python (97a79973c7727cd881974462db99a99d612b55f9), licenses ["MIT"], forks: null
content:
# =============================================================================
# Python examples - modules
# =============================================================================
# -----------------------------------------------------------------------------
# Module
# -----------------------------------------------------------------------------
# Module
# - In Python, definitions (functions, classes) can be moved out into a separate file (module).
# - The definitions of a module can be imported into other modules or into the main program and used there.
# - The file name corresponds to the module name with the suffix ".py".
# - Within a module, the module name is available via the internal variable "__name__".
import fibo
print ("Fibo sample:")
fibo.print_fib(100)
result = fibo.fib(100)
print(result)
print("Show module details:")
print(dir(fibo))
# -----------------------------------------------------------------------------
# Import
# -----------------------------------------------------------------------------
# Sample: `import module `
# - imports everything and keeps it in the module's namespace
# - module.func()
# - module.className.func()
# Sample: `from module import *`
# - imports everything under the current namespace
# - func()
# - className.func()
# > not recommended
# Sample: `from module import className`
# - selectively imports under the current namespace
# - className.func()
# - like standard modules: math, os, sys
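# Illustrative sketch (added for clarity, not part of the original example file):
# the three import forms above, shown with the standard "math" module.
import math                    # import module        -> math.sqrt(16)
from math import sqrt          # from module import X -> sqrt(16)
from math import *             # from module import * -> floor(2.7)  (not recommended)
print(math.sqrt(16), sqrt(16), floor(2.7))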
# -----------------------------------------------------------------------------
# Import with custom name
# -----------------------------------------------------------------------------
# game.py
# import the draw module
# if visual_mode:
# # in visual mode, we draw using graphics
# import draw_visual as draw
# else:
# # in textual mode, we print out text
# import draw_textual as draw
#
# def main():
# result = play_game()
# # this can either be visual or textual depending on visual_mode
# draw.draw_game(result)
# -----------------------------------------------------------------------------
# Executing modules as scripts
# -----------------------------------------------------------------------------
# When you run a Python module with: python fibo.py <arguments>
# - the code in the module will be executed, just as if you imported it,
# - but with the __name__ set to "__main__".
# That means that by adding this code at the end of your module:
# if __name__ == "__main__":
# import sys
# fib(int(sys.argv[1]))
# you can make the file usable as a script as well as an importable module,
# because the code that parses the command line only runs if the module is executed as the “main” file!
# -----------------------------------------------------------------------------
# Further details
# -----------------------------------------------------------------------------
# Links:
# - https://docs.python.org/3/tutorial/modules.html
# - https://realpython.com/python-modules-packages/
# =============================================================================
# The end.
avg_line_length: 34.855556 | max_line_length: 115 | alphanum_fraction: 0.476889
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 2,957 | score_documentation: 0.940522

hexsha: b1040fd46ded01c83aec3ec914b8371b0061edd6 | size: 5,010 | ext: py | lang: Python
max_stars_repo: .github/docker/checker_image/scripts/check_copyright_headers.py @ TomasRejhons/siren (9ef3ace7174cbdb48b9e45a2db104f3f5c4b9825), licenses ["MIT"], stars: null
max_issues_repo: .github/docker/checker_image/scripts/check_copyright_headers.py @ TomasRejhons/siren (9ef3ace7174cbdb48b9e45a2db104f3f5c4b9825), licenses ["MIT"], issues: null
max_forks_repo: .github/docker/checker_image/scripts/check_copyright_headers.py @ TomasRejhons/siren (9ef3ace7174cbdb48b9e45a2db104f3f5c4b9825), licenses ["MIT"], forks: 1, 2021-05-26T12:06:12.000Z to 2021-05-26T12:06:12.000Z
content:
#!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2021 silicon-village
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
import os
import re
import sys
import datetime
import io
from distutils.spawn import find_executable
from subprocess import check_output
copyright_pattern = re.compile(
r"\bCopyright \(c\)[^a-zA-Z]*\b\d{4}\b",
re.IGNORECASE)
year_pattern = re.compile(r"\b\d{4}\b")
file_exceptions = [
".copyrightignore"
]
class terminal_colors:
INFO = '\033[94m'
WARNING = '\033[33m'
ERROR = '\033[91m'
END = '\033[0m'
def get_ext(file_path):
file_name = os.path.basename(file_path)
file_name, file_ext = os.path.splitext(file_name)
return file_ext
def query_files(query, files):
failed_queries = {}
queries = {}
for filename in files:
with io.open(filename, "r+", encoding="utf-8") as f:
try:
matches = re.findall(query, f.read())
queries[filename] = matches if len(matches) > 0 else None
except re.error as error:
failed_queries[filename] = error
return queries, failed_queries
def check_files(check_files):
queries, failures = query_files(copyright_pattern, check_files)
current_year = datetime.datetime.now().year
missing = []
outdated = {}
for filename, tokens in queries.items():
if not tokens:
missing.append(filename)
continue
outdated[filename] = None
for token in tokens:
copyright_years = re.findall(year_pattern, token)
most_recent_year = int(
copyright_years[-1]) if copyright_years else 0
if (most_recent_year != current_year):
if not outdated[filename]:
outdated[filename] = []
outdated[filename].append(token)
outdated = {k: v for k, v in outdated.items() if v is not None}
if len(failures) > 0:
print(terminal_colors.ERROR +
"Failed to search:" + terminal_colors.END)
for filename, error in failures.items():
print(filename)
print()
if len(missing) > 0:
print(terminal_colors.ERROR +
"Missing copyright:" + terminal_colors.END)
for filename in missing:
print(filename)
print()
if len(outdated) > 0:
print(terminal_colors.ERROR +
"Outdated copyright:" + terminal_colors.END)
for filename, tokens in outdated.items():
print(filename)
for token in tokens:
print("\t", terminal_colors.WARNING +
token + terminal_colors.END)
print()
print("\n=== Files Checked ===")
for filename in check_files:
print(terminal_colors.INFO + filename + terminal_colors.END)
if len(outdated) > 0 or len(missing) > 0:
sys.exit(-1)
else:
sys.exit(0)
if __name__ == "__main__":
argument_parser = argparse.ArgumentParser(
description="Check that modified files include copyright headers with current year.")
argument_parser.add_argument(
"branch", type=str, help="Branch from which to compute the diff")
args = argument_parser.parse_args()
files = None
if not find_executable("git"):
print(terminal_colors.ERROR + "Missing git" + terminal_colors.END)
sys.exit(1)
try:
ignored = open(".copyrightignore").readlines()
for file in ignored:
file_exceptions.append(file.strip())
except FileNotFoundError:
pass
out = check_output(["git", "diff", args.branch,
"--name-only"])
files = out.decode('utf-8').split("\n")
if files:
file_to_check = list(filter(lambda x: os.path.isfile(x) and os.path.basename(
x) not in file_exceptions and get_ext(x) not in file_exceptions and len(x) > 0, files))
check_files(file_to_check)
avg_line_length: 28.465909 | max_line_length: 99 | alphanum_fraction: 0.642515
count_classes: 112 | score_classes: 0.022355 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 1,520 | score_documentation: 0.303393

hexsha: b104d1fb0a99c316174f26991ded219303201426 | size: 1,584 | ext: py | lang: Python
max_stars_repo: setup.py @ finsberg/scholar_bot (b8a9fc22cfa1888d58a1881235e57a98769153fb), licenses ["MIT"], stars: null
max_issues_repo: setup.py @ finsberg/scholar_bot (b8a9fc22cfa1888d58a1881235e57a98769153fb), licenses ["MIT"], issues: null
max_forks_repo: setup.py @ finsberg/scholar_bot (b8a9fc22cfa1888d58a1881235e57a98769153fb), licenses ["MIT"], forks: null
content:
# -*- coding: utf-8 -*-
import os
import sys
import platform
import glob
from setuptools import setup, find_packages, Command
if sys.version_info < (3, 6):
print("Python 3.6 or higher required, please upgrade.")
sys.exit(1)
version = "0.1"
name = "scholar_bot"
description = ("Post updates on Slack about citations "
"for the Computational Phyisoligy department at Simula")
scripts = glob.glob("bin/*")
requirements = ['slackclient', 'scholarly', 'pyyaml']
if platform.system() == "Windows" or "bdist_wininst" in sys.argv:
# In the Windows command prompt we can't execute Python scripts
# without a .py extension. A solution is to create batch files
# that runs the different scripts.
batch_files = []
for script in scripts:
batch_file = script + ".bat"
f = open(batch_file, "w")
f.write(r'python "%%~dp0\%s" %%*\n' % os.path.split(script)[1])
f.close()
batch_files.append(batch_file)
scripts.extend(batch_files)
def run_install():
"Run installation"
# Call distutils to perform installation
setup(
name=name,
description=description,
version=version,
author='Henrik Finsberg',
license="MIT",
author_email="henrikn@simula.no",
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
packages=["scholar_bot"],
package_dir={"scholar_bot": "scholar_bot"},
# install_requires=requirements,
scripts=scripts,
zip_safe=False,
)
if __name__ == "__main__":
run_install()
avg_line_length: 26.847458 | max_line_length: 71 | alphanum_fraction: 0.636364
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 663 | score_documentation: 0.418561

hexsha: b105030052fdd1f7dc3bd7505e5951494ee00846 | size: 3,226 | ext: py | lang: Python
max_stars_repo: time_series_rnn_without_wrapper.py @ KT12/hands_on_machine_learning (6de2292b43d7c34b6509ad61dab2da4f7ec04894), licenses ["MIT"], stars: null
max_issues_repo: time_series_rnn_without_wrapper.py @ KT12/hands_on_machine_learning (6de2292b43d7c34b6509ad61dab2da4f7ec04894), licenses ["MIT"], issues: null
max_forks_repo: time_series_rnn_without_wrapper.py @ KT12/hands_on_machine_learning (6de2292b43d7c34b6509ad61dab2da4f7ec04894), licenses ["MIT"], forks: null
content:
# Predict time series w/o using OutputProjectionWrapper
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Create time series
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
return t * np.sin(t) / 3 + 2 * np.sin(t * 5)
def next_batch(batch_size, n_steps):
t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
Ts = t0 + np.arange(0., n_steps + 1) * resolution
ys = time_series(Ts)
return ys[:,:-1].reshape(-1, n_steps, 1), ys[:,1:].reshape(-1, n_steps, 1)
t = np.linspace(t_min, t_max, (t_max - t_min) // resolution)
n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons,
activation=tf.nn.relu)
rnn_outputs, states = tf.nn.dynamic_rnn(basic_cell, X,
dtype=tf.float32)
learning_rate = 0.001
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
loss = tf.reduce_sum(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
n_iterations = 1000
batch_size = 50
with tf.Session() as sess:
init.run()
for k in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if k % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(k, "\tMSE: ", mse)
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
print(y_pred)
# Generate a creative new sequence
n_iterations = 2000
batch_size = 50
with tf.Session() as sess:
init.run()
for k in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if k % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(k, "\tMSE: ", mse)
sequence1 = [0. for j in range(n_steps)]
for k in range(len(t) - n_steps):
X_batch = np.array(sequence1[-n_steps:]).reshape(1, n_steps, 1)
y_pred = sess.run(outputs, feed_dict={X: X_batch})
sequence1.append(y_pred[0, -1, 0])
sequence2 = [time_series(i * resolution + t_min + (t_max-t_min/3)) for i in range(n_steps)]
for j in range(len(t) - n_steps):
X_batch = np.array(sequence2[-n_steps:]).reshape(1, n_steps, 1)
y_pred = sess.run(outputs, feed_dict={X: X_batch})
sequence2.append(y_pred[0, -1, 0])
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(t, sequence1, 'b-')
plt.plot(t[:n_steps],sequence1[:n_steps], 'b-', linewidth=3)
plt.xlabel('Time')
plt.ylabel('Value')
plt.subplot(122)
plt.plot(t, sequence2, 'b-')
plt.plot(t[:n_steps], sequence2[:n_steps], 'b-', linewidth=3)
plt.xlabel('Time')
plt.show()
avg_line_length: 32.26 | max_line_length: 95 | alphanum_fraction: 0.66522
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 153 | score_documentation: 0.047427

hexsha: b10569084242d8420b097b98d57fbf57c409ad50 | size: 5,536 | ext: py | lang: Python
max_stars_repo: pydocteur/actions.py @ AFPy/PyDocTeur (70e6e025468ad232797c4da0b9a834613d2a2ec4), licenses ["MIT"], stars: 4, 2020-11-30T10:14:32.000Z to 2021-02-18T00:44:30.000Z
max_issues_repo: pydocteur/actions.py @ AFPy/PyDocTeur (70e6e025468ad232797c4da0b9a834613d2a2ec4), licenses ["MIT"], issues: 46, 2020-11-27T09:21:02.000Z to 2021-06-08T07:43:33.000Z
max_forks_repo: pydocteur/actions.py @ AFPy/PyDocTeur (70e6e025468ad232797c4da0b9a834613d2a2ec4), licenses ["MIT"], forks: 4, 2020-11-27T06:52:11.000Z to 2022-02-22T20:06:35.000Z
content:
import json
import logging
import os
import random
import time
from functools import lru_cache
from github import Github
from github import PullRequest
from pydocteur.github_api import get_commit_message_for_merge
from pydocteur.github_api import get_trad_team_members
from pydocteur.pr_status import is_already_greeted
from pydocteur.pr_status import is_first_time_contributor
from pydocteur.settings import GH_TOKEN
from pydocteur.settings import REPOSITORY_NAME
from pydocteur.settings import VERSION
logger = logging.getLogger("pydocteur")
COMMENT_BODIES_FILEPATH = os.path.join(os.path.dirname(__file__), "../comment_bodies.json")
END_OF_BODY = """
---
<details>
<summary>Disclaimer</summary>
Je suis un robot fait par l'équipe de [l'AFPy et de Traduction](https://github.com/AFPy/PyDocTeur/graphs/contributors)
sur leur temps libre. Je risque de dire des bétises. Ne me blâmez pas, blamez les développeurs.
[Code source](https://github.com/afpy/pydocteur)
I'm a bot made by the [Translation and AFPy teams](https://github.com/AFPy/PyDocTeur/graphs/contributors) on their free
time. I might say or do dumb things sometimes. Don't blame me, blame the developer !
[Source code](https://github.com/afpy/pydocteur)
(state: {state})
`PyDocTeur {version}`
</details>
"""
def replace_body_variables(pr: PullRequest, body: str):
logger.debug("Replacing variables")
author = pr.user.login
reviewers_login = {review.user.login for review in pr.get_reviews()}
new_body = body.replace("@$AUTHOR", "@" + author)
if not reviewers_login:
reviewers_login = get_trad_team_members()
reviewers_login.discard(author)
reviewers = ", @".join(reviewers_login)
new_body = new_body.replace("@$REVIEWERS", "@" + reviewers)
new_body = new_body.replace("@$MERGEABLE_STATE", pr.mergeable_state)
return new_body
@lru_cache()
def get_comment_bodies(state):
logger.debug(f"Getting comment bodies for {state}")
with open(COMMENT_BODIES_FILEPATH, "r") as handle:
bodies = json.load(handle).get(state)
return bodies
def comment_pr(pr: PullRequest, state: str):
bodies = get_comment_bodies(state)
if not bodies:
logger.warning(f"PR #{pr.number}: No comment for state {state}")
return
body = random.choice(bodies)
body = replace_body_variables(pr, body)
logger.info(f"PR #{pr.number}: Commenting.")
pr.create_issue_comment(body + END_OF_BODY.format(state=state, version=VERSION))
def merge_and_thank_contributors(pr: PullRequest, state: str):
gh = Github(GH_TOKEN if GH_TOKEN else None)
repo = gh.get_repo(REPOSITORY_NAME)
contributor_usernames = [u.login for u in repo.get_collaborators()]
reviewer_usernames = [i.user.login for i in pr.get_reviews()]
if not any(x in reviewer_usernames for x in contributor_usernames):
logger.info("PR not reviewed by a contributor, not merging.")
return
logger.info(f"Testing if PR #{pr.number} can be merged")
if not pr.mergeable or pr.mergeable_state != "clean":
logger.warning(f"PR #{pr.number} cannot be merged. mergeable_state={pr.mergeable_state}")
unmergeable_comments = get_comment_bodies("unmergeable")
body = random.choice(unmergeable_comments)
body = replace_body_variables(pr, body)
pr.create_issue_comment(body + END_OF_BODY.format(state=state, version=VERSION))
return
logger.info(f"PR #{pr.number}: About to merge")
warnings = get_comment_bodies("automerge_approved_testok")
thanks = get_comment_bodies("automerge_approved_testok-done")
logger.info(f"PR #{pr.number}: Sending warning before merge")
warning_body = random.choice(warnings)
warning_body = replace_body_variables(pr, warning_body)
pr.create_issue_comment(warning_body + END_OF_BODY.format(state=state, version=VERSION))
logger.debug(f"PR #{pr.number}: Sleeping one second")
time.sleep(1)
message = get_commit_message_for_merge(pr)
pr.merge(merge_method="squash", commit_message=message)
logger.info(f"PR #{pr.number}: Merged.")
logger.info(f"PR #{pr.number}: Sending thanks after merge")
thanks_body = random.choice(thanks)
thanks_body = replace_body_variables(pr, thanks_body)
pr.create_issue_comment(thanks_body + END_OF_BODY.format(state=state, version=VERSION))
def maybe_greet_user(pr: PullRequest):
if is_first_time_contributor(pr) and not is_already_greeted(pr):
bodies = get_comment_bodies("greetings")
body = random.choice(bodies)
body = replace_body_variables(pr, body)
logger.info(f"PR #{pr.number}: Greeting {pr.user.login}")
pr.create_issue_comment(body + END_OF_BODY.format(state="greetings", version=VERSION))
# TODO: Check if changing state for incorrect title may not create a bug where PyDocteur might repeat itself
def comment_about_title(pr: PullRequest):
bodies = get_comment_bodies("incorrect_title")
body = random.choice(bodies)
body = replace_body_variables(pr, body)
logger.info(f"PR #{pr.number}: Sending incorrect title message")
pr.create_issue_comment(body + END_OF_BODY.format(state="incorrect_title", version=VERSION))
def comment_about_rerun_workflow(pr: PullRequest):
bodies = get_comment_bodies("rerun_workflow")
body = random.choice(bodies)
body = replace_body_variables(pr, body)
logger.info(f"PR #{pr.number}: Sending rerun workflow message")
pr.create_issue_comment(body + END_OF_BODY.format(state="rerun_workflow", version=VERSION))
avg_line_length: 38.713287 | max_line_length: 119 | alphanum_fraction: 0.740426
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 218 | score_decorators: 0.03935 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 1,646 | score_documentation: 0.297112

hexsha: b1083a2affb9a63631077241caffe2b17bde2cca | size: 5,172 | ext: py | lang: Python
max_stars_repo: visualise.py @ ksang/fiery (b41e0138e388d9b846f174c09d60539b5b226f2d), licenses ["MIT"], stars: null
max_issues_repo: visualise.py @ ksang/fiery (b41e0138e388d9b846f174c09d60539b5b226f2d), licenses ["MIT"], issues: null
max_forks_repo: visualise.py @ ksang/fiery (b41e0138e388d9b846f174c09d60539b5b226f2d), licenses ["MIT"], forks: null
content:
import os
from argparse import ArgumentParser
from glob import glob
import cv2
import numpy as np
import torch
import torchvision
import matplotlib as mpl
import matplotlib.pyplot as plt
from PIL import Image
from fiery.trainer import TrainingModule
from fiery.utils.network import NormalizeInverse
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
from fiery.utils.visualisation import plot_instance_map, generate_instance_colours, make_contour, convert_figure_numpy
EXAMPLE_DATA_PATH = 'example_data'
def plot_prediction(image, output, cfg):
# Process predictions
consistent_instance_seg, matched_centers = predict_instance_segmentation_and_trajectories(
output, compute_matched_centers=True
)
# Plot future trajectories
unique_ids = torch.unique(consistent_instance_seg[0, 0]).cpu().long().numpy()[1:]
instance_map = dict(zip(unique_ids, unique_ids))
instance_colours = generate_instance_colours(instance_map)
vis_image = plot_instance_map(consistent_instance_seg[0, 0].cpu().numpy(), instance_map)
trajectory_img = np.zeros(vis_image.shape, dtype=np.uint8)
for instance_id in unique_ids:
path = matched_centers[instance_id]
for t in range(len(path) - 1):
color = instance_colours[instance_id].tolist()
cv2.line(trajectory_img, tuple(path[t]), tuple(path[t + 1]),
color, 4)
# Overlay arrows
temp_img = cv2.addWeighted(vis_image, 0.7, trajectory_img, 0.3, 1.0)
mask = ~ np.all(trajectory_img == 0, axis=2)
vis_image[mask] = temp_img[mask]
# Plot present RGB frames and predictions
val_w = 2.99
cameras = cfg.IMAGE.NAMES
image_ratio = cfg.IMAGE.FINAL_DIM[0] / cfg.IMAGE.FINAL_DIM[1]
val_h = val_w * image_ratio
fig = plt.figure(figsize=(4 * val_w, 2 * val_h))
width_ratios = (val_w, val_w, val_w, val_w)
gs = mpl.gridspec.GridSpec(2, 4, width_ratios=width_ratios)
gs.update(wspace=0.0, hspace=0.0, left=0.0, right=1.0, top=1.0, bottom=0.0)
denormalise_img = torchvision.transforms.Compose(
(NormalizeInverse(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
torchvision.transforms.ToPILImage(),)
)
for imgi, img in enumerate(image[0, -1]):
ax = plt.subplot(gs[imgi // 3, imgi % 3])
showimg = denormalise_img(img.cpu())
if imgi > 2:
showimg = showimg.transpose(Image.FLIP_LEFT_RIGHT)
plt.annotate(cameras[imgi].replace('_', ' ').replace('CAM ', ''), (0.01, 0.87), c='white',
xycoords='axes fraction', fontsize=14)
plt.imshow(showimg)
plt.axis('off')
ax = plt.subplot(gs[:, 3])
plt.imshow(make_contour(vis_image[::-1, ::-1]))
plt.axis('off')
plt.draw()
figure_numpy = convert_figure_numpy(fig)
plt.close()
return figure_numpy
def download_example_data():
from requests import get
def download(url, file_name):
# open in binary mode
with open(file_name, "wb") as file:
# get request
response = get(url)
# write to file
file.write(response.content)
if not os.path.exists(EXAMPLE_DATA_PATH):
os.makedirs(EXAMPLE_DATA_PATH, exist_ok=True)
url_list = ['https://github.com/wayveai/fiery/releases/download/v1.0/example_1.npz',
'https://github.com/wayveai/fiery/releases/download/v1.0/example_2.npz',
'https://github.com/wayveai/fiery/releases/download/v1.0/example_3.npz',
'https://github.com/wayveai/fiery/releases/download/v1.0/example_4.npz'
]
for url in url_list:
download(url, os.path.join(EXAMPLE_DATA_PATH, os.path.basename(url)))
def visualise(checkpoint_path):
trainer = TrainingModule.load_from_checkpoint(checkpoint_path, strict=True)
device = torch.device('cuda:0')
trainer = trainer.to(device)
trainer.eval()
# Download example data
download_example_data()
# Load data
for data_path in sorted(glob(os.path.join(EXAMPLE_DATA_PATH, '*.npz'))):
data = np.load(data_path)
image = torch.from_numpy(data['image']).to(device)
intrinsics = torch.from_numpy(data['intrinsics']).to(device)
extrinsics = torch.from_numpy(data['extrinsics']).to(device)
future_egomotions = torch.from_numpy(data['future_egomotion']).to(device)
# Forward pass
with torch.no_grad():
output = trainer.model(image, intrinsics, extrinsics, future_egomotions)
figure_numpy = plot_prediction(image, output, trainer.cfg)
os.makedirs('./output_vis', exist_ok=True)
output_filename = os.path.join('./output_vis', os.path.basename(data_path).split('.')[0]) + '.png'
Image.fromarray(figure_numpy).save(output_filename)
print(f'Saved output in {output_filename}')
if __name__ == '__main__':
parser = ArgumentParser(description='Fiery visualisation')
parser.add_argument('--checkpoint', default='./fiery.ckpt', type=str, help='path to checkpoint')
args = parser.parse_args()
visualise(args.checkpoint)
| 38.029412
| 118
| 0.672467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 765
| 0.147912
|
b10a7ba9df13f93730fafc42256936a0555a720d
| 7,034
|
py
|
Python
|
autoelective/util.py
|
apomeloYM/PKUAutoElective
|
21b4ab000919f68080e7a942ddff4ca070cf41e7
|
[
"MIT"
] | null | null | null |
autoelective/util.py
|
apomeloYM/PKUAutoElective
|
21b4ab000919f68080e7a942ddff4ca070cf41e7
|
[
"MIT"
] | null | null | null |
autoelective/util.py
|
apomeloYM/PKUAutoElective
|
21b4ab000919f68080e7a942ddff4ca070cf41e7
|
[
"MIT"
] | 2
|
2020-02-07T04:02:14.000Z
|
2020-02-16T23:34:16.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: util.py
import os
import csv
from copy import deepcopy
import hashlib
from functools import wraps
from itertools import repeat
from collections import OrderedDict
from ._compat import json, JSONDecodeError
from .exceptions import NoInstanceError, ImmutableTypeError, ReadonlyPropertyError
__Util_Funcs__ = ["mkdir","json_load","json_dump","read_csv","to_bytes","to_utf8","MD5","SHA1",]
__Util_Class__ = ["ImmutableAttrsMixin",]
__Util_Decorator__ = ["singleton","noinstance","ReadonlyProperty",]
__Util_MetaClass__ = ["Singleton","NoInstance",]
__all__ = __Util_Funcs__ + __Util_Class__ + __Util_Decorator__ + __Util_MetaClass__
def to_bytes(s):
if isinstance(s, (str,int,float)):
return str(s).encode("utf-8")
elif isinstance(s, bytes):
return s
else:
raise TypeError
def to_utf8(s):
if isinstance(s, bytes):
return s.decode("utf-8")
elif isinstance(s, (str,int,float)):
return str(s)
else:
raise TypeError
def MD5(data):
return hashlib.md5(to_bytes(data)).hexdigest()
def SHA1(data):
return hashlib.sha1(to_bytes(data)).hexdigest()
def mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
def json_load(file, *args, **kwargs):
if not file:
return None
elif not os.path.exists(file):
return None
else:
with open(file, "r", encoding="utf-8-sig") as fp:
try:
return json.load(fp, *args, **kwargs)
except JSONDecodeError:
return None
def json_dump(obj, file, *args, **kwargs):
with open(file, "w", encoding="utf-8") as fp:
json.dump(obj, fp, *args, **kwargs)
def read_csv(file, encoding="utf-8-sig"):
with open(file, "r", encoding=encoding, newline="") as fp:
reader = csv.DictReader(fp)
return [_ for _ in reader]
def noinstance(cls):
""" 被修饰类不能被继承! """
@wraps(cls)
def wrapper(*args, **kwargs):
raise NoInstanceError("class %s cannot be instantiated" % cls.__name__)
return wrapper
def singleton(cls):
""" 被修饰类不能被继承! """
_inst = {}
@wraps(cls)
def get_inst(*args, **kwargs):
if cls not in _inst:
            _inst[cls] = cls(*args, **kwargs)
return _inst[cls]
return get_inst
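# Illustrative sketch (not from the original module): the `singleton` decorator
# applied to a hypothetical Config class. Both calls return the same cached
# instance; wrapping the example in a function avoids import-time side effects.
def _singleton_usage_example():
    @singleton
    class Config(object):
        def __init__(self, path="config.ini"):
            self.path = path
    first = Config("a.ini")
    second = Config("b.ini")      # constructor arguments are ignored on the cached hit
    assert first is second
    return first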
class Singleton(type):
"""
Singleton Metaclass
@link https://github.com/jhao104/proxy_pool/blob/428359c8dada998481f038dbdc8d3923e5850c0e/Util/utilClass.py
"""
_inst = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._inst:
            cls._inst[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._inst[cls]
class NoInstance(type):
def __call__(cls, *args, **kwargs):
raise NoInstanceError("class %s cannot be instantiated" % cls.__name__)
def _is_readonly(obj, key):
raise ReadonlyPropertyError("'%s.%s' property is read-only" % (obj.__class__.__name__, key))
class ReadonlyProperty(property):
""" 只读属性 """
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        super().__init__(fget, None, None, doc)  # fset and fdel are discarded on purpose
def __set__(self, obj, value):
_is_readonly(obj, self.fget.__name__)
def __delete__(self, obj):
_is_readonly(obj, self.fget.__name__)
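# Illustrative sketch (not from the original module): ReadonlyProperty on a
# hypothetical class. Reading works like a normal property; assigning or
# deleting raises ReadonlyPropertyError via _is_readonly().
def _readonly_property_example():
    class Point(object):
        def __init__(self, x):
            self._x = x
        @ReadonlyProperty
        def x(self):
            return self._x
    p = Point(1)
    assert p.x == 1
    try:
        p.x = 2                   # rejected by ReadonlyProperty.__set__
    except ReadonlyPropertyError:
        pass
    return p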
def _is_immutable(self):
raise ImmutableTypeError('%r objects are immutable' % self.__class__.__name__)
class ImmutableDictMixin(object):
"""
@link https://github.com/pallets/werkzeug/blob/master/werkzeug/datastructures.py
Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
@classmethod
def fromkeys(cls, keys, value=None):
instance = super(cls, cls).__new__(cls)
instance.__init__(zip(keys, repeat(value)))
return instance
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def _iter_hashitems(self):
        return iter(self.items())
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key, default=None):
_is_immutable(self)
def update(self, *args, **kwargs):
_is_immutable(self)
def pop(self, key, default=None):
_is_immutable(self)
def popitem(self):
_is_immutable(self)
def __setitem__(self, key, value):
_is_immutable(self)
def __delitem__(self, key):
_is_immutable(self)
def clear(self):
_is_immutable(self)
class ImmutableDict(ImmutableDictMixin, dict):
"""
@link https://github.com/pallets/werkzeug/blob/master/werkzeug/datastructures.py
An immutable :class:`dict`.
.. versionadded:: 0.5
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
dict.__repr__(self),
)
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
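# Illustrative sketch (not from the original module): ImmutableDict.copy()
# returns a plain mutable dict, while copy.copy() is a no-op that hands back
# the same immutable object, and item assignment raises ImmutableTypeError.
def _immutable_dict_example():
    import copy as _copy
    d = ImmutableDict(a=1)
    assert _copy.copy(d) is d     # __copy__ returns self
    mutable = d.copy()            # shallow mutable copy
    mutable["b"] = 2              # allowed on the plain dict
    try:
        d["b"] = 2                # rejected by ImmutableDictMixin.__setitem__
    except ImmutableTypeError:
        pass
    return mutable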
class ImmutableAttrsMixin(object):
"""
very ugly !!!
    An implementation of an immutable object.
    Values may only be assigned at creation time and cannot be changed afterwards:
        - modification through __setattr__/__delattr__ is blocked
        - modification through __dict__.__setitem__/__delitem__ is blocked
    About copying:
        - copy.copy returns the object itself
        - copy.deepcopy returns a new object
"""
__cache_init = {}
def __init__(self, *args, **kwargs):
        ### Restore the original __init__ ###
#print("ImmutableAttrsMixin.__init__")
self.__class__.__init__ = __class__.__cache_init[self.__class__.__name__]
def __new__(cls, *args, **kwargs):
#print("ImmutableAttrsMixin.__new__ Start")
        ### Temporarily allow attribute modification ###
cls.__setattr__ = object.__setattr__
cls.__delattr__ = object.__delattr__
        ### Initialize the object ###
obj = object.__new__(cls)
cls.__init__(obj, *args, **kwargs)
        ### Forbid modifying the object through __dict__ ###
obj.__dict__ = ImmutableDict(obj.__dict__)
        ### Disable modification again and cancel the __init__ function ###
cls.__setattr__ = __class__.__setattr__
cls.__delattr__ = __class__.__delattr__
        ### Temporarily cancel __init__ to avoid errors from the automatic __init__ call ###
__class__.__cache_init[cls.__name__] = cls.__init__
cls.__init__ = __class__.__init__
#print("ImmutableAttrsMixin.__new__ End")
return obj
def __setattr__(self, key, value):
#print("ImmutableAttrsMixin.__setitem__")
_is_immutable(self)
def __delattr__(self, key):
#print("ImmutableAttrsMixin.__delitem__")
_is_immutable(self)
def __copy__(self):
return self
def __deepcopy__(self, memo=None):
"""
@link https://github.com/pallets/werkzeug/blob/master/werkzeug/datastructures.py --> MultiDict
"""
return self.__class__(**deepcopy(self.__dict__, memo=memo))
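# Illustrative sketch (not from the original module): a hypothetical subclass
# of ImmutableAttrsMixin. Attributes can be assigned inside __init__, but any
# later assignment or deletion raises ImmutableTypeError.
def _immutable_attrs_example():
    class Frozen(ImmutableAttrsMixin):
        def __init__(self, value):
            self.value = value
    obj = Frozen(1)
    assert obj.value == 1
    try:
        obj.value = 2             # rejected after construction
    except ImmutableTypeError:
        pass
    return obj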
| 27.802372
| 111
| 0.633068
| 4,709
| 0.643658
| 0
| 0
| 451
| 0.061646
| 0
| 0
| 2,163
| 0.295653
|
b10b445b6f929ecc345e5226229e53a873023020
| 1,827
|
py
|
Python
|
reference_data/uk_biobank_v3/1_extract_ukb_variables.py
|
thehyve/genetics-backend
|
81d09bf5c70c534a59940eddfcd9c8566d2b2ec1
|
[
"Apache-2.0"
] | 6
|
2019-06-01T11:17:41.000Z
|
2021-09-24T14:06:30.000Z
|
reference_data/uk_biobank_v3/1_extract_ukb_variables.py
|
opentargets/genetics-backend
|
1ab0314f9fe4b267f8ffb5ed94187d55fbb3431c
|
[
"Apache-2.0"
] | 7
|
2018-11-28T10:06:21.000Z
|
2020-01-26T18:55:39.000Z
|
reference_data/uk_biobank_v3/1_extract_ukb_variables.py
|
thehyve/genetics-backend
|
81d09bf5c70c534a59940eddfcd9c8566d2b2ec1
|
[
"Apache-2.0"
] | 4
|
2019-05-09T13:57:57.000Z
|
2021-08-03T18:19:16.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Ed Mountjoy
#
"""
bsub -q small -J interactive -n 1 -R "select[mem>8000] rusage[mem=8000] span[hosts=1]" -M8000 -Is bash
"""
import gzip
import sys
def main():
# Args
in_ukb = '/nfs/users/nfs_e/em21/otcoregen/uk_biobank_data/data/phenotypes/tsv/ukb27852.tsv.gz'
outf = 'temp_ukb27852.extraction.tsv'
sep = '\t'
field_prefixes = [
'f.eid',
'f.31.', # Reported sex
'f.22001.', # Genetic sex
'f.22006.', # White bristish
'f.22019.', # Sex chromosome aneuploidy. Karyotypes not XX or XY
'f.22027.', # Heterozygosity or missingness rate outliers
]
# Get list of column indices where header starts with `field_prefixes`
col_indices = []
with gzip.open(in_ukb, 'r') as in_h:
header = in_h.readline().decode().rstrip().split(sep)
for i, col in enumerate(header):
for field_prefix in field_prefixes:
if col.startswith(field_prefix):
col_indices.append(i)
break
# Compare field_prefixes to extracted column header and raise error if any
# were not found
header_extracted = [header[i] for i in col_indices]
for field_prefix in field_prefixes:
if not any([col.startswith(field_prefix) for col in header_extracted]):
sys.exit('ERROR: Field prefix "{}" was not found'.format(field_prefix))
# Extract `col_indices` and write to new file
with gzip.open(in_ukb, 'r') as in_h:
with open(outf, 'w') as out_h:
for line in in_h:
parts = line.decode().rstrip().split(sep)
out_row = [parts[i] for i in col_indices]
out_h.write(sep.join(out_row) + '\n')
return 0
if __name__ == '__main__':
main()
| 31.5
| 102
| 0.604269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 756
| 0.413793
|
b10d15ba52e0f2579184cc4a6747cccecf9ad61c
| 6,088
|
py
|
Python
|
main.py
|
Staubtornado/juliandc
|
47e41f9e10088f94af44dcfab00073b788121777
|
[
"MIT"
] | null | null | null |
main.py
|
Staubtornado/juliandc
|
47e41f9e10088f94af44dcfab00073b788121777
|
[
"MIT"
] | null | null | null |
main.py
|
Staubtornado/juliandc
|
47e41f9e10088f94af44dcfab00073b788121777
|
[
"MIT"
] | null | null | null |
import asyncio
import discord
from discord.ext import commands, tasks
import os
import random
import dotenv
import difflib
import configparser
###
version = '4.0.0'
###
bot = commands.Bot(command_prefix = '!', owner_id = 272446903940153345, intents = discord.Intents.all())
bot.remove_command('help')
config = configparser.ConfigParser()
config.read('settings.cfg')
dotenv.load_dotenv()
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
@bot.command(name='load')
@commands.is_owner()
async def load(ctx, extension):
try:
bot.load_extension(f"cogs.{extension}")
await ctx.message.add_reaction('✅')
except commands.ExtensionAlreadyLoaded:
await ctx.message.add_reaction('❌')
except commands.ExtensionNotFound:
await ctx.message.add_reaction('❓')
else:
await ctx.message.add_reaction('✅')
@bot.command(name='unload')
@commands.is_owner()
async def unload(ctx, extension):
try:
bot.unload_extension(f'cogs.{extension}')
await ctx.message.add_reaction('✅')
except commands.ExtensionNotLoaded:
await ctx.message.add_reaction('❌')
except commands.ExtensionNotFound:
await ctx.message.add_reaction('❓')
else:
await ctx.message.add_reaction('✅')
@bot.command(name='reload')
@commands.is_owner()
async def reload(ctx, extension):
try:
bot.unload_extension(f'cogs.{extension}')
bot.load_extension(f'cogs.{extension}')
await ctx.message.add_reaction('✅')
except commands.ExtensionNotLoaded:
await ctx.message.add_reaction('❌')
except commands.ExtensionNotFound:
await ctx.message.add_reaction('❓')
else:
await ctx.message.add_reaction('✅')
presence = [f'{version} Released', 'Belle Delphine <3', 'Fortnite is gay', 'Bugs are Features', 'By Staubtornado', 'Hentai']
@tasks.loop(seconds=20.0)
async def status_change():
await bot.wait_until_ready()
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=f'!help | {random.choice(presence)}'))
status_change.start()
CommandOnCooldown_check = []
CommandNotFound_check = []
Else_check = []
@bot.event
async def on_command_error(ctx, error):
try:
if isinstance(error, commands.CommandOnCooldown):
if ctx.author.id in CommandOnCooldown_check:
return
else:
try:
await ctx.send(embed = discord.Embed(title = 'Cooldown...', description = f'Der Command kann erst in {round(error.retry_after, 2)} Sekunden wieder ausgeführt werden.', colour = int(config.get('COLOUR', 'rot'), base = 16)) .set_footer(text = f'Verursacht durch {ctx.author} | Du kannst diese Nachricht erst nach dem Cooldown wiedersehen.'))
except discord.Forbidden:
return
else:
CommandOnCooldown_check.append(ctx.author.id)
await asyncio.sleep(error.retry_after)
CommandOnCooldown_check.remove(ctx.author.id)
return
elif isinstance(error, commands.CommandNotFound):
if ctx.author.id in CommandNotFound_check:
return
else:
available_commands = []
for command in bot.all_commands:
try:
                        if await bot.get_command(command).can_run(ctx) is True:
available_commands.append(command)
except Exception:
pass
suggestion = ""
similarity_search = difflib.get_close_matches(str(ctx.message.content)[4:], available_commands)
for s in similarity_search:
suggestion += f'**-** `!{s}`\n'
embed = discord.Embed(title = 'Command nicht gefunden...', colour = int(config.get('COLOUR', 'rot'), base = 16))
if suggestion != '':
                    embed.description = f'Wir konnten keine Commands mit dem Namen `{str(ctx.message.content)[1:]}` finden. Vielleicht meintest du:\n{suggestion}'
else:
embed.description = f'Wir konnten keine Commands mit dem Namen `{str(ctx.message.content)[1:]}` finden. Nutze `!help` für Hilfe.'
try:
await ctx.send(embed = embed)
except discord.Forbidden:
return
else:
CommandNotFound_check.append(ctx.author.id)
await asyncio.sleep(10)
CommandNotFound_check.remove(ctx.author.id)
return
# elif isinstance(error, commands.CheckFailure):
# return
else:
if ctx.author.id in Else_check:
return
else:
try:
await ctx.send(embed = discord.Embed(title = 'Unbekannter Fehler...', description = 'Ein unbekannter Fehler ist aufgetreten.', colour = int(config.get('COLOUR', 'rot'), base = 16)) .add_field(name = 'Details', value = str(error)))
except discord.Forbidden:
return
else:
Else_check.append(ctx.author.id)
await asyncio.sleep(10)
Else_check.remove(ctx.author.id)
return
except Exception as err:
        return await ctx.send(embed = discord.Embed(title = 'Schwerwiegender Fehler', description = f'Ein schwerwiegender Fehler ist in unserem Error-Handler aufgetreten. Bitte kontaktiere den Support und halte diesen Fehlercode bereit:\n`{error, err}`', colour = int(config.get('COLOUR', 'rot'), base = 16)))
@bot.event
async def on_ready():
print('BOT is online!')
bot.run(os.environ['DISCORD_TOKEN'])
| 41.135135
| 360
| 0.587714
| 0
| 0
| 0
| 0
| 5,291
| 0.865391
| 5,089
| 0.832352
| 1,250
| 0.204449
|
b10e5e4bf82f717f9759daccbbc32309f685a6ee
| 565
|
py
|
Python
|
lib/utils.py
|
MusaTamzid05/simple_similar_image_lib
|
3882cc3d6c3d8d61f67c71fcbe5a3cbad4e10697
|
[
"MIT"
] | null | null | null |
lib/utils.py
|
MusaTamzid05/simple_similar_image_lib
|
3882cc3d6c3d8d61f67c71fcbe5a3cbad4e10697
|
[
"MIT"
] | null | null | null |
lib/utils.py
|
MusaTamzid05/simple_similar_image_lib
|
3882cc3d6c3d8d61f67c71fcbe5a3cbad4e10697
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
def euclidean_dist(x, y):
return np.linalg.norm(x - y)
def limit_gpu():
gpus = tf.config.list_physical_devices('GPU')
if gpus:
try:
tf.config.set_logical_device_configuration(
gpus[0],
[tf.config.LogicalDeviceConfiguration(memory_limit=4000)])
logical_gpus = tf.config.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
print(e)
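# Illustrative sketch (not part of the original module): call limit_gpu() once
# before building any model, then use the distance helper on two feature
# vectors. Without a GPU, limit_gpu() simply does nothing.
def _utils_usage_example():
    limit_gpu()
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([1.0, 2.0, 5.0])
    return euclidean_dist(a, b)   # -> 2.0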
| 28.25
| 81
| 0.612389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.070796
|
b10e8fe5318c13af9359ac8f09fb570418b7c0b2
| 2,226
|
py
|
Python
|
dataloaders/voc.py
|
psui3905/CCT
|
637cbac130b39f02733339c79cdf1d531e339e9c
|
[
"MIT"
] | 308
|
2020-06-09T13:37:17.000Z
|
2022-03-24T07:43:33.000Z
|
dataloaders/voc.py
|
lesvay/CCT
|
cf98ea7e6aefa7091e6c375a9025ba1e0f6e53ca
|
[
"MIT"
] | 55
|
2020-06-16T11:57:54.000Z
|
2022-03-09T12:04:58.000Z
|
dataloaders/voc.py
|
lesvay/CCT
|
cf98ea7e6aefa7091e6c375a9025ba1e0f6e53ca
|
[
"MIT"
] | 51
|
2020-06-08T02:42:14.000Z
|
2022-02-25T16:38:36.000Z
|
from base import BaseDataSet, BaseDataLoader
from utils import pallete
import numpy as np
import os
import scipy
import torch
from PIL import Image
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
import json
class VOCDataset(BaseDataSet):
def __init__(self, **kwargs):
self.num_classes = 21
self.palette = pallete.get_voc_pallete(self.num_classes)
super(VOCDataset, self).__init__(**kwargs)
def _set_files(self):
self.root = os.path.join(self.root, 'VOCdevkit/VOC2012')
if self.split == "val":
file_list = os.path.join("dataloaders/voc_splits", f"{self.split}" + ".txt")
elif self.split in ["train_supervised", "train_unsupervised"]:
file_list = os.path.join("dataloaders/voc_splits", f"{self.n_labeled_examples}_{self.split}" + ".txt")
else:
raise ValueError(f"Invalid split name {self.split}")
file_list = [line.rstrip().split(' ') for line in tuple(open(file_list, "r"))]
self.files, self.labels = list(zip(*file_list))
def _load_data(self, index):
image_path = os.path.join(self.root, self.files[index][1:])
image = np.asarray(Image.open(image_path), dtype=np.float32)
image_id = self.files[index].split("/")[-1].split(".")[0]
if self.use_weak_lables:
label_path = os.path.join(self.weak_labels_output, image_id+".png")
else:
label_path = os.path.join(self.root, self.labels[index][1:])
label = np.asarray(Image.open(label_path), dtype=np.int32)
return image, label, image_id
class VOC(BaseDataLoader):
def __init__(self, kwargs):
self.MEAN = [0.485, 0.456, 0.406]
self.STD = [0.229, 0.224, 0.225]
self.batch_size = kwargs.pop('batch_size')
kwargs['mean'] = self.MEAN
kwargs['std'] = self.STD
kwargs['ignore_index'] = 255
try:
shuffle = kwargs.pop('shuffle')
        except KeyError:
shuffle = False
num_workers = kwargs.pop('num_workers')
self.dataset = VOCDataset(**kwargs)
super(VOC, self).__init__(self.dataset, self.batch_size, shuffle, num_workers, val_split=None)
| 36.491803
| 114
| 0.637017
| 1,979
| 0.889039
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.129829
|
b10ed1a87457d0709ae65d88b218cf1992004525
| 16,418
|
py
|
Python
|
FWCore/Integration/test/ThinningTest1_cfg.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
FWCore/Integration/test/ThinningTest1_cfg.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
FWCore/Integration/test/ThinningTest1_cfg.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
# This process is the first step of a test that involves multiple
# processing steps. It tests the thinning collections and
# redirecting Refs, Ptrs, and RefToBases.
#
# Produce 15 thinned collections
#
# Collection A contains Things 0-8
# Collection B contains Things 0-3 and made from collection A
# Collection C contains Things 4-7 and made from collection A
#
# x Collection D contains Things 10-18
# Collection E contains Things 10-14 and made from collection D
# Collection F contains Things 14-17 and made from collection D
#
# Collection G contains Things 20-28
# x Collection H contains Things 20-23 and made from collection G
# x Collection I contains Things 24-27 and made from collection G
#
# x Collection J contains Things 30-38
# x Collection K contains Things 30-33 and made from collection J
# x Collection L contains Things 34-37 and made from collection J
#
# x Collection M contains Things 40-48
# x Collection N contains Things 40-43 and made from collection M
# Collection O contains Things 44-47 and made from collection M
#
# The collections marked with an x will get deleted in the next
# processing step.
#
# The Things kept are set by creating TracksOfThings which
# reference them and using those in the selection of a
# Thinning Producer.
#
# The ThinningTestAnalyzer checks that things are working as
# they are supposed to work.
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.options = cms.untracked.PSet(
numberOfStreams = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(3)
)
process.source = cms.Source("EmptySource")
process.WhatsItESProducer = cms.ESProducer("WhatsItESProducer")
process.DoodadESSource = cms.ESSource("DoodadESSource")
process.thingProducer = cms.EDProducer("ThingProducer",
offsetDelta = cms.int32(100),
nThings = cms.int32(50)
)
process.thingProducer2 = cms.EDProducer("ThingProducer",
offsetDelta = cms.int32(100),
nThings = cms.int32(50)
)
process.thingProducer2alias = cms.EDAlias(
thingProducer2 = cms.VPSet(
cms.PSet(type = cms.string('edmtestThings'))
)
)
process.trackOfThingsProducerA = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(0, 1, 2, 3, 4, 5, 6, 7, 8)
)
process.trackOfThingsProducerB = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(0, 1, 2, 3)
)
process.trackOfThingsProducerC = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(4, 5, 6, 7)
)
process.trackOfThingsProducerD = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18)
)
process.trackOfThingsProducerDPlus = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18, 21)
)
process.trackOfThingsProducerE = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14)
)
process.trackOfThingsProducerF = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(14, 15, 16, 17)
)
process.trackOfThingsProducerG = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(20, 21, 22, 23, 24, 25, 26, 27, 28)
)
process.trackOfThingsProducerH = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(20, 21, 22, 23)
)
process.trackOfThingsProducerI = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(24, 25, 26, 27)
)
process.trackOfThingsProducerJ = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(30, 31, 32, 33, 34, 35, 36, 37, 38)
)
process.trackOfThingsProducerK = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(30, 31, 32, 33)
)
process.trackOfThingsProducerL = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(34, 35, 36, 37)
)
process.trackOfThingsProducerM = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(40, 41, 42, 43, 44, 45, 46, 47, 48)
)
process.trackOfThingsProducerN = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(40, 41, 42, 43)
)
process.trackOfThingsProducerO = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(44, 45, 46, 47)
)
process.trackOfThingsProducerD2 = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer2'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18)
)
process.trackOfThingsProducerE2 = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer2'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14)
)
process.trackOfThingsProducerF2 = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer2'),
keysToReference = cms.vuint32(14, 15, 16, 17)
)
process.thinningThingProducerA = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerA'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerB = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerB'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerC = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerC'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerD = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerD'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerE = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerD'),
trackTag = cms.InputTag('trackOfThingsProducerE'),
offsetToThinnedKey = cms.uint32(10),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerF = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerD'),
trackTag = cms.InputTag('trackOfThingsProducerF'),
offsetToThinnedKey = cms.uint32(10),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerG = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerG'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerH = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerG'),
trackTag = cms.InputTag('trackOfThingsProducerH'),
offsetToThinnedKey = cms.uint32(20),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerI = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerG'),
trackTag = cms.InputTag('trackOfThingsProducerI'),
offsetToThinnedKey = cms.uint32(20),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerJ = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerJ'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerK = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerJ'),
trackTag = cms.InputTag('trackOfThingsProducerK'),
offsetToThinnedKey = cms.uint32(30),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerL = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerJ'),
trackTag = cms.InputTag('trackOfThingsProducerL'),
offsetToThinnedKey = cms.uint32(30),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerM = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerM'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.aliasM = cms.EDAlias(
thinningThingProducerM = cms.VPSet(
cms.PSet(type = cms.string('edmtestThings')),
# the next one should get ignored
cms.PSet(type = cms.string('edmThinnedAssociation'))
)
)
process.thinningThingProducerN = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerM'),
trackTag = cms.InputTag('trackOfThingsProducerN'),
offsetToThinnedKey = cms.uint32(40),
expectedCollectionSize = cms.uint32(9)
)
process.aliasN = cms.EDAlias(
thinningThingProducerN = cms.VPSet(
cms.PSet(type = cms.string('edmtestThings')),
# the next one should get ignored
cms.PSet(type = cms.string('edmThinnedAssociation'))
)
)
process.thinningThingProducerO = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('aliasM'),
trackTag = cms.InputTag('trackOfThingsProducerO'),
offsetToThinnedKey = cms.uint32(40),
expectedCollectionSize = cms.uint32(9)
)
process.aliasO = cms.EDAlias(
thinningThingProducerO = cms.VPSet(
cms.PSet(type = cms.string('edmtestThings')),
# the next one should get ignored
cms.PSet(type = cms.string('edmThinnedAssociation'))
)
)
process.testA = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thingProducer'),
thinnedTag = cms.InputTag('thinningThingProducerA'),
associationTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerA'),
expectedParentContent = cms.vint32( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49
),
expectedThinnedContent = cms.vint32(0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedIndexesIntoParent = cms.vuint32(0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedValues = cms.vint32(0, 1, 2, 3, 4, 5, 6, 7, 8)
)
process.testB = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerA'),
thinnedTag = cms.InputTag('thinningThingProducerB'),
associationTag = cms.InputTag('thinningThingProducerB'),
trackTag = cms.InputTag('trackOfThingsProducerB'),
expectedParentContent = cms.vint32( 0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedThinnedContent = cms.vint32(0, 1, 2, 3),
expectedIndexesIntoParent = cms.vuint32(0, 1, 2, 3),
expectedValues = cms.vint32(0, 1, 2, 3)
)
process.testC = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerA'),
thinnedTag = cms.InputTag('thinningThingProducerC'),
associationTag = cms.InputTag('thinningThingProducerC'),
trackTag = cms.InputTag('trackOfThingsProducerC'),
expectedParentContent = cms.vint32( 0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedThinnedContent = cms.vint32(4, 5, 6, 7),
expectedIndexesIntoParent = cms.vuint32(4, 5, 6, 7),
expectedValues = cms.vint32(4, 5, 6, 7)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testThinningTest1.root'),
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_thingProducer2_*_*',
'drop *_thinningThingProducerM_*_*',
'drop *_thinningThingProducerN_*_*',
'drop *_thinningThingProducerO_*_*'
)
)
process.out2 = cms.OutputModule("EventStreamFileWriter",
fileName = cms.untracked.string('testThinningStreamerout.dat'),
compression_level = cms.untracked.int32(1),
use_compression = cms.untracked.bool(True),
max_event_size = cms.untracked.int32(7000000),
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_thingProducer_*_*',
'drop *_thingProducer2_*_*',
'drop *_thinningThingProducerD_*_*',
'drop *_thinningThingProducerH_*_*',
'drop *_thinningThingProducerI_*_*',
'drop *_thinningThingProducerJ_*_*',
'drop *_thinningThingProducerK_*_*',
'drop *_thinningThingProducerL_*_*',
'drop *_thinningThingProducerM_*_*',
'drop *_thinningThingProducerN_*_*',
'drop *_thinningThingProducerO_*_*',
'drop *_aliasM_*_*',
'drop *_aliasN_*_*'
)
)
process.p = cms.Path(process.thingProducer * process.thingProducer2
* process.trackOfThingsProducerA
* process.trackOfThingsProducerB
* process.trackOfThingsProducerC
* process.trackOfThingsProducerD
* process.trackOfThingsProducerDPlus
* process.trackOfThingsProducerE
* process.trackOfThingsProducerF
* process.trackOfThingsProducerG
* process.trackOfThingsProducerH
* process.trackOfThingsProducerI
* process.trackOfThingsProducerJ
* process.trackOfThingsProducerK
* process.trackOfThingsProducerL
* process.trackOfThingsProducerM
* process.trackOfThingsProducerN
* process.trackOfThingsProducerO
* process.trackOfThingsProducerD2
* process.trackOfThingsProducerE2
* process.trackOfThingsProducerF2
* process.thinningThingProducerA
* process.thinningThingProducerB
* process.thinningThingProducerC
* process.thinningThingProducerD
* process.thinningThingProducerE
* process.thinningThingProducerF
* process.thinningThingProducerG
* process.thinningThingProducerH
* process.thinningThingProducerI
* process.thinningThingProducerJ
* process.thinningThingProducerK
* process.thinningThingProducerL
* process.thinningThingProducerM
* process.thinningThingProducerN
* process.thinningThingProducerO
* process.testA
* process.testB
* process.testC
)
process.endPath = cms.EndPath(process.out * process.out2)
| 40.339066
| 79
| 0.652394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,373
| 0.266354
|
b10ef155b141d1ff49de7abd5e3a562536e9e728
| 771
|
py
|
Python
|
tests/Bio/test_tandem.py
|
iwasakishuto/Keras-Imitation
|
8ac0cd7c8912d49d13b19a0182ad534c0781fbfe
|
[
"MIT"
] | 4
|
2020-04-25T08:50:36.000Z
|
2020-04-26T04:49:16.000Z
|
tests/Bio/test_tandem.py
|
iwasakishuto/Keras-Imitation
|
8ac0cd7c8912d49d13b19a0182ad534c0781fbfe
|
[
"MIT"
] | null | null | null |
tests/Bio/test_tandem.py
|
iwasakishuto/Keras-Imitation
|
8ac0cd7c8912d49d13b19a0182ad534c0781fbfe
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from kerasy.Bio.tandem import find_tandem
from kerasy.utils import generateSeq
len_sequences = 1000
def get_test_data():
sequence = generateSeq(size=len_sequences,
nucleic_acid='DNA',
weights=None,
seed=123)
sequence = "".join(sequence)
return sequence
def test_find_tandem():
sequence = get_test_data()
max_val_sais, tandem_lists_sais = find_tandem(sequence, method="SAIS")
tandem_sais = tandem_lists_sais[0]
max_val_dp, tandem_lists_dp = find_tandem(sequence, method="DP")
tandem_dp = tandem_lists_dp[0]
assert max_val_sais == max_val_dp
assert any([tandem_dp[i:]+tandem_dp[:i] == tandem_sais for i in range(len(tandem_dp))])
| 29.653846
| 91
| 0.660182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.041505
|
b10f2700bf5dd4688d783eebd9aacb68abc85ac5
| 679
|
py
|
Python
|
NEW_PRAC/HackerRank/Python/SetDifferenceString.py
|
side-projects-42/INTERVIEW-PREP-COMPLETE
|
627a3315cee4bbc38a0e81c256f27f928eac2d63
|
[
"MIT"
] | 13
|
2021-03-11T00:25:22.000Z
|
2022-03-19T00:19:23.000Z
|
NEW_PRAC/HackerRank/Python/SetDifferenceString.py
|
side-projects-42/INTERVIEW-PREP-COMPLETE
|
627a3315cee4bbc38a0e81c256f27f928eac2d63
|
[
"MIT"
] | 160
|
2021-04-26T19:04:15.000Z
|
2022-03-26T20:18:37.000Z
|
NEW_PRAC/HackerRank/Python/SetDifferenceString.py
|
side-projects-42/INTERVIEW-PREP-COMPLETE
|
627a3315cee4bbc38a0e81c256f27f928eac2d63
|
[
"MIT"
] | 12
|
2021-04-26T19:43:01.000Z
|
2022-01-31T08:36:29.000Z
|
# >>> s = set("Hacker")
# >>> print s.difference("Rank")
# set(['c', 'r', 'e', 'H'])
# >>> print s.difference(set(['R', 'a', 'n', 'k']))
# set(['c', 'r', 'e', 'H'])
# >>> print s.difference(['R', 'a', 'n', 'k'])
# set(['c', 'r', 'e', 'H'])
# >>> print s.difference(enumerate(['R', 'a', 'n', 'k']))
# set(['a', 'c', 'r', 'e', 'H', 'k'])
# >>> print s.difference({"Rank":1})
# set(['a', 'c', 'e', 'H', 'k', 'r'])
# >>> s - set("Rank")
# set(['H', 'c', 'r', 'e'])
if __name__ == "__main__":
eng = input()
eng_stu = set(map(int, input().split()))
fre = input()
fre_stu = set(map(int, input().split()))
eng_only = eng_stu - fre_stu
print(len(eng_only))
| 24.25
| 57
| 0.443299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 458
| 0.674521
|
b10ff91b57739eb21f6eb6d10c2777a5221bc00d
| 4,898
|
py
|
Python
|
src/dotacrunch/drawer.py
|
tpinetz/dotacrunch
|
9f53404ac3556e14bdc3e159f36d34e39c747898
|
[
"MIT"
] | 1
|
2019-09-20T04:03:13.000Z
|
2019-09-20T04:03:13.000Z
|
src/dotacrunch/drawer.py
|
tpinetz/dotacrunch
|
9f53404ac3556e14bdc3e159f36d34e39c747898
|
[
"MIT"
] | null | null | null |
src/dotacrunch/drawer.py
|
tpinetz/dotacrunch
|
9f53404ac3556e14bdc3e159f36d34e39c747898
|
[
"MIT"
] | null | null | null |
from PIL import Image, ImageDraw
from numpy import array, random, vstack, ones, linalg
from const import TOWERS
from copy import deepcopy
from os import path
class MapDrawer:
"""
a class for drawing Dota2Maps with replay-parsed data
"""
def __init__(self, towers, received_tables):
"""
@param towers: table containing info about towers
@param received_tables: the received_tables from the replay
"""
self.coordinates = []
libdir = path.abspath(path.dirname(__file__))
self.image = Image.open(libdir + "/assets/dota2map.png")
self.draw = ImageDraw.Draw(self.image)
self.map_w, self.map_h = self.image.size
# init information tables and respective columns
tower_info_table = received_tables.by_dt['DT_DOTA_BaseNPC_Tower']
position_x_index = tower_info_table.by_name['m_cellX']
position_y_index = tower_info_table.by_name['m_cellY']
position_vector_index = tower_info_table.by_name['m_vecOrigin']
name_index = tower_info_table.by_name['m_iName']
tower_list = deepcopy(TOWERS)
# getting world coordinates for every tower in TOWERS
        for name, data in tower_list.items():
for t in towers:
state = t.state
state_name = state.get(name_index)
if state_name == name:
data["worldX"] = state.get(position_x_index) + state.get(position_vector_index)[0] / 128.
if "worldY" not in data:
data["worldY"] = state.get(position_y_index) + state.get(position_vector_index)[1] / 128.
# caching vals, so ordering stays the same throughout the comprehensions
vals = tower_list.values()
x = [v["worldX"] for v in vals if "worldX" in v]
y = [v["worldY"] for v in vals if "worldY" in v]
x_map = [v["x"] for v in vals if "worldX" in v]
y_map = [v["y"] for v in vals if "worldY" in v]
# calculating scale and offset to convert worldcoordinates to coordinates on the map
Ax = vstack((x, ones(len(x)))).T
Ay = vstack((y, ones(len(y)))).T
self.scale_x, self.offset_x = linalg.lstsq(Ax, x_map)[0]
self.scale_y, self.offset_y = linalg.lstsq(Ay, y_map)[0]
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# x_tomap = [(a * self.scale_x) + self.offset_x for a in x]
# y_tomap = [(a * self.scale_y) + self.offset_y for a in y]
# plotting conversion output for debugging purposes
# fig, axarr = plt.subplots(nrows = 2, ncols = 2)
# axarr[0][0].scatter(x_tomap, y_tomap)
# axarr[0][1].scatter(x_map, y_map)
# axarr[1][0].scatter(x, y)
# plt.savefig("output/towers.png", figsize=(12, 4), dpi=150)
def draw_circle_world_coordinates(self, worldX, worldY, r=20, color="red"):
"""
draws a circle at the specified world coordinates (from bottom-left) with radius r (in px) and the color as required by PIL
"""
x, y = self.convert_coordinates(worldX, worldY)
bounds = (x-r, self.map_h-(y+r), x+r, self.map_h-(y-r))
bounds = tuple(int(round(a)) for a in bounds)
self.draw.ellipse(bounds, fill = color)
def draw_circle_world_coordinates_list(self, coordinates, r=20, color="red"):
"""
same as draw_circle_world_coordinates, but for batch drawing
"""
for x, y in coordinates:
self.draw_circle_world_coordinates(x, y, r, color)
def draw_circle_map_coordinates(self, x, y, r=20, color="red"):
"""
draws a circle on the specified pixels (from bottom-left) with radius r (in px) and the color as required by PIL
"""
bounds = (x-r, self.map_h-(y+r), x+r, self.map_h-(y-r))
bounds = tuple(int(round(a)) for a in bounds)
self.draw.ellipse(bounds, fill = color)
def draw_circle_map_coordinates_list(self, coordinates, r=20, color="red"):
"""
same as draw_circle_map_coordinates, but for batch drawing
"""
for x, y in coordinates:
self.draw_circle_map_coordinates(x, y, r, color)
def save(self, filename, scale=4):
"""
saves the map
"""
        scaled = self.image.resize((self.map_w // scale, self.map_h // scale), Image.ANTIALIAS)
scaled.save(filename)
def convert_coordinates(self, worldX, worldY):
"""
converts world coordinates to map coordinates by using the scale and offset defined in the __init__ method
"""
return (worldX * self.scale_x) + self.offset_x, (worldY * self.scale_y) + self.offset_y
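# Illustrative usage sketch (not part of the original module). `towers` and
# `received_tables` are assumed to come from an already-parsed Dota 2 replay;
# the coordinates below are made-up placeholder values.
def _map_drawer_usage_example(towers, received_tables, out_path="output/annotated_map.png"):
    drawer = MapDrawer(towers, received_tables)
    # a single point given in world coordinates
    drawer.draw_circle_world_coordinates(-6700, -6700, r=25, color="green")
    # a couple of points given directly in map pixel coordinates
    drawer.draw_circle_map_coordinates_list([(512, 512), (600, 640)], r=10, color="blue")
    drawer.save(out_path, scale=4)
    return drawer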
| 42.964912
| 136
| 0.600857
| 4,733
| 0.966313
| 0
| 0
| 0
| 0
| 0
| 0
| 1,791
| 0.365659
|
b112b2802063ecfa7ce3db6c16ab4326c7eda2fb
| 1,746
|
py
|
Python
|
nsm.py
|
svepe/neural-stack
|
c48e6b94f00e77cedd9d692bdc2a6715bb007db5
|
[
"MIT"
] | null | null | null |
nsm.py
|
svepe/neural-stack
|
c48e6b94f00e77cedd9d692bdc2a6715bb007db5
|
[
"MIT"
] | 1
|
2017-07-26T07:18:42.000Z
|
2017-07-26T07:18:42.000Z
|
nsm.py
|
svepe/neural-stack
|
c48e6b94f00e77cedd9d692bdc2a6715bb007db5
|
[
"MIT"
] | null | null | null |
import numpy as np
import chainer.functions as F
from chainer import Variable
def neural_stack(V, s, d, u, v):
# strengths
s_new = d
    for t in reversed(range(s.shape[1])):
x = s[:, t].reshape(-1, 1) - u
s_new = F.concat((s_new, F.maximum(Variable(np.zeros_like(x.data)), x)))
u = F.maximum(Variable(np.zeros_like(x.data)), -x)
s = F.fliplr(s_new)
# memory
V = F.concat((V, F.expand_dims(v, 1)))
# result
r = Variable(np.zeros_like(v.data))
ur = Variable(np.ones_like(u.data))
    for t in reversed(range(s_new.shape[1])):
w = F.minimum(s[:, t].reshape(-1, 1), ur)
r += V[:, t] * F.broadcast_to(w, V[:, t].shape)
x = ur - s[:, t].reshape(-1, 1)
ur = F.maximum(Variable(np.zeros_like(x.data)), x)
return V, s, r
batch_size = 3
stack_element_size = 2
V = Variable(np.zeros((batch_size, 1, stack_element_size)))
s = Variable(np.zeros((batch_size, 1)))
d = Variable(np.ones((batch_size, 1)) * 0.4)
u = Variable(np.ones((batch_size, 1)) * 0.)
v = Variable(np.ones((batch_size, stack_element_size)))
V, s, r = neural_stack(V, s, d, u, v)
d = Variable(np.ones((batch_size, 1)) * 0.8)
u = Variable(np.ones((batch_size, 1)) * 0.)
v = Variable(np.ones((batch_size, stack_element_size)) * 2.)
V, s, r = neural_stack(V, s, d, u, v)
d = Variable(np.ones((batch_size, 1)) * 0.9)
u = Variable(np.ones((batch_size, 1)) * 0.9)
v = Variable(np.ones((batch_size, stack_element_size)) * 3.)
V, s, r = neural_stack(V, s, d, u, v)
d = Variable(np.ones((batch_size, 1)) * 0.1)
u = Variable(np.ones((batch_size, 1)) * 0.1)
v = Variable(np.ones((batch_size, stack_element_size)) * 3.)
V, s, r = neural_stack(V, s, d, u, v)
print(V.data)
print(s.data)
print(r.data)
| 29.59322
| 80
| 0.613402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.015464
|
b11347dca32d00ada08a415a09ab2e6c4431c76c
| 2,354
|
py
|
Python
|
chaos_genius/celery_config.py
|
eltociear/chaos_genius
|
eb3bc27181c8af4144b95e685386814109173164
|
[
"MIT"
] | 1
|
2022-02-25T16:11:34.000Z
|
2022-02-25T16:11:34.000Z
|
chaos_genius/celery_config.py
|
eltociear/chaos_genius
|
eb3bc27181c8af4144b95e685386814109173164
|
[
"MIT"
] | null | null | null |
chaos_genius/celery_config.py
|
eltociear/chaos_genius
|
eb3bc27181c8af4144b95e685386814109173164
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
from celery.schedules import crontab, schedule
CELERY_IMPORTS = ("chaos_genius.jobs")
CELERY_TASK_RESULT_EXPIRES = 30
CELERY_TIMEZONE = "UTC"
CELERY_ACCEPT_CONTENT = ["json", "msgpack", "yaml"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERYBEAT_SCHEDULE = {
"anomaly-scheduler": {
"task": "chaos_genius.jobs.anomaly_tasks.anomaly_scheduler",
"schedule": schedule(timedelta(minutes=10)),
"args": ()
},
'alerts-daily': {
'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
'schedule': crontab(hour="3", minute="0"), # Daily: at 3am
'args': ('daily',)
},
"alert-digest-daily-scheduler": {
"task": "chaos_genius.jobs.alert_tasks.alert_digest_daily_scheduler",
"schedule": schedule(timedelta(minutes=10)),
"args": ()
},
# 'anomaly-task-every-minute': {
# 'task': 'chaos_genius.jobs.anomaly_tasks.add_together',
# 'schedule': crontab(minute="*"), # Every minutes
# 'args': (5,10,)
# },
# "anomaly-tasks-all-kpis": {
# "task": "chaos_genius.jobs.anomaly_tasks.anomaly_kpi",
# # "schedule": crontab(hour=[11]),
# "schedule": schedule(timedelta(minutes=1)), # for testing
# "args": ()
# },
# 'alerts-weekly': {
# 'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
# 'schedule': crontab(day_of_week="0"), # Weekly: every sunday
# 'args': ('weekly',)
# },
# 'alerts-hourly': {
# 'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
# 'schedule': crontab(hour="*"), # Hourly: at 0th minute
# 'args': ('hourly',)
# },
# 'alerts-every-15-minute': {
# 'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
# 'schedule': crontab(minute="*/15"), # Every 15 minutes
# 'args': ('every_15_minute',)
# }
}
CELERY_ROUTES = {
"chaos_genius.jobs.anomaly_tasks.*": {"queue": "anomaly-rca"},
"chaos_genius.jobs.alert_tasks.*": {"queue": "alerts"},
}
# Scheduler runs every hour
# looks at tasks in last n hour
# if they are in processing in 24 hours, schedule them right away
# job expiry window
# add details of job into a table, then schedule it
# TODO: Use this for config
class Config:
enable_utc = True
| 32.694444
| 77
| 0.619371
| 35
| 0.014868
| 0
| 0
| 0
| 0
| 0
| 0
| 1,627
| 0.691164
|
b11425f92000045819fecda2309fb8977bc45199
| 2,102
|
py
|
Python
|
speaksee/data/utils.py
|
aimagelab/speaksee
|
63700a4062e2ae00132a5c77007604fdaf4bd00b
|
[
"BSD-3-Clause"
] | 29
|
2019-02-28T05:29:53.000Z
|
2021-01-25T06:55:48.000Z
|
speaksee/data/utils.py
|
aimagelab/speaksee
|
63700a4062e2ae00132a5c77007604fdaf4bd00b
|
[
"BSD-3-Clause"
] | 2
|
2019-10-26T02:29:59.000Z
|
2021-01-15T13:58:53.000Z
|
speaksee/data/utils.py
|
aimagelab/speaksee
|
63700a4062e2ae00132a5c77007604fdaf4bd00b
|
[
"BSD-3-Clause"
] | 11
|
2019-03-12T08:43:09.000Z
|
2021-03-15T03:20:43.000Z
|
def get_tokenizer(tokenizer):
if callable(tokenizer):
return tokenizer
if tokenizer == "spacy":
try:
import spacy
spacy_en = spacy.load('en')
return lambda s: [tok.text for tok in spacy_en.tokenizer(s)]
except ImportError:
print("Please install SpaCy and the SpaCy English tokenizer. "
"See the docs at https://spacy.io for more information.")
raise
except AttributeError:
print("Please install SpaCy and the SpaCy English tokenizer. "
"See the docs at https://spacy.io for more information.")
raise
elif tokenizer == "moses":
try:
from nltk.tokenize.moses import MosesTokenizer
moses_tokenizer = MosesTokenizer()
return moses_tokenizer.tokenize
except ImportError:
print("Please install NLTK. "
"See the docs at http://nltk.org for more information.")
raise
except LookupError:
print("Please install the necessary NLTK corpora. "
"See the docs at http://nltk.org for more information.")
raise
elif tokenizer == 'revtok':
try:
import revtok
return revtok.tokenize
except ImportError:
print("Please install revtok.")
raise
elif tokenizer == 'subword':
try:
import revtok
return lambda x: revtok.tokenize(x, decap=True)
except ImportError:
print("Please install revtok.")
raise
raise ValueError("Requested tokenizer {}, valid choices are a "
"callable that takes a single string as input, "
"\"revtok\" for the revtok reversible tokenizer, "
"\"subword\" for the revtok caps-aware tokenizer, "
"\"spacy\" for the SpaCy English tokenizer, or "
"\"moses\" for the NLTK port of the Moses tokenization "
"script.".format(tokenizer))
| 40.423077
| 77
| 0.555661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 793
| 0.37726
|
b114d5a538b75c9a4b75747db2d55272076b7fcc
| 232
|
py
|
Python
|
oldcontrib/media/image/servee_registry.py
|
servee/django-servee-oldcontrib
|
836447ebbd53db0b53879a35468c02e57f65105f
|
[
"BSD-Source-Code"
] | null | null | null |
oldcontrib/media/image/servee_registry.py
|
servee/django-servee-oldcontrib
|
836447ebbd53db0b53879a35468c02e57f65105f
|
[
"BSD-Source-Code"
] | null | null | null |
oldcontrib/media/image/servee_registry.py
|
servee/django-servee-oldcontrib
|
836447ebbd53db0b53879a35468c02e57f65105f
|
[
"BSD-Source-Code"
] | null | null | null |
from servee import frontendadmin
from servee.frontendadmin.insert import ModelInsert
from oldcontrib.media.image.models import Image
class ImageInsert(ModelInsert):
model = Image
frontendadmin.site.register_insert(ImageInsert)
| 29
| 51
| 0.844828
| 49
| 0.211207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b114f3af35aa6791557a994b86492206a441c7e5
| 974
|
py
|
Python
|
run.py
|
Lohitapallanti/Predicting-Titanic-Survive
|
681e513ec0abfb66797c827139d4e6d99c6b22bf
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
Lohitapallanti/Predicting-Titanic-Survive
|
681e513ec0abfb66797c827139d4e6d99c6b22bf
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
Lohitapallanti/Predicting-Titanic-Survive
|
681e513ec0abfb66797c827139d4e6d99c6b22bf
|
[
"Apache-2.0"
] | null | null | null |
from train import train
from processing import Processing
""" The Main run file, where the program execution and controller is based. """
class Run(train, Processing):
def final_function(self):
        print('This project predicts which people would survive if the RMS Titanic ')
        print('incident happened again. It uses train.py to learn automatically how to handle situations, \n based on the input. ')
print('\n\nEnter the choice number \n')
print('1. Train the software using different datasets(remember to keep it inside the "datasets" folder)\n')
print('2. Predict the survival of people, based on the experience of the software on train.py experiences')
ch = int(input('\n\n CHOICE : '))
if ch == 1:
self.asking_values()
self.assigningValues()
elif ch == 2:
print('reached here')
self.input_file_storage()
object = Run()
object.final_function()
| 37.461538
| 131
| 0.661191
| 790
| 0.811088
| 0
| 0
| 0
| 0
| 0
| 0
| 534
| 0.548255
|
b1155590dddadba4928d8c63159a637854f7865e
| 2,646
|
py
|
Python
|
scripts/pretty-printers/gdb/install.py
|
tobireinhard/cbmc
|
fc165c119985adf8db9a13493f272a2def4e79fa
|
[
"BSD-4-Clause"
] | 412
|
2016-04-02T01:14:27.000Z
|
2022-03-27T09:24:09.000Z
|
scripts/pretty-printers/gdb/install.py
|
tobireinhard/cbmc
|
fc165c119985adf8db9a13493f272a2def4e79fa
|
[
"BSD-4-Clause"
] | 4,671
|
2016-02-25T13:52:16.000Z
|
2022-03-31T22:14:46.000Z
|
scripts/pretty-printers/gdb/install.py
|
tobireinhard/cbmc
|
fc165c119985adf8db9a13493f272a2def4e79fa
|
[
"BSD-4-Clause"
] | 266
|
2016-02-23T12:48:00.000Z
|
2022-03-22T18:15:51.000Z
|
#!/usr/bin/env python3
import os
from shutil import copyfile
def create_gdbinit_file():
"""
Create and insert into a .gdbinit file the python code to set-up cbmc pretty-printers.
"""
print("Attempting to enable cbmc-specific pretty-printers.")
home_folder = os.path.expanduser("~")
if not home_folder:
print(home_folder + " is an invalid home folder, can't auto-configure .gdbinit.")
return
# This is the code that should be copied if you're applying the changes by hand.
gdb_directory = os.path.dirname(os.path.abspath(__file__))
code_block_start = "cbmc_printers_folder = "
code_block = \
[
"{0}'{1}'".format(code_block_start, gdb_directory),
"if os.path.exists(cbmc_printers_folder):",
" sys.path.insert(1, cbmc_printers_folder)",
" from pretty_printers import load_cbmc_printers",
" load_cbmc_printers()",
]
gdbinit_file = os.path.join(home_folder, ".gdbinit")
lines = []
imports = { "os", "sys" }
if os.path.exists(gdbinit_file):
with open(gdbinit_file, 'r') as file:
lines = [ line.rstrip() for line in file ]
line_no = 0
while line_no < len(lines):
if lines[line_no].startswith('import '):
imports.add(lines[line_no][len("import "):].strip())
lines.pop(line_no)
else:
if lines[line_no].startswith(code_block_start):
print(".gdbinit already contains our pretty printers, not changing it")
return
line_no += 1
while len(lines) != 0 and (lines[0] == "" or lines[0] == "python"):
lines.pop(0)
backup_file = os.path.join(home_folder, "backup.gdbinit")
if os.path.exists(backup_file):
print("backup.gdbinit file already exists. Type 'y' if you would like to overwrite it or any other key to exit.")
choice = input().lower()
if choice != 'y':
return
print("Backing up {0}".format(gdbinit_file))
copyfile(gdbinit_file, backup_file)
lines = [ "python" ] + list(map("import {}".format, sorted(imports))) + [ "", "" ] + code_block + [ "", "" ] + lines + [ "" ]
print("Adding pretty-print commands to {0}.".format(gdbinit_file))
try:
with open(gdbinit_file, 'w+') as file:
file.write('\n'.join(lines))
print("Commands added.")
except:
print("Exception occured writing to file. Please apply changes manually.")
if __name__ == "__main__":
create_gdbinit_file()
| 37.267606
| 129
| 0.588435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 945
| 0.357143
|
b1156db34bf35cdfc3d30e9b0d6bdddd6d15330a
| 5,613
|
py
|
Python
|
pyutils/solve.py
|
eltrompetero/maxent_fim
|
b5e8942a20aad67e4055c506248605df50ab082d
|
[
"MIT"
] | null | null | null |
pyutils/solve.py
|
eltrompetero/maxent_fim
|
b5e8942a20aad67e4055c506248605df50ab082d
|
[
"MIT"
] | null | null | null |
pyutils/solve.py
|
eltrompetero/maxent_fim
|
b5e8942a20aad67e4055c506248605df50ab082d
|
[
"MIT"
] | null | null | null |
# ====================================================================================== #
# Module for solving maxent problem on C elegans data set.
#
# Author : Eddie Lee, edlee@santafe.edu
# ====================================================================================== #
from .utils import *
from scipy.optimize import minimize
from scipy.special import logsumexp
from scipy.stats import multinomial
from scipy.interpolate import interp1d
def _indpt(X):
"""Solve independent spin model.
Parameters
----------
X : ndarray
Dimension (n_samples, n_neurons).
Returns
-------
ndarray
Solved field returned as fields for 0 state for all spins, then 1 state for all
spins, 2 state for all spins to form a total length of 3 * N.
"""
p = np.vstack([(X==i).mean(0) for i in range(3)])
p = p.T
h = np.zeros((p.shape[0], 3))
# solve each spin
def p_ind(h):
return np.exp(h - logsumexp(h))
for i in range(p.shape[0]):
pi = p[i]
def cost(h):
return np.linalg.norm(p_ind(h) - pi)
h[i] = minimize(cost, [0,0,0])['x']
# set the third field to zero (this is our normalized representation)
h -= h[:,2][:,None]
return h.T.ravel()
class Independent3():
def __init__(self, X=None, alpha=2.):
"""
Parameters
----------
X : ndarray
Data.
alpha : float, 2.
Cost exponent.
"""
if not X is None:
self.set_data(X)
self.alpha = alpha
def set_data(self, X):
"""
Parameters
----------
X : ndarray
Dimension (n_samples, n_neurons).
"""
self.X = X
self.p = np.vstack([(X==i).mean(0) for i in range(3)])
self.p = self.p.T
def solve(self, s=1, full_output=False):
"""Solve independent spin model with Gaussian prior.
Parameters
----------
s : float, 1
Std of Gaussian prior on fields.
full_output : bool, False
Returns
-------
ndarray
Solved field returned as fields for 0 state for all spins, then 1 state for
all spins, 2 state for all spins to form a total length of 3 * N.
list of dict (optional)
"""
# breakdown in number of occurrences per state
n = np.vstack([np.bincount(self.X[:,i], minlength=3) for i in range(self.X.shape[1])])
h = np.zeros((self.p.shape[0], 3))
# solve each spin separately
soln = []
for i in range(self.p.shape[0]):
soln.append( minimize(lambda h: self.cost(h, n[i], s), [0,0,0]) )
h[i] = soln[-1]['x']
# set the third field to zero (this is our normalized representation)
h -= h[:,2][:,None]
if full_output:
return h.T.ravel(), soln
return h.T.ravel()
def p_ind(self, h):
"""Set of probabilities for a single spin.
Parameters
----------
h : ndarray
Returns
-------
ndarray
The three probabilities.
"""
return np.exp(h - logsumexp(h))
def cost(self, h, n, s):
"""Log likelihood cost for a single spin.
Parameters
----------
h : ndarray
Fields.
n : ndarray
Number of observations per state.
s : float
Std of Gaussian prior.
Returns
-------
ndarray
"""
logpdf = multinomial.logpmf(n, n=n.sum(), p=self.p_ind(h))
return -logpdf / n.sum() + (np.abs(h)**self.alpha).sum() / 2 / s**self.alpha
def cost_with_s(self, s_range, n_cpus=None):
"""Optimal cost as a function of the prior width. There will be a separate set of
values for each spin.
Parameters
----------
s_range : ndarray
Returns
-------
ndarray
(n_spins, s_range.size)
"""
if n_cpus==1:
c = np.zeros((self.X.shape[1], s_range.size))
for i, s in enumerate(s_range):
soln = self.solve(s=s, full_output=True)[1]
c[:,i] = [s['fun'] for s in soln]
else:
def loop_wrapper(s):
soln = self.solve(s=s, full_output=True)[1]
return [s['fun'] for s in soln]
with threadpool_limits(limits=1, user_api='blas'):
with Pool() as pool:
c = np.vstack(list(pool.map(loop_wrapper, s_range))).T
return c
def optimize_s(self, prior_range=None, n_interp=32):
"""Find midpoint of hyperparameter s, the width of the prior.
Parameters
----------
prior_range : ndarray, None
            By default spans 10^-1 to 10^2.5 (np.logspace(-1, 2.5, n_interp)).
n_interp : int, 32
Returns
-------
float
"""
if prior_range is None:
prior_range = np.logspace(-1, 2.5, n_interp)
logl = self.cost_with_s(prior_range).mean(0)
# interpolate
spline = interp1d(np.log(prior_range), logl, kind='cubic', bounds_error=False)
# get the middle point
midval = (logl[0] + logl[-1])/2
smid = np.exp(minimize(lambda x: (spline(x) - midval)**2,
np.log(prior_range[prior_range.size//2]),
bounds=[np.log(prior_range[[0,-1]])])['x'])
return smid
#end Independent3
| 26.856459
| 94
| 0.492785
| 4,312
| 0.768217
| 0
| 0
| 0
| 0
| 0
| 0
| 2,691
| 0.479423
|
b1160a8726aaf21bb1cf8728387263736c4c3084
| 8,117
|
py
|
Python
|
lingvo/tasks/car/ops/nms_3d_op_test.py
|
Singed-jj/lingvo
|
a2a4ac8bd835ffc2f95fc38ee3e9bc17c30fcc56
|
[
"Apache-2.0"
] | null | null | null |
lingvo/tasks/car/ops/nms_3d_op_test.py
|
Singed-jj/lingvo
|
a2a4ac8bd835ffc2f95fc38ee3e9bc17c30fcc56
|
[
"Apache-2.0"
] | null | null | null |
lingvo/tasks/car/ops/nms_3d_op_test.py
|
Singed-jj/lingvo
|
a2a4ac8bd835ffc2f95fc38ee3e9bc17c30fcc56
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import time
import unittest
from lingvo import compat as tf
from lingvo.core import test_utils
from lingvo.tasks.car import ops
import numpy as np
from six.moves import range
class Nms3dOpTest(test_utils.TestCase):
def _GetData(self):
# Assignments and IoU scores derived from externally calculated test cases
# created with shapely.
bboxes = tf.constant(
[
[10.35, 8.429, -1.003, 3.7, 1.64, 1.49, 1.582],
[10.35, 8.429, -1.003, 3.7, 1.64, 1.49, 0.0
], # Box 0 rotated ~90 deg
[11.5, 8.429, -1.003, 3.7, 1.64, 1.49, 1.0], # Rotated to overlap
[13.01, 8.149, -0.953, 4.02, 1.55, 1.52, 1.592],
[13.51, 8.39, -1.0, 4.02, 1.55, 1.52, 1.592], # Slight translation
[13.51, 8.39, -1.0, 1.0, 1.0, 1.52, 1.592], # Smaller box
[13.51, 8.39, -1.0, 1.0, 1.0, 1.52, 1.9], # Smaller box
],
dtype=tf.float32)
scores = tf.constant([
[0.9, 0.09, 0.0],
[0.88, 0.109, 0.011],
[0.5, 0.01, 0.49],
[0.8, 0.1, 0.1],
[0.79, 0.12, 0.19],
[0.2, 0.79, 0.11],
[0.1, 0.9, 0.0],
],
dtype=tf.float32)
return bboxes, scores
def _TestNMSOp(self, bboxes_3d, class_scores, nms_iou_threshold,
score_threshold, max_boxes_per_class, expected_indices):
with self.session():
bbox_indices, bbox_scores, valid_mask = ops.non_max_suppression_3d(
bboxes_3d,
class_scores,
nms_iou_threshold=nms_iou_threshold,
score_threshold=score_threshold,
max_boxes_per_class=max_boxes_per_class)
bbox_idx, scores, mask = self.evaluate(
[bbox_indices, bbox_scores, valid_mask])
num_classes = len(expected_indices)
expected_shape = (num_classes, max_boxes_per_class)
self.assertEqual(bbox_idx.shape, expected_shape)
self.assertEqual(scores.shape, expected_shape)
self.assertEqual(mask.shape, expected_shape)
total_expected_valid_boxes = sum([len(exp) for exp in expected_indices])
self.assertEqual(mask.sum(), total_expected_valid_boxes)
for cls_idx in range(num_classes):
      cls_mask = mask[cls_idx, :].astype(bool)
self.assertEqual(cls_mask.sum(), len(expected_indices[cls_idx]))
self.assertAllEqual(bbox_idx[cls_idx, cls_mask],
expected_indices[cls_idx])
def testMultiClassNMS(self):
bboxes_3d, class_scores = self._GetData()
expected_indices = [[0, 3], [6], [2]]
self._TestNMSOp(
bboxes_3d,
class_scores,
nms_iou_threshold=[0.1, 0.1, 0.1],
score_threshold=[0.3, 0.3, 0.3],
max_boxes_per_class=5,
expected_indices=expected_indices)
def testLowerScoreThreshold(self):
bboxes_3d, class_scores = self._GetData()
# Lower threshold means more boxes are included.
expected_indices = [[0, 3], [6, 1], [2, 4]]
self._TestNMSOp(
bboxes_3d,
class_scores,
nms_iou_threshold=[0.1, 0.1, 0.1],
score_threshold=[0.01, 0.01, 0.01],
max_boxes_per_class=5,
expected_indices=expected_indices)
def testHighIoUThreshold(self):
bboxes_3d, class_scores = self._GetData()
expected_indices = [[0, 1, 3, 4, 2, 5, 6], [6, 5, 4, 1, 3, 0, 2],
[2, 4, 5, 3, 1]]
# Increase IoU Threshold and max number of boxes so
# all non-zero score boxes are returned.
self._TestNMSOp(
bboxes_3d,
class_scores,
nms_iou_threshold=[0.999, 0.999, 0.999],
score_threshold=[0.01, 0.01, 0.01],
max_boxes_per_class=10,
expected_indices=expected_indices)
def testOneClassVsMultiClass(self):
# Check running on all 3 classes versus each independently.
bboxes_3d, class_scores = self._GetData()
num_classes = 3
max_boxes_per_class = 5
with self.session():
bbox_indices, bbox_scores, valid_mask = ops.non_max_suppression_3d(
bboxes_3d,
class_scores,
nms_iou_threshold=[0.1, 0.1, 0.1],
score_threshold=[0.3, 0.3, 0.3],
max_boxes_per_class=max_boxes_per_class)
multiclass_indices, multiclass_scores, multiclass_valid_mask = self.evaluate(
[bbox_indices, bbox_scores, valid_mask])
self.assertEqual(multiclass_indices.shape,
(num_classes, max_boxes_per_class))
self.assertEqual(multiclass_scores.shape,
(num_classes, max_boxes_per_class))
self.assertEqual(multiclass_valid_mask.shape,
(num_classes, max_boxes_per_class))
# For each class, get results for just that class and compare.
for cls_idx in range(num_classes):
bbox_idx, bbox_scores, valid_mask = ops.non_max_suppression_3d(
bboxes_3d,
class_scores[:, cls_idx:cls_idx + 1],
nms_iou_threshold=[0.1],
score_threshold=[0.3],
max_boxes_per_class=max_boxes_per_class)
per_class_indices, per_class_scores, per_class_valid_mask = self.evaluate(
[bbox_idx, bbox_scores, valid_mask])
self.assertEqual(per_class_indices.shape, (1, max_boxes_per_class))
self.assertEqual(per_class_scores.shape, (1, max_boxes_per_class))
self.assertEqual(per_class_valid_mask.shape, (1, max_boxes_per_class))
      per_class_mask = per_class_valid_mask[0, :].astype(bool)
      multiclass_mask = multiclass_valid_mask[cls_idx, :].astype(bool)
self.assertAllEqual(per_class_indices[0, per_class_mask],
multiclass_indices[cls_idx, multiclass_mask])
self.assertAllEqual(per_class_scores[0, per_class_mask],
multiclass_scores[cls_idx, multiclass_mask])
@unittest.skip('Speed benchmark')
def testSpeed(self):
num_bboxes_list = [500, 1000, 10000]
num_classes_list = [3, 10, 25]
for num_bboxes in num_bboxes_list:
for num_classes in num_classes_list:
bboxes_3d = tf.random.uniform((num_bboxes, 7),
minval=0.1,
maxval=2,
dtype=tf.float32)
# Make half zero so we can see behavior with very low values that
# will get filtered out quickly.
class_scores = tf.concat([
tf.random.uniform((num_bboxes // 2, num_classes),
minval=0,
maxval=1,
dtype=tf.float32),
tf.zeros((num_bboxes // 2, num_classes), dtype=tf.float32)
],
axis=0)
with self.session():
outputs = ops.non_max_suppression_3d(
bboxes_3d,
class_scores,
max_boxes_per_class=1000,
nms_iou_threshold=[0.1] * num_classes,
score_threshold=[0.3] * num_classes)
timings = []
for _ in range(10):
start = time.time()
_ = self.evaluate(outputs)
end = time.time()
timings.append(end - start)
avg = sum(timings) / len(timings)
print('[{},{},{},{},{}]'.format(num_bboxes, num_classes, min(timings),
avg, max(timings)))
if __name__ == '__main__':
tf.test.main()
| 39.402913
| 83
| 0.603795
| 7,184
| 0.885056
| 0
| 0
| 1,545
| 0.190341
| 0
| 0
| 1,281
| 0.157817
|
b1175a77d1f41faf9425e6e42edc2d9127d3fe7c
| 10,773
|
py
|
Python
|
BDS_2nd_downsampling.py
|
oxon-612/BDSR
|
c468061ed9e139be96d9da91c1b5419b122eeb4f
|
[
"MIT"
] | 1
|
2021-03-03T13:13:33.000Z
|
2021-03-03T13:13:33.000Z
|
BDS_2nd_downsampling.py
|
oxon-612/BDSR
|
c468061ed9e139be96d9da91c1b5419b122eeb4f
|
[
"MIT"
] | null | null | null |
BDS_2nd_downsampling.py
|
oxon-612/BDSR
|
c468061ed9e139be96d9da91c1b5419b122eeb4f
|
[
"MIT"
] | null | null | null |
# Code used for the second downsampling pass
import numpy as np
import random
# import sys
import os
from sklearn import preprocessing
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# import math
from scipy.stats import norm
# from sklearn.neighbors import KernelDensity
# import statistics
# from scipy.stats import ks_2samp
# from scipy.stats import ttest_1samp
# from scipy.stats import ttest_ind
# from scipy.stats import chisquare
# from scipy.spatial import ConvexHull
"""
1. Get down-sampled PCD by using dimension-reduction methods: Random, PCA, FA, KernelPCA, TruncatedSVD
2. For each dimension-reduction method, get 10%, 20%, ..., 90% of original data (down-sampling) and save the down-sampled data
"""
def BDS_Downsampling(_input_data, _output_dir, _digit=38, _which_dimension_reduction = ['PCA', 'FA', 'KernelPCA', 'TruncatedSVD']):
'''
A function to conduct the best-discripancy downsampling
:param _input_data: a multi-dimensional dataset with feacture vectors and a class label vector
:param _digit: how many digits after the decimal place of constant e, by default 38
    :param _which_dimension_reduction: choose one or multiple dimensionality reduction technique(s) to produce a linear transformation T and to result in a one-dimensional vector E
['PCA', 'FA', 'KernelPCA', 'TruncatedSVD']
    :return: None; the rearranged datasets are written as text files to _output_dir
'''
def get_BDS(_r, _digit):
'''
        A subfunction to generate a best-discrepancy number with Equation 3
:param _r: an integer
:param _digit: round the best-discrepancy number to a certain number of digits
:return: a best-discrepancy number
'''
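        # Equation 3 amounts to taking the fractional part of _r * e (Euler's
        # number written out to many digits). The fractional parts of integer
        # multiples of an irrational number are equidistributed in [0, 1)
        # (Weyl), which is what makes this a best-discrepancy sequence.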
_product = _r * 2.71828182845904523536028747135266249775
_product_decimal = round(_product - int(_product), _digit)
return float(str(_product_decimal))
def get_rank(_input_list):
'''
A subfunction to get a ranking vector of a sequence
:param _input_list: a one-dimensional list
:return: a ranking vector
'''
_array = np.array(_input_list)
_temp = _array.argsort()
_ranks = np.arange(len(_array))[_temp.argsort()]
return list(_ranks)
def dimension_redu(_data, _method):
'''
        A subfunction to transform a multi-dimensional dataset from the high-dimensional space to a one-dimensional space
:param _data: a multi-dimensional dataset
:param _method: one or multiple dimensionality-reduction techniques
:return: a one-dimensional vector
'''
min_max_scaler = preprocessing.MinMaxScaler()
# print(_data[:, :-2])
z_data = min_max_scaler.fit_transform(_data)
# print(z_data)
from sklearn import decomposition
# Choose one method
if _method == 'PCA':
dim_redu_method = decomposition.PCA(n_components=1)
elif _method == 'FA':
dim_redu_method = decomposition.FactorAnalysis(n_components=1, max_iter=5000)
elif _method == 'KernelPCA':
dim_redu_method = decomposition.KernelPCA(kernel='cosine', n_components=1)
elif _method == 'TruncatedSVD':
dim_redu_method = decomposition.TruncatedSVD(1)
dimension_redu_vector = dim_redu_method.fit_transform(z_data)
z_dimension_redu_vector = np.ndarray.tolist(min_max_scaler.fit_transform(dimension_redu_vector))
return z_dimension_redu_vector
def get_temporary_data(_data, _dim_vector):
'''
A subfunction to
1) attach the one-dimensional vector E to the original dataset D;
        2) ascendingly sort E as E_tilde and then sort D as D_tilde
:param _data: a multi-dimensional dataset D
:param _dim_vector: the one-dimensional vector E
:return: sorted dataset D_tilde
'''
_labels = _data[:, -1]
_features = _data[:, :-1]
#_features_minmax = np.ndarray.tolist(min_max_scaler.fit_transform(_features)) # normalize feature vectors
_features_minmax = np.ndarray.tolist(_features)
for i in range(len(_data)):
_features_minmax[i].append(_labels[i])
_features_minmax[i].append(_dim_vector[i][0])
# D is sorted along E_tilde and becomes D_tilde
_conjointed_data_sorted = sorted(_features_minmax, key=lambda a_entry: a_entry[-1]) # sort the dataset by the one-dimensional vector E_tilde
# E_tilde is removed from D_tilde
for cj in _conjointed_data_sorted: # delete the one-dimensional vector E_tilde
################################################################################################
# #
# this is the one-dimensional feature #
# #
################################################################################################
# print(cj[-1])
del cj[-1]
rearranged_data = np.array(_conjointed_data_sorted)
return rearranged_data
min_max_scaler = preprocessing.MinMaxScaler()
_duplicated_data = [i for i in _input_data] # Create a copy of the input data so that the original input data won't be affected by a k-fold CV function.
_data_size = len(_duplicated_data)
# Generate a BDS with n elements using Equation 3
_BD_seqence = []
for bd in range(_data_size):
_BD_seqence.append(get_BDS(bd + 1, _digit))
print("Generate a BDS with {} elements using Equation 3".format(len(_BD_seqence)))
# Generate the BDS's ranking vector R
_BDS_ranking = list(get_rank(_BD_seqence))
print("\n")
print("Generate the ranking vector of the BDS with {} elements".format(len(_BDS_ranking)))
# print(_BDS_ranking)
print("\n")
for dim_method in _which_dimension_reduction:
print("-" * 100)
print("Generate one-dimensional vector E based on D with a dimensionality-reduction technique {}".format(dim_method))
print("-" * 100)
_z_duplicated_data = min_max_scaler.fit_transform(_duplicated_data)
_z_dim_vector = dimension_redu(_z_duplicated_data, dim_method)
_temporary_data = get_temporary_data(_input_data, _z_dim_vector)
print('\t',"Ascendingly sort E as E_tilde")
print('\t',"Sort D as D_tilde using E_tilde")
# print(_temporary_data[:, -1])
_BDS_rearranged_data = []
for l in _BDS_ranking:
_BDS_rearranged_data.append(_temporary_data[l])
print('\t',"D_tilde is rearranged with R, the ranking vector of a BDS")
# _file_name='./Datasets/'+dim_method+"_Sleep"+".txt"
_file_name = _output_dir + dim_method + ".txt"
np.savetxt(_file_name, _BDS_rearranged_data)
"""
1. Read a data file
2. Dimension reduction
3. Get the lowest discrepancy
"""
def get_normalized_list(_list):
'''
normalize data to [0, 1]
'''
_norm_list = []
for _i in _list:
_j = (_i-min(_list))/(max(_list)-min(_list))
_norm_list.append(_j)
return _norm_list
def get_spec_norm_list(_list):
_zero_one_list = []
for _i in _list:
_j = (_i-min(_list))/(max(_list)-min(_list))
_zero_one_list.append(_j)
# [1, 100]
_range_min = 1
_range_max = 100
_norm_list = []
for _m in _zero_one_list:
_n = _m * (_range_max-_range_min) + 1
_norm_list.append(int(_n))
return _norm_list
if __name__ == '__main__':
folder = "FA"
CR1 = "0.2" # 这是BDS里面选中的一压 压缩率 BDSR_project.cpp 一次只能做一个一压压缩率
path_2 = "D:/MengjieXu/Science/BDSR2019-2020/test202009/BDSR/"
dataset = path_2 + folder + "/txt_file1/" + folder +"_down_" + CR1 + "_NC_bounding_upsampling_result2.txt"
raw_data = np.loadtxt(dataset)
BDS_Downsampling(_input_data=raw_data[:, 0:3], _output_dir=path_2 + folder + "/")
evaluation_table_dir = path_2 + folder + "/evaluation_data/"
    up_to_how_many_to_keep = 0.4 # upper bound on the fraction of data to keep
    for how_many_to_keep in np.arange(start = 0.3, stop = up_to_how_many_to_keep, step = 0.1): # loop over the keep-ratios (compression rates); start/stop/step can be adjusted as needed
################################
# #
# down-sampled PCD #
# #
################################
# for PCA, FA, KernelPCA,TruncatedSVD
        down_method_list = ['FA', 'PCA', 'KernelPCA', 'TruncatedSVD'] # each method is run with the keep-ratio from the outer loop
print("*" * 22)
print("* *")
print("* *")
print("* keep {} data *".format(how_many_to_keep))
print("* *")
print("* *")
print("*" * 22)
        '''# disabled code that did not write the 30 random-sample files: the file names were identical, so earlier ones were overwritten
for down_method in down_method_list:
output_f_dir = "E:/XMJ/3Drebuilding/paper/test/test_2019_10/test32/down_sampled_data2/"
output_f_name = "{}_down_{}_PCD.txt".format(down_method, how_many_to_keep)
# random down-sampling
if down_method == 'Random':
rand_count = 0
for rand_seed in rand_seed_list:
rand_count += 1
random.seed(rand_seed)
down_data = random.sample(list(raw_data), int(how_many_to_keep*len(raw_data)))
np.savetxt(output_f_dir + output_f_name, down_data)
################################################################################################################
################################################################################################################
else:
bds_re_ordered_data = np.loadtxt("E:/XMJ/3Drebuilding/paper/test/test_2019_10/test20/" + down_method + ".txt")
down_data = bds_re_ordered_data[0:int(how_many_to_keep*len(bds_re_ordered_data))]
np.savetxt(output_f_dir + output_f_name, down_data)'''
        # code that writes the 30 random-sample files: rand_seed is included in the names, so they are all distinct and nothing is overwritten
for down_method in down_method_list:
output_f_dir = path_2 + folder + "/down_sampled_data/"
output_f_name = "{}_down_{}_PCD.txt".format(down_method, how_many_to_keep)
bds_re_ordered_data = np.loadtxt(path_2 + folder + "/" + down_method + ".txt")
down_data = bds_re_ordered_data[0:int(how_many_to_keep*len(bds_re_ordered_data))]
np.savetxt(output_f_dir + output_f_name, down_data)
| 43.615385
| 182
| 0.597698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,153
| 0.557387
|
b11770de6ba3e72b06e86a670a85a8fd098eb3aa
| 3,630
|
py
|
Python
|
model.py
|
e-yi/hin2vec_pytorch
|
7c3b6c4160476568985622117cf2263e7b78760e
|
[
"MIT"
] | 18
|
2019-10-17T03:12:07.000Z
|
2022-03-11T02:58:12.000Z
|
model.py
|
e-yi/hin2vec_pytorch
|
7c3b6c4160476568985622117cf2263e7b78760e
|
[
"MIT"
] | 5
|
2019-12-12T03:15:21.000Z
|
2021-04-02T07:54:38.000Z
|
model.py
|
e-yi/hin2vec_pytorch
|
7c3b6c4160476568985622117cf2263e7b78760e
|
[
"MIT"
] | 4
|
2019-12-26T07:36:38.000Z
|
2021-04-24T11:35:45.000Z
|
import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
def binary_reg(x: torch.Tensor):
# forward: f(x) = (x>=0)
# backward: f(x) = sigmoid
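    # a - a.detach() + (x.detach() >= 0) equals the hard threshold (x >= 0) in the
    # forward pass, while the gradient flows only through the sigmoid term -- a
    # straight-through-style estimator that keeps the binarization differentiable.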
a = torch.sigmoid(x)
b = a.detach()
c = (x.detach() >= 0).float()
return a - b + c
class HIN2vec(nn.Module):
def __init__(self, node_size, path_size, embed_dim, sigmoid_reg=False, r=True):
super().__init__()
self.reg = torch.sigmoid if sigmoid_reg else binary_reg
self.__initialize_model(node_size, path_size, embed_dim, r)
def __initialize_model(self, node_size, path_size, embed_dim, r):
self.start_embeds = nn.Embedding(node_size, embed_dim)
self.end_embeds = self.start_embeds if r else nn.Embedding(node_size, embed_dim)
self.path_embeds = nn.Embedding(path_size, embed_dim)
# self.classifier = nn.Sequential(
# nn.Linear(embed_dim, 1),
# nn.Sigmoid(),
# )
def forward(self, start_node: torch.LongTensor, end_node: torch.LongTensor, path: torch.LongTensor):
# assert start_node.dim() == 1 # shape = (batch_size,)
s = self.start_embeds(start_node) # (batch_size, embed_size)
e = self.end_embeds(end_node)
p = self.path_embeds(path)
p = self.reg(p)
agg = torch.mul(s, e)
agg = torch.mul(agg, p)
# agg = F.sigmoid(agg)
# output = self.classifier(agg)
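        # The link probability is sigmoid(sum_k s_k * e_k * reg(p_k)): an
        # element-wise product of the start-node, end-node and regularized
        # meta-path embeddings, summed over the embedding dimension.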
output = torch.sigmoid(torch.sum(agg, axis=1))
return output
def train(log_interval, model, device, train_loader: DataLoader, optimizer, loss_function, epoch):
model.train()
for idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data[:, 0], data[:, 1], data[:, 2])
loss = loss_function(output.view(-1), target)
loss.backward()
optimizer.step()
if idx % log_interval == 0:
print(f'\rTrain Epoch: {epoch} '
f'[{idx * len(data)}/{len(train_loader.dataset)} ({100. * idx / len(train_loader):.3f}%)]\t'
f'Loss: {loss.item():.3f}\t\t',
# f'data = {data}\t target = {target}',
end='')
print()
class NSTrainSet(Dataset):
"""
    Completely random negative sampling. TODO: improve this?
"""
def __init__(self, sample, node_size, neg=5):
"""
        :param node_size: number of nodes
        :param neg: number of negative samples
        :param sample: return value of HIN.sample(), (start_node, end_node, path_id)
"""
print('init training dataset...')
l = len(sample)
x = np.tile(sample, (neg + 1, 1))
y = np.zeros(l * (1 + neg))
y[:l] = 1
# x[l:, 2] = np.random.randint(0, path_size - 1, (l * neg,))
x[l:, 1] = np.random.randint(0, node_size - 1, (l * neg,))
self.x = torch.LongTensor(x)
self.y = torch.FloatTensor(y)
self.length = len(x)
print('finished')
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return self.length
if __name__ == '__main__':
## test binary_reg
print('sigmoid')
a = torch.tensor([-1.,0.,1.],requires_grad=True)
b = torch.sigmoid(a)
c = b.sum()
print(a)
print(b)
print(c)
c.backward()
print(c.grad)
print(b.grad)
print(a.grad)
print('binary')
a = torch.tensor([-1., 0., 1.], requires_grad=True)
b = binary_reg(a)
c = b.sum()
print(a)
print(b)
print(c)
c.backward()
print(c.grad)
print(b.grad)
print(a.grad)
| 27.709924
| 110
| 0.571901
| 2,115
| 0.574416
| 0
| 0
| 0
| 0
| 0
| 0
| 830
| 0.225421
|
b117f2719a56a1e59d4109b5312d5d87fdc50a2d
| 2,689
|
py
|
Python
|
pygrep/classes/boyerMoore.py
|
sstadick/pygrep
|
13c53ac427adda9974ee9e62c22391bf0682008c
|
[
"Apache-2.0"
] | null | null | null |
pygrep/classes/boyerMoore.py
|
sstadick/pygrep
|
13c53ac427adda9974ee9e62c22391bf0682008c
|
[
"Apache-2.0"
] | null | null | null |
pygrep/classes/boyerMoore.py
|
sstadick/pygrep
|
13c53ac427adda9974ee9e62c22391bf0682008c
|
[
"Apache-2.0"
] | null | null | null |
import string
from helpers import *
class BoyerMoore(object):
""" Encapsulates pattern and associated Boyer-Moore preprocessing. """
def __init__(self, pattern, alphabet=string.ascii_letters + string.whitespace + string.punctuation):
self.pattern = pattern
self.patternLen = len(pattern)
self.alphabet = alphabet
# Create map from alphabet characters to integers
self.alphabet_map = {char: i for i, char in enumerate(self.alphabet)}
# Make bad character rule table
self.bad_char = dense_bad_char_tab(pattern, self.alphabet_map)
# Create good suffix rule table
_, self.big_l, self.small_l_prime = good_suffix_table(pattern)
def badCharacterRule(self, i, c):
""" Return # skips given by bad character rule at offset i """
assert c in self.alphabet_map
ci = self.alphabet_map[c]
assert i > (self.bad_char[i][ci]-1)
return i - (self.bad_char[i][ci]-1)
def goodSuffixRule(self, i):
""" Given a mismatch at offset i, return amount to shift
as determined by (weak) good suffix rule. """
length = len(self.big_l)
assert i < length
if i == length - 1:
return 0
i += 1 # i points to leftmost matching position of P
if self.big_l[i] > 0:
return length - self.big_l[i]
return length - self.small_l_prime[i]
    def galilRule(self):
""" Return amount to shift in case where P matches T """
return len(self.small_l_prime) - self.small_l_prime[1]
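    # search() (below) slides the pattern over the text, comparing characters
    # right to left at each alignment; on a mismatch it advances by the larger
    # of the bad-character and good-suffix skips, and after a full match it
    # advances by the Galil-rule skip.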
def search(self, text):
if len(self.pattern) == 0 or len(text) == 0 or len(text) < len(self.pattern):
return []
matches = []
i = 0 # place tracking variable
while i < len(text) - len(self.pattern) + 1:
shift = 1
misMatched = False
for j in range(self.patternLen-1, -1, -1):
if not self.pattern[j] == text[i + j]:
skipBC = self.badCharacterRule(j, text[i + j])
skipGS = self.goodSuffixRule(j)
shift = max(shift, skipBC, skipGS)
misMatched = True
break
if not misMatched:
matches.append(i)
                skipMatched = self.galilRule()
shift = max(shift, skipMatched)
i += shift
return matches
if __name__ == '__main__':
pattern = 'thou'
text = 'cow th ou cat art hat thou mow the lawn'
bm = BoyerMoore(pattern)
# print([char for char in text])
# print([(i, char) for i, char in enumerate(text)])
print(bm.search(text))
| 36.337838
| 104
| 0.579026
| 2,400
| 0.892525
| 0
| 0
| 0
| 0
| 0
| 0
| 623
| 0.231685
|
b118f2f3e6c0e9617cb2cf673e9a7f3e68d6f9ce
| 53
|
py
|
Python
|
basicts/archs/DGCRN_arch/__init__.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | 3
|
2022-02-22T12:50:08.000Z
|
2022-03-13T03:38:46.000Z
|
basicts/archs/DGCRN_arch/__init__.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
basicts/archs/DGCRN_arch/__init__.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
from basicts.archs.DGCRN_arch.DGCRN_arch import DGCRN
| 53
| 53
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b119bf6083a2cc2bfb9320284b71a47bcee04389
| 159
|
py
|
Python
|
kido/settings/production.example.py
|
alanhamlett/kid-o
|
18f88f7dc78c678e017fdc7e0dfb2711bcf2bf74
|
[
"BSD-3-Clause"
] | 34
|
2015-08-22T06:57:26.000Z
|
2021-11-08T10:47:23.000Z
|
kido/settings/production.example.py
|
alanhamlett/kid-o
|
18f88f7dc78c678e017fdc7e0dfb2711bcf2bf74
|
[
"BSD-3-Clause"
] | 15
|
2015-08-21T20:25:49.000Z
|
2022-03-11T23:25:44.000Z
|
kido/settings/production.example.py
|
dominino/kid-o
|
18f88f7dc78c678e017fdc7e0dfb2711bcf2bf74
|
[
"BSD-3-Clause"
] | 5
|
2016-08-22T08:23:45.000Z
|
2019-05-07T01:38:38.000Z
|
SECRET_KEY = None
DB_HOST = "localhost"
DB_NAME = "kido"
DB_USERNAME = "kido"
DB_PASSWORD = "kido"
COMPRESSOR_DEBUG = False
COMPRESSOR_OFFLINE_COMPRESS = True
| 19.875
| 34
| 0.773585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.18239
|
b11a302f53a38192c5dd68e4767ae96d3e146ef3
| 301
|
py
|
Python
|
run.py
|
Prakash2403/ultron
|
7d1067eb98ef52f6a88299534ea204e7ae45d7a7
|
[
"MIT"
] | 13
|
2017-08-15T15:50:13.000Z
|
2019-06-03T10:24:50.000Z
|
run.py
|
Prakash2403/ultron
|
7d1067eb98ef52f6a88299534ea204e7ae45d7a7
|
[
"MIT"
] | 3
|
2017-08-29T16:35:04.000Z
|
2021-06-01T23:49:16.000Z
|
run.py
|
Prakash2403/ultron
|
7d1067eb98ef52f6a88299534ea204e7ae45d7a7
|
[
"MIT"
] | 4
|
2017-08-16T09:33:59.000Z
|
2019-06-05T07:25:30.000Z
|
#! /usr/bin/python3
from default_settings import default_settings
from ultron_cli import UltronCLI
if __name__ == '__main__':
default_settings()
try:
UltronCLI().cmdloop()
except KeyboardInterrupt:
print("\nInterrupted by user.")
print("Goodbye")
exit(0)
| 23.153846
| 45
| 0.664452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 63
| 0.209302
|
b11a595d5c6b314526d2c13c66fd8ddfdd9ef9ec
| 2,689
|
py
|
Python
|
losses/dice_loss.py
|
CharlesAuthier/geo-deep-learning
|
e97ea1d362327cdcb2849cd2f810f1e914078243
|
[
"MIT"
] | 121
|
2018-10-01T15:27:08.000Z
|
2022-02-16T14:04:34.000Z
|
losses/dice_loss.py
|
CharlesAuthier/geo-deep-learning
|
e97ea1d362327cdcb2849cd2f810f1e914078243
|
[
"MIT"
] | 196
|
2018-09-26T19:32:29.000Z
|
2022-03-30T15:17:53.000Z
|
losses/dice_loss.py
|
CharlesAuthier/geo-deep-learning
|
e97ea1d362327cdcb2849cd2f810f1e914078243
|
[
"MIT"
] | 36
|
2018-09-25T12:55:55.000Z
|
2022-03-03T20:31:33.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def soft_dice_score(
output: torch.Tensor, target: torch.Tensor, smooth: float = 0.0, eps: float = 1e-7, dims=None) -> torch.Tensor:
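    # Soft Dice: 2 * |output * target| / (|output| + |target|), with a smoothing
    # constant added to numerator and denominator and an eps clamp to avoid
    # division by zero; when `dims` is given the sums are taken over those axes
    # only (e.g. per-class scores).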
assert output.size() == target.size()
if dims is not None:
intersection = torch.sum(output * target, dim=dims)
cardinality = torch.sum(output + target, dim=dims)
# print('cardinality', cardinality, 'intersection', intersection)
else:
intersection = torch.sum(output * target)
cardinality = torch.sum(output + target)
dice_score = (2.0 * intersection + smooth) / (cardinality + smooth).clamp_min(eps)
# print('dice_score', dice_score)
return dice_score
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0, eps=1e-7, ignore_index=None, weight=None, mode='MULTICLASS_MODE'):
"""Implementation of Dice loss for image segmentation task.
https://github.com/qubvel/segmentation_models.pytorch
"""
super().__init__()
self.smooth = smooth
self.eps = eps
self.ignore_index = ignore_index
self.weight = weight
self.mode = mode
def forward(self, output, target):
bs = target.size(0)
num_classes = output.size(1)
dims = (0, 2)
# print(self.mode, self.ignore_index)
if self.mode == 'MULTICLASS_MODE':
output = output.log_softmax(dim=1).exp()
else:
output = F.logsigmoid(output).exp()
# output = output.log_softmax(dim=1).exp()
if self.mode == 'BINARY_MODE':
target = target.view(bs, 1, -1)
output = output.view(bs, 1, -1)
if self.ignore_index is not None:
mask = target != self.ignore_index
output = output * mask
target = target * mask
else:
target = target.view(bs, -1)
output = output.view(bs, num_classes, -1)
if self.ignore_index is not None:
mask = target != self.ignore_index
output = output * mask.unsqueeze(1)
target = F.one_hot((target * mask).to(torch.long), num_classes) # N,H*W -> N,H*W, C
target = target.permute(0, 2, 1) * mask.unsqueeze(1)
else:
target = F.one_hot(target, num_classes) # N,H*W -> N,H*W, C
                target = target.permute(0, 2, 1) # N, C, H*W
scores = soft_dice_score(output, target.type_as(output), smooth=self.smooth, eps=self.eps, dims=dims)
loss = 1.0 - scores
mask = target.sum(dims) > 0
loss *= mask.to(loss.dtype)
return loss.mean()
| 35.381579
| 119
| 0.579398
| 1,960
| 0.728896
| 0
| 0
| 0
| 0
| 0
| 0
| 406
| 0.150985
|
b11a616d1b56aaeabf4b500c344345675c245118
| 2,766
|
py
|
Python
|
src/pytezos/jupyter.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 98
|
2019-02-07T16:33:38.000Z
|
2022-03-31T15:53:41.000Z
|
src/pytezos/jupyter.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 152
|
2019-05-20T16:38:56.000Z
|
2022-03-30T14:24:38.000Z
|
src/pytezos/jupyter.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 34
|
2019-07-25T12:03:51.000Z
|
2021-11-11T22:23:38.000Z
|
import inspect
import re
from functools import update_wrapper
from typing import Optional
def is_interactive() -> bool:
try:
_ = get_ipython().__class__.__name__ # type: ignore
return True
except NameError:
return False
def get_attr_docstring(class_type, attr_name) -> Optional[str]:
if attr_name == 'get':
attr_name = '__call__'
attr = getattr(class_type, attr_name, None)
if attr and attr.__doc__:
return re.sub(r' {3,}', '', attr.__doc__)
return None
def default_attr_filter(x) -> bool: # pylint: disable=unused-argument
return True
def get_class_docstring(class_type, attr_filter=default_attr_filter, extended=False):
def format_attribute(x):
attr = getattr(class_type, x)
if isinstance(attr, property):
name = f'.{x}'
else:
if extended:
sig = str(inspect.signature(attr)).replace('self, ', '')
else:
sig = '()'
name = f'.{x}{sig}'
if extended:
doc = get_attr_docstring(class_type, x)
else:
doc = ''
return f'{name}{doc}'
def filter_attribute(x):
return all(
[
not x.startswith('_'),
attr_filter(x),
not isinstance(getattr(class_type, x), property),
]
)
return '\n'.join(
map(
format_attribute,
filter(
filter_attribute,
dir(class_type),
),
),
)
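# inline_doc (below) wraps a method in a descriptor whose repr() is built from
# the method's repr and its docstring, so that evaluating `obj.method` without
# calling it in an interactive IPython/Jupyter session prints the documentation.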
def inline_doc(method):
if not is_interactive():
return method
doc = [repr(method)]
if method.__doc__:
doc.append(re.sub(r' {3,}', '', method.__doc__))
class CustomReprDescriptor:
def __get__(self, instance, owner):
class MethodWrapper:
def __init__(self):
self.class_instance = instance
self.doc = '\n'.join(doc)
def __call__(self, *args, **kwargs):
return method(self.class_instance, *args, **kwargs)
def __repr__(self):
return self.doc
return update_wrapper(MethodWrapper(), method)
return CustomReprDescriptor()
class InlineDocstring(type):
def __new__(mcs, name, bases, attrs, **kwargs):
if is_interactive():
new_attrs = {}
for attr_name, attr in attrs.items():
if callable(attr) and attr.__doc__ and not attr_name.startswith('_'):
attr = inline_doc(attr)
new_attrs[attr_name] = attr
else:
new_attrs = attrs
return type.__new__(mcs, name, bases, new_attrs, **kwargs)
| 26.596154
| 85
| 0.544107
| 967
| 0.349602
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.052422
|
b11be2ae97985e6cfb1d4fb8b0941137d4427bee
| 2,492
|
py
|
Python
|
torch_template/training.py
|
dongqifong/inspiration
|
f3168217729063f79f18972a4abe9db821ad5b91
|
[
"MIT"
] | null | null | null |
torch_template/training.py
|
dongqifong/inspiration
|
f3168217729063f79f18972a4abe9db821ad5b91
|
[
"MIT"
] | null | null | null |
torch_template/training.py
|
dongqifong/inspiration
|
f3168217729063f79f18972a4abe9db821ad5b91
|
[
"MIT"
] | null | null | null |
import torch
def train_one_epoch(model, train_loader, loss_func, optimizer):
model.train()
running_loss = 0.0
for batch_idx, (x, y) in enumerate(train_loader):
out = model(x)
optimizer.zero_grad()
loss = loss_func(out, y)
loss.backward()
optimizer.step()
running_loss = running_loss + loss.item()
return round(running_loss / (batch_idx+1), 5)
def valid_one_epoch(model, valid_loader, loss_func):
model.eval()
running_loss = 0.0
with torch.no_grad():
for batch_idx, (x, y) in enumerate(valid_loader):
out = model(x)
loss = loss_func(out, y)
running_loss = running_loss + loss.item()
return round(running_loss / (batch_idx+1), 5)
class Trainer:
    def __init__(self, model, train_loader, optimizer, loss_func, valid_loader=None) -> None:
self.show_period = 1
self.model = model
self.train_loader = train_loader
self.valid_loader = valid_loader
self.optimizer = optimizer
self.loss_func = loss_func
self.train_loss = []
self.valid_loss = []
pass
def train(self, epochs=1):
for epoch in range(epochs):
if self.valid_loader is not None:
self.valid_loss.append(valid_one_epoch(
self.model, self.valid_loader, self.loss_func))
            self.train_loss.append(train_one_epoch(
                self.model, self.train_loader, self.loss_func, self.optimizer))
if epoch % self.show_period == 0 or epoch == 0:
if self.valid_loader:
show_progress(
epoch, epochs, self.train_loss[-1], self.valid_loss[-1])
else:
show_progress(
epoch, epochs, self.train_loss[-1])
return None
def save_model(self, model_name):
save_model(self.model, model_name)
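# Illustrative usage of Trainer (a sketch; assumes `model`, `train_loader`,
# `valid_loader`, `optimizer` and `loss_func` are constructed elsewhere):
#   trainer = Trainer(model, train_loader, optimizer, loss_func, valid_loader)
#   trainer.train(epochs=10)
#   trainer.save_model("my_model")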
def show_progress(epoch=None, epochs=None, train_loss=None, valid_loss=None):
if valid_loss is not None:
print(
f"[{epoch}/{epochs}], training_loss:[{train_loss}], valid_loss:[{valid_loss}]", end="\r")
else:
print(
f"[{epoch}/{epochs}], training_loss:[{train_loss}]", end="\r")
def save_model(model, model_name: str, mode="full"):
if mode == "all":
torch.save(model, model_name+"_full_model.pth")
else:
torch.save(model.state_dict(), model_name+"_state_dict.pth")
| 32.789474
| 101
| 0.597111
| 1,193
| 0.478732
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.073034
|
b11c83cde4ab47f5fe3448e7a1b6b3e0baac54ab
| 3,331
|
py
|
Python
|
pytext/__init__.py
|
NunoEdgarGFlowHub/pytext
|
2358b2d7c8c4e6800c73f4bd1c9731723e503ed6
|
[
"BSD-3-Clause"
] | 1
|
2019-02-25T01:50:03.000Z
|
2019-02-25T01:50:03.000Z
|
pytext/__init__.py
|
NunoEdgarGFlowHub/pytext
|
2358b2d7c8c4e6800c73f4bd1c9731723e503ed6
|
[
"BSD-3-Clause"
] | null | null | null |
pytext/__init__.py
|
NunoEdgarGFlowHub/pytext
|
2358b2d7c8c4e6800c73f4bd1c9731723e503ed6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import uuid
from typing import Callable, Mapping, Optional
import numpy as np
from caffe2.python import workspace
from caffe2.python.predictor import predictor_exporter
from .builtin_task import register_builtin_tasks
from .config import PyTextConfig, pytext_config_from_json
from .config.component import create_featurizer
from .data.featurizer import InputRecord
from .utils.onnx_utils import CAFFE2_DB_TYPE, convert_caffe2_blob_name
register_builtin_tasks()
Predictor = Callable[[Mapping[str, str]], Mapping[str, np.ndarray]]
def _predict(workspace_id, feature_config, predict_net, featurizer, input):
workspace.SwitchWorkspace(workspace_id)
features = featurizer.featurize(InputRecord(**input))
if feature_config.word_feat:
for blob_name in feature_config.word_feat.export_input_names:
converted_blob_name = convert_caffe2_blob_name(blob_name)
workspace.blobs[converted_blob_name] = np.array(
[features.tokens], dtype=str
)
workspace.blobs["tokens_lens"] = np.array([len(features.tokens)], dtype=np.int_)
if feature_config.dict_feat:
dict_feats, weights, lens = feature_config.dict_feat.export_input_names
converted_dict_blob_name = convert_caffe2_blob_name(dict_feats)
workspace.blobs[converted_dict_blob_name] = np.array(
[features.gazetteer_feats], dtype=str
)
workspace.blobs[weights] = np.array(
[features.gazetteer_feat_weights], dtype=np.float32
)
workspace.blobs[lens] = np.array(features.gazetteer_feat_lengths, dtype=np.int_)
if feature_config.char_feat:
for blob_name in feature_config.char_feat.export_input_names:
converted_blob_name = convert_caffe2_blob_name(blob_name)
workspace.blobs[converted_blob_name] = np.array(
[features.characters], dtype=str
)
workspace.RunNet(predict_net)
return {
str(blob): workspace.blobs[blob][0] for blob in predict_net.external_outputs
}
def load_config(filename: str) -> PyTextConfig:
"""
Load a PyText configuration file from a file path.
See pytext.config.pytext_config for more info on configs.
"""
with open(filename) as file:
config_json = json.loads(file.read())
if "config" not in config_json:
return pytext_config_from_json(config_json)
return pytext_config_from_json(config_json["config"])
def create_predictor(
config: PyTextConfig, model_file: Optional[str] = None
) -> Predictor:
"""
Create a simple prediction API from a training config and an exported caffe2
model file. This model file should be created by calling export on a trained
model snapshot.
"""
workspace_id = str(uuid.uuid4())
workspace.SwitchWorkspace(workspace_id, True)
predict_net = predictor_exporter.prepare_prediction_net(
filename=model_file or config.export_caffe2_path, db_type=CAFFE2_DB_TYPE
)
task = config.task
feature_config = task.features
featurizer = create_featurizer(task.featurizer, feature_config)
return lambda input: _predict(
workspace_id, feature_config, predict_net, featurizer, input
)
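# Illustrative usage (a sketch; the config path, model path and input field
# names are placeholders and depend on the task's featurizer):
#   config = load_config("my_task_config.json")
#   predict = create_predictor(config, "/path/to/exported_model")
#   scores = predict({"raw_text": "set an alarm for 7am"})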
| 36.604396
| 88
| 0.727409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 442
| 0.132693
|
b11d7725740230346fbe8555198c64720b464851
| 1,374
|
py
|
Python
|
modules/cudaobjdetect/misc/python/test/test_cudaobjdetect.py
|
ptelang/opencv_contrib
|
dd68e396c76f1db4d82e5aa7a6545580939f9b9d
|
[
"Apache-2.0"
] | 7,158
|
2016-07-04T22:19:27.000Z
|
2022-03-31T07:54:32.000Z
|
modules/cudaobjdetect/misc/python/test/test_cudaobjdetect.py
|
ptelang/opencv_contrib
|
dd68e396c76f1db4d82e5aa7a6545580939f9b9d
|
[
"Apache-2.0"
] | 2,184
|
2016-07-05T12:04:14.000Z
|
2022-03-30T19:10:12.000Z
|
modules/cudaobjdetect/misc/python/test/test_cudaobjdetect.py
|
ptelang/opencv_contrib
|
dd68e396c76f1db4d82e5aa7a6545580939f9b9d
|
[
"Apache-2.0"
] | 5,535
|
2016-07-06T12:01:10.000Z
|
2022-03-31T03:13:24.000Z
|
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudaobjdetect_test(NewOpenCVTests):
def setUp(self):
super(cudaobjdetect_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
@unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
"OPENCV_TEST_DATA_PATH is not defined")
def test_hog(self):
img_path = os.environ['OPENCV_TEST_DATA_PATH'] + '/gpu/caltech/image_00000009_0.png'
npMat = cv.cvtColor(cv.imread(img_path),cv.COLOR_BGR2BGRA)
cuMat = cv.cuda_GpuMat(npMat)
cuHog = cv.cuda.HOG_create()
cuHog.setSVMDetector(cuHog.getDefaultPeopleDetector())
loc, conf = cuHog.detect(cuMat)
self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 2)
loc = cuHog.detectWithoutConf(cuMat)
self.assertTrue(len(loc) > 0 and len(loc[0]) == 2)
loc = cuHog.detectMultiScaleWithoutConf(cuMat)
self.assertTrue(len(loc) > 0 and len(loc[0]) == 4)
cuHog.setGroupThreshold(0)
loc, conf = cuHog.detectMultiScale(cuMat)
self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 4)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
| 36.157895
| 92
| 0.659389
| 1,195
| 0.869723
| 0
| 0
| 963
| 0.700873
| 0
| 0
| 186
| 0.135371
|
b11ddd81227b3782058ba9f99a70d0ae0079cb41
| 32,677
|
py
|
Python
|
gizmo/mapper.py
|
emehrkay/gizmo
|
01db2f51118f7d746061ace0b491237481949bad
|
[
"MIT"
] | 19
|
2015-10-06T12:55:09.000Z
|
2021-01-09T09:53:38.000Z
|
gizmo/mapper.py
|
emehrkay/Gizmo
|
01db2f51118f7d746061ace0b491237481949bad
|
[
"MIT"
] | 2
|
2016-01-21T02:55:55.000Z
|
2020-08-16T23:05:07.000Z
|
gizmo/mapper.py
|
emehrkay/gizmo
|
01db2f51118f7d746061ace0b491237481949bad
|
[
"MIT"
] | 3
|
2016-01-21T02:18:41.000Z
|
2018-04-25T06:06:25.000Z
|
import logging
import inspect
import re
from collections import OrderedDict
from gremlinpy.gremlin import Gremlin, Param, AS
from .entity import (_Entity, Vertex, Edge, GenericVertex, GenericEdge,
ENTITY_MAP)
from .exception import (AstronomerQueryException, AstronomerMapperException)
from .traversal import Traversal
from .util import (camel_to_underscore, GIZMO_ID, GIZMO_LABEL, GIZMO_TYPE,
GIZMO_ENTITY, GIZMO_VARIABLE, entity_name)
logger = logging.getLogger(__name__)
ENTITY_MAPPER_MAP = {}
GENERIC_MAPPER = 'generic.mapper'
_count = -1
_query_count = 0
_query_params = {}
def next_query_variable():
global _count
_count += 1
return '{}_{}'.format(GIZMO_VARIABLE, _count)
def get_entity_mapper(entity=None, name=GENERIC_MAPPER):
    if entity is not None:
        name = entity_name(entity)
    if name not in ENTITY_MAPPER_MAP:
        name = GENERIC_MAPPER
    return ENTITY_MAPPER_MAP[name]()
def next_param_name(param):
    param = re.sub(r'\W', '_', param)
if param not in _query_params:
_query_params[param] = -1
_query_params[param] += 1
return '{}_{}'.format(param, _query_params[param])
def next_param(param, value):
if isinstance(value, _Entity):
value = entity_name(value)
return Param(next_param_name(param), value)
def next_entity_param(entity, param, value):
name = entity_name(entity)
field = '{}_{}'.format(name, param)
return next_param(field, value)
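# The helpers above (next_param_name / next_param / next_entity_param) generate
# unique bound-parameter names (field_0, field_1, ...) per query run, so that
# several enqueued statements can share a single Gremlin script without
# parameter-name collisions.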
class Mapper:
def __init__(self, request, gremlin=None, auto_commit=True,
graph_instance_name=None):
if not gremlin:
gremlin = Gremlin()
self.request = request
self.gremlin = gremlin
self.auto_commit = auto_commit
self.graph_instance_name = graph_instance_name
if not self.auto_commit and not self.graph_instance_name:
            error = ('If auto_commit is disabled, we need to know the'
                     ' graph instance name')
            logger.exception(error)
            raise ValueError(error)
self.reset()
def reset(self):
self.gremlin.reset()
global _query_count
global _count
global _query_params
_query_count = 0
_count = 0
_query_params = {}
self.queries = []
self.return_vars = []
self.entities = OrderedDict() # ensure FIFO for testing
self.del_entities = {}
self.params = {}
self.callbacks = {}
self._magic_method = None
def get_entity_variable(self, entity):
ret = None
for key, def_entity in self.entities.items():
if entity == def_entity:
return key
return ret
def get_mapper(self, entity=None, name=GENERIC_MAPPER):
if entity is not None:
name = entity_name(entity)
if name not in ENTITY_MAPPER_MAP:
name = GENERIC_MAPPER
return ENTITY_MAPPER_MAP[name](self)
def enqueue_mapper(self, mapper):
self.queries += mapper.queries
self.return_vars += mapper.return_vars
self.entities.update(mapper.entities)
self.params.update(mapper.params)
for entity, callbacks in mapper.callbacks.items():
exisiting = self.callbacks.get(entity, [])
self.callbacks[entity] = exisiting + callbacks
mapper.reset()
return self
def enqueue_script(self, gremlin=None, script=None, params=None):
if gremlin is not None:
script = [str(gremlin),]
params = gremlin.bound_params
gremlin.reset()
if script:
self.queries += script
if params:
self.params.update(params)
return self
def __getattr__(self, magic_method):
"""magic method that works in conjunction with __call__
method these two methods are used to shortcut the retrieval
of an entity's mapper and call a specific method against
this chain:
user = User()
user_mapper = mapper.get_mapper(user)
emails = user_mapper.get_emails(user)
can be shortened into:
user = User()
emails = mapper.get_emails(user)
"""
self._magic_method = magic_method
return self
def __call__(self, *args, **kwargs):
mapper = self.get_mapper(args[0])
return getattr(mapper, self._magic_method)(*args, **kwargs)
async def data(self, entity, *args):
"""utility method used to retrieve an entity's data. It
also allows for method chaining in order to augment the
resulting data.
class MyMapper(_GenericMapper):
async def add_two(self, entity, data):
data['two'] = 2
return data
async def add_three(self, entity, data):
data['three'] = 3
return data
entity = User()
data = await mapper.data(user, 'add_two', 'add_three')
the resulting data will have the data from the User class,
plus a two and a three member
"""
collection = isinstance(entity, Collection)
async def get_data(entity, data):
retrieved = data
for method in args:
mapper = self.get_mapper(entity)
async def wrapper(entity, data):
res = await getattr(mapper, method)(entity=entity,
data=data)
return res
retrieved = await wrapper(entity=entity,
data=retrieved)
return retrieved
if collection:
data = []
for coll_entity in entity:
mapper = self.get_mapper(coll_entity)
entity_data = await mapper.data(coll_entity)
res = await get_data(coll_entity, entity_data)
data.append(res)
else:
mapper = self.get_mapper(entity)
entity_data = await mapper.data(entity)
data = await get_data(entity, entity_data)
return data
def save(self, entity, bind_return=True, mapper=None,
callback=None, **kwargs):
if mapper is None:
mapper = self.get_mapper(entity)
logger.debug(('Saving entity: {} with mapper:'
' {}').format(entity.__repr__, mapper))
mapper.save(entity, bind_return, callback, **kwargs)
return self.enqueue_mapper(mapper)
def delete(self, entity, mapper=None, callback=None):
if mapper is None:
mapper = self.get_mapper(entity)
logger.debug(('Deleting entity: {} with mapper:'
' {}').format(entity.__repr__, mapper))
mapper.delete(entity, callback=callback)
# manually add the deleted entity to the self.entities
# collection for callbacks
from random import randrange
key = 'DELETED_%s_entity' % str(randrange(0, 999999999))
self.del_entities[key] = entity
return self.enqueue_mapper(mapper)
def create(self, data=None, entity=None, data_type='python'):
if data is None:
data = {}
if entity:
mapper = self.get_mapper(entity)
else:
name = data.get(GIZMO_ENTITY, GENERIC_MAPPER)
if isinstance(name, (list, tuple)):
name = name[0]['value']
mapper = self.get_mapper(name=name)
kwargs = {
'data': data,
'entity': entity,
'data_type': data_type,
}
return mapper.create(**kwargs)
def connect(self, out_v, in_v, label=None, data=None, edge_entity=None,
data_type='python'):
"""
        method used to connect two vertices and create an Edge object;
        the resulting edge is not saved to the graph until it is passed to
        save, allowing further augmentation
"""
if not isinstance(out_v, Vertex):
if not isinstance(out_v, (str, int)):
err = 'The out_v needs to be either a Vertex or an id'
logger.exception(err)
raise AstronomerMapperException(err)
if not isinstance(in_v, Vertex):
if not isinstance(in_v, (str, int)):
err = 'The in_v needs to be either a Vertex or an id'
logger.exception(err)
raise AstronomerMapperException(err)
if data is None:
data = {}
data['outV'] = out_v
data['inV'] = in_v
data[GIZMO_TYPE] = 'edge'
data[GIZMO_LABEL[0]] = label
return self.create(data=data, entity=edge_entity, data_type=data_type)
def start(self, entity):
mapper = self.get_mapper(entity)
return mapper.start(entity)
def _build_queries(self):
if len(self.return_vars) > 0:
returns = []
for k in self.return_vars:
returns.append("'{}': {}".format(k, k))
ret = '[{}]'.format(', '.join(returns))
self.queries.append(ret)
return self
def get(self, entity):
mapper = self.get_mapper(entity)
return mapper.get(entity)
def apply_statement(self, statement):
self.gremlin.apply_statement(statement)
return self
async def send(self):
self._build_queries()
script = ";\n".join(self.queries)
params = self.params
entities = self.entities
callbacks = self.callbacks
entities.update(self.del_entities)
self.reset()
res = await self.query(script=script, params=params,
update_entities=entities, callbacks=callbacks)
return res
async def query(self, script=None, params=None, gremlin=None,
update_entities=None, callbacks=None, collection=None):
if gremlin is not None:
script = str(gremlin)
params = gremlin.bound_params
gremlin.reset()
if script is None:
script = ''
if params is None:
params = {}
if update_entities is None:
update_entities = {}
self.reset()
response = await self.request.send(script, params, update_entities)
for k, entity in update_entities.items():
cbs = callbacks.get(entity, [])
for c in cbs:
c(entity)
if not collection:
collection = Collection
return collection(self, response)
class _RootMapper(type):
"""
In the case of custom mappers, this metaclass will register the entity name
with the mapper object. This is done so that when entities are loaded by
name the associated mapper is used to CRUD it.
This only works when the Mapper.create method is used to
create the entity
"""
def __new__(cls, name, bases, attrs):
cls = super(_RootMapper, cls).__new__(cls, name, bases, attrs)
entity = attrs.pop('entity', None)
if entity:
map_name = entity_name(entity)
ENTITY_MAPPER_MAP[map_name] = cls
elif name == 'EntityMapper':
ENTITY_MAPPER_MAP[GENERIC_MAPPER] = cls
return cls
def __call__(cls, *args, **kwargs):
mapper = super(_RootMapper, cls).__call__(*args, **kwargs)
for field in dir(mapper):
if field.startswith('_'):
continue
val = getattr(mapper, field)
if inspect.isclass(val) and issubclass(val, EntityMapper):
if mapper.mapper:
instance = val(mapper.mapper)
setattr(mapper, field, instance)
return mapper
class EntityMapper(metaclass=_RootMapper):
VARIABLE = GIZMO_VARIABLE
unique = False
unique_fields = None
save_statements = None
def __init__(self, mapper=None):
self.mapper = mapper
self.gremlin = None
if self.mapper:
self.gremlin = mapper.gremlin
self.reset()
def reset(self):
self.queries = []
self.return_vars = []
self.entities = {}
self.params = {}
self.callbacks = {}
async def data(self, entity):
return entity.data
def get(self, entity):
trav = self.start(entity)
vertex = issubclass(self.entity, Vertex)
param_value = str(self.entity)
param_name = 'out_{}_{}'.format(entity.__class__.__name__, param_value)
entity_param = next_param(param_name, param_value)
if vertex:
trav.out().hasLabel(entity_param)
else:
trav.outE(entity_param)
return trav
def enqueue(self, query, bind_return=True):
for entry in query.queries:
script = entry['script']
if script in self.queries:
continue
if bind_return:
variable = next_query_variable()
script = '{} = {}'.format(variable, script)
if 'entity' in entry:
self.entities[variable] = entry['entity']
self.return_vars.append(variable)
self.queries.append(script)
self.params.update(entry['params'])
return self
def _enqueue_callback(self, entity, callback):
if callback:
listed = self.callbacks.get(entity, [])
if isinstance(callback, (list, tuple)):
listed += list(callback)
elif callback:
listed.append(callback)
self.callbacks[entity] = listed
return self
def on_create(self, entity):
pass
def on_update(self, entity):
pass
def on_delete(self, entity):
pass
def _build_save_statements(self, entity, query, **kwargs):
statement_query = Query(self.mapper)
query_gremlin = Gremlin(self.gremlin.gv)
for entry in query.queries:
query_gremlin.bind_params(entry['params'])
for statement in self.save_statements:
instance = statement(entity, self, query, **kwargs)
query_gremlin.apply_statement(instance)
statement_query._add_query(str(query_gremlin),
query_gremlin.bound_params, entity=entity)
return statement_query
def start(self, entity=None):
return Traversal(self.mapper, entity or self.entity)
def save(self, entity, bind_return=True, callback=None, *args, **kwargs):
"""callback and be a single callback or a list of them"""
method = '_save_edge' if entity[GIZMO_TYPE] == 'edge' else \
'_save_vertex'
if not isinstance(callback, (list, tuple)) and callback:
callback = [callback]
else:
callback = []
if entity[GIZMO_ID]:
callback.insert(0, self.on_update)
else:
callback.insert(0, self.on_create)
self._enqueue_callback(entity, callback)
return getattr(self, method)(entity=entity, bind_return=bind_return)
def _save_vertex(self, entity, bind_return=True):
"""
        method used to save an entity. If both the unique_type and unique_fields
params are set, it will run a sub query to check to see if an entity
exists that matches those values
"""
query = Query(self.mapper)
ref = self.mapper.get_entity_variable(entity)
"""
check to see if the entity has been used already in the current script
execution.
        If it has, use the reference;
        if it hasn't, go through the process of saving it
"""
if ref:
query._add_query(ref, params=None, entity=entity)
return self.enqueue(query, bind_return)
query.save(entity)
if not entity[GIZMO_ID] and self.unique_fields:
from .statement import MapperUniqueVertex
if not self.save_statements:
self.save_statements = []
if MapperUniqueVertex not in self.save_statements:
self.save_statements.append(MapperUniqueVertex)
if self.save_statements and len(self.save_statements):
statement_query = self._build_save_statements(entity, query)
return self.enqueue(statement_query, bind_return)
else:
return self.enqueue(query, bind_return)
def _save_edge(self, entity, bind_return=True):
query = Query(self.mapper)
save = True
edge_ref = self.mapper.get_entity_variable(entity)
out_v = entity.out_v
out_v_id = out_v[GIZMO_ID] if isinstance(out_v, Vertex) else None
in_v = entity.in_v
in_v_id = in_v[GIZMO_ID] if isinstance(in_v, Vertex) else None
out_v_ref = self.mapper.get_entity_variable(out_v)
in_v_ref = self.mapper.get_entity_variable(in_v)
if edge_ref:
query._add_query(edge_ref, params=None, entity=entity)
return self.enqueue(query, bind_return)
"""
both out_v and in_v are checked to see if the entities stored in each
        respective variable have been used.
If they have not and they are Vertex instances with an empty _id,
send them to be saved.
if they have been used, use the reference variable in the create edge
logic
"""
query.save(entity)
if not entity[GIZMO_ID] and self.unique and in_v_id and out_v_id:
from .statement import MapperUniqueEdge
if not self.save_statements:
self.save_statements = []
if MapperUniqueEdge not in self.save_statements:
self.save_statements.append(MapperUniqueEdge)
if self.save_statements and len(self.save_statements):
statement_query = self._build_save_statements(entity, query,
out_v_id=out_v_id, in_v_id=in_v_id,
label=entity[GIZMO_LABEL[0]], direction=self.unique)
return self.enqueue(statement_query, False)
else:
return self.enqueue(query, bind_return)
def delete(self, entity, lookup=True, callback=None):
query = Query(self.mapper)
if not isinstance(callback, (list, tuple)) and callback:
callback = [callback]
else:
callback = []
query.delete(entity)
callback.insert(0, self.on_delete)
self._enqueue_callback(entity, callback)
return self.enqueue(query, False)
def create(self, data=None, entity=None, data_type='python'):
"""
Method used to create a new entity based on the data that is passed in.
        If the kwarg entity is passed in, it will be used to create the
        entity; else, if utils.GIZMO_ENTITY is in data, that will be used;
        finally, entity.GenericVertex or entity.GenericEdge will be used to
        construct the entity
"""
check = True
if data is None:
data = {}
if entity is not None:
try:
label = data.get(GIZMO_LABEL[0], None)
entity = entity(data=data, data_type=data_type)
check = False
for f, r in entity._relationships.items():
r._mapper = self.mapper
r._entity = entity
except Exception as e:
pass
if check:
try:
if GIZMO_ENTITY in data:
name = data[GIZMO_ENTITY]
if isinstance(name, (list, tuple)):
name = name[0]['value']
entity = ENTITY_MAP[name](data=data, data_type=data_type)
for f, r in entity._relationships.items():
r._mapper = self.mapper
r._entity = entity
else:
raise
except Exception as e:
# all else fails create a GenericVertex unless _type is 'edge'
if data.get(GIZMO_TYPE, None) == 'edge':
entity = GenericEdge(data=data, data_type=data_type)
else:
entity = GenericVertex(data=data, data_type=data_type)
if GIZMO_ID in data:
entity[GIZMO_ID] = data[GIZMO_ID]
return entity
class Query:
def __init__(self, mapper):
self.mapper = mapper
self.gremlin = Gremlin(self.mapper.gremlin.gv)
self.queries = []
self.fields = []
self.reset()
def reset(self):
self.fields = []
return self
def _add_query(self, script, params=None, entity=None):
if params is None:
params = {}
self.queries.append({
'script': script,
'params': params,
'entity': entity,
})
return self
def _add_gremlin_query(self, entity=None):
script = str(self.gremlin)
params = self.gremlin.bound_params
self._add_query(script, params, entity)
return self.reset()
def _field_changes(self, gremlin, entity, ignore=None):
ignore = ignore or []
entity_name = str(entity)
entity_alias = '{}_alias'.format(entity_name)
entity_alias = next_param(entity_alias, entity_alias)
def add_field(field, data):
values = data.get('values', data.get('value', None))
if not isinstance(values, (list, tuple,)):
values = [values, ]
for i, value in enumerate(values):
name = '{}_{}_{}'.format(entity_name, field, i)
prop = "'{}'".format(field)
gremlin.property(prop, Param(name, value))
def add_property(field, value, properties=None, ignore=None):
ignore = ignore or []
if field.startswith('T.'):
val_param = next_param('{}_{}'.format(entity_name,
field), value)
gremlin.unbound('property', field, val_param)
return
field_name = '{}_{}'.format(entity_name, field)
prop = next_param(field_name, field)
value_name = '{}_value'.format(field_name)
value_param = next_param(value_name, value)
params = [prop, value_param]
if properties:
for key, val in properties.items():
prop_key = next_param('{}_{}'.format(prop.name,
key), key)
prop_val = next_param('{}_{}_val'.format(prop.name,
key), val)
params += [prop_key, prop_val]
gremlin.property(*params)
for field, changes in entity.changes.items():
if field in ignore:
continue
if changes['immutable']:
for val in changes['values']['values']:
add_property(field, val)
elif changes['deleted']:
prop = next_param('{}_{}'.format(entity_name, field), field)
remove = Gremlin('').it.get().func('remove')
gremlin.AS(entity_alias).properties(prop)
gremlin.sideEffect.close(remove)
gremlin.select(entity_alias)
else:
for action, value in changes['values'].items():
if action == 'added':
for val in value:
add_property(field, val['value'],
val['properties'])
def _add_vertex(self, entity, set_variable=None):
entity.data_type = 'graph'
gremlin = self.gremlin
label = None
ignore = ['T.label', 'label']
if entity['label']:
label = next_entity_param(entity, 'label', entity['label'])
gremlin.unbound('addV', 'T.label', label)
else:
gremlin.addV()
if set_variable:
gremlin.set_ret_variable(set_variable, ignore=[GIZMO_ID, ])
self._field_changes(gremlin, entity, ignore=ignore)
gremlin.func('next')
entity.data_type = 'python'
return self._add_gremlin_query(entity)
def _update_entity(self, entity, set_variable=None):
entity.data_type = 'graph'
gremlin = self.gremlin
entity_type, entity_id = entity.get_rep()
if not entity_id:
error = (('The entity {} scheduled to be updated does not have'
' an id').format(str(entity)))
logger.exception(error)
raise Exception()
_id = next_param('{}_ID'.format(str(entity)), entity_id)
ignore = [GIZMO_ID, GIZMO_LABEL[1]]
alias = '{}_{}_updating'.format(entity_type, entity_id)
alias = next_param(alias, alias)
getattr(gremlin, entity_type.upper())(_id)
gremlin.AS(alias)
self._field_changes(gremlin, entity, ignore=ignore)
gremlin.select(alias).next()
entity.data_type = 'python'
return self._add_gremlin_query(entity)
def _add_edge(self, entity, set_variable=None):
if not entity[GIZMO_LABEL[0]]:
msg = 'A label is required in order to create an edge'
logger.exception(msg)
raise AstronomerQueryException(msg)
def get_or_create_ends():
"""this function will determine if the edge has both ends. If
either end is an _Entity object it will get the reference to
the object or save it and create a reference. Either the entity's
id or reference will be used when saving the edge.
"""
out_v = entity.out_v
out_v_ref = None
in_v = entity.in_v
in_v_ref = None
if out_v is None or in_v is None:
error = ('Both out and in vertices must be set before'
' saving the edge')
logger.exception(error)
raise AstronomerQueryException(error)
if isinstance(out_v, _Entity):
if out_v[GIZMO_ID]:
out_v = out_v[GIZMO_ID]
else:
out_v_ref = self.mapper.get_entity_variable(out_v)
if not out_v_ref:
self.mapper.save(out_v)
out_v_ref = self.mapper.get_entity_variable(out_v)
if out_v_ref:
out_v = out_v_ref
if isinstance(in_v, _Entity):
if in_v[GIZMO_ID]:
in_v = in_v[GIZMO_ID]
else:
in_v_ref = self.mapper.get_entity_variable(in_v)
if not in_v_ref:
self.mapper.save(in_v)
in_v_ref = self.mapper.get_entity_variable(in_v)
if in_v_ref:
in_v = in_v_ref
return {
'out': {
'is_ref': out_v_ref,
'v': out_v,
},
'in': {
'is_ref': in_v_ref,
'v': in_v,
},
}
ends = get_or_create_ends()
name = str(entity)
gremlin = self.gremlin
g = Gremlin(gremlin.gv)
label = next_param('{}_label'.format(name), entity[GIZMO_LABEL[0]])
"""
g.V($OUT_ID).next().addEdge($LABEL, g.V($IN_ID).next()).property(....)
"""
in_v = ends['in']
out_v = ends['out']
if in_v['is_ref']:
g.unbound('V', in_v['v'])
else:
in_id = next_param('{}_in'.format(name), in_v['v'])
g.V(in_id)
g.func('next')
if out_v['is_ref']:
gremlin.unbound('V', out_v['v'])
else:
out_id = next_param('{}_out'.format(name), out_v['v'])
gremlin.V(out_id)
ignore = [GIZMO_LABEL[0], GIZMO_LABEL[1], GIZMO_TYPE]
edge_args = [label, g]
        # edge properties only get one value and no meta-properties
for field, changes in entity.changes.items():
if field in ignore:
continue
try:
if changes['immutable']:
value = changes['values']['values'][-1]
else:
value = changes['values'][-1]
except:
continue
field_param = next_param('{}_{}'.format(name, field), field)
field_value = next_param('{}_value'.format(field_param.name),
value)
edge_args += [field_param, field_value]
gremlin.func('next').addEdge(*edge_args)
return self._add_gremlin_query(entity)
def save(self, entity, set_variable=None):
if not entity[GIZMO_TYPE]:
msg = 'The entity does not have a type defined'
logger.exception(msg)
raise AstronomerQueryException(msg)
entity_type = entity[GIZMO_TYPE]
if not entity[GIZMO_ID]:
if entity_type == 'vertex':
self._add_vertex(entity, set_variable)
else:
self._add_edge(entity, set_variable)
else:
self._update_entity(entity, set_variable)
def delete(self, entity):
entity_type, _id = entity.get_rep()
if not _id:
msg = ('The entity does not have an id defined and'
' connot be deleted')
logger.exception(msg)
raise AstronomerQueryException(msg)
if not entity[GIZMO_TYPE]:
msg = 'The entity does not have a type defined'
logger.exception(msg)
raise AstronomerQueryException(msg)
delete = next_param('{}_ID'.format(str(entity)), _id)
getattr(self.gremlin, entity_type)(delete).next().func('remove')
return self._add_gremlin_query(entity)
class Collection(object):
def __init__(self, mapper, response=None):
self.mapper = mapper
if not response:
response = lambda: None
response.data = []
self.response = response
self._entities = {}
self._index = 0
self._data_type = 'python'
def first(self):
return self[0]
def last(self):
return self[-1]
def get_data(self):
return [x for x in self.response.data]
data = property(get_data)
@property
def entity_data(self):
"""
this will get the instance data instead of the
raw data. This will use the mapper to create each
        entity, which may have a custom data attribute.
"""
return [x.data for x in self]
@property
async def mapper_data(self):
"""this will get the data from the entity's mapper if it has a
custom mapper
"""
data = []
if len(self):
mapper = self.mapper.get_mapper(self[0])
for entity in self:
data.append(await mapper.data(entity))
return data
def __len__(self):
return len(self.response.data)
def __getitem__(self, key):
entity = self._entities.get(key, None)
if entity is None:
try:
data = self.response[key]
if data is not None:
entity = self.mapper.create(data=data,
data_type=self._data_type)
entity.dirty = False
self._entities[key] = entity
else:
raise StopIteration()
except Exception as e:
raise StopIteration()
return entity
def __setitem__(self, key, value):
self._entities[key] = value
def __delitem__(self, key):
if key in self._entities:
del self._entities[key]
def __iter__(self):
return self
def __next__(self):
entity = self[self._index]
self._index += 1
return entity
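# --- hedged usage sketch (added; not part of the original module) ------------
# Minimal illustration of how the pieces above appear intended to fit together:
# create() builds an entity from raw data (falling back to GenericVertex), and
# Collection lazily wraps a query response, building entities through the
# mapper on first access. `mapper` and `response` are hypothetical,
# already-configured objects, and 'name' is a hypothetical field.
def _example_collection_usage(mapper, response):
    vertex = mapper.create(data={'name': 'ada'})    # no GIZMO_ENTITY -> GenericVertex
    collection = Collection(mapper, response)
    first = collection.first()                      # same as collection[0], then cached
    return vertex, first, [e.data for e in collection]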
| 29.896615
| 79
| 0.565903
| 31,116
| 0.952229
| 0
| 0
| 616
| 0.018851
| 3,300
| 0.100988
| 4,960
| 0.151789
|
b11de849f44d264e334f554dabd0e3fd62c6c1ae
| 849
|
py
|
Python
|
utils.py
|
Nicolas-Lefort/conv_neural_net_time_serie
|
3075d3f97cdd45f91612f8300af2b4af7f232c42
|
[
"MIT"
] | null | null | null |
utils.py
|
Nicolas-Lefort/conv_neural_net_time_serie
|
3075d3f97cdd45f91612f8300af2b4af7f232c42
|
[
"MIT"
] | null | null | null |
utils.py
|
Nicolas-Lefort/conv_neural_net_time_serie
|
3075d3f97cdd45f91612f8300af2b4af7f232c42
|
[
"MIT"
] | null | null | null |
import pandas_ta as ta
def clean(df, df_signal):
df = df.join(df_signal)
df.replace("", "NaN", inplace=True)
df.dropna(inplace=True)
df = df.reset_index(drop=True)
df_signal = df['signal']
df.drop(columns=['volume', 'high', 'low', 'open', 'signal', "close_timestamp", "close_date"], inplace=True)
return df, df_signal
def augment(df):
df.ta.rsi(close=df["close"], length=14, append=True)
df.ta.willr(close=df["close"], low=df["low"], high=df["high"], length=14, append=True)
df.ta.macd(close=df["close"], append=True)
return df
def split(df, train_size=0.9, val_size=0.05):
train_df = df[0:int(len(df) * train_size)]
val_df = df[int(len(df) * train_size):int(len(df) * (train_size + val_size))]
test_df = df[int(len(df) * (train_size + val_size)):]
return train_df, val_df, test_df
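# --- hedged usage sketch (added; not part of the original file) --------------
# Rough illustration of how these helpers seem meant to be chained on an OHLCV
# frame plus a separate label column; the column names match the ones consumed
# above, but the random data below is purely hypothetical. The __main__ guard
# keeps the sketch from running when the module is imported.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    n = 60
    df = pd.DataFrame({
        "open": np.random.rand(n), "high": np.random.rand(n) + 1.0,
        "low": np.random.rand(n), "close": np.random.rand(n).cumsum() + 10,
        "volume": np.random.rand(n),
        "close_timestamp": np.arange(n), "close_date": np.arange(n),
    })
    df_signal = pd.Series(np.random.randint(0, 2, n), name="signal")
    df = augment(df)                        # appends RSI_14, WILLR_14 and MACD_* columns
    df, df_signal = clean(df, df_signal)    # join labels, drop NaN rows and raw columns
    train_df, val_df, test_df = split(df)   # 90 / 5 / 5 chronological split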
| 31.444444
| 111
| 0.64311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 109
| 0.128386
|
b11f6265d46fdca364a4dd3bf4dcf5a12d2f410f
| 2,871
|
py
|
Python
|
praetorian_ssh_proxy/hanlers/menu_handler.py
|
Praetorian-Defence/praetorian-ssh-proxy
|
068141bf0cee9fcf10434fab2dc5c16cfdd35f5a
|
[
"MIT"
] | null | null | null |
praetorian_ssh_proxy/hanlers/menu_handler.py
|
Praetorian-Defence/praetorian-ssh-proxy
|
068141bf0cee9fcf10434fab2dc5c16cfdd35f5a
|
[
"MIT"
] | null | null | null |
praetorian_ssh_proxy/hanlers/menu_handler.py
|
Praetorian-Defence/praetorian-ssh-proxy
|
068141bf0cee9fcf10434fab2dc5c16cfdd35f5a
|
[
"MIT"
] | null | null | null |
import sys
class MenuHandler(object):
def __init__(self, client, client_channel, remote_checker):
self._client = client
self._client_channel = client_channel
self._buffer = ''
self._remote_checker = remote_checker
@staticmethod
def create_from_channel(client, client_channel, remote_checker) -> 'MenuHandler':
return MenuHandler(client, client_channel, remote_checker)
def serve_remote_menu(self):
if not self._remote_checker.is_remote_set:
self._client_channel.send('-------------------------------------------------\r\n')
self._client_channel.send('| Welcome to Praetorian SSH proxy |\r\n')
self._client_channel.send('-------------------------------------------------\r\n')
for counter, remote in enumerate(self._remote_checker.remote, 1):
self._client_channel.send('| ({:10}) {:30} {} |\r\n'.format(remote.project['name'], remote.name, counter))
self._client_channel.send('-------------------------------------------------\r\n')
self._client_channel.send('| {:43} {} |\r\n'.format('exit', len(self._remote_checker.remote) + 1))
self._client_channel.send('-------------------------------------------------\r\n')
self._client_channel.send('Choose your remote: ')
while True:
data = self._client_channel.recv(1024)
if not data:
continue
# BACKSPACE
if data == b'\x7f':
self._buffer = self._buffer[:-1]
self._client_channel.send('\b \b')
# EXIT (CTRL+C)
elif data == b'\x03':
self._client_channel.send(f'\n\rExiting ...\r\n')
self._client.close()
sys.exit(0)
# ENTER
elif data == b'\r':
if self._buffer in map(str, range(1, len(self._remote_checker.remote) + 1)):
self._remote_checker.set_remote(self._remote_checker.remote[int(self._buffer) - 1])
self._client_channel.send(f'\n\rChosen remote {self._remote_checker.remote.name}.\r\n')
return
elif self._buffer == str(len(self._remote_checker.remote) + 1):
self._client_channel.send(f'\n\rExiting ...\r\n')
self._client.close()
sys.exit(0)
else:
self._client_channel.send('\n\rWrong option.\r\n')
self._client_channel.send('Choose your option: ')
self._buffer = ''
else:
self._client_channel.send(data)
self._buffer += data.decode()
| 45.571429
| 122
| 0.492163
| 2,857
| 0.995124
| 0
| 0
| 166
| 0.05782
| 0
| 0
| 578
| 0.201324
|
b122b1664a2960a396de4fbb595bf3821559d96f
| 563
|
py
|
Python
|
orderedtable/urls.py
|
Shivam2k16/DjangoOrderedTable
|
da133a23a6659ce5467b8161edcf6db35f1c0b76
|
[
"MIT"
] | 2
|
2018-04-15T17:03:59.000Z
|
2019-03-23T04:45:00.000Z
|
orderedtable/urls.py
|
Shivam2k16/DjangoOrderedTable
|
da133a23a6659ce5467b8161edcf6db35f1c0b76
|
[
"MIT"
] | null | null | null |
orderedtable/urls.py
|
Shivam2k16/DjangoOrderedTable
|
da133a23a6659ce5467b8161edcf6db35f1c0b76
|
[
"MIT"
] | 1
|
2018-04-15T16:54:07.000Z
|
2018-04-15T16:54:07.000Z
|
from django.conf.urls import include, url
from django.contrib import admin
import orderedtable
from orderedtable import views
app_name="orderedtable"
urlpatterns = [
url(r'^$', views.home,name="home"),
url(r'^import-json/$', views.import_json,name="import_json"),
url(r'^project-list/$', views.project_list,name="project_list"),
url(r'^empty-list/$', views.delete_table,name="delete_table"),
url(r'^multiple-sorting/$', views.multiple_sorting,name="multiple_sorting"),
url(r'^sort-by = (?P<pk>[\w-]+)/$', views.sorted,name="sorted"),
]
| 33.117647
| 80
| 0.698046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 195
| 0.346359
|
b122c90d808909aac59db15608fa9cd2e18fc259
| 363
|
py
|
Python
|
lang/Python/set-consolidation-1.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | 1
|
2018-11-09T22:08:38.000Z
|
2018-11-09T22:08:38.000Z
|
lang/Python/set-consolidation-1.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/set-consolidation-1.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | 1
|
2018-11-09T22:08:40.000Z
|
2018-11-09T22:08:40.000Z
|
def consolidate(sets):
setlist = [s for s in sets if s]
for i, s1 in enumerate(setlist):
if s1:
for s2 in setlist[i+1:]:
intersection = s1.intersection(s2)
if intersection:
s2.update(s1)
s1.clear()
s1 = s2
return [s for s in setlist if s]
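# --- hedged example (added; not part of the original task entry) -------------
# Quick check of the behaviour: sets sharing an element are merged, disjoint
# sets are left alone.
if __name__ == "__main__":
    print(consolidate([{1, 2}, {3, 4}, {5, 6}, {2, 3}]))
    # -> [{5, 6}, {1, 2, 3, 4}]  (note: the input sets are mutated in place)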
| 30.25
| 50
| 0.465565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b123669e9c0103e63c00a8b4dcdbc0e0596f1442
| 2,242
|
py
|
Python
|
call_google_translate.py
|
dadap/klingon-assistant-data
|
5371f8ae6e3669f48a83087a4937af0dee8d23d1
|
[
"Apache-2.0"
] | null | null | null |
call_google_translate.py
|
dadap/klingon-assistant-data
|
5371f8ae6e3669f48a83087a4937af0dee8d23d1
|
[
"Apache-2.0"
] | 5
|
2018-07-11T09:17:19.000Z
|
2018-10-14T10:33:51.000Z
|
call_google_translate.py
|
dadap/klingon-assistant-data
|
5371f8ae6e3669f48a83087a4937af0dee8d23d1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Calls Google Translate to produce translations.
# To use, set "language" and "dest_language" below. (They are normally the same,
# unless Google uses a different language code than we do.) Then fill in
# the definition_[language] fields with "TRANSLATE" or
# "TRANSLATE: [replacement definition]". The latter is to allow for a better
# translation when the original definition is ambiguous, e.g., if the definition
# is "launcher", a better translation might result from
# "TRANSLATE: rocket launcher".
from googletrans import Translator
import fileinput
import re
import time
# TODO: Refactor this and also use in renumber.py.
# Ignore mem-00-header.xml and mem-28-footer.xml because they don't contain entries.
filenames = ['mem-01-b.xml', 'mem-02-ch.xml', 'mem-03-D.xml', 'mem-04-gh.xml', 'mem-05-H.xml', 'mem-06-j.xml', 'mem-07-l.xml', 'mem-08-m.xml', 'mem-09-n.xml', 'mem-10-ng.xml', 'mem-11-p.xml', 'mem-12-q.xml', 'mem-13-Q.xml', 'mem-14-r.xml', 'mem-15-S.xml', 'mem-16-t.xml', 'mem-17-tlh.xml', 'mem-18-v.xml', 'mem-19-w.xml', 'mem-20-y.xml', 'mem-21-a.xml', 'mem-22-e.xml', 'mem-23-I.xml', 'mem-24-o.xml', 'mem-25-u.xml', 'mem-26-suffixes.xml', 'mem-27-extra.xml']
translator = Translator()
language = "zh-HK"
dest_language = "zh-TW"
limit = 250
for filename in filenames:
with fileinput.FileInput(filename, inplace=True) as file:
definition = ""
for line in file:
definition_match = re.search(r"definition\">?(.+)<", line)
definition_translation_match = re.search(r"definition_(.+)\">TRANSLATE(?:: (.*))?<", line)
if (definition_match):
definition = definition_match.group(1)
if (limit > 0 and \
definition != "" and \
definition_translation_match and \
language.replace('-','_') == definition_translation_match.group(1)):
if definition_translation_match.group(2):
definition = definition_translation_match.group(2)
translation = translator.translate(definition, src='en', dest=dest_language)
line = re.sub(r">(.*)<", ">%s [AUTOTRANSLATED]<" % translation.text, line)
# Rate-limit calls to Google Translate.
limit = limit - 1
time.sleep(0.1)
print(line, end='')
| 44.84
| 460
| 0.666369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,212
| 0.540589
|
b123989fc301ccc896657660002120b9f5336e64
| 6,451
|
py
|
Python
|
xenavalkyrie/xena_object.py
|
xenadevel/PyXenaValkyrie
|
9bb1d0b058c45dc94a778fd674a679b53f03a34c
|
[
"Apache-2.0"
] | 4
|
2018-07-13T08:09:38.000Z
|
2022-02-09T01:36:13.000Z
|
xenavalkyrie/xena_object.py
|
xenadevel/PyXenaValkyrie
|
9bb1d0b058c45dc94a778fd674a679b53f03a34c
|
[
"Apache-2.0"
] | 1
|
2019-07-31T04:56:43.000Z
|
2019-08-01T07:11:21.000Z
|
xenavalkyrie/xena_object.py
|
xenadevel/PyXenaValkyrie
|
9bb1d0b058c45dc94a778fd674a679b53f03a34c
|
[
"Apache-2.0"
] | 3
|
2019-05-30T23:47:02.000Z
|
2022-02-04T12:32:14.000Z
|
"""
Base classes and utilities for all Xena Manager (Xena) objects.
:author: yoram@ignissoft.com
"""
import time
import re
import logging
from collections import OrderedDict
from trafficgenerator.tgn_utils import TgnError
from trafficgenerator.tgn_object import TgnObject, TgnObjectsDict
logger = logging.getLogger(__name__)
class XenaAttributeError(TgnError):
pass
class XenaObjectsDict(TgnObjectsDict):
def __getitem__(self, key):
""" Override default implementation and allow access with index as well. """
if TgnObjectsDict.__getitem__(self, key) is not None:
return TgnObjectsDict.__getitem__(self, key)
else:
for obj in self:
if obj.index == key:
return OrderedDict.__getitem__(self, obj)
class XenaObject(TgnObject):
""" Base class for all Xena objects. """
def __init__(self, **data):
if data['parent']:
self.session = data['parent'].session
self.chassis = data['parent'].chassis
if 'objRef' not in data:
data['objRef'] = '{}/{}/{}'.format(data['parent'].ref, data['objType'], data['index'].split('/')[-1])
if 'name' not in data:
data['name'] = data['index']
super(XenaObject, self).__init__(**data)
def obj_index(self):
"""
:return: object index.
"""
return str(self._data['index'])
index = property(obj_index)
def obj_id(self):
"""
:return: object ID.
"""
return int(self.index.split('/')[-1]) if self.index else None
id = property(obj_id)
def _create(self):
self.api.create(self)
def reserve(self, force=False):
""" Reserve object.
XenaManager-2G -> [Relinquish]/Reserve Chassis/Module/Port.
:param force: True - take forcefully, False - fail if port is reserved by other user
"""
reservation = self.get_attribute(self.cli_prefix + '_reservation')
if reservation == 'RESERVED_BY_YOU':
return
elif reservation == 'RESERVED_BY_OTHER' and not force:
reservedby = self.get_attribute(self.cli_prefix + '_reservedby')
raise TgnError('Resource {} reserved by {}'.format(self, reservedby))
self.relinquish()
self.send_command(self.cli_prefix + '_reservation', 'reserve')
def relinquish(self):
""" Relinquish object.
XenaManager-2G -> Relinquish Chassis/Module/Port.
"""
if self.get_attribute(self.cli_prefix + '_reservation') != 'RELEASED':
self.send_command(self.cli_prefix + '_reservation relinquish')
def release(self):
""" Release object.
XenaManager-2G -> Release Chassis/Module/Port.
"""
if self.get_attribute(self.cli_prefix + '_reservation') == 'RESERVED_BY_YOU':
self.send_command(self.cli_prefix + '_reservation release')
def send_command(self, command, *arguments):
""" Send command with no output.
:param command: command to send.
:param arguments: list of command arguments.
"""
self.api.send_command(self, command, *arguments)
def send_command_return(self, command, *arguments):
""" Send command and wait for single line output. """
return self.api.send_command_return(self, command, *arguments)
def send_command_return_multilines(self, command, *arguments):
""" Send command and wait for multiple lines output. """
return self.api.send_command_return_multilines(self, command, *arguments)
def set_attributes(self, **attributes):
""" Sets list of attributes.
:param attributes: dictionary of {attribute: value} to set.
"""
try:
self.api.set_attributes(self, **attributes)
except Exception as e:
if '<notwritable>' in repr(e).lower() or '<badvalue>' in repr(e).lower():
raise XenaAttributeError(e)
else:
raise e
def get_attribute(self, attribute):
""" Returns single object attribute.
:param attribute: requested attribute to query.
:returns: returned value.
:rtype: str
"""
try:
return self.api.get_attribute(self, attribute)
except Exception as e:
if '#syntax error' in repr(e).lower() or 'keyerror' in repr(e).lower():
raise XenaAttributeError(e)
else:
raise e
def get_attributes(self):
""" Returns all object's attributes.
:returns: dictionary of <name, value> of all attributes.
:rtype: dict of (str, str)
"""
return self.api.get_attributes(self)
def wait_for_states(self, attribute, timeout=40, *states):
for _ in range(timeout):
if self.get_attribute(attribute).lower() in [s.lower() for s in states]:
return
time.sleep(1)
raise TgnError('{} failed to reach state {}, state is {} after {} seconds'.
format(attribute, states, self.get_attribute(attribute), timeout))
def read_stat(self, captions, stat_name):
return dict(zip(captions, self.api.get_stats(self, stat_name)))
#
# Private methods.
#
def _build_index_command(self, command, *arguments):
return ('{} {}' + len(arguments) * ' {}').format(self.index, command, *arguments)
def _extract_return(self, command, index_command_value):
        return re.sub(r'{}\s*{}\s*'.format(self.index, command.upper()), '', index_command_value)
def _get_index_len(self):
return len(self.index.split())
def _get_command_len(self):
return len(self.index.split())
class XenaObject21(XenaObject):
""" Base class for all Xena objects with index_len = 2 and command_len = 1. """
#
# Private methods.
#
def _build_index_command(self, command, *arguments):
module, port, sid = self.index.split('/')
return ('{}/{} {} [{}]' + len(arguments) * ' {}').format(module, port, command, sid, *arguments)
def _extract_return(self, command, index_command_value):
module, port, sid = self.index.split('/')
        return re.sub(r'{}/{}\s*{}\s*\[{}\]\s*'.format(module, port, command.upper(), sid), '', index_command_value)
def _get_index_len(self):
return 2
def _get_command_len(self):
return 1
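# --- hedged note (added; not part of the original module) --------------------
# Illustration of the two command layouts built above, for a hypothetical
# port-level index "0/1" and stream-level index "0/1/2":
#   XenaObject._build_index_command('p_comment', '"demo"')    -> '0/1 p_comment "demo"'
#   XenaObject21._build_index_command('ps_comment', '"demo"') -> '0/1 ps_comment [2] "demo"'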
| 32.746193
| 115
| 0.611843
| 6,110
| 0.94714
| 0
| 0
| 0
| 0
| 0
| 0
| 1,975
| 0.306154
|
b124d44c02271ffc2f5af0ccc84d1e1a14ca372b
| 2,051
|
py
|
Python
|
test/ryu/vsw-602_mp_port_desc.py
|
iMasaruOki/lagopus
|
69c303b65acbc2d4661691c190c42946654de1b3
|
[
"Apache-2.0"
] | 281
|
2015-01-06T13:36:14.000Z
|
2022-03-14T03:29:46.000Z
|
test/ryu/vsw-602_mp_port_desc.py
|
iMasaruOki/lagopus
|
69c303b65acbc2d4661691c190c42946654de1b3
|
[
"Apache-2.0"
] | 115
|
2015-01-06T11:09:21.000Z
|
2020-11-26T11:44:23.000Z
|
test/ryu/vsw-602_mp_port_desc.py
|
lagopus/lagopus
|
69c303b65acbc2d4661691c190c42946654de1b3
|
[
"Apache-2.0"
] | 108
|
2015-01-06T05:12:01.000Z
|
2022-01-02T03:28:50.000Z
|
from ryu.base.app_manager import RyuApp
from ryu.controller.ofp_event import EventOFPSwitchFeatures
from ryu.controller.ofp_event import EventOFPPortDescStatsReply
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto.ofproto_v1_2 import OFPG_ANY
from ryu.ofproto.ofproto_v1_3 import OFP_VERSION
from ryu.lib.mac import haddr_to_bin
class App(RyuApp):
OFP_VERSIONS = [OFP_VERSION]
def __init__(self, *args, **kwargs):
super(App, self).__init__(*args, **kwargs)
@set_ev_cls(EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
[self.install_sample(datapath, n) for n in [0]]
def create_meter_mod(self, datapath, command, flags_, meter_id, bands):
ofproto = datapath.ofproto
ofp_parser = datapath.ofproto_parser
meter_mod = ofp_parser.OFPMeterMod(datapath, command, flags_,
meter_id, bands)
return meter_mod
def install_sample(self, datapath, table_id):
parser = datapath.ofproto_parser
ofproto = datapath.ofproto
req = parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
@set_ev_cls(EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x curr=0x%08x advertised=0x%08x '
'supported=0x%08x peer=0x%08x curr_speed=%d '
'max_speed=%d' %
(p.port_no, p.hw_addr,
p.name, p.config,
p.state, p.curr, p.advertised,
p.supported, p.peer, p.curr_speed,
p.max_speed))
self.logger.info('OFPPortDescStatsReply received: %s', ports)
| 41.02
| 75
| 0.644564
| 1,603
| 0.78157
| 0
| 0
| 918
| 0.447587
| 0
| 0
| 186
| 0.090687
|
b12709adc431ec818c3f1dc683d016b6ef1c240b
| 508
|
py
|
Python
|
mamba/exceptions.py
|
bmintz/mamba-lang
|
f63e205dc4de5e8ba3308e2b47b1675a9b508e70
|
[
"MIT"
] | 20
|
2015-01-15T19:40:33.000Z
|
2021-09-22T15:26:27.000Z
|
mamba/exceptions.py
|
bmintz/mamba-lang
|
f63e205dc4de5e8ba3308e2b47b1675a9b508e70
|
[
"MIT"
] | 3
|
2015-03-25T21:53:48.000Z
|
2017-05-07T12:22:20.000Z
|
mamba/exceptions.py
|
bmintz/mamba-lang
|
f63e205dc4de5e8ba3308e2b47b1675a9b508e70
|
[
"MIT"
] | 11
|
2017-09-15T21:41:04.000Z
|
2021-09-22T15:15:58.000Z
|
class InterpreterException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class SymbolNotFound(InterpreterException):
pass
class UnexpectedCharacter(InterpreterException):
pass
class ParserSyntaxError(InterpreterException):
pass
class DuplicateSymbol(InterpreterException):
pass
class InterpreterRuntimeError(InterpreterException):
pass
class InvalidParamCount(InterpreterRuntimeError):
pass
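# --- hedged usage sketch (added; not part of the original module) ------------
# Every concrete error above derives from InterpreterException, so a caller can
# trap the whole family with a single except clause.
def _example_catch_all():
    try:
        raise SymbolNotFound("undefined symbol 'x'")
    except InterpreterException as err:
        return err.message  # -> "undefined symbol 'x'"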
| 16.933333
| 52
| 0.761811
| 490
| 0.964567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b128e2f322061ebf320f3ab6964b531facfd7042
| 21,812
|
py
|
Python
|
test/phytozome_test.py
|
samseaver/GenomeFileUtil
|
b17afb465569a34a12844283918ec654911f96cf
|
[
"MIT"
] | null | null | null |
test/phytozome_test.py
|
samseaver/GenomeFileUtil
|
b17afb465569a34a12844283918ec654911f96cf
|
[
"MIT"
] | null | null | null |
test/phytozome_test.py
|
samseaver/GenomeFileUtil
|
b17afb465569a34a12844283918ec654911f96cf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
import os # noqa: F401
import json # noqa: F401
import time
import shutil
import re
import sys
import datetime
import collections
#import simplejson
from os import environ
try:
from ConfigParser import ConfigParser # py2
except:
from configparser import ConfigParser # py3
from pprint import pprint # noqa: F401
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil
from GenomeFileUtil.core.GenomeInterface import GenomeInterface
from GenomeFileUtil.GenomeFileUtilImpl import SDKConfig
from GenomeFileUtil.GenomeFileUtilServer import MethodContext
from GenomeFileUtil.core.FastaGFFToGenome import FastaGFFToGenome
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.WorkspaceClient import Workspace as workspaceService
class FastaGFFToGenomeUploadTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('setting up class')
token = environ.get('KB_AUTH_TOKEN', None)
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'provenance': [
{'service': 'GenomeFileUtil',
'method': 'please_never_use_it_in_production',
'method_params': []}
],
'authenticated': 1})
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('GenomeFileUtil'):
cls.cfg[nameval[0]] = nameval[1]
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL, token=token)
cls.serviceImpl = GenomeFileUtil(cls.cfg)
cls.dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'], token=token)
cls.scratch = cls.cfg['scratch']
cls.shockURL = cls.cfg['shock-url']
cls.gfu_cfg = SDKConfig(cls.cfg)
cls.wsName = "Phytozome_Genomes"
cls.prepare_data()
# @classmethod
# def tearDownClass(cls):
# if hasattr(cls, 'wsName'):
# cls.wsClient.delete_workspace({'workspace': cls.wsName})
# print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_GenomeFileUtil_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
@classmethod
def prepare_data(cls):
cls.dtn_root = "/kb/module/genomes/Phytozome/"
def compile_ontology(self, Annotation_File, Identifier_Column):
annotations = dict()
#hardcoded header for now
annotation_header=[]
ontology_column=9
if not os.path.isfile(Annotation_File):
return annotations
with open(Annotation_File) as f:
for line in f:
line=line.strip()
if(line.startswith("#pacId")):
#Store header
annotation_header=line.split('\t')
continue
annotation_items=line.split('\t')
#Skip empty lines
if(len(annotation_items) <= 1 or len(annotation_items)<=ontology_column):
continue
#Skip empty ontology
if(annotation_items[ontology_column]==""):
continue
annotation_dict=dict()
for entry in annotation_items[ontology_column].split(","):
if(entry == ''):
continue
entry=entry.replace("GO:GO:","GO:")
annotation_dict[entry]=1
annotations[annotation_items[Identifier_Column]]=annotation_dict
return annotations
def compile_functions(self, Functions_File, Identifier_Column=0,Functions_Column=1,EC_Column=-1):
functions = dict()
if not os.path.isfile(Functions_File):
return functions
with open(Functions_File) as f:
for line in f:
line=line.strip()
function_items=line.split('\t')
if(len(function_items) <= Functions_Column):
Functions_Column -= 1
if(function_items[Functions_Column] == ""):
continue
Function = function_items[Functions_Column]
if(EC_Column != -1):
Function+=" (EC "+function_items[EC_Column]+")"
if(function_items[Identifier_Column] not in functions):
functions[function_items[Identifier_Column]]=dict()
functions[function_items[Identifier_Column]][Function]=1
return functions
def compile_synonyms(self, Synonyms_File, Identifier_Column=0, Synonym_Column=1):
synonyms=dict()
if not os.path.isfile(Synonyms_File):
return synonyms
with open(Synonyms_File) as f:
for line in f:
line=line.strip()
synonyms_items=line.split('\t')
if(len(synonyms_items) <= Synonym_Column or synonyms_items[Synonym_Column] == ""):
continue
Synonym = synonyms_items[Synonym_Column]
if(synonyms_items[Identifier_Column] not in synonyms):
synonyms[synonyms_items[Identifier_Column]]=dict()
synonyms[synonyms_items[Identifier_Column]][Synonym]=1
return synonyms
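    # (Added note) compile_ontology, compile_functions and compile_synonyms all
    # return the same shape: a dict keyed by feature identifier whose values
    # are dicts of terms, e.g. {"PAC:123": {"GO:0005575": 1}} for a
    # hypothetical identifier and GO term.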
def test_phytozome_to_genome(self):
#Read Species Names
SN_File = os.path.join('data','Phytozome_Names.txt')
Species_Names_Dict={}
with open(SN_File) as f:
for line in f:
line=line.strip()
array = line.split('\t')
Species_Names_Dict[array[0]]=array[1]
GM_File = os.path.join('data','Accepted_Phytozome_Versions_GeneModels.txt')
Species_Dict=dict()
with open(GM_File) as f:
for line in f:
line=line.strip('\r\n')
(phytozome_release,species,phytozome_identifier,genemodel_version,tax_id)=line.split('\t')
if(species not in Species_Dict):
Species_Dict[species]=dict()
Species_Dict[species][genemodel_version]={'release':phytozome_release,
'identifier':phytozome_identifier,
'tax_id':tax_id}
Species_Versions_to_Skip=dict()
with open(os.path.join('data','Phytozome_Upload_Summary.txt')) as f:
for line in f:
line=line.strip()
array = line.split('\t')
Species_Version=array[0]
Species_Versions_to_Skip[Species_Version]=1
# Begin iterating through species to load them
summary_file=open(os.path.join('data','Phytozome_Upload_Summary.txt'),'w')
for species in sorted(Species_Dict):
for version in sorted(Species_Dict[species]):
Genome_Name = species+"_"+version
if(Genome_Name in Species_Versions_to_Skip):
continue
phytozome_version=Species_Dict[species][version]['release']
sp=species
#Special exception: Zmays PH207
if(species == "Zmays" and "PH207" in version):
sp=species+version.split('_')[0]
path = os.path.join(self.dtn_root,'Phytozome'+phytozome_version,sp)
if(os.path.isdir(path) is not True):
print("Path Not Found: "+path)
continue
has_assembly=False
for files in os.listdir(path):
if(files=="assembly"):
has_assembly=True
if(has_assembly is False):
for version_in_path in os.listdir(path):
if(version_in_path == version):
for files in os.listdir(os.path.join(path,version_in_path)):
if(files=="assembly"):
path=os.path.join(path,version_in_path)
has_assembly=True
#Assembly file retrieval, should only find one, if any
assembly_file = os.listdir(os.path.join(path,'assembly'))[0]
#Annotation file retrieval, at least one, maybe two or three
gff_file = ""
functions_file = ""
ontology_file = ""
names_file = ""
for ann_file in os.listdir(os.path.join(path,'annotation')):
if('gene' in ann_file):
gff_file = ann_file
elif('defline' in ann_file):
functions_file = ann_file
elif('info' in ann_file):
ontology_file = ann_file
elif('synonym' in ann_file):
names_file = ann_file
Fa_Path = os.path.join(path,'assembly',assembly_file)
Gff_Path = os.path.join(path,'annotation',gff_file)
phytozome_version = phytozome_version.split('_')[0]
tax_id = Species_Dict[species][version]['tax_id']
input_params = {'fasta_file': {'path': Fa_Path},
'gff_file': {'path': Gff_Path},
'genome_name': Genome_Name,
'workspace_name': self.getWsName(),
'source': 'JGI Phytozome '+phytozome_version,
'source_id' : version,
'type': 'Reference',
'scientific_name': Species_Names_Dict[species],
'taxon_id': tax_id,
'genetic_code':1}
result = self.getImpl().fasta_gff_to_genome(self.getContext(), input_params)[0]
# Load Genome Object in order to add additional data
Genome_Result = self.dfu.get_objects({'object_refs':[self.wsName+'/'+Genome_Name]})['data'][0]
Genome_Object = Genome_Result['data']
############################################################
# Functions
###########################################################
Functions_Path = os.path.join(path,'annotation',functions_file)
Functions = self.compile_functions(Functions_Path,0,2,1)
print("Functions compiled")
summary_file.write(Genome_Name+'\t'+functions_file+'\t'+str(len(list(Functions)))+'\n')
Found_Count={'features':0,'mrnas':0,'cdss':0}
if(len(list(Functions))>0):
for key in Found_Count:
print("Searching: "+key+"\t"+Genome_Object[key][0]['id'])
for entity in Genome_Object[key]:
if(entity['id'] in Functions):
entity["functions"]=sorted(Functions[entity['id']].keys())
Found_Count[key]+=1
# If no features were annotated, and mrnas were annotated
# use parent_gene to do transfer annotation
parent_feature_functions = collections.defaultdict(dict)
if(Found_Count['features']==0 and Found_Count['mrnas']!=0):
#Create lookup dict
parent_feature_index = dict([(f['id'], i) for i, f in enumerate(Genome_Object['features'])])
for mrna in Genome_Object['mrnas']:
if('functions' in mrna):
parent_feature = parent_feature_index[mrna['parent_gene']]
for function in mrna['functions']:
parent_feature_functions[parent_feature][function]=1
for index in parent_feature_functions:
Genome_Object['features'][index]['functions']=sorted(parent_feature_functions[index].keys())
Found_Count['features']+=1
summary_file.write(Genome_Name+'\t'+functions_file+'\t'+str(Found_Count)+'\n')
############################################################
# Ontology
###########################################################
#Parse Annotation File
Annotation_Path = os.path.join(path,'annotation',ontology_file)
Feature_Ontology = self.compile_ontology(Annotation_Path,1)
mRNA_Ontology = self.compile_ontology(Annotation_Path,2)
print("Ontology compiled")
summary_file.write(Genome_Name+'\t'+ontology_file+'\t'+str(len(Feature_Ontology.keys()))+'\n')
summary_file.write(Genome_Name+'\t'+ontology_file+'\t'+str(len(mRNA_Ontology.keys()))+'\n')
#Retrieve OntologyDictionary
Ontology_Dictionary = self.dfu.get_objects({'object_refs':["KBaseOntology/gene_ontology"]})['data'][0]['data']['term_hash']
time_string = str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S'))
Found_Count={'features':0,'mrnas':0,'cdss':0}
if(len(Feature_Ontology.keys())!=0 or len(mRNA_Ontology.keys())!=0):
for key in Found_Count:
print("Searching: "+key+"\t"+Genome_Object[key][0]['id'])
for entity in Genome_Object[key]:
if(entity['id'] in Feature_Ontology):
ontology_terms = dict()
ontology_terms["GO"]=dict()
for Ontology_Term in Feature_Ontology[entity["id"]].keys():
if(Ontology_Term not in Ontology_Dictionary):
continue
if(Ontology_Term not in ontology_terms["GO"]):
OntologyEvidence=[{"method":"GFF_Fasta_Genome_to_KBaseGenomes_Genome",
"timestamp":time_string,"method_version":"1.0"},
{"method":"Phytozome annotation_info.txt",
"timestamp":time_string,"method_version":"11"}]
OntologyData={"id":Ontology_Term,
"ontology_ref":"KBaseOntology/gene_ontology",
"term_name":Ontology_Dictionary[Ontology_Term]["name"],
"term_lineage":[],
"evidence":OntologyEvidence}
ontology_terms["GO"][Ontology_Term]=OntologyData
entity["ontology_terms"]=ontology_terms
Found_Count[key]+=1
if(entity['id'] in mRNA_Ontology):
ontology_terms = dict()
ontology_terms["GO"]=dict()
for Ontology_Term in mRNA_Ontology[entity["id"]].keys():
if(Ontology_Term not in Ontology_Dictionary):
continue
if(Ontology_Term not in ontology_terms["GO"]):
OntologyEvidence=[{"method":"GFF_Fasta_Genome_to_KBaseGenomes_Genome",
"timestamp":time_string,"method_version":"1.0"},
{"method":"Phytozome annotation_info.txt",
"timestamp":time_string,"method_version":"11"}]
OntologyData={"id":Ontology_Term,
"ontology_ref":"KBaseOntology/gene_ontology",
"term_name":Ontology_Dictionary[Ontology_Term]["name"],
"term_lineage":[],
"evidence":OntologyEvidence}
ontology_terms["GO"][Ontology_Term]=OntologyData
entity["ontology_terms"]=ontology_terms
Found_Count[key]+=1
summary_file.write(Genome_Name+'\t'+ontology_file+'\t'+str(Found_Count)+'\n')
############################################################
# Synonyms
###########################################################
Synonyms_Path = os.path.join(path,'annotation',names_file)
Synonyms = self.compile_synonyms(Synonyms_Path,0,1)
print("Synonyms compiled")
summary_file.write(Genome_Name+'\t'+names_file+'\t'+str(len(list(Synonyms)))+'\n')
Found_Count={'features':0,'mrnas':0,'cdss':0}
if(len(list(Synonyms))>0):
for key in Found_Count:
print("Searching: "+key+"\t"+Genome_Object[key][0]['id'])
for entity in Genome_Object[key]:
if(entity['id'] in Synonyms):
if("aliases" not in entity):
entity["aliases"]=list()
for synonym in sorted(Synonyms[entity['id']]):
entity["aliases"].append(["JGI",synonym])
Found_Count[key]+=1
# If no features were annotated, and mrnas were annotated
# use parent_gene to do transfer annotation
parent_feature_synonyms = collections.defaultdict(dict)
if(Found_Count['features']==0 and Found_Count['mrnas']!=0):
#Create lookup dict
parent_feature_index = dict([(f['id'], i) for i, f in enumerate(Genome_Object['features'])])
for mrna in Genome_Object['mrnas']:
if(mrna['id'] in Synonyms):
if("aliases" not in mrna):
mrna["aliases"]=list()
for synonym in sorted(Synonyms[mrna['id']]):
mrna["aliases"].append(["JGI",synonym])
if('aliases' in mrna):
parent_feature = parent_feature_index[mrna['parent_gene']]
for synonym in mrna['aliases']:
parent_feature_synonyms[parent_feature][synonym[1]]=1
for index in parent_feature_synonyms:
if("aliases" not in Genome_Object['features'][index]):
Genome_Object['features'][index]['aliases']=list()
for synonym in sorted(parent_feature_synonyms[index].keys()):
Genome_Object['features'][index]['aliases'].append(("JGI",synonym))
Found_Count['features']+=1
summary_file.write(Genome_Name+'\t'+names_file+'\t'+str(Found_Count)+'\n')
############################################################
# Saving
###########################################################
#Save Genome Object
#genome_string = simplejson.dumps(Genome_Object, sort_keys=True, indent=4, ensure_ascii=False)
#genome_file = open(self.scratch+'/'+Genome_Name+'.json', 'w+')
#genome_file.write(genome_string)
#genome_file.close()
#Retaining metadata
Genome_Meta = Genome_Result['info'][10]
Workspace_ID = Genome_Result['info'][6]
save_result = self.getImpl().save_one_genome(self.getContext(),
{'workspace' : self.wsName,
'name' : Genome_Name,
'data' : Genome_Object,
'upgrade' : 1})
#Saving metadata
Genome_Result = self.dfu.get_objects({'object_refs':[self.wsName+'/'+Genome_Name]})['data'][0]
Genome_Object = Genome_Result['data']
self.dfu.save_objects({'id':Workspace_ID,
'objects' : [ {'type': 'KBaseGenomes.Genome',
'data': Genome_Object,
'meta' : Genome_Meta,
'name' : Genome_Name} ]})
summary_file.flush()
summary_file.close()
| 46.606838
| 139
| 0.496011
| 20,995
| 0.962544
| 0
| 0
| 1,274
| 0.058408
| 0
| 0
| 4,031
| 0.184807
|
b129413908fca02566b29b673b606e60be14141b
| 7,824
|
py
|
Python
|
icetray_version/trunk/resources/scripts/make_plots.py
|
hershalpandya/airshowerclassification_llhratio_test
|
a2a2ce5234c8f455fe56c332ab4fcc65008e9409
|
[
"MIT"
] | null | null | null |
icetray_version/trunk/resources/scripts/make_plots.py
|
hershalpandya/airshowerclassification_llhratio_test
|
a2a2ce5234c8f455fe56c332ab4fcc65008e9409
|
[
"MIT"
] | null | null | null |
icetray_version/trunk/resources/scripts/make_plots.py
|
hershalpandya/airshowerclassification_llhratio_test
|
a2a2ce5234c8f455fe56c332ab4fcc65008e9409
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# In[1]:
import numpy as np
get_ipython().magic(u'matplotlib inline')
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
import sys
sys.path.append('../../python/')
from general_functions import load_5D_PDF_from_file
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import tables
import glob
def plot_2d_hist(hist,xedges,yedges,
xlim,ylim,
xlabel='',ylabel='',title='',cmap='coolwarm',
vmin=1e-5,vmax=1e-1,same_plot=False,alpha=1.0):
hist=hist.T
hist=np.ma.masked_where(hist==0,hist)
#label='nentries: %i'%np.sum(hist)
if not same_plot:
plt.figure()#dpi=320)
plt.pcolormesh(xedges,yedges,hist,alpha=alpha,
cmap=cmap,norm=LogNorm(vmin=vmin,vmax=vmax))
#cbar=plt.colorbar()
#plt.scatter([2.0],[2],color=None,s=0,label=label)
#plt.legend()
plt.xlim(xlim)
plt.ylim(ylim)
#plt.xlabel(xlabel)
#plt.ylabel(ylabel)
#plt.title(title)
return plt
def plot_3dhist(bkg_hist,bincenters,azim,elev,outputname,vmin,vmax):
Q,T,R = np.meshgrid(bincenters[1],bincenters[0],bincenters[2])
c= bkg_hist/np.sum(bkg_hist)
Q=Q.T
T=T.T
R=R.T
c=c.T
#print np.shape(Q.T), np.shape(T.T), np.shape(R.T), np.shape(bkg_hist.T)
reshape_ = np.prod(np.shape(Q))
Q = Q.reshape(reshape_)
T = T.reshape(reshape_)
R = R.reshape(reshape_)
c= c.reshape(reshape_)
select=(c!=0)#&(np.random.rand(len(c))>0.5)
Q=Q[select]
T=T[select]
R=R[select]
c=np.log10(c[select])
alpha=np.ones_like(c)
alpha[c<-2]=0.70
alpha[c<-3]=0.60
alpha[c<-4]=0.50
alpha[c<-5]=0.40
alpha[c<-6]=0.30
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
cmap = cm.jet
m = cm.ScalarMappable(norm=norm, cmap=cmap)
c= m.to_rgba(c)
c.T[3]=alpha
fig=plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(R,T,Q,zdir='Q',c=c,s=30,edgecolors=c)
ax.azim = azim
ax.elev = elev
ax.set_xlabel('R')
ax.set_ylabel('Q')
ax.set_zlabel('T')
ax.set_xlim([0,3.5])
ax.set_ylim([-3.2,4])
ax.set_zlim([-5.2,4.2])
#fig.colorbar(myax)
fig.savefig(outputname,bbox_inches='tight')
plt.close()
return
def hist_2d_proj(hist3d,axis=0):
if axis==0:
axes=[0,1,2]
if axis==1:
axes=[1,0,2]
if axis==2:
axes=[2,0,1]
hist3d=np.transpose(hist3d,axes=axes)
proj_hist=np.zeros_like(hist3d[0])
    print(np.shape(proj_hist))
for i in range(len(hist3d)):
proj_hist += hist3d[i]
return proj_hist
def hist_1d_proj(hist2d,axis=0):
if axis==0:
axes=[0,1]
if axis==1:
axes=[1,0]
hist2d=np.transpose(hist2d,axes=axes)
proj_hist=np.zeros_like(hist2d[0])
    print(np.shape(proj_hist))
for i in range(len(hist2d)):
proj_hist += hist2d[i]
return proj_hist
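# (Added note) Both projection helpers are equivalent to summing over the chosen
# axis: for a hypothetical h = np.random.rand(4, 5, 6),
# np.allclose(hist_2d_proj(h, axis=1), h.sum(axis=1)) holds.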
def plot_2D_projected_hist(hist3d,edges,axis=0,
xlabel='',ylabel='',
event_overlay=False, event=None):
projected_hist = hist_2d_proj(hist3d,axis)
if axis==0:
xedges= edges[1]
yedges= edges[2]
if axis==1:
xedges= edges[0]
yedges= edges[2]
if axis==2:
xedges= edges[0]
yedges= edges[1]
xlim = [xedges[0]-0.25,xedges[-1]+0.25]
ylim = [yedges[0]-0.25,yedges[-1]+0.25]
projected_hist /=np.sum(projected_hist)
projected_hist = projected_hist.T
plot_2d_hist(projected_hist,yedges,xedges,ylim,xlim,xlabel,ylabel,cmap='jet')
if event_overlay:
xcenters=(xedges[:-1]+xedges[1:])/2.0
ycenters=(yedges[:-1]+yedges[1:])/2.0
xscatter=[]
yscatter=[]
zscatter=[]
for r,row in enumerate(hist_2d_proj(event,axis)):
for c,element in enumerate(row):
if element!=0:
xscatter.append(xcenters[r])
yscatter.append(ycenters[c])
zscatter.append(element)
xscatter=np.array(xscatter)
yscatter=np.array(yscatter)
zscatter=np.array(zscatter)
plt.scatter(yscatter,xscatter,marker='s',s=10*zscatter,edgecolor='k',facecolor='r',
alpha=0.6)
return
# In[3]:
sig_pdf_file='../../files/PDF_12360_0123x.hd5'
bkg_pdf_file='../../files/PDF_12362_0123x.hd5'
temp=load_5D_PDF_from_file(SigPDFFileName=sig_pdf_file, BkgPDFFileName=bkg_pdf_file)
sig_hist=temp[0]
bkg_hist=temp[1]
binedges=temp[2]
distinct_regions_binedges=temp[3]
labels=temp[4]
sig_n_events=temp[5]
bkg_n_events = temp[6]
# In[4]:
# find the logE and coszen bins select those bins in sig/bkg pdfs
logEbincenters = np.array((binedges[0][1:] + binedges[0][:-1] )/2.)
coszenbincenters = np.array((binedges[1][1:] + binedges[1][:-1] )/2.)
logE=-0.01
dE = np.absolute(logEbincenters - logE)
Ebin=np.where(np.amin(dE)==dE)[0][0]
coszen=0.96
dcZ = np.absolute(coszenbincenters - coszen)
cZbin = np.where(np.amin(dcZ)==dcZ)[0][0]
sig_hist_3dslice = sig_hist[Ebin][cZbin]
bkg_hist_3dslice = bkg_hist[Ebin][cZbin]
binedges_3dslice = binedges[2:]
# In[7]:
plot_2D_projected_hist(sig_hist_3dslice,binedges_3dslice,axis=2)
# In[27]:
sig_hdf_files=glob.glob('../../files/Events_12360_?x.hd5.hd5')
bkg_hdf_files=glob.glob('../../files/Events_12362_?x.hd5.hd5')
# In[30]:
def load_hdf_file(tfiles):
d={}
for tfile in tfiles:
f=tables.open_file(tfile)
for name in f.root.IceTopLLHR.colnames:
if tfile==tfiles[0]:
d[name]= eval('f.root.IceTopLLHR.cols.'+name+'[:]')
else:
d[name]=np.concatenate( (d[name],eval('f.root.IceTopLLHR.cols.'+name+'[:]')) )
if tfile==tfiles[0]:
d['log_s125']=np.log10(f.root.LaputopParams.cols.s125[:])
d['cos_zen']=np.cos(f.root.Laputop.cols.zenith[:])
else:
d['log_s125']=np.concatenate( (d['log_s125'],np.log10(f.root.LaputopParams.cols.s125[:])) )
d['cos_zen']=np.concatenate( (d['cos_zen'], np.cos(f.root.Laputop.cols.zenith[:])) )
return d
# In[31]:
llhr={}
llhr['sig']=load_hdf_file(sig_hdf_files)
llhr['bkg']=load_hdf_file(bkg_hdf_files)
# In[45]:
low_E=1.5
high_E=1.6
low_z=0.8
high_z=.85
for key in llhr.keys():
cut1=llhr[key]['isGood']==1.0
cut2=llhr[key]['tanks_have_nans']==0.
cut3=llhr[key]['log_s125']>=low_E
cut4=llhr[key]['log_s125']<high_E
cut5=llhr[key]['cos_zen']>=low_z
cut6=llhr[key]['cos_zen']<high_z
select=cut1&cut2&cut3&cut4&cut5&cut6
    print(len(select))
    print(len(select[select]))
hist_this ='llh_ratio'
range=[-10,15]
bins=35
#hist_this='n_extrapolations_bkg_PDF'
#range=[0,20]
#bins=20
plt.hist(llhr[key][hist_this][select],range=range,bins=bins,label=key,histtype='step')
plt.legend()
# In[34]:
llhr['sig'].keys()
# In[2]:
def load_results_hist(tfile):
f=tables.open_file(tfile)
labels=f.root.labels[:]
nevents=f.root.n_events[:]
edges0=f.root.binedges_0[:]
edges1=f.root.binedges_1[:]
edges2=f.root.binedges_2[:]
hist=f.root.hist[:]
f.close()
return hist, [edges0,edges1,edges2], nevents,labels
# In[3]:
sig_hist, edges, sig_nevents, labels = load_results_hist('../../files/results_sig_Ezenllhr.hd5')
bkg_hist, edges, bkg_nevents, labels = load_results_hist('../../files/results_bkg_Ezenllhr.hd5')
# In[4]:
sig_onedhist=hist_2d_proj(sig_hist,axis=1)[3]
bkg_onedhist=hist_2d_proj(bkg_hist,axis=1)[3]
# In[5]:
plt.bar(edges[2][:-1],sig_onedhist,alpha=1.,label='rand')
plt.bar(edges[2][:-1],bkg_onedhist,alpha=0.3,label='data')
plt.yscale('log')
#plt.xlim([-1,1])
plt.legend()
# In[54]:
| 23.709091
| 103
| 0.61797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,112
| 0.142127
|
b12945ba640ad4a03105665c4e82e2d609d22997
| 3,171
|
py
|
Python
|
tests/test_vector.py
|
slode/triton
|
d440c510f4841348dfb9109f03858c75adf75564
|
[
"MIT"
] | null | null | null |
tests/test_vector.py
|
slode/triton
|
d440c510f4841348dfb9109f03858c75adf75564
|
[
"MIT"
] | null | null | null |
tests/test_vector.py
|
slode/triton
|
d440c510f4841348dfb9109f03858c75adf75564
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2013 Stian Lode
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import fixtures
from triton.vector import Vector
from triton.vector3d import Vector3d
from triton.vector2d import Vector2d
from pytest import approx
def test_addition():
a = Vector2d(3,0)
b = Vector2d(0,2)
c = a + b
assert c == [3,2]
a += b
assert a ==c
d = 1 + a
assert d == [a.x+1, a.y+1]
def test_subtraction():
a = Vector2d(3,0)
b = Vector2d(0,2)
c = a - b
assert c == [3,-2]
a -= b
assert a ==c
d = 1 - a
assert d == [1 - a.x, 1 - a.y]
def test_multiplication():
a = Vector2d(3,1)
b = Vector2d(1,2)
c = a * b
assert c == [3,2]
a *= b
assert a ==c
d = -1 * a
assert d == [-1 * a.x, -1 * a.y]
def test_division():
a = Vector2d(3.0,1.0)
b = Vector2d(1.0,2.0)
c = a / b
assert c == [3, 0.5]
a /= b
assert a ==c
d = 1 / a
assert d == [1 / a.x, 1 / a.y]
def test_length():
a = Vector2d(3,0)
b = Vector2d(0,4)
assert (a-b).length() == approx(5)
def test_perp():
a = Vector2d(1,9)
b = a.perp()
assert b == [-9, 1]
c = a.dot(b)
assert c == 0
def test_eq():
a = Vector2d(3,2)
b = Vector2d(3,2)
assert a ==b
assert a == [3,2]
def test_normalize():
a = Vector2d(5,2);
a.normalize()
assert a.length() == 1
def test_angle():
a = Vector2d(4,4)
b = Vector2d(4,-4)
c = b.angle_diff(a)
assert c == approx(math.pi/2)
d = b.angle_diff(b)
assert d == approx(0)
e = Vector2d(-4, -4)
f = e.angle_diff(a)
assert f == approx(math.pi)
g = a.angle_diff(e)
assert g == approx(-math.pi)
h = a.angle()
assert h == approx(math.pi/4)
def test_unit_vector():
a = Vector2d(8,9)
b = a.unit_vector()
assert b.length() == 1
a.normalize()
assert a.length() == 1
def test_cross():
a = Vector3d(-0, 10, 0)
b = Vector3d(10, 0, 0)
c = b.cross(a)
assert c == [0, 0, 100]
d = a.cross(b)
assert d == [0, 0, -100]
a = Vector2d(-0, 10)
b = Vector2d(10, 0)
c = b.cross(a)
assert c == 100
| 24.022727
| 79
| 0.602964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,073
| 0.338379
|
b129d2583e5ec5edf4eaa2db0112f68dbe43bc35
| 3,912
|
py
|
Python
|
build.py
|
Lvue-YY/Lvue-YY
|
630b1ea5d4db9b5d9373d4e9dbbfa9f8fc9baf2e
|
[
"Apache-2.0"
] | 1
|
2020-07-28T15:48:06.000Z
|
2020-07-28T15:48:06.000Z
|
build.py
|
Lvue-YY/Lvue-YY
|
630b1ea5d4db9b5d9373d4e9dbbfa9f8fc9baf2e
|
[
"Apache-2.0"
] | null | null | null |
build.py
|
Lvue-YY/Lvue-YY
|
630b1ea5d4db9b5d9373d4e9dbbfa9f8fc9baf2e
|
[
"Apache-2.0"
] | null | null | null |
import httpx
import pathlib
import re
import datetime
from bs4 import BeautifulSoup
root = pathlib.Path(__file__).parent.resolve()
def formatGMTime(timestamp):
UTC_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
timeStr = datetime.datetime.strptime(timestamp, UTC_FORMAT) + datetime.timedelta(hours=2, minutes=30)
dateStr = timeStr.strftime("%Y-%m-%d")
return dateStr
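# (Added note) Example: formatGMTime("2021-03-01T22:00:00Z") -> "2021-03-02",
# i.e. the GitHub UTC timestamp shifted by the fixed +2:30 offset above and
# truncated to a date string.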
def get_events():
events = httpx.get("https://api.github.com/users/Love-YY/events").json()[:5]
results = []
for event in events:
tempEvent = {}
if (event["type"] == "WatchEvent"):
tempEvent["action"] = "*starred*"
tempEvent["target"] = event["repo"]["name"]
tempEvent["time"] = formatGMTime(event["created_at"])
tempEvent["url"] = event["repo"]["url"].replace("api.", "").replace("repos/", "")
elif (event["type"] == "ReleaseEvent"):
tempEvent["action"] = "*released*"
tempEvent["target"] = event["payload"]["release"]["name"]
tempEvent["time"] = formatGMTime(event["payload"]["release"]["published_at"])
tempEvent["url"] = event["payload"]["release"]["html_url"]
elif (event["type"] == "PushEvent"):
tempEvent["action"] = "*pushed*"
tempEvent["target"] = event["repo"]["name"]
tempEvent["time"] = formatGMTime(event["created_at"])
tempEvent["url"] = event["payload"]["commits"][0]["url"].replace("api.", "").replace("repos/", "")
elif (event["type"] == "IssuesEvent"):
tempEvent["action"] = "*" + event["payload"]["action"] + " issue*"
tempEvent["target"] = event["repo"]["name"]
tempEvent["time"] = formatGMTime(event["created_at"])
tempEvent["url"] = event["payload"]["issue"]["url"].replace("api.", "").replace("repos/", "")
else:
tempEvent["action"] = "*" + event["type"].replace("Event", "").lower() + "d*"
tempEvent["target"] = event["repo"]["name"]
tempEvent["time"] = formatGMTime(event["created_at"])
tempEvent["url"] = event["repo"]["url"].replace("api.", "").replace("repos/", "")
results.append(tempEvent)
return results
def get_blogs():
html = httpx.get("https://www.flynoodle.xyz/blog/").text
soup = BeautifulSoup(html, "html.parser")
soup_all = soup.find_all("div", class_="abstract-item")[:5]
results = []
for item in soup_all:
temp = {}
temp["title"] = item.find("div", class_="title").get_text()
temp["url"] = "https://www.flynoodle.xyz" + item.find("a").get("href")
temp["date"] = item.find("i", class_="reco-date").find("span").get_text()
results.append(temp)
return results
def replace_chunk(content, marker, chunk, inline=False):
r = re.compile(
r"<!\-\- {} starts \-\->.*<!\-\- {} ends \-\->".format(marker, marker),
re.DOTALL,
)
if not inline:
chunk = "\n{}\n".format(chunk)
chunk = "<!-- {} starts -->{}<!-- {} ends -->".format(marker, chunk, marker)
return r.sub(chunk, content)
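# (Added note) A quick illustration with hypothetical markers:
#   replace_chunk("a <!-- t starts -->old<!-- t ends --> b", "t", "new", inline=True)
#   -> 'a <!-- t starts -->new<!-- t ends --> b'
# Without inline=True the new chunk is additionally wrapped in newlines.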
if __name__ == "__main__":
readme = root / "README.md"
readme_contents = readme.open().read()
events = get_events()
events_md = "\n".join(
["* {action} <a href={url} target='_blank'>{target}</a> - {time}".format(**item) for item in events]
)
rewritten = replace_chunk(readme_contents, "event", events_md)
entries = get_blogs()
blogs_md = "\n".join(
["* <a href={url} target='_blank'>{title}</a> - {date}".format(**entry) for entry in entries]
)
rewritten = replace_chunk(rewritten, "blog", blogs_md)
time = (datetime.datetime.now() + datetime.timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
time_md = "Automatically updated on " + time
rewritten = replace_chunk(rewritten, "time", time_md)
readme.open("w").write(rewritten)
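# Hedged illustration (not part of the original script): a small, self-contained
# sketch showing how replace_chunk() swaps the text between the HTML comment
# markers the README is assumed to contain. The marker name "event" and the
# sample README text below are invented for demonstration only.
def _demo_replace_chunk():
    sample = "intro\n<!-- event starts -->old<!-- event ends -->\noutro"
    updated = replace_chunk(sample, "event", "* new item")
    # updated now holds the new list item between the same two markers:
    # "intro\n<!-- event starts -->\n* new item\n<!-- event ends -->\noutro"
    return updated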
| 37.615385
| 110
| 0.57362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,146
| 0.292945
|
b12c849d2ef4e720802c1f093c8c0678dd35a0b0
| 1,061
|
py
|
Python
|
app/models/news_article_test.py
|
engineer237/News-application
|
66d7e8d70c5c023292dea4f5b87bd11ab5fb102e
|
[
"MIT"
] | null | null | null |
app/models/news_article_test.py
|
engineer237/News-application
|
66d7e8d70c5c023292dea4f5b87bd11ab5fb102e
|
[
"MIT"
] | null | null | null |
app/models/news_article_test.py
|
engineer237/News-application
|
66d7e8d70c5c023292dea4f5b87bd11ab5fb102e
|
[
"MIT"
] | null | null | null |
import unittest # module for testing
from models import news_article
class TestNewsArticles(unittest.TestCase):
'''
TestNewsArticles for testing class news_articles
'''
def setUp(self):
'''
        Set up method to run before each test case.
'''
# id, title ,poster, url_link, description, published_date, content
self.new_article = news_article.News_Article(
'123',
'BBc News',
"https://www.independent.co.uk/news/world/americas/miss-usa-2019-cheslie-kryst-death-b2003891.html",
"A woman who jumped to her death from a New York City high rise apartment building has been identified as former Miss USA Cheslie Kryst.\r\nThe 2019 pageant winner who had a ninth floor apartment in Man… [+2506 chars]",
"2022-01-31T05:07:00Z",
"With a calm that belied their underdog status, the Bengals intercepted Patrick Mahomes and completed a field-goal drive in overtime to end Kansas City’s streak of Super Bowl appearances.")
if __name__ == "__main__":
unittest.main()
| 48.227273
| 227
| 0.697455
| 947
| 0.889202
| 0
| 0
| 0
| 0
| 0
| 0
| 775
| 0.7277
|
b12dc2d34aac9627697ee3968231db8487e21dff
| 2,216
|
py
|
Python
|
samples/at.bestsolution.framework.grid.personsample.model/utils/datafaker.py
|
BestSolution-at/framework-grid
|
cdab70e916e20a1ce6bc81fa69339edbb34a2731
|
[
"Apache-2.0"
] | 4
|
2015-01-19T11:35:38.000Z
|
2021-05-20T04:31:26.000Z
|
samples/at.bestsolution.framework.grid.personsample.model/utils/datafaker.py
|
BestSolution-at/framework-grid
|
cdab70e916e20a1ce6bc81fa69339edbb34a2731
|
[
"Apache-2.0"
] | 3
|
2015-01-22T10:42:51.000Z
|
2015-02-04T13:06:56.000Z
|
samples/at.bestsolution.framework.grid.personsample.model/utils/datafaker.py
|
BestSolution-at/framework-grid
|
cdab70e916e20a1ce6bc81fa69339edbb34a2731
|
[
"Apache-2.0"
] | 3
|
2015-01-15T09:45:13.000Z
|
2016-03-08T11:29:58.000Z
|
#! /usr/bin/env python3
import sys
import random
import os
from faker import Factory as FFactory
OUTFILE = "samples.xmi"
NUM_SAMPLES = 10
NUM_COUNTRIES = 4
TEMPLATE = """<?xml version="1.0" encoding="ASCII"?>
<person:Root
xmi:version="2.0"
xmlns:xmi="http://www.omg.org/XMI"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:person="http://www.bestsolution.at/framework/grid/personsample/1.0"
xsi:schemaLocation="http://www.bestsolution.at/framework/grid/personsample/1.0 ../model/Person.xcore#/EPackage">
{0}
</person:Root>
"""
TEMPLATE_COUNTRY = """<countries name="{0}"/>"""
TEMPLATE_PERSON = """<persons firstname="{0}"
lastname="{1}"
gender="{2}"
married="{3}"
birthdate="{4}">
<address
street="{5}"
number="{6}"
zipcode="{7}"
city="{8}"
country="//@countries.{9}"/>
</persons>
"""
COUNTRIES = []
PERSONS = []
def fake_xmi():
faker = FFactory.create()
for i in range(NUM_SAMPLES):
PERSONS.append(
TEMPLATE_PERSON.format(
faker.first_name(),
faker.last_name(),
"MALE" if faker.boolean() is True else "FEMALE",
faker.boolean(),
faker.iso8601(),
faker.street_name(),
faker.building_number(),
faker.postcode(),
faker.city(),
random.randint(0, NUM_COUNTRIES - 1)
)
)
for i in range(NUM_COUNTRIES):
COUNTRIES.append(
TEMPLATE_COUNTRY.format(faker.country())
)
with open(OUTFILE, "w") as text_file:
text_file.write(
TEMPLATE.format(
os.linesep.join(
[os.linesep.join(PERSONS), os.linesep.join(COUNTRIES)]
)
)
)
if __name__ == "__main__":
if "-n" in sys.argv:
position_param = sys.argv.index("-n")
NUM_SAMPLES = int(sys.argv[position_param + 1])
sys.argv.pop(position_param)
sys.argv.pop(position_param)
if len(sys.argv) > 1:
OUTFILE = sys.argv.pop()
print("Writing samples to {0}.".format(OUTFILE))
fake_xmi()
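# Hedged illustration (not part of the original script): the templates above are
# plain str.format() targets, so they can be previewed without Faker. The values
# below ("Austria", "Jane", ...) are invented purely for demonstration.
def _demo_templates():
    country_xml = TEMPLATE_COUNTRY.format("Austria")
    person_xml = TEMPLATE_PERSON.format(
        "Jane", "Doe", "FEMALE", True, "1980-01-01T00:00:00",
        "Main Street", "12", "1010", "Vienna", 0)
    # Wrapping both fragments in the root template yields a complete XMI document.
    return TEMPLATE.format(person_xml + country_xml)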
| 24.622222
| 116
| 0.553249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 776
| 0.350181
|
b12e9ab06bf81720fa6f6bbe4f8fd67e00e19bb0
| 977
|
py
|
Python
|
tests/test_utility.py
|
ericbdaniels/pygeostat
|
94d9cba9265945268f08302f86ce5ba1848fd601
|
[
"MIT"
] | null | null | null |
tests/test_utility.py
|
ericbdaniels/pygeostat
|
94d9cba9265945268f08302f86ce5ba1848fd601
|
[
"MIT"
] | null | null | null |
tests/test_utility.py
|
ericbdaniels/pygeostat
|
94d9cba9265945268f08302f86ce5ba1848fd601
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__author__ = 'pygeostat development team'
__date__ = '2020-01-04'
__version__ = '1.0.0'
import os, sys
try:
import pygeostat as gs
except ImportError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), r'..')))
import pygeostat as gs
import unittest
import warnings
import subprocess
class GetExecutableTest(unittest.TestCase):
'''
Test suite for loading executable files from a protected repository
'''
def setUp(self):
self.wrong_token = 'wrong_token'
def test_token_error(self):
'''
Test if the proper error handleing is in place for wrong access token
'''
with self.assertRaises(Exception):
gs.get_executable(access_token=self.wrong_token)
if __name__ == '__main__':
subprocess.call([sys.executable, '-m', 'unittest', str(__file__), '-v'])
| 22.204545
| 86
| 0.684749
| 439
| 0.449335
| 0
| 0
| 0
| 0
| 0
| 0
| 313
| 0.320368
|
b12fefdc2ed55826f47db62ac7208620f95060a4
| 10,654
|
py
|
Python
|
rockets/rocket.py
|
rsewell97/open-starship
|
ecb5f848b8ce2d7119defec0960b6ccdc176a9db
|
[
"Unlicense"
] | null | null | null |
rockets/rocket.py
|
rsewell97/open-starship
|
ecb5f848b8ce2d7119defec0960b6ccdc176a9db
|
[
"Unlicense"
] | null | null | null |
rockets/rocket.py
|
rsewell97/open-starship
|
ecb5f848b8ce2d7119defec0960b6ccdc176a9db
|
[
"Unlicense"
] | null | null | null |
import time
import multiprocessing as mp
import numpy as np
from scipy.spatial.transform import Rotation
from world import Earth
class Rocket(object):
def __init__(self, planet=Earth()):
self.planet = planet
self.propellant_mass = 1e6
self.dry_mass = 40e3
self.max_thrust = 2e5 * 2
self.Isp = 350
self.fuel_level = 0.08
self.radius = 3.5
self.height = 40
self.max_v_gimbal = 30
self.max_gimbal = 20
self.min_throttle = 0
self.reenters_engine_first = True
self.rated_for_human_reentry = False
# IN WORLD COORDINATES
self.pos = np.array([0, self.planet.R, 0])
self.vel = np.zeros(3)
self.rot = Rotation.identity()
self.drot = np.zeros(3)
# IN LOCAL COORDINATES
self.acc = np.zeros(3)
self.ddrot = np.zeros(3)
self.gimbal = np.zeros(3) # y, z, x
self.v_gimbal = np.zeros(3) # y, z, x
self.clock = 0
self.plotter = None
if self.planet.is3D:
self.control_vec = np.zeros(3)
else:
self.control_vec = np.zeros(2)
@property
def mass(self):
return self.dry_mass + self.fuel_level*self.propellant_mass
@property
def inertia(self):
# assume Ixx is uniform disc lamina
Ixx = 0.5*self.mass*self.radius**2
# assume Iyy is uniform rod
Iyy = self.mass*(self.height**2) / 12
# likewise for Izz
Izz = Iyy
# assume no gyration effects
return np.array([Ixx, Iyy, Izz])
@property
def finenessRatio(self):
return self.height/self.radius/2
@property
def deltaV(self):
return self.Isp * np.log(self.mass/self.dry_mass)
@property
def altitude(self):
return self.orbitalDist - self.planet.R
@property
def latitude(self):
return np.arcsin(self.pos[2] / self.orbitalDist) * 180/np.pi
@property
def longitude(self):
lon = np.arcsin(self.pos[1] / np.linalg.norm(self.pos[:2])) * 180/np.pi
if self.pos[0] >= 0:
return lon
else:
if lon > 0:
return 180-lon
else:
return -180+lon
@property
def horizonAxes(self):
y = self.pos/self.orbitalDist # radial axis
z = -self.orbitalAngMomentum/np.linalg.norm(self.orbitalAngMomentum) #norm to orbital plane
x = np.cross(y, z) # forward horizon dir
return np.array([x, y, z])
@property
def localAxes(self):
return self.toWorld(np.eye(3))
@property
def eulerAngles(self):
l = self.toLocal(self.horizonAxes)
return Rotation.from_matrix(l).as_euler('yzx', degrees=True)
@property
def heading(self):
return self.localAxes[0]
@property
def attackAngle(self):
# in range 0 -> pi
return np.arccos(np.clip(np.dot(self.vel/self.speed, self.heading), -1, 1))
@property
def speed(self):
return np.linalg.norm(self.vel)
@property
def machNum(self):
return self.speed / self.planet.c
@property
def Gs(self):
return np.linalg.norm(self.acc) / self.planet.g(self.altitude)
@property
def radialSpeed(self):
return np.dot(self.vel, self.horizonAxes[1])
@property
def tangentialSpeed(self):
return np.linalg.norm(self.tangentialVel)
@property
def tangentialVel(self):
return np.cross(self.horizonAxes[1], self.orbitalAngMomentum/(self.mass*self.orbitalDist))
@property
def orbitalAngMomentum(self):
return np.cross(self.pos, self.vel*self.mass)
@property
def orbitalDist(self):
return np.linalg.norm(self.pos)
@property
def currentTraj(self):
b = 2*self.radialSpeed / self.planet.g(self.altitude)
c = -2*self.altitude / self.planet.g(self.altitude)
pos_time_to_hit_ground = (-b + (b**2 - 4*c) ** 0.5) / 2
neg_time_to_hit_ground = (-b - (b**2 - 4*c) ** 0.5) / 2
ts = np.linspace(2*neg_time_to_hit_ground, 2 *
pos_time_to_hit_ground, 100)
ts = np.expand_dims(ts, 1)
w = self.radialSpeed * self.horizonAxes[1]
x = 0.5*self.planet.g(self.altitude) * self.horizonAxes[1] * 1.4
traj = np.apply_along_axis(lambda t: self.pos + (w+self.tangentialVel)*t - x*t**2, 1, ts)
return traj
@property
def hullTemperature(self):
# TODO
return None
@property
def states(self):
horizon_ang_vel = self.drot + self.toLocal(self.orbitalAngMomentum/(self.mass*self.orbitalDist**2))
return [
self.altitude, self.radialSpeed, self.tangentialSpeed,
self.eulerAngles.ravel(), horizon_ang_vel.ravel(), self.gimbal.ravel()
]
def calcUpdate(self, dt, gimbal_cntrl=[5, 0], throttle=0.):
# perform control actions and calculate rocket mechanics
# assume rocket is modelled as a featureless cylinder
# TRANSFORM INTO LOCAL COORDINATES
x,y,z = np.eye(3)
v = self.toLocal(self.vel)
# FORCES
# thrust
gimbal_cntrl = np.deg2rad(gimbal_cntrl)
gimbal = Rotation.from_rotvec([0, gimbal_cntrl[0], gimbal_cntrl[1]])
thrust = gimbal.apply(throttle * self.max_thrust * x)
self.fuel_level -= np.linalg.norm(thrust) / (self.Isp * self.planet.g(self.altitude) * self.propellant_mass)
# aero (cylinder)
common_factor = 0.5 * self.planet.rho(self.altitude) * self.speed**2
drag = -x*0.62*common_factor* np.pi*self.radius**2 * np.cos(self.attackAngle)
# lift is perpendicular to ship, calculated using eqn (32): https://apps.dtic.mil/dtic/tr/fulltext/u2/603829.pdf
lift_normal = self.normalised(np.cross(x, np.cross(x, v)))
lift = lift_normal*0.1*common_factor* 2*self.radius*self.height * np.sin(self.attackAngle)**3
# TORQUES
# aero restoring torque
restoring_axis = self.normalised(np.cross(x, v))
aero_torque = 0.05 * self.height/8 * common_factor * restoring_axis * self.finenessRatio
# gimballing torque
thrust_torque = np.cross(-self.height*x/2, thrust)
force_sum = drag + lift + np.dot(thrust, x)
torque_sum = thrust_torque + aero_torque # vec
self.acc = force_sum / self.mass # local coord
self.ddrot = torque_sum / self.inertia # local coord
def _update(self, dt):
self.calcUpdate(dt)
self.update(dt)
grav = self.planet.g(self.altitude) * self.pos/self.orbitalDist
self.vel = self.vel + (self.toWorld(self.acc)-grav)*dt
self.pos = self.pos + self.vel*dt
self.drot = self.drot + self.ddrot*dt
upd = Rotation.from_rotvec(self.drot*dt)
self.rot = Rotation.from_matrix(upd.apply(self.rot.as_matrix()))
self.clock += dt
if self.plotter is not None:
to_send = {}
for k, v in self.plotting_info.items():
if type(v) == list:
to_send[k] = [getattr(self, i) for i in v]
else:
to_send[k] = getattr(self, v)
self.plotter.update(to_send)
def update(self, dt):
        # base function to add specific rocket dynamics: modify self.acc and self.ddrot here
pass
def runSim(self, dt=1, T=None):
if T is None:
T = np.inf
prev_time = time.time()
while self.speed != 0:
self._update(dt)
if not self.failConditions():
break
elif self.clock > T:
break
print('\r', end='')
print(f'Simulation speed: {dt/(time.time()-prev_time):.1f}x, update freq: {1/(time.time()-prev_time):.1f}Hz', end='', flush=True)
prev_time = time.time()
def startAtApoapsis(self, apoapsis=100e3, periapsis=-20e5, inclination=0):
apoapsis += self.planet.R
periapsis += self.planet.R
a = (apoapsis + periapsis) / 2
# e = ((apoapsis - a) + (periapsis - a)) / (2*a)
v = (self.planet.mu*(2/apoapsis - 1/a)) ** 0.5
inclination = np.deg2rad(inclination)
self.pos = np.array([0, apoapsis*np.cos(inclination), apoapsis*np.sin(inclination)])
self.vel = np.cross(self.pos, np.array([0, -np.sin(inclination), np.cos(inclination)]))
self.vel = self.vel/self.speed * v
self.rot = Rotation.from_euler('yzx', [self.reenters_engine_first*np.pi,
0, -np.pi/2+inclination])
world_ang_vel = self.orbitalAngMomentum/(self.mass*self.orbitalDist**2)
self.drot = -self.toLocal(world_ang_vel)
def startFreefall(self, altitude):
self.pos = np.array([0, altitude+self.planet.R, 0])
self.vel = np.array([0, -0.1, 0.1])
self.rot = Rotation.from_euler('yzx', [self.reenters_engine_first*np.pi/2 + 0.01,
-np.pi/2, 0]).as_quat()
self.drot = np.zeros(3)
def startAt(self, pos, vel):
self.pos = np.asarray(pos)
self.vel = np.asarray(vel)
def typicalEntry(self):
raise NotImplementedError("This will be added in a future update")
def failConditions(self):
# _tmp = abs(self.reenters_engine_first*np.pi - self.attackAngle)
# if _tmp > 2*np.pi/3 and self.speed > 3000:
# print('\nFlamey end is not pointing down')
# return False # pointing wrong-end-first
# if self.speed > 7900:
# print('\nYou made a meteor')
# return False # nothing has survived reentry at this speed
# if self.Gs > 8 and self.rated_for_human_reentry:
# print(f'\n{self.Gs:.1f}G - congrats everyone is unconscious')
# return False # too much G force for occupants to handle
if self.altitude <= 0 and self.speed > 2:
print(f'\nRUD on landing pad {self.speed:.1f}m/s')
return False # crash land
return True
def attachPlotter(self, obj):
self.plotter = obj
self.plotting_info = self.plotter.reqDataFormat()
def toLocal(self, vec):
return self.rot.inv().apply(vec)
def toWorld(self, vec):
return self.rot.apply(vec)
@staticmethod
def normalised(v):
n = np.sqrt(np.dot(v,v))
if n == 0:
return np.zeros_like(v)
else:
return v / np.sqrt(np.dot(v,v))
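# Hedged usage sketch (not part of the original module): how this class appears
# to be driven, based only on the methods defined above. The orbit numbers and
# the timestep are illustrative assumptions, not values from the original project.
def _demo_rocket_run():
    rocket = Rocket()                                   # defaults to Earth()
    rocket.startAtApoapsis(apoapsis=100e3, periapsis=-20e5, inclination=0)
    rocket.runSim(dt=1, T=60)                           # simulate one minute
    return rocket.altitude, rocket.speed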
| 33.71519
| 141
| 0.583255
| 10,521
| 0.987516
| 0
| 0
| 3,766
| 0.353482
| 0
| 0
| 1,488
| 0.139666
|
b1311d08ef54f651d8ccb73e1a63e7ab49ee598f
| 868
|
py
|
Python
|
examples/complex/tcp_message.py
|
0x7c48/mitmproxy
|
f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba
|
[
"MIT"
] | 74
|
2016-03-20T17:39:26.000Z
|
2020-05-12T13:53:23.000Z
|
examples/complex/tcp_message.py
|
0x7c48/mitmproxy
|
f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba
|
[
"MIT"
] | 7
|
2020-06-16T06:35:02.000Z
|
2022-03-15T20:15:53.000Z
|
examples/complex/tcp_message.py
|
0x7c48/mitmproxy
|
f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba
|
[
"MIT"
] | 5
|
2016-12-14T14:56:57.000Z
|
2020-03-08T20:58:31.000Z
|
"""
tcp_message Inline Script Hook API Demonstration
------------------------------------------------
* modifies packets containing "foo" to "bar"
* prints various details for each packet.
example cmdline invocation:
mitmdump --rawtcp --tcp-host ".*" -s examples/complex/tcp_message.py
"""
from mitmproxy.utils import strutils
from mitmproxy import ctx
from mitmproxy import tcp
def tcp_message(flow: tcp.TCPFlow):
message = flow.messages[-1]
old_content = message.content
message.content = old_content.replace(b"foo", b"bar")
ctx.log.info(
"[tcp_message{}] from {} to {}:\n{}".format(
" (modified)" if message.content != old_content else "",
"client" if message.from_client else "server",
"server" if message.from_client else "client",
strutils.bytes_to_escaped_str(message.content))
)
| 31
| 68
| 0.644009
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 386
| 0.4447
|
b1318eb081bf81d3b2433e9aac0b4bedfc511b35
| 186
|
py
|
Python
|
notes/notebook/apps.py
|
spam128/notes
|
100008b7e0a2afa5677c15826588105027f52883
|
[
"MIT"
] | null | null | null |
notes/notebook/apps.py
|
spam128/notes
|
100008b7e0a2afa5677c15826588105027f52883
|
[
"MIT"
] | null | null | null |
notes/notebook/apps.py
|
spam128/notes
|
100008b7e0a2afa5677c15826588105027f52883
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class NotebookConfig(AppConfig):
name = "notes.notebook"
verbose_name = _("Notebook")
| 20.666667
| 54
| 0.763441
| 93
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 0.139785
|
b1319080e17c411506273e715ba06f2cae72f330
| 409
|
py
|
Python
|
tests/unit/test_app_init.py
|
isabella232/typeseam
|
3e9d090ec84f2110ae69051364bb0905feb2f02c
|
[
"BSD-3-Clause"
] | 2
|
2016-02-02T01:14:33.000Z
|
2016-04-22T03:45:50.000Z
|
tests/unit/test_app_init.py
|
codeforamerica/typeseam
|
3e9d090ec84f2110ae69051364bb0905feb2f02c
|
[
"BSD-3-Clause"
] | 114
|
2015-12-21T23:57:01.000Z
|
2016-08-18T01:47:31.000Z
|
tests/unit/test_app_init.py
|
isabella232/typeseam
|
3e9d090ec84f2110ae69051364bb0905feb2f02c
|
[
"BSD-3-Clause"
] | 2
|
2016-01-21T09:22:02.000Z
|
2021-04-16T09:49:56.000Z
|
from unittest import TestCase
from unittest.mock import Mock, patch
from typeseam.app import (
load_initial_data,
)
class TestModels(TestCase):
@patch('typeseam.app.os.environ.get')
def test_load_initial_data(self, env_get):
ctx = Mock(return_value=Mock(
__exit__=Mock(),
__enter__=Mock()))
app = Mock(app_context=ctx)
load_initial_data(app)
| 24.058824
| 46
| 0.662592
| 282
| 0.689487
| 0
| 0
| 249
| 0.608802
| 0
| 0
| 29
| 0.070905
|
b1334d852e2065801f7e2f8ab3a80a2b0c5761be
| 2,090
|
py
|
Python
|
execution/execution.py
|
nafetsHN/environment
|
46bf40e5b4bdf3259c5306497cc70c359ca197d2
|
[
"MIT"
] | null | null | null |
execution/execution.py
|
nafetsHN/environment
|
46bf40e5b4bdf3259c5306497cc70c359ca197d2
|
[
"MIT"
] | null | null | null |
execution/execution.py
|
nafetsHN/environment
|
46bf40e5b4bdf3259c5306497cc70c359ca197d2
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('../')
from abc import ABCMeta, abstractmethod
# https://www.python-course.eu/python3_abstract_classes.php
import logging
import oandapyV20
from oandapyV20 import API
import oandapyV20.endpoints.orders as orders
from oandapyV20.contrib.requests import MarketOrderRequest
class ExecutionHandler(object):
"""
Provides an abstract base class to handle all execution in the backtesting
and live trading system.
"""
__metaclass__ = ABCMeta
@abstractmethod
def execute_order(self):
"""
Send the order to the brokerage
"""
raise NotImplementedError("Should implement execute_order()")
class SimulatedExecution(object):
"""
Provides a simulated execution handling environment. This class actually
    does nothing - it simply receives an order to execute.
Instead, the Portfolio object actually provides fill handling. This will
be modified in later versions.
"""
def execute_order(self, event):
pass
class OANDAExecutionHandler(ExecutionHandler):
def __init__(self, domain, access_token, account_id):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.client = self.create_OADAN_client()
self.logger = logging.getLogger(__name__)
def create_OADAN_client(self):
return API(self.access_token)
def execute_order(self, event):
print("execute order")
instrument = "%s_%s" % (event.instrument[:3], event.instrument[3:])
units = event.units
#Market order
mo = MarketOrderRequest(instrument=instrument, units=units)
print(mo)
# Create order request
request = orders.OrderCreate(self.account_id, data=mo.data)
print(request)
# perform the request
rv = self.client.request(request)
print(rv)
self.logger.debug(rv)
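# Hedged usage sketch (not part of the original module): how the OANDA handler
# above is wired together. The domain, token, account id and the SimpleNamespace
# stand-in for a signal event are placeholders invented for illustration;
# execute_order() would place a real market order, so this sketch only builds the
# objects and does not call it.
def _demo_oanda_handler():
    from types import SimpleNamespace
    handler = OANDAExecutionHandler(
        domain="practice", access_token="<token>", account_id="<account-id>")
    event = SimpleNamespace(instrument="EURUSD", units=100)  # "EURUSD" becomes "EUR_USD" inside execute_order
    return handler, event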
| 29.43662
| 79
| 0.635407
| 1,745
| 0.834928
| 0
| 0
| 183
| 0.08756
| 0
| 0
| 631
| 0.301914
|
b133b22a086276eadb705450f1bd4e54352efb5b
| 3,360
|
py
|
Python
|
conda/update_versions.py
|
PicoJr/StereoPipeline
|
146110a4d43ce6cb5e950297b8dca3f3b5e3f3b4
|
[
"Apache-2.0"
] | 323
|
2015-01-10T12:34:24.000Z
|
2022-03-24T03:52:22.000Z
|
conda/update_versions.py
|
PicoJr/StereoPipeline
|
146110a4d43ce6cb5e950297b8dca3f3b5e3f3b4
|
[
"Apache-2.0"
] | 252
|
2015-07-27T16:36:31.000Z
|
2022-03-31T02:34:28.000Z
|
conda/update_versions.py
|
PicoJr/StereoPipeline
|
146110a4d43ce6cb5e950297b8dca3f3b5e3f3b4
|
[
"Apache-2.0"
] | 105
|
2015-02-28T02:37:27.000Z
|
2022-03-14T09:17:30.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
'''
Use dependency versions from a conda environment .yaml file to update
a recipe/meta.yaml file of a given package. Such an input file can
be created from the given environment with:
conda env export > myenv.yaml
'''
import sys, os, re
if len(sys.argv) < 3:
print("Usage: " + os.path.basename(sys.argv[0]) + " input.yaml mypackage-feedstock")
sys.exit(1)
inFile = sys.argv[1]
outDir = sys.argv[2]
outFile = outDir + "/recipe/meta.yaml"
if not os.path.exists(outFile):
print("Cannot open file: " + outFile)
sys.exit(1)
# parse the versions from the conda env
conda_env = {}
print("Reading: " + inFile)
inHandle = open(inFile, 'r')
lines = inHandle.readlines()
for line in lines:
# Wipe comments
m = re.match('^(.*?)\#', line)
if m:
line = m.group(1)
# Match the package
m = re.match('^\s*-\s*(.*?)\s*=+\s*(.*?)(=|\s|$)', line)
if not m:
continue
package = m.group(1)
version = m.group(2)
if re.match('^\s*$', package):
continue # ignore empty lines
conda_env[package] = version
#print("got ", package, version)
# Update the lines in the output file
outHandle = open(outFile, 'r')
lines = outHandle.readlines()
for it in range(len(lines)):
line = lines[it]
# Ignore comments
m = re.match('^\#', line)
if m:
continue
# Match the package
m = re.match('^(\s+-[\t ]+)([^\s]+)(\s*)(.*?)$', line)
if not m:
continue
pre = m.group(1)
package = m.group(2)
spaces = m.group(3).rstrip("\n")
old_version = m.group(4).rstrip("\n")
if spaces == "":
# Ensure there's at least one space
spaces = " "
if old_version == "":
# If there was no version before, don't put one now
continue
if not package in conda_env:
continue
version = conda_env[package]
if old_version != version:
if ('[linux]' in old_version) or ('[osx]' in old_version):
# In this case the user better take a closer look
print("For package " + package + ", not replacing " +
old_version + " with " + version + ", a closer look is suggested.")
else:
print("For package " + package + ", replacing version "
+ old_version + " with " + version)
            lines[it] = pre + package + spaces + version + "\n"
# Save the updated lines to disk
print("Updating: " + outFile)
outHandle = open(outFile, "w")
outHandle.writelines(lines)
outHandle.close()
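# Hedged illustration (not part of the original script): what the two regexes
# above extract from typical conda YAML lines. The package name and version
# numbers here are invented examples.
def _demo_regexes():
    env_line = "  - numpy=1.21.0=py39h_0"
    m = re.match(r'^\s*-\s*(.*?)\s*=+\s*(.*?)(=|\s|$)', env_line)
    # m.group(1) == "numpy", m.group(2) == "1.21.0"
    recipe_line = "    - numpy 1.19.5\n"
    m2 = re.match(r'^(\s+-[\t ]+)([^\s]+)(\s*)(.*?)$', recipe_line)
    # m2 groups: leading "    - ", package "numpy", separating space, old version "1.19.5"
    return m.groups(), m2.groups()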
| 29.734513
| 88
| 0.622024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,743
| 0.51875
|
b133ecf4dd2609e5dbd8da4502d3368bb3abe2c9
| 172
|
py
|
Python
|
test.py
|
uuidd/SimilarCharacter
|
22e5f4b0b2798d903435aeb989ff2d0a4ad59d70
|
[
"MIT"
] | 199
|
2019-09-09T08:44:19.000Z
|
2022-03-24T12:42:04.000Z
|
test.py
|
uuidd/SimilarCharacter
|
22e5f4b0b2798d903435aeb989ff2d0a4ad59d70
|
[
"MIT"
] | 4
|
2020-08-06T08:03:28.000Z
|
2022-01-06T15:14:36.000Z
|
test.py
|
uuidd/SimilarCharacter
|
22e5f4b0b2798d903435aeb989ff2d0a4ad59d70
|
[
"MIT"
] | 58
|
2019-10-10T06:56:43.000Z
|
2022-03-21T02:58:01.000Z
|
import cv2
import ProcessWithCV2
img1 = cv2.imread("D:/py/chinese/7.png")
img2 = cv2.imread("D:/py/chinese/8.png")
a = ProcessWithCV2.dHash(img1, img2, 1)
print(a)
| 21.5
| 41
| 0.686047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.244186
|
b134a6803ce8be92cdcf0e2af682a4bd189585d7
| 3,782
|
py
|
Python
|
scripts/common/alignments.py
|
SilasK/genome_sketch
|
83366703669d749957e1935d6794b93023ed063d
|
[
"MIT"
] | 1
|
2021-03-26T11:41:55.000Z
|
2021-03-26T11:41:55.000Z
|
scripts/common/alignments.py
|
SilasK/FastDrep
|
83366703669d749957e1935d6794b93023ed063d
|
[
"MIT"
] | null | null | null |
scripts/common/alignments.py
|
SilasK/FastDrep
|
83366703669d749957e1935d6794b93023ed063d
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
MINIMAP_HEADERS = [
"Contig2",
"Length2",
"Start2",
"End2",
"Strand",
"Contig1",
"Length1",
"Start1",
"End1",
"Nmatches",
"Allength",
"Quality",
]
MINIMAP_DATATYPES = [str, int, int, int, str, str, int, int, int, int, int, int]
assert len(MINIMAP_HEADERS) == len(MINIMAP_DATATYPES)
minimap_dtypes_map = {"i": int, "f": float}
def parse_minimap_tag(tag, out={}):
name, dtype, value = tag.split(":")
dtype = minimap_dtypes_map.get(dtype, str)
out[name] = dtype(value)
def parse_minimap_line(line):
"""parses a minmap paf line, return a dict.
reads tags and converts datatyes"""
elements = line.strip().split()
out = {}
if not len(elements) == 0:
try:
for i, h in enumerate(MINIMAP_HEADERS):
dtype = MINIMAP_DATATYPES[i]
out[h] = dtype(elements[i])
for i in range(len(MINIMAP_HEADERS), len(elements)):
parse_minimap_tag(elements[i], out)
except Exception as e:
raise IOError(f"Error during parsing paf line : {elements}") from e
return out
def load_paf(paf_file):
try:
parsed = []
with open(paf_file) as f:
for line in f:
parsed.append(parse_minimap_line(line))
M = pd.DataFrame(parsed).dropna(how="all")
# some values are 0.0000, some are negative
M["Identity"] = 1 - M.de.clip(
0, 1
) # .replace('0.0000',0.00005).astype(float).apply(lambda d: max(d,0))
headers = MINIMAP_HEADERS + ["Identity"]
# rearange headers
M = M.loc[:, headers + list(M.columns.drop(headers))]
except Exception as e:
raise IOError(f"Error during parsing paf file: {paf_file}") from e
return M
def parse_paf_files(paf_files, genome_stats_file, output_file):
stats = pd.read_csv(genome_stats_file, index_col=0, sep="\t")
with open(output_file, "w") as out:
out.write(
"\t".join(
[
"genome1",
"genome2",
"Identity",
"Length",
"Length_at99id",
"Length_at95id",
"Id_at90length",
"Id_at50length",
"Coverage",
"Coverage95",
"Coverage99",
]
)
+ "\n"
)
for paf_file in paf_files:
M = load_paf(paf_file)
M.sort_values("Identity", inplace=True, ascending=False)
genome1, genome2 = os.path.splitext(paf_file)[0].split(os.path.sep)[-2:]
Identity = (M.Identity * M.Allength).sum() / M.Allength.sum()
Length = M.Allength.sum()
min_genome_size = stats.loc[[genome1, genome2], "Length"].min()
AlignedFraction = Length / min_genome_size
Length_at99id = M.query("Identity>=0.99").Allength.sum()
Length_at95id = M.query("Identity>=0.95").Allength.sum()
AlignedFraction95 = Length_at95id / min_genome_size
AlignedFraction99 = Length_at99id / min_genome_size
Id_at90length = (
M.loc[0.9 * M.Allength.sum() <= M.Allength.cumsum()].iloc[0].Identity
)
Id_at50length = (
M.loc[0.5 * M.Allength.sum() <= M.Allength.cumsum()].iloc[0].Identity
)
out.write(
f"{genome1}\t{genome2}\t{Identity}\t{Length}\t"
f"{Length_at99id}\t{Length_at95id}\t{Id_at90length}\t{Id_at50length}\t"
f"{AlignedFraction}\t{AlignedFraction95}\t{AlignedFraction99}\n"
)
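# Hedged illustration (not part of the original module): a single made-up PAF line
# in the minimap2 format parsed by parse_minimap_line(). The contig names, sizes
# and the "de" (divergence) tag value are invented for demonstration.
def _demo_parse_paf_line():
    line = "ctgB\t5000\t0\t4500\t+\tctgA\t6000\t100\t4600\t4400\t4500\t60\tde:f:0.01"
    rec = parse_minimap_line(line)
    # rec["Contig2"] == "ctgB", rec["Nmatches"] == 4400, rec["de"] == 0.01
    return rec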
| 26.447552
| 87
| 0.536489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 813
| 0.214966
|
b13523d49b7c54fc6f8d9d277610505b22619edf
| 961
|
py
|
Python
|
python/sprint1_nonfinals/l.py
|
tu2gin/algorithms-templates
|
14267819a11d36ee9125009b05049334bfdcec2a
|
[
"MIT"
] | null | null | null |
python/sprint1_nonfinals/l.py
|
tu2gin/algorithms-templates
|
14267819a11d36ee9125009b05049334bfdcec2a
|
[
"MIT"
] | null | null | null |
python/sprint1_nonfinals/l.py
|
tu2gin/algorithms-templates
|
14267819a11d36ee9125009b05049334bfdcec2a
|
[
"MIT"
] | null | null | null |
# L. The extra letter
# Vasya really likes string problems, so he came up with his own.
# There are 2 strings s and t consisting only of lowercase letters. String
# t is obtained by shuffling the letters of string s and inserting 1 letter
# at a random position. Find the added letter.
# Input format
# The input contains strings s and t, separated by a newline.
# The string lengths do not exceed 1000 characters. The strings are never empty.
# Output format
# Print the extra letter.
from typing import Tuple
def get_excessive_letter(shorter: str, longer: str) -> str:
list_1 = list(shorter)
list_2 = list(longer)
for i in range(0,len(list_1)):
if list_1[i] in list_2:
list_2.remove(str(list_1[i]))
result = ''.join(list_2)
return result
def read_input() -> Tuple[str, str]:
shorter = input().strip()
longer = input().strip()
return shorter, longer
shorter, longer = read_input()
print(get_excessive_letter(shorter, longer))
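# Hedged illustration (not part of the original solution): a tiny self-check of
# get_excessive_letter() on made-up strings; the expected extra letter is "d".
def _demo_excessive_letter():
    assert get_excessive_letter("abc", "cabd") == "d"
    return get_excessive_letter("abc", "cabd")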
| 29.121212
| 69
| 0.707596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 796
| 0.610429
|
b1353e1a12ba28028561c94ebd3cbfad77dbf672
| 194
|
py
|
Python
|
bentoml/lightgbm.py
|
francoisserra/BentoML
|
213e9e9b39e055286f2649c733907df88e6d2503
|
[
"Apache-2.0"
] | 1
|
2021-06-12T17:04:07.000Z
|
2021-06-12T17:04:07.000Z
|
bentoml/lightgbm.py
|
francoisserra/BentoML
|
213e9e9b39e055286f2649c733907df88e6d2503
|
[
"Apache-2.0"
] | 4
|
2021-05-16T08:06:25.000Z
|
2021-11-13T08:46:36.000Z
|
bentoml/lightgbm.py
|
francoisserra/BentoML
|
213e9e9b39e055286f2649c733907df88e6d2503
|
[
"Apache-2.0"
] | null | null | null |
from ._internal.frameworks.lightgbm import load
from ._internal.frameworks.lightgbm import save
from ._internal.frameworks.lightgbm import load_runner
__all__ = ["load", "load_runner", "save"]
| 32.333333
| 54
| 0.804124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.128866
|
b1355b614d3140ba034b33a7f3ee7859a1245971
| 723
|
py
|
Python
|
flake8_strings/visitor.py
|
d1618033/flake8-strings
|
2ad34a41eab65e2264da7aa91c54dbca701af1c5
|
[
"MIT"
] | null | null | null |
flake8_strings/visitor.py
|
d1618033/flake8-strings
|
2ad34a41eab65e2264da7aa91c54dbca701af1c5
|
[
"MIT"
] | 1
|
2021-02-19T13:50:29.000Z
|
2021-02-19T13:50:29.000Z
|
flake8_strings/visitor.py
|
d1618033/flake8-strings
|
2ad34a41eab65e2264da7aa91c54dbca701af1c5
|
[
"MIT"
] | null | null | null |
import ast
from typing import List
from flake8_plugin_utils import Visitor
from .errors import UnnecessaryBackslashEscapingError
class StringsVisitor(Visitor):
lines: List[str]
def _is_escaped_char(self, character: str) -> bool:
repr_c = repr(character)
return repr_c[1] == '\\' and repr_c[2] != '\\'
def visit_Str(self, node: ast.Str) -> None: # noqa: N802
if '\\' not in node.s:
return
if node.s[-1] == '\\':
return
if any(self._is_escaped_char(c) for c in node.s):
return
if self.lines[node.lineno - 1][node.col_offset] == 'r':
return
self.error_from_node(UnnecessaryBackslashEscapingError, node)
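# Hedged illustration (not part of the original plugin): a direct check of the
# escape-detection heuristic above, using invented sample characters. A literal
# such as "foo\\bar" (backslashes but no real escapes, not prefixed with r) is
# the kind of string visit_Str is meant to report.
def _demo_escape_heuristic():
    assert StringsVisitor._is_escaped_char(None, "\n") is True    # repr is '\n' -> real escape
    assert StringsVisitor._is_escaped_char(None, "\\") is False   # repr is '\\' -> plain backslash
    return True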
| 27.807692
| 69
| 0.615491
| 589
| 0.814661
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.042877
|
b1355fb67bbb27f060266c03cc17b3aa9d3f3edd
| 1,384
|
py
|
Python
|
tests/test_asyncio_hn.py
|
MITBigDataGroup2/asyncio-hn
|
7133530e8ffb56b7810bcd956241709fc2ae0f48
|
[
"MIT"
] | 30
|
2017-02-12T21:58:10.000Z
|
2021-11-04T00:11:49.000Z
|
tests/test_asyncio_hn.py
|
MITBigDataGroup2/asyncio-hn
|
7133530e8ffb56b7810bcd956241709fc2ae0f48
|
[
"MIT"
] | 4
|
2017-03-21T12:40:19.000Z
|
2021-11-15T17:46:46.000Z
|
tests/test_asyncio_hn.py
|
MITBigDataGroup2/asyncio-hn
|
7133530e8ffb56b7810bcd956241709fc2ae0f48
|
[
"MIT"
] | 2
|
2017-12-18T09:11:45.000Z
|
2022-02-09T16:45:49.000Z
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import pytest
from asyncio_hn import ClientHN
@pytest.mark.asyncio
async def test_last_n_posts():
async with ClientHN() as hn:
posts = await hn.last_n_items(2)
assert len(posts) == 2
@pytest.mark.asyncio
async def test_download_posts():
async with ClientHN() as hn:
posts = await hn.items((42, 4242, 424242))
for post in posts:
validate_post(post, post_id=424242, post_creator="1gor")
validate_post(post, post_id=4242, post_creator="PindaxDotCom")
validate_post(post, post_id=42, post_creator="sergei")
def validate_post(post, post_id, post_creator):
if post.get("id") == post_id:
assert post_creator == post.get("by")
@pytest.mark.asyncio
async def test_best_and_latest():
async with ClientHN() as hn:
stories = await hn.best_stories()
assert len(stories) == 200
latest = await hn.new_stories()
assert len(latest) == 500
@pytest.mark.asyncio
async def test_download_users():
async with ClientHN() as hn:
users = await hn.users(["maximabramchuk", "anthonybsd"])
for user in users:
if user["id"] == "maximabramchuk":
assert user["created"] == 1441729807
if user["id"] == "'anthonybsd'":
assert user["created"] == 1436886156
| 28.244898
| 74
| 0.629335
| 0
| 0
| 0
| 0
| 1,145
| 0.827312
| 1,061
| 0.766618
| 167
| 0.120665
|
b13626eb09eac5813e547227a9c0e21459be9cf0
| 5,649
|
py
|
Python
|
src/data/make_dataset.py
|
Rajasvi/Adverse-Food-Events-Analysis
|
8fb87cfaa4c55eaae56325e516623ad8661d7fb8
|
[
"MIT"
] | 1
|
2021-12-16T02:40:31.000Z
|
2021-12-16T02:40:31.000Z
|
src/data/make_dataset.py
|
AdityaR-Bits/adverse_food_events_analysis-1
|
8fb87cfaa4c55eaae56325e516623ad8661d7fb8
|
[
"MIT"
] | 1
|
2021-12-04T00:58:50.000Z
|
2021-12-04T00:58:50.000Z
|
src/data/make_dataset.py
|
AdityaR-Bits/adverse_food_events_analysis-1
|
8fb87cfaa4c55eaae56325e516623ad8661d7fb8
|
[
"MIT"
] | 2
|
2021-12-04T02:11:26.000Z
|
2021-12-04T06:32:19.000Z
|
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
import pandas as pd
import re
import string
from nltk.corpus import stopwords
def brand_preprocess(row, trim_len=2):
""" This function creates a brand name column by parsing out the product column of data. It trims the words based on trim length param to choose appropriate brand name.
Args:
row ([pd.Series]): Dataframe row
trim_len (int, optional): Length by which product name has to be trimmed. Defaults to 2.
Returns:
[str]: brand name corresponding to a product.
"""
assert isinstance(
row, pd.Series
), "Check whether the function is called over Series"
    if pd.isna(row["product"]) or pd.isna(row["category"]):
return pd.NA
# Remove punctuations from product name
regexPunctuation = re.compile("[%s]" % re.escape(string.punctuation))
cleanProduct = regexPunctuation.sub("", row["product"])
nameList = [
_.upper()
for _ in cleanProduct.lower().split(" ")
if _ not in stopwords.words("english")
]
if len(nameList) == 0:
return ""
# for certain categories use trim length to select brand name.
if row["category"] in [
"Nuts/Edible Seed",
"Vit/Min/Prot/Unconv Diet(Human/Animal)",
]:
return (
" ".join(nameList)
if len(nameList) < trim_len
else " ".join(nameList[:trim_len])
)
return nameList[0]
def age_preprocess(row):
"""This function converts age reports to a single unit : year(s)
since Data has age reported in multiple units like month(s),day(s)
Args:
row ([pd.Series]): A row of the entire Dataframe
Returns:
[float]: value of patient_age converted to years unit
"""
assert isinstance(
row, pd.Series
), "Check whether the function is called over Series"
age_conv = {
"month(s)": 1 / 12,
"year(s)": 1,
"day(s)": 1 / 365,
"Decade(s)": 10,
"week(s)": 1 / 52,
}
unit = row["age_units"]
age = row["patient_age"]
if pd.isna(age) or pd.isna(unit):
return -1
else:
return row["patient_age"] * round(age_conv[unit], 4)
def strip_str(x):
if isinstance(x, str):
x = x.strip()
return x
@click.command()
@click.argument("input_dirpath", type=click.Path(exists=True))
@click.argument("output_dirpath", type=click.Path())
def main(
input_dirpath="../../data/raw/", output_dirpath="../../data/processed",
):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
outPath = Path(output_dirpath)
inPath = Path(input_dirpath)
logger = logging.getLogger(__name__)
logger.info("Creating clean unified data from raw files")
aggReports = None
for p in list(inPath.glob("*.csv")):
curr_df = pd.read_csv(p, encoding="unicode_escape")
column_map = {x: x.lower().replace(" ", "_") for x in curr_df.columns}
curr_df = curr_df.rename(columns=column_map)
curr_df = curr_df.rename(
columns={"meddra_preferred_terms": "medra_preferred_terms"}
)
curr_df = curr_df.applymap(strip_str)
aggReports = curr_df if aggReports is None else pd.concat([aggReports, curr_df])
aggReports = aggReports.rename(columns={"description": "category"})
aggReports["caers_created_date"] = pd.to_datetime(aggReports.caers_created_date)
aggReports.reset_index(drop=True, inplace=True)
aggReports.to_csv(outPath / "clean_data.csv")
logger.info("Processing and enriching data")
# Create brand-enriched column.
logger.info("Making brand name column from clean data")
aggReports["brand"] = aggReports.apply(brand_preprocess, axis=1)
# Pre-processing Age column.
logger.info("Converting age to a common unit year(s)")
aggReports["patient_age"] = aggReports.apply(age_preprocess, axis=1)
aggReports = aggReports.drop(columns=["age_units"])
aggReports.to_csv(outPath / "processed_data.csv")
# Create exploded outcome-wise cleaned data.
logger.info("Making outcomes exploded data set from clean brand-name data")
aggReports.outcomes = aggReports.outcomes.apply(
lambda x: [y.strip() for y in x.split(",") if y != []]
)
expl_aggReports = aggReports.explode("outcomes")
expl_aggReports = expl_aggReports.reset_index(drop=True)
expl_aggReports.to_csv(outPath / "exploded_data.csv")
# Create time-stamp processed & exploded data.
aggReports_time = aggReports.drop_duplicates(
["report_id", "patient_age", "category", "sex"], ignore_index=True
)
aggReports_time["year"] = aggReports_time["caers_created_date"].apply(
lambda x: x.year
)
aggReports_time = aggReports_time.rename(
columns={"caers_created_date": "time_stamp"}
)
aggReports_time.to_csv(outPath / "clean_data_time.csv")
expl_aggReports_time = aggReports_time.explode("outcomes")
expl_aggReports_time["outcomes"] = expl_aggReports_time["outcomes"].str.strip()
expl_aggReports_time.loc[
expl_aggReports_time["outcomes"] == "", "outcomes"
] = "Not Specified"
expl_aggReports_time = expl_aggReports_time.reset_index(drop=True)
expl_aggReports_time.to_csv(outPath / "exploded_data_time.csv")
logger.info("Data cleaning and pre-processing done!")
if __name__ == "__main__":
log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
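# Hedged illustration (not part of the original pipeline): what the two row-level
# helpers above do to a single record. The field values are invented; only the
# column names match the ones used by the functions.
def _demo_row_helpers():
    row = pd.Series({
        "product": "Acme Super Almonds!",
        "category": "Nuts/Edible Seed",
        "patient_age": 6.0,
        "age_units": "month(s)",
    })
    brand = brand_preprocess(row)        # -> "ACME SUPER" (first two non-stopword tokens)
    age_years = age_preprocess(row)      # -> 6.0 * round(1/12, 4) = 0.4998 (six months in years)
    return brand, age_years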
| 32.465517
| 172
| 0.657639
| 0
| 0
| 0
| 0
| 3,142
| 0.556205
| 0
| 0
| 2,225
| 0.393875
|
b1367245e5290f368fa75d027c1ba49c8fa30f4e
| 5,061
|
py
|
Python
|
src/compare_eval.py
|
gccrpm/cdmf
|
5fca1393acbedbbd6ebc65bf2c9336645cc3e0fc
|
[
"BSD-2-Clause"
] | 1
|
2020-04-16T05:06:39.000Z
|
2020-04-16T05:06:39.000Z
|
src/compare_eval.py
|
gccrpm/cdmf
|
5fca1393acbedbbd6ebc65bf2c9336645cc3e0fc
|
[
"BSD-2-Clause"
] | null | null | null |
src/compare_eval.py
|
gccrpm/cdmf
|
5fca1393acbedbbd6ebc65bf2c9336645cc3e0fc
|
[
"BSD-2-Clause"
] | 1
|
2020-04-16T05:06:52.000Z
|
2020-04-16T05:06:52.000Z
|
import os
import re
import hyperparams as hp
from data_load import DataLoad
from tqdm import tqdm
import numpy as np
import pandas as pd
import tensorflow as tf
def load_ckpt_paths(model_name='cdmf'):
# get ckpt
ckpt_path = '../model_ckpt/compare/{}/'.format(model_name)
fpaths = []
with open(ckpt_path+'checkpoint', 'r', encoding='utf-8') as f_ckpt :
for line in f_ckpt.readlines()[1:]:
fname = re.sub(r'\"', '', line.split(':')[-1]).strip()
fpath = os.path.join(ckpt_path, fname)
fpaths.append(fpath)
return fpaths
if __name__ == '__main__':
data = DataLoad(data_path=hp.DATA_PATH,
fnames=hp.FNAMES,
forced_seq_len=hp.FORCED_SEQ_LEN,
vocab_size=hp.VOCAB_SIZE,
paly_times=hp.PLAY_TIMES,
num_main_actors=hp.NUM_MAIN_ACTORS,
batch_size=hp.BATCH_SIZE,
num_epochs=hp.NUM_EPOCHS,
noise_rate=hp.NOISE_RATE)
# CDMF
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
for fpath in load_ckpt_paths('cdmf'):
saver = tf.train.import_meta_graph(fpath+'.meta')
saver.restore(sess, fpath)
# Get the placeholders from the graph by name
m_oids = graph.get_tensor_by_name('movie_order_ids:0')
info = graph.get_tensor_by_name('info:0')
actors = graph.get_tensor_by_name('actors:0')
descriptions = graph.get_tensor_by_name('descriptions:0')
u_oids = graph.get_tensor_by_name('user_order_ids:0')
r_seq = graph.get_tensor_by_name('rating_sequence:0')
dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
# Tensors we want to evaluate
mse_op = graph.get_tensor_by_name('mse/mse_op:0')
# load evalset
eval_iter = data.load_data('eval')
mse, count = 0.0, 0
for (sub_X_user, sub_X_movie), sub_Y in tqdm(eval_iter):
# unpack
sub_u_oids, sub_bu_seq = sub_X_user
sub_m_oids, sub_info, sub_actors, sub_des, sub_bm_seq = sub_X_movie
sub_r_seq = sub_Y
dev_feed_dict = {
m_oids: sub_m_oids,
info: sub_info,
actors: sub_actors,
descriptions: sub_des,
u_oids: sub_u_oids,
r_seq: sub_r_seq,
dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
sub_mse = sess.run(mse_op, feed_dict=dev_feed_dict)
mse += sub_mse
count += 1
rmse = np.sqrt(mse / count)
print('cdmf | rmse:{}'.format(rmse))
# ConvMF
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
for fpath in load_ckpt_paths('convmf'):
saver = tf.train.import_meta_graph(fpath+'.meta')
saver.restore(sess, fpath)
# Get the placeholders from the graph by name
m_oids = graph.get_tensor_by_name('movie_order_ids:0')
descriptions = graph.get_tensor_by_name('descriptions:0')
u_oids = graph.get_tensor_by_name('user_order_ids:0')
r_seq = graph.get_tensor_by_name('rating_sequence:0')
dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
# Tensors we want to evaluate
mse_op = graph.get_tensor_by_name('mse/mse_op:0')
# load evalset
eval_iter = data.load_data('eval')
mse, count = 0.0, 0
for (sub_X_user, sub_X_movie), sub_Y in tqdm(eval_iter):
# unpack
sub_u_oids, sub_bu_seq = sub_X_user
sub_m_oids, sub_info, sub_actors, sub_des, sub_bm_seq = sub_X_movie
sub_r_seq = sub_Y
dev_feed_dict = {
m_oids: sub_m_oids,
descriptions: sub_des,
u_oids: sub_u_oids,
r_seq: sub_r_seq,
dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
sub_mse = sess.run(mse_op, feed_dict=dev_feed_dict)
mse += sub_mse
count += 1
rmse = np.sqrt(mse / count)
print('convmf | rmse:{}'.format(rmse))
| 40.814516
| 87
| 0.538234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 597
| 0.117961
|
b13b701d2eb809667c24251d55ce1c0bf248bc34
| 1,465
|
py
|
Python
|
substitute_finder/migrations/0003_product.py
|
tohugaby/pur_beurre_web
|
c3bdacee50907eea79821e7a8b3fe0f349719d88
|
[
"MIT"
] | 1
|
2020-01-05T18:58:51.000Z
|
2020-01-05T18:58:51.000Z
|
substitute_finder/migrations/0003_product.py
|
tohugaby/pur_beurre_web
|
c3bdacee50907eea79821e7a8b3fe0f349719d88
|
[
"MIT"
] | 3
|
2020-06-05T18:35:47.000Z
|
2021-06-10T20:32:44.000Z
|
substitute_finder/migrations/0003_product.py
|
tomlemeuch/pur_beurre_web
|
c3bdacee50907eea79821e7a8b3fe0f349719d88
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1 on 2018-08-14 09:42
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('substitute_finder', '0002_category'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('code', models.CharField(max_length=300, primary_key=True, serialize=False, verbose_name='identifiant')),
('product_name', models.CharField(max_length=300, verbose_name='nom du produit')),
('generic_name', models.CharField(max_length=1000, verbose_name='description')),
('url', models.URLField(max_length=1000, verbose_name='url OpenFoodFacts')),
('stores', models.CharField(max_length=300, verbose_name='vendeur')),
('nutrition_grade_fr', models.CharField(max_length=1, verbose_name='score nutritionnel')),
('last_updated', models.DateTimeField(auto_now=True, verbose_name='dernière mise à jour')),
('categories', models.ManyToManyField(to='substitute_finder.Category', verbose_name='categories')),
('users', models.ManyToManyField(related_name='favorite', to=settings.AUTH_USER_MODEL, verbose_name='utilisateurs')),
],
options={
'verbose_name': 'Produit',
'verbose_name_plural': 'Produits',
},
),
]
| 44.393939
| 133
| 0.624573
| 1,343
| 0.915474
| 0
| 0
| 0
| 0
| 0
| 0
| 420
| 0.286299
|
b13db7a0887619658384413e84415d13be784dc2
| 6,613
|
py
|
Python
|
parameters/standard.py
|
David-Loibl/gistemp
|
4b96696243cbbb425c7b27fed35398e0fef9968d
|
[
"BSD-3-Clause"
] | 1
|
2020-02-04T13:16:05.000Z
|
2020-02-04T13:16:05.000Z
|
parameters/standard.py
|
David-Loibl/gistemp4.0
|
4b96696243cbbb425c7b27fed35398e0fef9968d
|
[
"BSD-3-Clause"
] | null | null | null |
parameters/standard.py
|
David-Loibl/gistemp4.0
|
4b96696243cbbb425c7b27fed35398e0fef9968d
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
#
# parameters/standard.py
#
# Nick Barnes, Ravenbrook Limited, 2010-02-15
# Avi Persin, Revision 2016-01-06
"""Parameters controlling the standard GISTEMP algorithm.
Various parameters controlling each phase of the algorithm are
collected and documented here. They appear here in approximately the
order in which they are used in the algorithm.
Parameters controlling cccgistemp extensions to the standard GISTEMP
algorithm, or obsolete features of GISTEMP, are in other parameter
files.
"""
station_drop_minimum_months = 20
"""A station record must have at least one month of the year with at
least this many valid data values, otherwise it is dropped immediately
prior to the peri-urban adjustment step."""
rural_designator = "global_light <= 10"
"""Describes the test used to determine whether a station is rural or
not, in terms of the station metadata fields. Relevant fields are:
'global_light' (global satellite nighttime radiance value); 'popcls'
(GHCN population class flag; the value 'R' stands for rural);
'us_light' (class derived from satellite nighttime radiance covering
the US and some neighbouring stations), 'berkeley' (a field of unknown
provenance which seems to be related to the Berkeley Earth Surface
Temperature project).
The value of this parameter may be a comma separated sequence. Each
member in that sequence can either be a metadata field name, or a
numeric comparison on a metadata field name (e.g. "global_light <= 10",
the default). If a field name appears on its own, the meaning is
field-dependent.
The fields are consulted in the order specified until one is found
that is not blank, and that obeys the condition (the only field which
is likely to be blank is 'us_light': this sequential feature is
required to emulate a previous version of GISTEMP).
Previous versions of GISTEMP can be "emulated" as follows:
"popcls" GISTEMP 1999 to 2001
"us_light, popcls" GISTEMP 2001 to 2010
"global_light <= 10" GISTEMP 2010 onwards
"global_light <= 0" GISTEMP 2011 passing 2 as second arg to do_comb_step2.sh
"berkeley <= 0" GISTEMP 2011 passing 3 as second arg to do_comb_step2.sh
"""
urban_adjustment_min_years = 20
"""When trying to calculate an urban station adjustment, at least this
many years have to have sufficient rural stations (if there are
not enough qualifying years, we may try again at a larger radius)."""
urban_adjustment_proportion_good = 2.0 / 3.0
"""When trying to calculate an urban station adjustment, at least this
proportion of the years to which the fit applies have to have
sufficient rural stations (if there are insufficient stations, we may
try again at a larger radius)."""
urban_adjustment_min_rural_stations = 3
"""When trying to calculate an urban station adjustment, a year
without at least this number of valid readings from rural stations is
not used to calculate the fit."""
urban_adjustment_min_leg = 5
"""When finding a two-part adjustment, only consider knee years which
have at least this many data points (note: not years) on each side."""
urban_adjustment_short_leg = 7
"""When a two-part adjustment has been identified, if either leg is
shorter than this number of years, a one-part adjustment is applied
instead."""
urban_adjustment_steep_leg = 0.1
"""When a two-part adjustment has been identified, if the gradient of
either leg is steeper than this (in absolute degrees Celsius per
year), or if the difference between the leg gradients is greater than
this, a one-part adjustment is applied instead."""
urban_adjustment_leg_difference = 0.05
"""When a two-part adjustment has been identified, if the difference
in gradient between the two legs is greater than this (in absolute
degrees Celsius per year), it is counted separately for statistical
purposes."""
urban_adjustment_reverse_gradient = 0.02
"""When a two-part adjustment has been identified, if the two
gradients have opposite sign, and both gradients are steeper than this
(in absolute degrees Celsius per year), a one-part adjustment is
applied instead."""
urban_adjustment_full_radius = 1000.0
"""Range in kilometres within which a rural station will be considered
for adjusting an urban station record. Half of this radius will be
attempted first."""
rural_station_min_overlap = 20
"""When combining rural station annual anomaly records to calculate
urban adjustment parameters, do not combine a candidate rural record
if it has fewer than this number years of overlap."""
gridding_min_overlap = 20
"""When combining station records to give a grid record, do not
combine a candidate station record if it has fewer than this number of
years of overlap with the combined grid record."""
gridding_radius = 1200.0
"""The radius in kilometres used to find and weight station records to
give a grid record."""
gridding_reference_period = (1951, 1980)
"""When gridding, temperature series are turned into anomaly series by
subtracting monthly means computed over a reference period. This is
the first and last years of that reference period."""
sea_surface_cutoff_temp = -1.77
"""When incorporating monthly sea-surface datasets, treat any
temperature colder than this as missing data."""
subbox_min_valid = 240
"""When combining the sub-boxes into boxes, do not use any sub-box
record, either land or ocean, which has fewer than this number of
valid data."""
subbox_land_range = 100
"""If a subbox has both land data and ocean data, but the distance
from the subbox centre to the nearest station used in its record is
less than this, the land data is used in preference to the ocean data
when calculating the box series. Note: the distance used is actually a
great-circle chord length."""
subbox_reference_period = (1961, 1990)
"""When combining subbox records into box records, temperature series
are turned into anomaly series by subtracting monthly means computed
over a reference period. This is the first and last years of that
reference period."""
box_min_overlap = 20
"""When combining subbox records to make box records, do not combine a
calendar month from a candidate subbox record if it has fewer than
this number of years of overlap with the same calendar month in the
combined box record. Also used when combining boxes into zones."""
box_reference_period = (1951, 1980)
"""When combining box records into zone records, temperature series
are turned into anomaly series by subtracting monthly means computed
over a reference period. This is the first and last years of that
reference period."""
zone_annual_min_months = 6
"""When computing zone annual means, require at least this many valid
month data."""
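# Hedged illustration (appended here; not part of the original parameter file and
# not the project's actual parser): a minimal sketch of how one clause of
# rural_designator such as "global_light <= 10" could be evaluated against a
# station metadata dict. The field names and sample station are assumptions.
def _demo_rural_clause(station, clause="global_light <= 10"):
    import operator
    import re
    ops = {"<=": operator.le, ">=": operator.ge, "<": operator.lt, ">": operator.gt}
    m = re.match(r"^\s*(\w+)\s*(<=|>=|<|>)\s*([-\d.]+)\s*$", clause)
    if not m:
        return None  # bare field names have field-dependent meaning; not handled in this sketch
    field, op, threshold = m.group(1), ops[m.group(2)], float(m.group(3))
    value = station.get(field)
    return None if value in (None, "") else op(float(value), threshold)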
| 42.121019
| 77
| 0.788145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,855
| 0.885377
|
b13ecc0cc389e823f57ccec244dcd3eab8ae5459
| 5,781
|
py
|
Python
|
pypdevs/src/pypdevs/tracers/tracerCell.py
|
martinvy/sin-model-elevators
|
ebf6511d61326972b2e366c8975f76a944196a6f
|
[
"MIT"
] | 1
|
2018-09-19T14:42:28.000Z
|
2018-09-19T14:42:28.000Z
|
pypdevs/src/pypdevs/tracers/tracerCell.py
|
martinvy/sin-model-elevators
|
ebf6511d61326972b2e366c8975f76a944196a6f
|
[
"MIT"
] | null | null | null |
pypdevs/src/pypdevs/tracers/tracerCell.py
|
martinvy/sin-model-elevators
|
ebf6511d61326972b2e366c8975f76a944196a6f
|
[
"MIT"
] | 2
|
2020-05-29T10:12:37.000Z
|
2021-05-19T21:32:35.000Z
|
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pypdevs.util import runTraceAtController, toStr
from pypdevs.activityVisualisation import visualizeMatrix
import sys
class TracerCell(object):
"""
A tracer for Cell-DEVS style tracing output
"""
def __init__(self, uid, server, filename, x_size, y_size, multifile):
"""
Constructor
:param uid: the UID of this tracer
:param server: the server to make remote calls on
:param filename: filename to save to
:param x_size: the x size of the grid
:param y_size: the y size of the grid
:param multifile: whether or not multiple files should be generated for each timestep
"""
if server.getName() == 0:
self.filename = filename
else:
self.filename = None
self.server = server
self.uid = uid
self.x_size = x_size
self.y_size = y_size
self.multifile = multifile
self.prevtime = 0.0
def startTracer(self, recover):
"""
Starts up the tracer
:param recover: whether or not this is a recovery call (so whether or not the file should be appended to)
"""
if self.filename is None:
return
elif recover:
if not self.multifile:
self.cell_realfile = open(self.filename, 'a+')
else:
if not self.multifile:
self.cell_realfile = open(self.filename, 'w')
self.cell_count = 0
self.cells = [[None] * self.y_size for _ in range(self.x_size)]
def stopTracer(self):
"""
Stop the tracer
"""
if not self.multifile:
self.cell_realfile.flush()
def traceInit(self, aDEVS, t):
"""
The trace functionality for Cell DEVS output at initialisation
:param aDEVS: the model that was initialised
:param t: time at which it should be traced
"""
try:
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.x,
aDEVS.y,
t,
toStr(aDEVS.state.toCellState())])
except AttributeError:
pass
def traceInternal(self, aDEVS):
"""
The trace functionality for Cell DEVS output at an internal transition
:param aDEVS: the model that transitioned
"""
try:
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.x,
aDEVS.y,
aDEVS.time_last,
toStr(aDEVS.state.toCellState())])
except AttributeError:
pass
def traceExternal(self, aDEVS):
"""
The trace functionality for Cell DEVS output at an external transition
:param aDEVS: the model that transitioned
"""
try:
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.x,
aDEVS.y,
aDEVS.time_last,
toStr(aDEVS.state.toCellState())])
except AttributeError:
pass
def traceConfluent(self, aDEVS):
"""
The trace functionality for Cell DEVS output at a confluent transition
:param aDEVS: the model that transitioned
"""
try:
runTraceAtController(self.server,
self.uid,
aDEVS,
[aDEVS.x,
aDEVS.y,
aDEVS.time_last,
toStr(aDEVS.state.toCellState())])
except AttributeError as e:
print(e)
pass
def trace(self, x, y, time, state):
"""
Save the state of the cell
:param x: the x coordinate of the model, to be used when plotting
:param y: the y coordinate of the model, to be used when plotting
:param time: the time when the model assumed this state
:param state: the actual state to print
"""
        # Strip off the age for Cell DEVS
time = time[0]
if time != self.prevtime:
            # First flush the grid
self.cell_count += 1
if self.multifile:
self.cell_realfile = open(self.filename % self.cell_count, 'w')
else:
self.cell_realfile.write("== At time %s ===\n" % (self.prevtime))
visualizeMatrix(self.cells, "%s", self.cell_realfile)
self.prevtime = time
if self.multifile:
self.cell_realfile.close()
self.cells[x][y] = state
| 35.466258
| 113
| 0.523785
| 4,967
| 0.859194
| 0
| 0
| 0
| 0
| 0
| 0
| 2,327
| 0.402526
|
b13f03597d9a5e677488aa6621f7a6411da41c2d
| 3,223
|
py
|
Python
|
Estrangement/tests/test_utils.py
|
kawadia/estrangement
|
612542bf4af64f248766ad28c18028ff4b2307b5
|
[
"BSD-3-Clause"
] | 7
|
2015-02-17T14:04:25.000Z
|
2020-02-16T08:59:00.000Z
|
tnetwork/DCD/externals/estrangement_master/Estrangement/tests/test_utils.py
|
Yquetzal/tnetwork
|
43fb2f19aeed57a8a9d9af032ee80f1c9f58516d
|
[
"BSD-2-Clause"
] | 1
|
2019-07-13T16:16:28.000Z
|
2019-07-15T09:34:33.000Z
|
Estrangement/tests/test_utils.py
|
kawadia/estrangement
|
612542bf4af64f248766ad28c18028ff4b2307b5
|
[
"BSD-3-Clause"
] | 4
|
2015-02-20T15:29:59.000Z
|
2021-03-28T04:12:08.000Z
|
import networkx as nx
import sys
import os
import nose
sys.path.append(os.getcwd() + "/..")
import utils
class test_utils:
def setUp(self):
self.g0 = nx.Graph()
self.g1 = nx.Graph()
self.g2 = nx.Graph()
self.g3 = nx.Graph()
self.g4 = nx.Graph()
self.g5 = nx.Graph()
self.g7 = nx.Graph()
self.g6 = nx.Graph()
self.g0.add_edges_from([(1,2,{'weight':2}),(1,3,{'weight':1}),(2,4,{'weight':1})])
self.g1.add_edges_from([(1,4,{'weight':1}),(2,3,{'weight':1}),(3,4,{'weight':1})])
self.g2.add_edges_from([(1,2,{'weight':2}),(2,3,{'weight':1}),(3,4,{'weight':1})])
self.g3.add_edges_from([(5,6),(5,7)])
self.g4.add_edges_from([(1,5),(2,3)])
self.g5.add_edges_from([(1,2,{'weight':2}),(1,3,{'weight':1}),(2,4,{'weight':1})])
self.g6.add_edges_from([(1,2,{'weight':1}),(1,3,{'weight':1}),(2,3,{'weight':1})])
self.g7.add_edges_from([(1,2,{'weight':1})])
self.label_dict1 = {1:'a',2:'a',3:'b',4:'b',5:'c',6:'c'}
self.label_dict2 = {1:'a',2:'b',3:'b',4:'b',5:'c',6:'c'}
self.label_dict3 = {1:'a',2:'b',3:'c',4:'d',5:'e',6:'f'}
self.label_dict4 = {1:'a',2:'a',3:'a',4:'a',5:'a',6:'a'}
self.label_dict5 = {1:'b',2:'b',3:'b',4:'b',5:'b',6:'b'}
self.label_dict6 = {1:'a',2:'b',3:'b'}
def test_graph_distance(self):
assert utils.graph_distance(self.g0, self.g1) == 1
assert utils.graph_distance(self.g0, self.g1, False) == 1
assert utils.graph_distance(self.g0, self.g0) == 0
assert utils.graph_distance(self.g0, self.g0) == 0
assert utils.graph_distance(self.g0, self.g2, False) == 0.8
assert utils.graph_distance(self.g0, self.g2, True) == 0.5
def test_node_graph_distance(self):
assert utils.node_graph_distance(self.g0, self.g1) == 0
assert utils.node_graph_distance(self.g0, self.g0) == 0
assert utils.node_graph_distance(self.g0, self.g3) == 1
assert utils.node_graph_distance(self.g0, self.g4) == 0.4
assert utils.node_graph_distance(nx.path_graph(2),nx.path_graph(4)) == 0.5
def test_Estrangement(self):
assert utils.Estrangement(self.g0, self.label_dict4, self.g3) == 0 # no common edge
assert utils.Estrangement(self.g0, self.label_dict3, self.g5) == 1 # all common edge, all diff community
assert utils.Estrangement(self.g0, self.label_dict1, self.g2) == 0.25 # one edge between community
nose.tools.assert_almost_equal(utils.Estrangement(self.g6, self.label_dict6, self.g7),0.3333,4)
print(utils.Estrangement(self.g6, self.label_dict6, self.g7))
def test_match_labels(self):
assert utils.match_labels(self.label_dict1, self.label_dict1) == self.label_dict1 # snapshots are the same
assert utils.match_labels(self.label_dict5, self.label_dict4) == self.label_dict4 # same community, diff label
assert utils.match_labels(self.label_dict4, self.label_dict4) == self.label_dict4 # same community, same label
def test_confidence_interval(self):
assert utils.confidence_interval([2,2,2,2]) == 0
nose.tools.assert_almost_equal(utils.confidence_interval([1,2,3,4]), 1.096,3)
assert utils.confidence_interval([2,2,4,4]) == 0.98
| 48.104478
| 127
| 0.630779
| 3,111
| 0.96525
| 0
| 0
| 0
| 0
| 0
| 0
| 393
| 0.121936
|
b13f674704e7fed7b35db9e06e6e7c93a0224c41
| 2,184
|
py
|
Python
|
src/train.py
|
stephenllh/bcs-unet
|
be534a25e28cbe3501278d0ee6e2417b2cd737d3
|
[
"MIT"
] | 5
|
2021-05-04T12:46:32.000Z
|
2022-03-17T09:33:39.000Z
|
src/train.py
|
stephenllh/bcs-unet
|
be534a25e28cbe3501278d0ee6e2417b2cd737d3
|
[
"MIT"
] | null | null | null |
src/train.py
|
stephenllh/bcs-unet
|
be534a25e28cbe3501278d0ee6e2417b2cd737d3
|
[
"MIT"
] | null | null | null |
import os
import argparse
import pytorch_lightning as pl
from pytorch_lightning.callbacks import (
ModelCheckpoint,
EarlyStopping,
LearningRateMonitor,
)
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.seed import seed_everything
from data.emnist import EMNISTDataModule
from data.svhn import SVHNDataModule
from data.stl10 import STL10DataModule
from engine.learner import BCSUNetLearner
from utils import load_config
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
parser = argparse.ArgumentParser()
parser.add_argument(
"-d", "--dataset", type=str, required=True, help="'EMNIST', 'SVHN', or 'STL10'"
)
parser.add_argument(
"-s",
"--sampling_ratio",
type=float,
required=True,
help="Sampling ratio in percentage",
)
args = parser.parse_args()
def run():
seed_everything(seed=0, workers=True)
config = load_config(f"../config/bcsunet_{args.dataset}.yaml")
config["sampling_ratio"] = args.sampling_ratio / 100
if args.dataset == "EMNIST":
data_module = EMNISTDataModule(config)
elif args.dataset == "SVHN":
data_module = SVHNDataModule(config)
elif args.dataset == "STL10":
data_module = STL10DataModule(config)
learner = BCSUNetLearner(config)
callbacks = [
ModelCheckpoint(**config["callbacks"]["checkpoint"]),
EarlyStopping(**config["callbacks"]["early_stopping"]),
LearningRateMonitor(),
]
log_name = f"BCSUNet_{args.dataset}_{int(config['sampling_ratio'] * 10000):04d}"
logger = TensorBoardLogger(save_dir="../logs", name=log_name)
message = f"Running BCS-UNet on {args.dataset} dataset. Sampling ratio = {config['sampling_ratio']}"
print("-" * 100)
print(message)
print("-" * 100)
trainer = pl.Trainer(
gpus=config["trainer"]["gpu"],
max_epochs=config["trainer"]["epochs"],
default_root_dir="../",
callbacks=callbacks,
precision=(16 if config["trainer"]["fp16"] else 32),
logger=logger,
)
trainer.fit(learner, data_module)
trainer.test(learner, datamodule=data_module, ckpt_path="best")
if __name__ == "__main__":
run()
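# Illustrative usage note (not part of the original script): with the argparse
# flags defined above, a typical invocation would look like
#   python train.py --dataset EMNIST --sampling_ratio 25
# which loads ../config/bcsunet_EMNIST.yaml and trains with sampling_ratio 0.25.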
| 29.12
| 104
| 0.688645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 490
| 0.224359
|
b1402f6a4aea579ed7251e589133544512e942f3
| 6,681
|
py
|
Python
|
perturbation_classifiers/util/dataset.py
|
rjos/perturbation-classifiers
|
5637b49c5c297e20b4ee6bcee25173d9d11d642f
|
[
"MIT"
] | null | null | null |
perturbation_classifiers/util/dataset.py
|
rjos/perturbation-classifiers
|
5637b49c5c297e20b4ee6bcee25173d9d11d642f
|
[
"MIT"
] | null | null | null |
perturbation_classifiers/util/dataset.py
|
rjos/perturbation-classifiers
|
5637b49c5c297e20b4ee6bcee25173d9d11d642f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Author: Rodolfo J. O. Soares <rodolfoj.soares@gmail.com>
import numpy as np
import re
class KeelAttribute:
"""
A class that represent an attribute of keel dataset format.
"""
TYPE_REAL, TYPE_INTEGER, TYPE_NOMINAL = ("real", "integer", "nominal")
def __init__(self, attribute_name, attribute_type, attribute_range, attribute_builder):
self.name = attribute_name
self.type = attribute_type
self.range = attribute_range
self.builder = attribute_builder
class KeelDataSet:
"""
A class that represent the keel dataset format.
"""
UNKNOWN = '?'
def __init__(self, relation_name, attributes, data, inputs=None, outputs=None):
self.name = relation_name
self.attributes = attributes
self.data = data
self.inputs = inputs
self.outputs = outputs
self.shape = len(data[0]), len(data)
self.ir = self.__imbalance_ratio()
def __get_data(self, attributes):
return [self.data[self.attributes.index(a)] for a in attributes]
def __imbalance_ratio(self):
"""Compute the imbalance ratio of the dataset
"""
labels = self.__get_data(self.outputs)
labels = np.concatenate(labels)
_, count_classes = np.unique(labels, return_counts=True)
max_count = np.max(count_classes)
min_count = np.min(count_classes)
return round((max_count / min_count), 2)
def get_data(self):
"""Returns (data, target) of the dataset.
"""
inputs = self.__get_data(self.inputs)
outputs = self.__get_data(self.outputs)
return np.transpose(inputs), np.concatenate(outputs)
def __str__(self):
row_format = "{:<31}" * 5
labels = self.__get_data(self.outputs)
labels = np.concatenate(labels)
classes = np.unique(labels)
# metadata = f"{self.name}:\tAttributes: {self.shape[1]}\tSamples: {self.shape[0]}\tClasses: {classes.shape[0]}\tImbalance Ratio: {self.ir}"
return row_format.format(f"{self.name} ", *[f"Attributes: {self.shape[1]}", f"Samples: {self.shape[0]}", f"Classes: {classes.shape[0]}", f"IR: {self.ir}"])
def __get_header(self):
"""Get the header of a keel dataset format.
"""
header = f"@relation {self.name}\n"
attributes = []
for attr in self.attributes:
attr_type = "real" if attr.type == KeelAttribute.TYPE_REAL else "integer" if attr.type == KeelAttribute.TYPE_INTEGER else ''
if len(attr_type) > 0:
attributes.append(f"@attribute {attr.name} {attr_type} [{attr.range[0]}, {attr.range[1]}]")
else:
attributes.append("@attribute " + attr.name + " {" + (", ").join(list(attr.range)) + "}")
header += "\n".join(attributes)
header += "\n"
header += f"@inputs {(', ').join([attr.name for attr in self.inputs])}\n"
header += f"@outputs {(', ').join([attr.name for attr in self.outputs])}\n"
header += "@data\n"
return header
def save(self, path):
"""Export the data on keel dataset format.
Parameters
----------
path : str
The filepath to save the dataset.
"""
with open(path, 'w') as f:
# Write header of database
f.write(self.__get_header())
# Write data of database
data = list(map(list, zip(*self.data)))
data = '\n'.join(map(', '.join, map(lambda x: map(str, x), data)))
f.write(data)
def load_keel_file(path):
"""Load a keel dataset format.
Parameters
----------
path : str
The filepath of the keel dataset format.
Returns
-------
keel_dataset: KeelDataset
The keel dataset format loaded.
"""
handle = open(path)
try:
line = handle.readline().strip()
header_parts = line.split()
if header_parts[0] != "@relation" or len(header_parts) != 2:
raise SyntaxError("This is not a valid keel database.")
# Get database name
relation_name = header_parts[1]
# Get attributes
line = handle.readline().strip()
attrs = []
lkp = {}
while line.startswith("@attribute"):
# Get attribute name
attr_name = line.split(" ")[1]
# Get attribute type
match = re.findall(r"\s([a-z]+)\s{0,1}\[", line)
if len(match) > 0:
attr_type = match[0]
else:
attr_type = "nominal"
# Get values range
if attr_type != "nominal":
match = re.findall(r"\[(.*?)\]", line)
attr_builder = float if attr_type == "real" else int
attr_range = tuple(map(attr_builder, match[0].split(",")))
else:
match = re.findall(r"\{(.*?)\}", line)
attr_builder = str
attr_range = tuple(match[0].replace(" ", "").split(","))
keel_attribute = KeelAttribute(attr_name, attr_type, attr_range, attr_builder)
attrs.append(keel_attribute)
lkp[attr_name] = keel_attribute
line = handle.readline().strip()
# Get inputs
if not line.startswith("@input"):
raise SyntaxError("Expected @input or @inputs. " + line)
inputs_parts = line.split(maxsplit=1)
inputs_name = inputs_parts[1].replace(" ", "").split(",")
inputs = [lkp[name] for name in inputs_name]
# Get output
line = handle.readline().strip()
if not line.startswith("@output"):
            raise SyntaxError("Expected @output or @outputs. " + line)
output_parts = line.split(maxsplit=1)
output_name = output_parts[1].replace(" ", "").split(",")
outputs = [lkp[name] for name in output_name]
# Get data
line = handle.readline().strip()
if line != "@data":
raise SyntaxError("Expected @data.")
data = [[] for _ in range(len(attrs))]
for data_line in handle:
if data_line:
data_values = data_line.strip().replace(" ", "").split(',')
for lst, value, attr in zip(data, data_values, attrs):
v = value
v = v if v == KeelDataSet.UNKNOWN else attr.builder(v)
lst.append(v)
return KeelDataSet(relation_name, attrs, data, inputs, outputs)
finally:
if path:
handle.close()
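# Illustrative usage sketch (not part of the original module); 'iris.dat' is a
# hypothetical KEEL-format file and only helpers defined above are used.
if __name__ == '__main__':
    keel_dataset = load_keel_file('iris.dat')
    X, y = keel_dataset.get_data()
    print(keel_dataset)  # name, attribute/sample/class counts and imbalance ratio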
| 33.074257
| 163
| 0.553959
| 3,503
| 0.524323
| 0
| 0
| 0
| 0
| 0
| 0
| 1,813
| 0.271367
|
b14084e431f80764a4ba711f2600b59b246111f5
| 830
|
py
|
Python
|
ex44e.py
|
liggettla/python
|
4bdad72bc2143679be6d1f8722b83cc359753ca9
|
[
"MIT"
] | null | null | null |
ex44e.py
|
liggettla/python
|
4bdad72bc2143679be6d1f8722b83cc359753ca9
|
[
"MIT"
] | null | null | null |
ex44e.py
|
liggettla/python
|
4bdad72bc2143679be6d1f8722b83cc359753ca9
|
[
"MIT"
] | null | null | null |
#Rather than rely on implicit inheritance from other classes, a class can just
#call the functions of another class it holds; this is termed composition
class Other(object):
def override(self):
print "OTHER override()"
def implicit(self):
print "OTHER implicit()"
def altered(self):
print "OTHER altered()"
class Child(object):
def __init__(self):
#Here the Child uses Other() to get its work done
#Rather than just using implicit inheritance
self.other = Other()
def implicit(self):
self.other.implicit()
def override(self):
print "CHILD override()"
def altered(self):
print "CHILD, BEFORE OTHER altered()"
self.other.altered()
print "CHILD, AFTER OTHER altered()"
son = Child()
son.implicit()
son.override()
son.altered()
| 21.842105
| 78
| 0.639759
| 634
| 0.763855
| 0
| 0
| 0
| 0
| 0
| 0
| 355
| 0.427711
|
b14119e47e0e47d908eda6baf79a8ccfb87c16a5
| 2,333
|
py
|
Python
|
tools/create_doc.py
|
nbigaouette/gitlab-api-rs
|
e84c871ad6f852072a373cd950ede546525913eb
|
[
"Apache-2.0",
"MIT"
] | 11
|
2017-01-22T18:12:57.000Z
|
2021-02-15T21:14:34.000Z
|
tools/create_doc.py
|
nbigaouette/gitlab-api-rs
|
e84c871ad6f852072a373cd950ede546525913eb
|
[
"Apache-2.0",
"MIT"
] | 16
|
2016-12-05T22:09:27.000Z
|
2021-12-25T14:56:43.000Z
|
tools/create_doc.py
|
nbigaouette/gitlab-api-rs
|
e84c871ad6f852072a373cd950ede546525913eb
|
[
"Apache-2.0",
"MIT"
] | 3
|
2017-01-25T19:30:52.000Z
|
2018-01-24T09:08:07.000Z
|
#!/usr/bin/env python3
import os
import re
import sys
import urllib.request
# api_filename = "projects.md"
api_filename = "groups.md"
url = "https://gitlab.com/gitlab-org/gitlab-ce/raw/master/doc/api/" + api_filename
doc_dir = "doc_tmp"
if not os.path.exists(doc_dir):
os.makedirs(doc_dir)
filename, headers = urllib.request.urlretrieve(url)
with open(filename, 'r') as f:
markdown = f.read()
# print("markdown:", markdown)
urllib.request.urlcleanup()
# Strip out all `json` code blocks included in the file.
p = re.compile("```json.*?```", re.MULTILINE | re.DOTALL)
markdown_wo_json = re.sub(p, "", markdown)
GET_block = "GET /"
p_GET_block = re.compile("```\n(%s.*?)\n```" % GET_block, re.MULTILINE | re.DOTALL)
p_GET_variable = re.compile("(:[^/]*)")
sectionsList = re.sub("[^#]#", "TOSPLIT#", markdown_wo_json).split("TOSPLIT")
for section in sectionsList:
if GET_block in section:
lines = section.splitlines()
title = lines[0].replace("#", "").strip()
# print("title:", title)
# section = re.sub(p_GET_block, "```\n```")
m = p_GET_block.search(section)
GET_command = m.group(1)
GET_variables = p_GET_variable.findall(GET_command)
# Sort the variables in decreasing order of _length_. The reason is that a replace of a shorter
# variable might catch a longer one and corrupt the final result.
GET_variables.sort(key = lambda s: -len(s))
# Replace occurrences of the found variables with upper case, removing the ":"
new_GET_command = GET_command
for GET_variable in GET_variables:
new_GET_command = new_GET_command.replace(GET_variable, GET_variable.replace(":", "").upper())
# section = section.replace(GET_command, new_GET_command)
lines = [line.replace(GET_command, new_GET_command) for line in lines]
# print("title:", title)
filename = api_filename.replace(".md", "") + "-GET-" + title.replace(" ", "-").lower() + ".md"
print("filename:", filename)
full_filename = os.path.join(doc_dir, filename)
with open(full_filename, "w") as f:
f.write("//! %s\n" % title)
f.write("//!\n")
f.write("//! # %s\n" % title)
for line in lines[1:]:
f.write("//! %s\n" % line)
| 33.328571
| 106
| 0.624946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 775
| 0.33219
|
b14315cacfc7adb3442f4613fdef5630de51a32c
| 997
|
py
|
Python
|
samples/butia/sumo_crono/push_mouse_event.py
|
RodPy/Turtlebots.activity
|
f885d7d2e5d710c01294ae60da995dfb0eb36b21
|
[
"MIT"
] | null | null | null |
samples/butia/sumo_crono/push_mouse_event.py
|
RodPy/Turtlebots.activity
|
f885d7d2e5d710c01294ae60da995dfb0eb36b21
|
[
"MIT"
] | null | null | null |
samples/butia/sumo_crono/push_mouse_event.py
|
RodPy/Turtlebots.activity
|
f885d7d2e5d710c01294ae60da995dfb0eb36b21
|
[
"MIT"
] | 1
|
2020-06-17T15:44:16.000Z
|
2020-06-17T15:44:16.000Z
|
#Copyright (c) 2009-11, Walter Bender, Tony Forster
# This procedure is invoked when the user-definable block on the
# "extras" palette is selected.
# Usage: Import this code into a Python (user-definable) block; when
# this code is run, the current mouse status will be pushed to the
# FILO heap. If a mouse button event occurs, a y, x, and 1 are pushed
# to the heap. If no button is pressed, 0 is pushed to the heap.
# To use these data, pop the heap in a compare block to determine if a
# button has been pushed. If a 1 was popped from the heap, pop the x
# and y coordinates.
def myblock(tw, x): # ignore second argument
''' Push mouse event to stack '''
if tw.mouse_flag == 1:
# push y first so x will be popped first
tw.lc.heap.append((tw.canvas.height / 2) - tw.mouse_y)
tw.lc.heap.append(tw.mouse_x - (tw.canvas.width / 2))
tw.lc.heap.append(1) # mouse event
tw.mouse_flag = 0
else:
tw.lc.heap.append(0) # no mouse event
| 36.925926
| 70
| 0.675025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 697
| 0.699097
|
b144174f87f4c7e89faeb2a0f3dc32dfe6c660fe
| 2,593
|
py
|
Python
|
espn_api/hockey/constant.py
|
samthom1/espn-api
|
6f3f5915a65f1f7e17778d3a5d3f1121e8c7d5fe
|
[
"MIT"
] | null | null | null |
espn_api/hockey/constant.py
|
samthom1/espn-api
|
6f3f5915a65f1f7e17778d3a5d3f1121e8c7d5fe
|
[
"MIT"
] | null | null | null |
espn_api/hockey/constant.py
|
samthom1/espn-api
|
6f3f5915a65f1f7e17778d3a5d3f1121e8c7d5fe
|
[
"MIT"
] | null | null | null |
#Constants
POSITION_MAP = {
# Remaining: F, IR, Util
0 : '0' # IR?
, 1 : 'Center'
, 2 : 'Left Wing'
, 3 : 'Right Wing'
, 4 : 'Defense'
, 5 : 'Goalie'
, 6 : '6' # Forward ?
, 7 : '7' # Goalie, F (Goalie Bench?)
, 8 : '8' # Goalie, F
, 'Center': 1
, 'Left Wing' : 2
, 'Right Wing' : 3
, 'Defense' : 4
, 'Goalie' : 5
}
STATS_IDENTIFIER = {
'00': 'Total',
'01': 'Last 7',
'02': 'Last 15',
'03': 'Last 30',
'10': 'Projected',
'20': '20'
}
PRO_TEAM_MAP = {
1: 'Boston Bruins'
, 2: 'Buffalo Sabres'
, 3: 'Calgary Flames'
, 4: 'Chicago Blackhawks'
, 5: 'Detroit Red Wings'
, 6: 'Edmonton Oilers'
, 7: 'Carolina Hurricanes'
, 8: 'Los Angeles Kings'
, 9: 'Dallas Stars'
, 10: 'Montréal Canadiens'
, 11: 'New Jersey Devils'
, 12: 'New York Islanders'
, 13: 'New York Rangers'
, 14: 'Ottawa Senators'
, 15: 'Philadelphia Flyers'
, 16: 'Pittsburgh Penguins'
, 17: 'Colorado Avalanche'
, 18: 'San Jose Sharks'
, 19: 'St. Louis Blues'
, 20: 'Tampa Bay Lightning'
, 21: 'Toronto Maple Leafs'
, 22: 'Vancouver Canucks'
, 23: 'Washington Capitals'
, 24: 'Arizona Coyotes'
, 25: 'Anaheim Ducks'
, 26: 'Florida Panthers'
, 27: 'Nashville Predators'
, 28: 'Winnipeg Jets'
, 29: 'Columbus Blue Jackets'
, 30: 'Minnesota Wild'
, 37: 'Vegas Golden Knights'
    , 124292: 'Seattle Kraken'
}
STATS_MAP = {
'0': 'GS',
'1': 'W',
'2': 'L',
'3': 'SA',
'4': 'GA',
'5': '5',
'6': 'SV',
'7': 'SO',
'8': 'MIN ?',
'9': 'OTL',
'10': 'GAA',
'11': 'SV%',
'12': '12',
'13': 'G',
'14': 'A',
'15': '+/-',
'16': '16',
'17': 'PIM',
'18': 'PPG',
'19': '19',
'20': 'SHG',
'21': 'SHA',
'22': 'GWG',
'23': 'FOW',
'24': 'FOL',
'25': '25',
'26': 'TTOI ?',
'27': 'ATOI',
'28': 'HAT',
'29': 'SOG',
'30': '30',
'31': 'HIT',
'32': 'BLK',
'33': 'DEF',
'34': 'GP',
'35': '35',
'36': '36',
'37': '37',
'38': 'PPP',
'39': 'SHP',
'40': '40',
'41': '41',
'42': '42',
'43': '43',
'44': '44',
'45': '45',
'99': '99'
}
ACTIVITY_MAP = {
178: 'FA ADDED',
180: 'WAIVER ADDED',
179: 'DROPPED',
181: 'DROPPED',
239: 'DROPPED',
244: 'TRADED',
'FA': 178,
'WAIVER': 180,
'TRADED': 244
}
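# Illustrative lookups (not part of the original module): the maps above turn
# ESPN's numeric codes into readable labels.
if __name__ == '__main__':
    print(POSITION_MAP[5])    # 'Goalie'
    print(PRO_TEAM_MAP[21])   # 'Toronto Maple Leafs'
    print(STATS_MAP['13'])    # 'G'
    print(ACTIVITY_MAP[178])  # 'FA ADDED'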
| 20.744
| 42
| 0.415349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,335
| 0.514649
|
b1445f82594bc253e4a47533cb5834aed7b2e1e1
| 649
|
py
|
Python
|
dataval/conftest.py
|
weishengtoh/machinelearning_assignment
|
2099377faf0b1086cb3c496eecd3b0ae533a90f2
|
[
"Apache-2.0"
] | null | null | null |
dataval/conftest.py
|
weishengtoh/machinelearning_assignment
|
2099377faf0b1086cb3c496eecd3b0ae533a90f2
|
[
"Apache-2.0"
] | null | null | null |
dataval/conftest.py
|
weishengtoh/machinelearning_assignment
|
2099377faf0b1086cb3c496eecd3b0ae533a90f2
|
[
"Apache-2.0"
] | null | null | null |
import os
import pandas as pd
import pytest
import yaml
import wandb
run = wandb.init(project='RP_NVIDIA_Machine_Learning',
job_type='data_validation')
@pytest.fixture(scope='session')
def data():
config_path = os.path.join(os.pardir, 'configs')
with open(os.path.join(config_path, 'dataval_config.yaml'), 'r') as file:
config_name = yaml.safe_load(file)
data_artifact = config_name['parameters']['artifact_name']
if data_artifact is None:
pytest.fail('missing --data_artifact argument')
data_path = run.use_artifact(data_artifact).file()
df = pd.read_csv(data_path)
return df
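# Illustrative sketch (not part of the original conftest): in a sibling test
# module, pytest injects the 'data' fixture above by matching the argument name:
#
#     def test_dataset_is_not_empty(data):
#         assert not data.empty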
| 21.633333
| 77
| 0.694915
| 0
| 0
| 0
| 0
| 474
| 0.730354
| 0
| 0
| 148
| 0.228043
|
b1450ba4c392fda6a05914dd0e6efe6138ef8c05
| 8,049
|
py
|
Python
|
src/abaqus/Odb/Odb.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/Odb/Odb.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/Odb/Odb.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
from abaqusConstants import *
from .OdbPart import OdbPart
from .OdbStep import OdbStep
from .SectionCategory import SectionCategory
from ..Amplitude.AmplitudeOdb import AmplitudeOdb
from ..BeamSectionProfile.BeamSectionProfileOdb import BeamSectionProfileOdb
from ..Filter.FilterOdb import FilterOdb
from ..Material.MaterialOdb import MaterialOdb
class Odb(AmplitudeOdb,
FilterOdb,
MaterialOdb,
BeamSectionProfileOdb):
"""The Odb object is the in-memory representation of an output database (ODB) file.
Attributes
----------
isReadOnly: Boolean
A Boolean specifying whether the output database was opened with read-only access.
amplitudes: dict[str, Amplitude]
A repository of :py:class:`~abaqus.Amplitude.Amplitude.Amplitude` objects.
filters: dict[str, Filter]
A repository of :py:class:`~abaqus.Filter.Filter.Filter` objects.
rootAssembly: OdbAssembly
An :py:class:`~abaqus.Odb.OdbAssembly.OdbAssembly` object.
jobData: JobData
A :py:class:`~abaqus.Odb.JobData.JobData` object.
parts: dict[str, OdbPart]
A repository of :py:class:`~abaqus.Odb.OdbPart.OdbPart` objects.
materials: dict[str, Material]
A repository of :py:class:`~abaqus.Material.Material.Material` objects.
steps: dict[str, OdbStep]
A repository of :py:class:`~abaqus.Odb.OdbStep.OdbStep` objects.
sections: dict[str, Section]
A repository of :py:class:`~abaqus.Section.Section.Section` objects.
sectionCategories: dict[str, SectionCategory]
A repository of :py:class:`~abaqus.Odb.SectionCategory.SectionCategory` objects.
sectorDefinition: SectorDefinition
A :py:class:`~abaqus.Odb.SectorDefinition.SectorDefinition` object.
userData: UserData
A :py:class:`~abaqus.Odb.UserData.UserData` object.
customData: RepositorySupport
A :py:class:`~abaqus.CustomKernel.RepositorySupport.RepositorySupport` object.
profiles: dict[str, Profile]
A repository of :py:class:`~abaqus.BeamSectionProfile.Profile.Profile` objects.
Notes
-----
This object can be accessed by:
.. code-block:: python
import odbAccess
session.odbs[name]
"""
def Part(self, name: str, embeddedSpace: SymbolicConstant, type: SymbolicConstant) -> OdbPart:
"""This method creates an OdbPart object. Nodes and elements are added to this object at a
later stage.
Notes
-----
This function can be accessed by:
.. code-block:: python
session.odbs[name].Part
Parameters
----------
name
A String specifying the part name.
embeddedSpace
A SymbolicConstant specifying the dimensionality of the Part object. Possible values are
THREE_D, TWO_D_PLANAR, and AXISYMMETRIC.
type
A SymbolicConstant specifying the type of the Part object. Possible values are
DEFORMABLE_BODY and ANALYTIC_RIGID_SURFACE.
Returns
-------
An OdbPart object.
"""
self.parts[name] = odbPart = OdbPart(name, embeddedSpace, type)
return odbPart
def Step(self, name: str, description: str, domain: SymbolicConstant, timePeriod: float = 0,
previousStepName: str = '', procedure: str = '', totalTime: float = None) -> OdbStep:
"""This method creates an OdbStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
session.odbs[name].Step
Parameters
----------
name
A String specifying the repository key.
description
A String specifying the step description.
domain
A SymbolicConstant specifying the domain of the step. Possible values are TIME,
FREQUENCY, ARC_LENGTH, and MODAL.The type of OdbFrame object that can be created for
this step is based on the value of the *domain* argument.
timePeriod
A Float specifying the time period of the step. *timePeriod* is required if
*domain*=TIME; otherwise, this argument is not applicable. The default value is 0.0.
previousStepName
A String specifying the preceding step. If *previousStepName* is the empty string, the
last step in the repository is used. If *previousStepName* is not the last step, this
will result in a change to the *previousStepName* member of the step that was in that
position. A special value 'Initial' refers to the internal initial model step and may be
used exclusively for inserting a new step at the first position before any other
existing steps. The default value is an empty string.
procedure
A String specifying the step procedure. The default value is an empty string. The
following is the list of valid procedures:
```
*ANNEAL
*BUCKLE
*COMPLEX FREQUENCY
*COUPLED TEMPERATURE-DISPLACEMENT
*COUPLED TEMPERATURE-DISPLACEMENT, CETOL
*COUPLED TEMPERATURE-DISPLACEMENT, STEADY STATE
*COUPLED THERMAL-ELECTRICAL, STEADY STATE
*COUPLED THERMAL-ELECTRICAL
*COUPLED THERMAL-ELECTRICAL, DELTMX
*DYNAMIC
*DYNAMIC, DIRECT
*DYNAMIC, EXPLICIT
*DYNAMIC, SUBSPACE
*DYNAMIC TEMPERATURE-DISPLACEMENT, EXPLICT
*ELECTROMAGNETIC, HIGH FREQUENCY, TIME HARMONIC
*ELECTROMAGNETIC, LOW FREQUENCY, TIME DOMAIN
*ELECTROMAGNETIC, LOW FREQUENCY, TIME DOMAIN, DIRECT
*ELECTROMAGNETIC, LOW FREQUENCY, TIME HARMONIC
*FREQUENCY
*GEOSTATIC
*HEAT TRANSFER
*HEAT TRANSFER, DELTAMX=__
*HEAT TRANSFER, STEADY STATE
*MAGNETOSTATIC
*MAGNETOSTATIC, DIRECT
*MASS DIFFUSION
*MASS DIFFUSION, DCMAX=
*MASS DIFFUSION, STEADY STATE
*MODAL DYNAMIC
*RANDOM RESPONSE
*RESPONSE SPECTRUM
*SOILS
*SOILS, CETOL/UTOL
*SOILS, CONSOLIDATION
*SOILS, CONSOLIDATION, CETOL/UTOL
*STATIC
*STATIC, DIRECT
*STATIC, RIKS
*STEADY STATE DYNAMICS
*STEADY STATE TRANSPORT
*STEADY STATE TRANSPORT, DIRECT
*STEP PERTURBATION, *STATIC
*SUBSTRUCTURE GENERATE
            *USA ADDED MASS GENERATION
*VISCO
```
totalTime
A Float specifying the analysis time spend in all the steps previous to this step. The
default value is −1.0.
Returns
-------
An OdbStep object.
Raises
------
- If *previousStepName* is invalid:
ValueError: previousStepName is invalid
"""
self.steps[name] = odbStep = OdbStep(name, description, domain, timePeriod, previousStepName, procedure,
totalTime)
return odbStep
def SectionCategory(self, name: str, description: str) -> SectionCategory:
"""This method creates a SectionCategory object.
Notes
-----
This function can be accessed by:
.. code-block:: python
session.odbs[*name*].SectionCategory
Parameters
----------
name
A String specifying the name of the category.
description
A String specifying the description of the category.
Returns
-------
A SectionCategory object.
"""
self.sectionCategories[name] = sectionCategory = SectionCategory(name, description)
return sectionCategory
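# Illustrative usage sketch (not part of the original module). It assumes an
# Odb instance is already available (e.g. session.odbs[name], as noted in the
# class docstring) and only exercises the factory methods defined above;
# _demo_populate is a hypothetical helper name.
def _demo_populate(odb):
    part = odb.Part('PART-1', embeddedSpace=THREE_D, type=DEFORMABLE_BODY)
    step = odb.Step('Step-1', 'static load step', domain=TIME, timePeriod=1.0)
    category = odb.SectionCategory('shells', 'shell section category')
    return part, step, category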
| 37.966981
| 112
| 0.610262
| 7,700
| 0.956403
| 0
| 0
| 0
| 0
| 0
| 0
| 6,784
| 0.842628
|
b1483e23d7d2752b7248ed2d54d8ac8e55492604
| 241
|
py
|
Python
|
popcorn_gallery/tutorials/urls.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | 15
|
2015-03-23T02:55:20.000Z
|
2021-01-12T12:42:30.000Z
|
popcorn_gallery/tutorials/urls.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | null | null | null |
popcorn_gallery/tutorials/urls.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | 16
|
2015-02-18T21:43:31.000Z
|
2021-11-09T22:50:03.000Z
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
'popcorn_gallery.tutorials.views',
url(r'^(?P<slug>[\w-]+)/$', 'object_detail', name='object_detail'),
url(r'^$', 'object_list', name='object_list'),
)
| 30.125
| 71
| 0.66805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.481328
|
b1485dd7aa764623468a3437193c8ab420612082
| 3,738
|
py
|
Python
|
tests/characterisation/test_kelvin_models.py
|
pauliacomi/adsutils
|
062653b38924d419d1235edf7909078ff98a163f
|
[
"MIT"
] | 35
|
2018-01-24T14:59:08.000Z
|
2022-03-10T02:47:58.000Z
|
tests/characterisation/test_kelvin_models.py
|
pauliacomi/adsutils
|
062653b38924d419d1235edf7909078ff98a163f
|
[
"MIT"
] | 29
|
2018-01-06T12:08:08.000Z
|
2022-03-11T20:26:53.000Z
|
tests/characterisation/test_kelvin_models.py
|
pauliacomi/adsutils
|
062653b38924d419d1235edf7909078ff98a163f
|
[
"MIT"
] | 20
|
2019-06-12T19:20:29.000Z
|
2022-03-02T09:57:02.000Z
|
"""
This test module has tests relating to kelvin model validations.
All functions in /calculations/models_kelvin.py are tested here.
The purposes are:
- testing the meniscus shape determination function
- testing the output of the kelvin equations
- testing that the "function getter" is performing as expected.
The kelvin functions are tested against pre-calculated values
at several points.
"""
import numpy
import pytest
import pygaps.characterisation.models_kelvin as km
import pygaps.utilities.exceptions as pgEx
@pytest.mark.characterisation
class TestKelvinModels():
"""Test the kelvin models."""
@pytest.mark.parametrize(
'branch, pore, geometry', [
('ads', 'slit', 'hemicylindrical'),
('ads', 'cylinder', 'cylindrical'),
('ads', 'sphere', 'hemispherical'),
('des', 'slit', 'hemicylindrical'),
('des', 'cylinder', 'hemispherical'),
('des', 'sphere', 'hemispherical'),
]
)
def test_meniscus_geometry(self, branch, pore, geometry):
"""Test the meniscus geometry function."""
assert km.get_meniscus_geometry(branch, pore) == geometry
@pytest.mark.parametrize(
'model, pressure', [
(km._KELVIN_MODELS['Kelvin'], [0.1, 0.4, 0.9]),
]
)
@pytest.mark.parametrize(
'geometry, c_radius', [
('cylindrical', [0.208, 0.522, 4.539]),
('hemispherical', [0.415, 1.044, 9.078]),
('hemicylindrical', [0.831, 2.090, 18.180]),
]
)
def test_kelvin_model(
self, model, geometry, pressure, c_radius, basic_adsorbate
):
"""Test each model against pre-calculated values for N2 at 77K."""
temperature = 77.355
pressure = [0.1, 0.4, 0.9]
for index, value in enumerate(pressure):
radius = model(
value, geometry, temperature,
basic_adsorbate.liquid_density(temperature),
basic_adsorbate.molar_mass(),
basic_adsorbate.surface_tension(temperature)
)
assert numpy.isclose(radius, c_radius[index], 0.01, 0.01)
def test_kelvin_kjs_model(self, basic_adsorbate):
"""Test Kelvin KJS model against pre-calculated values for N2 at 77K."""
temperature = 77.355
pressure = [0.1, 0.4, 0.9]
c_radius = [0.715, 1.344, 9.378]
model = km._KELVIN_MODELS['Kelvin-KJS']
geometry = 'cylindrical'
for index, value in enumerate(pressure):
radius = model(
value, geometry, temperature,
basic_adsorbate.liquid_density(temperature),
basic_adsorbate.molar_mass(),
basic_adsorbate.surface_tension(temperature)
)
assert numpy.isclose(radius, c_radius[index], 0.01, 0.01)
# Now check for excluding other models
geometry = 'hemispherical'
with pytest.raises(pgEx.ParameterError):
radius = model(
value, geometry, temperature,
basic_adsorbate.liquid_density(temperature),
basic_adsorbate.molar_mass(),
basic_adsorbate.surface_tension(temperature)
)
def test_get_kelvin_error(self):
"""When the model requested is not found we raise."""
with pytest.raises(pgEx.ParameterError):
km.get_kelvin_model('bad_model')
def test_get_kelvin_callable(self):
"""When we pass a function and dict, we receive a partial back."""
def call_this(addendum):
return 'called' + addendum
ret = km.get_kelvin_model(call_this, addendum='add')
assert ret() == 'calledadd'
| 35.264151
| 80
| 0.607277
| 3,169
| 0.84778
| 0
| 0
| 3,199
| 0.855805
| 0
| 0
| 1,137
| 0.304173
|
b1491744c42a7da1be2a17f6cb231604a6c7385b
| 2,231
|
py
|
Python
|
packages/jet_bridge/jet_bridge/__main__.py
|
bokal2/jet-bridge
|
dddc4f55c2d5a28c02ce9515dffc750e3887450f
|
[
"MIT"
] | 1
|
2020-02-06T01:07:44.000Z
|
2020-02-06T01:07:44.000Z
|
packages/jet_bridge/jet_bridge/__main__.py
|
bokal2/jet-bridge
|
dddc4f55c2d5a28c02ce9515dffc750e3887450f
|
[
"MIT"
] | null | null | null |
packages/jet_bridge/jet_bridge/__main__.py
|
bokal2/jet-bridge
|
dddc4f55c2d5a28c02ce9515dffc750e3887450f
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
import sys
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from jet_bridge_base import configuration
from jet_bridge.configuration import JetBridgeConfiguration
conf = JetBridgeConfiguration()
configuration.set_configuration(conf)
from jet_bridge_base.commands.check_token import check_token_command
from jet_bridge_base.db import database_connect
from jet_bridge_base.logger import logger
from jet_bridge import settings, VERSION
from jet_bridge.settings import missing_options, required_options_without_default
def main():
args = sys.argv[1:]
if 'ARGS' in os.environ:
args = os.environ['ARGS'].split(' ')
logger.info(datetime.now().strftime('%B %d, %Y - %H:%M:%S %Z'))
logger.info('Jet Bridge version {}'.format(VERSION))
if (len(args) >= 1 and args[0] == 'config') or missing_options == required_options_without_default:
from jet_bridge.utils.create_config import create_config
create_config(missing_options == required_options_without_default)
return
elif len(missing_options) and len(missing_options) < len(required_options_without_default):
logger.info('Required options are not specified: {}'.format(', '.join(missing_options)))
return
address = 'localhost' if settings.ADDRESS == '0.0.0.0' else settings.ADDRESS
url = 'http://{}:{}/'.format(address, settings.PORT)
api_url = '{}api/'.format(url)
if len(args) >= 1:
if args[0] == 'check_token':
check_token_command(api_url)
return
database_connect()
from jet_bridge.app import make_app
app = make_app()
server = HTTPServer(app)
server.bind(settings.PORT, settings.ADDRESS)
server.start(settings.WORKERS if not settings.DEBUG else 1)
if settings.WORKERS > 1 and settings.DEBUG:
logger.warning('Multiple workers are not supported in DEBUG mode')
logger.info('Starting server at {}'.format(url))
if settings.DEBUG:
logger.warning('Server is running in DEBUG mode')
logger.info('Quit the server with CONTROL-C')
check_token_command(api_url)
IOLoop.current().start()
if __name__ == '__main__':
main()
| 30.561644
| 103
| 0.714926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 319
| 0.142985
|
b14a3e4e999395aab5aac5de3e1df984c03e66f4
| 690
|
py
|
Python
|
casepro/translation.py
|
praekelt/helpdesk
|
69a7242679c30d2f7cb30a433809e738b9756a3c
|
[
"BSD-3-Clause"
] | 5
|
2015-07-21T15:58:31.000Z
|
2019-09-14T22:34:00.000Z
|
casepro/translation.py
|
praekelt/helpdesk
|
69a7242679c30d2f7cb30a433809e738b9756a3c
|
[
"BSD-3-Clause"
] | 197
|
2015-03-24T15:26:04.000Z
|
2017-11-28T19:24:37.000Z
|
casepro/translation.py
|
praekelt/helpdesk
|
69a7242679c30d2f7cb30a433809e738b9756a3c
|
[
"BSD-3-Clause"
] | 10
|
2015-03-24T12:26:36.000Z
|
2017-02-21T13:08:57.000Z
|
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.utils.translation import get_language as _get_language
from modeltranslation.translator import translator, TranslationOptions
from modeltranslation import utils
from nsms.text.models import Text
class TextTranslationOptions(TranslationOptions):
fields = ('text',)
translator.register(Text, TextTranslationOptions)
# need to translate something for django translations to kick in
_("Something to trigger localizations")
# monkey patch a version of get_language that isn't broken
def get_language():
lang = _get_language()
return lang
utils.get_language = get_language
| 27.6
| 70
| 0.815942
| 72
| 0.104348
| 0
| 0
| 0
| 0
| 0
| 0
| 164
| 0.237681
|
b14a72da64d12a7c8066ba502beb5c9606168931
| 147
|
py
|
Python
|
Booleans/4.2.4 If/4.2.5 Fix the problem.py
|
ferrerinicolas/python_samples
|
107cead4fbee30b275a5e2be1257833129ce5e46
|
[
"MIT"
] | null | null | null |
Booleans/4.2.4 If/4.2.5 Fix the problem.py
|
ferrerinicolas/python_samples
|
107cead4fbee30b275a5e2be1257833129ce5e46
|
[
"MIT"
] | null | null | null |
Booleans/4.2.4 If/4.2.5 Fix the problem.py
|
ferrerinicolas/python_samples
|
107cead4fbee30b275a5e2be1257833129ce5e46
|
[
"MIT"
] | null | null | null |
can_juggle = True
# The code below has problems. See if
# you can fix them!
#if can_juggle print("I can juggle!")
#else
print("I can't juggle.")
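# One possible fix (illustrative only, assuming the intended behaviour is a
# plain if/else on can_juggle):
#
# if can_juggle:
#     print("I can juggle!")
# else:
#     print("I can't juggle.")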
| 16.333333
| 37
| 0.693878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.782313
|
b14c88c3a21671daaf4ca901cbbd386b9d8bf26a
| 703
|
py
|
Python
|
pytools/mpiwrap.py
|
nchristensen/pytools
|
82da2e0aad6863763f1950318bcb933662020135
|
[
"MIT"
] | 52
|
2015-06-23T10:30:24.000Z
|
2021-07-28T20:50:31.000Z
|
pytools/mpiwrap.py
|
nchristensen/pytools
|
82da2e0aad6863763f1950318bcb933662020135
|
[
"MIT"
] | 72
|
2015-10-22T18:57:08.000Z
|
2022-03-01T00:04:45.000Z
|
pytools/mpiwrap.py
|
nchristensen/pytools
|
82da2e0aad6863763f1950318bcb933662020135
|
[
"MIT"
] | 27
|
2015-09-14T07:24:04.000Z
|
2021-12-17T14:31:33.000Z
|
"""See pytools.prefork for this module's reason for being."""
import mpi4py.rc # pylint:disable=import-error
mpi4py.rc.initialize = False
from mpi4py.MPI import * # noqa pylint:disable=wildcard-import,wrong-import-position
import pytools.prefork # pylint:disable=wrong-import-position
pytools.prefork.enable_prefork()
if Is_initialized(): # noqa pylint:disable=undefined-variable
raise RuntimeError("MPI already initialized before MPI wrapper import")
def InitWithAutoFinalize(*args, **kwargs): # noqa
result = Init(*args, **kwargs) # noqa pylint:disable=undefined-variable
import atexit
atexit.register(Finalize) # noqa pylint:disable=undefined-variable
return result
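# Illustrative usage sketch (not part of the original module): callers import
# this wrapper instead of mpi4py.MPI directly, e.g.
#   import pytools.mpiwrap as MPI
#   MPI.InitWithAutoFinalize()
#   rank = MPI.COMM_WORLD.Get_rank()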
| 33.47619
| 85
| 0.762447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 364
| 0.517781
|
b14cfa3a8ca9bb29e189356b82457936f9e99aff
| 6,096
|
py
|
Python
|
vlcp/service/connection/tcpserver.py
|
geek-plus/vlcp
|
e7936e00929fcef00c04d4da39b67d9679d5f083
|
[
"Apache-2.0"
] | 1
|
2016-09-10T12:09:29.000Z
|
2016-09-10T12:09:29.000Z
|
vlcp/service/connection/tcpserver.py
|
wan-qy/vlcp
|
e7936e00929fcef00c04d4da39b67d9679d5f083
|
[
"Apache-2.0"
] | null | null | null |
vlcp/service/connection/tcpserver.py
|
wan-qy/vlcp
|
e7936e00929fcef00c04d4da39b67d9679d5f083
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on 2015/10/19
:author: hubo
'''
from vlcp.server.module import Module, api
from vlcp.event import TcpServer
from vlcp.event.runnable import RoutineContainer
from vlcp.event.connection import Client
class TcpServerBase(Module):
'''
Generic tcp server on specified URLs, vHosts are supported.
'''
_default_url = 'tcp:///'
_default_connmanage = False
_default_client = False
service = True
def _createprotocol(self, config):
return self._protocolclass()
def _createServers(self, config, vhostname, defaultconfig = {}, key = None, certificate = None, ca_certs = None, exists = {}, client = False):
urls = list(getattr(config, 'urls', []))
if hasattr(config, 'url') and config.url and config.url.strip():
urls.append(config.url.strip())
settings = dict(defaultconfig.items())
if hasattr(config, 'protocol'):
settings.update(config.protocol.items())
key = getattr(config, 'key', key)
certificate = getattr(config, 'certificate', certificate)
ca_certs = getattr(config, 'ca_certs', ca_certs)
client = getattr(config, 'client', client)
if urls:
defaultProtocol = self._createprotocol(config)
if self.connmanage:
# Patch init() and final()
def patch(prococol):
orig_init = prococol.init
def init(conn):
for m in orig_init(conn):
yield m
self.managed_connections.add(conn)
prococol.init = init
orig_final = prococol.final
def final(conn):
try:
self.managed_connections.remove(conn)
except Exception:
pass
for m in orig_final(conn):
yield m
prococol.final = final
patch(defaultProtocol)
defaultProtocol.vhost = vhostname
# Copy extra configurations to protocol
            for k,v in settings.items():
setattr(defaultProtocol, k, v)
for url in urls:
if (vhostname, url) in exists:
exists.remove((vhostname, url))
else:
if client:
self.connections.append(self._client_class(config, defaultProtocol, vhostname)(url, defaultProtocol, self.scheduler,
key, certificate, ca_certs, getattr(config, 'bindaddress', None)))
else:
self.connections.append(self._server_class(config, defaultProtocol, vhostname)(url, defaultProtocol, self.scheduler,
key, certificate, ca_certs))
if hasattr(config, 'vhost'):
for k,v in config.vhost.items():
self._createServers(v, k, settings, key, certificate, ca_certs, exists, client)
def _client_class(self, config, protocol, vhost):
return Client
def _server_class(self, config, protocol, vhost):
return TcpServer
def __init__(self, server, protocolclass):
Module.__init__(self, server)
self._protocolclass = protocolclass
self.apiroutine = RoutineContainer(self.scheduler)
self.managed_connections = set()
self._createServers(self, '')
self.createAPI(api(self.getservers),
api(self.stoplisten, self.apiroutine),
api(self.startlisten, self.apiroutine),
api(self.updateconfig, self.apiroutine),
api(self.getconnections))
def unload(self, container, force=False):
if self.connmanage:
self.connections.extend(self.managed_connections)
self.managed_connections.clear()
for m in Module.unload(self, container, force=force):
yield m
def getservers(self, vhost = None):
'''
Return current servers
:param vhost: return only servers of vhost if specified. '' to return only default servers.
None for all servers.
'''
if vhost is not None:
return [s for s in self.connections if s.protocol.vhost == vhost]
else:
return list(self.connections)
def stoplisten(self, vhost = None):
'''
Stop listen on current servers
:param vhost: return only servers of vhost if specified. '' to return only default servers.
None for all servers.
'''
servers = self.getservers(vhost)
for s in servers:
for m in s.stoplisten():
yield m
self.apiroutine.retvalue = len(servers)
def startlisten(self, vhost = None):
'''
Start listen on current servers
:param vhost: return only servers of vhost if specified. '' to return only default servers.
None for all servers.
'''
servers = self.getservers(vhost)
for s in servers:
for m in s.startlisten():
yield m
self.apiroutine.retvalue = len(servers)
def updateconfig(self):
"Reload configurations, remove non-exist servers, add new servers, and leave others unchanged"
exists = {}
for s in self.connections:
exists[(s.protocol.vhost, s.rawurl)] = s
self._createServers(self, '', exists = exists)
for _,v in exists.items():
for m in v.shutdown():
yield m
self.connections.remove(v)
self.apiroutine.retvalue = None
def getconnections(self, vhost = None):
"Return accepted connections, optionally filtered by vhost"
if vhost is None:
return list(self.managed_connections)
else:
return [c for c in self.managed_connections if c.protocol.vhost == vhost]
| 43.234043
| 146
| 0.564304
| 5,883
| 0.965059
| 4,170
| 0.684055
| 0
| 0
| 0
| 0
| 1,014
| 0.166339
|
b14d75f54839eba4678025c29ab6853f284addcb
| 1,571
|
py
|
Python
|
make/requirements.py
|
Fizzadar/Kanmail
|
3915b1056949b50410478d1519b9276d64ef4f5d
|
[
"OpenSSL"
] | 12
|
2019-02-10T21:18:53.000Z
|
2020-02-17T07:40:48.000Z
|
make/requirements.py
|
Fizzadar/Kanmail
|
3915b1056949b50410478d1519b9276d64ef4f5d
|
[
"OpenSSL"
] | 71
|
2017-11-17T07:13:02.000Z
|
2020-04-03T15:25:43.000Z
|
make/requirements.py
|
Fizzadar/Kanmail
|
3915b1056949b50410478d1519b9276d64ef4f5d
|
[
"OpenSSL"
] | 1
|
2020-02-15T03:16:13.000Z
|
2020-02-15T03:16:13.000Z
|
from distutils.spawn import find_executable
from os import path
import click
from .settings import (
BASE_DEVELOPMENT_REQUIREMENTS_FILENAME,
BASE_REQUIREMENTS_FILENAME,
DEVELOPMENT_REQUIREMENTS_FILENAME,
REQUIREMENTS_FILENAME,
)
from .util import print_and_run
def _ensure_pip_tools_installed():
if not find_executable('pip-sync'):
click.echo('Installing pip-tools')
print_and_run(('pip', 'install', 'pip-tools'))
@click.group()
def cli():
pass
@cli.command()
@click.option('--dev', is_flag=True, default=False)
def install(dev):
_ensure_pip_tools_installed()
requirements_file = (
DEVELOPMENT_REQUIREMENTS_FILENAME
if dev
else REQUIREMENTS_FILENAME
)
print_and_run(('pip-sync', requirements_file))
click.echo('Requirements setup complete!')
@cli.command()
def update():
_ensure_pip_tools_installed()
print_and_run((
'pip-compile',
'-q',
f'--output-file={path.relpath(REQUIREMENTS_FILENAME)}',
path.relpath(BASE_REQUIREMENTS_FILENAME),
))
    click.echo(f'Requirements file updated: {path.relpath(REQUIREMENTS_FILENAME)}')
@cli.command()
def update_dev():
_ensure_pip_tools_installed()
print_and_run((
'pip-compile',
'-q',
f'--output-file={path.relpath(DEVELOPMENT_REQUIREMENTS_FILENAME)}',
path.relpath(BASE_DEVELOPMENT_REQUIREMENTS_FILENAME),
))
click.echo(f'Development requirements file updated: {DEVELOPMENT_REQUIREMENTS_FILENAME}')
if __name__ == '__main__':
cli()
| 21.819444
| 93
| 0.695099
| 0
| 0
| 0
| 0
| 1,066
| 0.678549
| 0
| 0
| 411
| 0.261617
|
b14f875123a59ce6fa0837c5ecb49e829cede9cf
| 1,135
|
py
|
Python
|
integration/python/src/helper/hosts.py
|
ArpitShukla007/planetmint
|
4b1e215e0059e26c0cee6778c638306021b47bdd
|
[
"Apache-2.0"
] | 3
|
2022-01-19T13:39:52.000Z
|
2022-01-28T05:57:08.000Z
|
integration/python/src/helper/hosts.py
|
ArpitShukla007/planetmint
|
4b1e215e0059e26c0cee6778c638306021b47bdd
|
[
"Apache-2.0"
] | 67
|
2022-01-13T22:42:17.000Z
|
2022-03-31T14:18:26.000Z
|
integration/python/src/helper/hosts.py
|
ArpitShukla007/planetmint
|
4b1e215e0059e26c0cee6778c638306021b47bdd
|
[
"Apache-2.0"
] | 7
|
2022-01-13T16:20:54.000Z
|
2022-02-07T11:42:05.000Z
|
# Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from typing import List
from planetmint_driver import Planetmint
class Hosts:
hostnames = []
connections = []
def __init__(self, filepath):
self.set_hostnames(filepath=filepath)
self.set_connections()
def set_hostnames(self, filepath) -> None:
with open(filepath) as f:
self.hostnames = f.readlines()
def set_connections(self) -> None:
self.connections = list(map(lambda h: Planetmint(h), self.hostnames))
def get_connection(self, index=0) -> Planetmint:
return self.connections[index]
def get_transactions(self, tx_id) -> List:
return list(map(lambda connection: connection.transactions.retrieve(tx_id), self.connections))
def assert_transaction(self, tx_id) -> None:
txs = self.get_transactions(tx_id)
for tx in txs:
assert txs[0] == tx, \
'Cannot find transaction {}'.format(tx_id)
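# Illustrative usage sketch (not part of the original module); the hostnames
# file path and transaction id below are hypothetical placeholders.
if __name__ == '__main__':
    hosts = Hosts('/shared/hostnames')
    print(hosts.get_connection(0))
    hosts.assert_transaction('some-transaction-id')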
| 30.675676
| 102
| 0.667841
| 861
| 0.757923
| 0
| 0
| 0
| 0
| 0
| 0
| 229
| 0.201585
|
b151396bf4b33731a5544d5a99c0e63a228fafd2
| 24,737
|
py
|
Python
|
baiduspider/core/__init__.py
|
samuelmao415/BaiduSpider
|
c896201ced6714878ad13867f83d740f303df68b
|
[
"MIT"
] | 1
|
2020-09-19T03:17:08.000Z
|
2020-09-19T03:17:08.000Z
|
baiduspider/core/__init__.py
|
samuelmao415/BaiduSpider
|
c896201ced6714878ad13867f83d740f303df68b
|
[
"MIT"
] | null | null | null |
baiduspider/core/__init__.py
|
samuelmao415/BaiduSpider
|
c896201ced6714878ad13867f83d740f303df68b
|
[
"MIT"
] | null | null | null |
"""BaiduSpider, a handy tool for scraping Baidu.
:Author: Sam Zhang
:Licence: MIT
:GitHub: https://github.com/samzhangjy
:GitLab: https://gitlab.com/samzhangjy
TODO: finish the documentation
TODO: add more spiders
"""
import json
import os
import re
from html import unescape
from pprint import pprint
from urllib.parse import quote, urlparse
import requests
from bs4 import BeautifulSoup
from baiduspider.core._spider import BaseSpider
from baiduspider.core.parser import Parser
from baiduspider.errors import ParseError, UnknownError
__all__ = ['BaiduSpider']
class BaiduSpider(BaseSpider):
    def __init__(self) -> None:
        """Scrape Baidu search results.
        All member methods of this class follow the format below:
        {
            'results': <a list of search results; the inner dicts vary between member methods>,
            'total': <a positive integer, the maximum number of result pages; it may change with the current page because Baidu does not report a grand total>
        }
        Currently supports Baidu Web Search, Baidu Images, Baidu Zhidao, Baidu Video, Baidu News, Baidu Wenku, Baidu Jingyan and Baidu Baike, and the returned results contain no ads. Inherits from ``BaseSpider``.
        BaiduSpider.`search_web(self: BaiduSpider, query: str, pn: int = 1) -> dict`: Baidu web search
        BaiduSpider.`search_pic(self: BaiduSpider, query: str, pn: int = 1) -> dict`: Baidu image search
        BaiduSpider.`search_zhidao(self: BaiduSpider, query: str, pn: int = 1) -> dict`: Baidu Zhidao (Q&A) search
        BaiduSpider.`search_video(self: BaiduSpider, query: str, pn: int = 1) -> dict`: Baidu video search
        BaiduSpider.`search_news(self: BaiduSpider, query: str, pn: int = 1) -> dict`: Baidu news search
        BaiduSpider.`search_wenku(self: BaiduSpider, query: str, pn: int = 1) -> dict`: Baidu Wenku (document) search
        BaiduSpider.`search_jingyan(self: BaiduSpider, query: str, pn: int = 1) -> dict`: Baidu Jingyan (how-to) search
        BaiduSpider.`search_baike(self: BaiduSpider, query: str) -> dict`: Baidu Baike (encyclopedia) search
        """
super().__init__()
        # Spider name (not used in requests, only for identification)
self.spider_name = 'BaiduSpider'
        # Set the request headers
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
'Referer': 'https://www.baidu.com',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Cookie': 'BAIDUID=BB66E815C068DD2911DB67F3F84E9AA5:FG=1; BIDUPSID=BB66E815C068DD2911DB67F3F84E9AA5; PSTM=1592390872; BD_UPN=123253; BDUSS=RQa2c4eEdKMkIySjJ0dng1ZDBLTDZEbVNHbmpBLU1rcFJkcVViaTM5NUdNaDFmRVFBQUFBJCQAAAAAAAAAAAEAAAAPCkwAZGF5ZGF5dXAwNgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEal9V5GpfVebD; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BD_HOME=1; delPer=0; BD_CK_SAM=1; PSINO=2; COOKIE_SESSION=99799_0_5_2_8_0_1_0_5_0_0_0_99652_0_3_0_1593609921_0_1593609918%7C9%230_0_1593609918%7C1; H_PS_PSSID=1457_31326_32139_31660_32046_32231_32091_32109_31640; sug=3; sugstore=0; ORIGIN=0; bdime=0; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; H_PS_645EC=1375sSQTgv84OSzYM3CN5w5Whp9Oy7MkdGdBcw5umqOIFr%2FeFZO4D952XrS0pC1kVwPI; BDSVRTM=223'
}
self.parser = Parser()
    def search_web(self, proxies, query: str, pn: int = 1) -> dict:
        """Baidu web search.
        - Simple search:
        >>> BaiduSpider().search_web('query')
        {
            'results': [
                {
                    'result': int, total number of search results,
                    'type': 'total'  # `type` distinguishes the different kinds of results
                },
                {
                    'results': [
                        'str, related search suggestion',
                        '...',
                        '...',
                        '...',
                        ...
                    ],
                    'type': 'related'
                },
                {
                    'process': 'str, the arithmetic expression',
                    'result': 'str, the computed result',
                    'type': 'calc'
                    # this kind of result only appears when the query involves arithmetic; it is not present in every search
                },
                {
                    'results': [
                        {
                            'author': 'str, news source',
                            'time': 'str, news publication time',
                            'title': 'str, news title',
                            'url': 'str, news link',
                            'des': 'str, news summary, usually None'
                        },
                        { ... },
                        { ... },
                        { ... },
                        ...
                    ],
                    'type': 'news'
                    # this kind of result only appears when the query has related news; it is not present in every search
                },
                {
                    'results': [
                        {
                            'cover': 'str, video cover image link',
                            'origin': 'str, video source',
                            'length': 'str, video duration',
                            'title': 'str, video title',
                            'url': 'str, video link'
                        },
                        { ... },
                        { ... },
                        { ... },
                        ...
                    ],
                    'type': 'video'
                    # this kind of result only appears when the query has related videos; it is not present in every search
                },
                {
                    'result': {
                        'cover': 'str, Baike cover image/video link',
                        'cover-type': 'str, cover type: image for pictures, video for videos',
                        'des': 'str, Baike summary',
                        'title': 'str, Baike title',
                        'url': 'str, Baike link'
                    },
                    'type': 'baike'
                    # this kind of result only appears when the query has a related Baike entry; it is not present in every search
                },
                {
                    'des': 'str, result summary',
                    'origin': 'str, result source, either a domain name or a site name',
                    'time': 'str, result publication time',
                    'title': 'str, result title',
                    'type': 'result',  # a regular search result
                    'url': 'str, result link'
                },
                { ... },
                { ... },
                { ... },
                ...
            ],
            'total': int, total number of result pages, may change with the current page number
        }
        - With a page number:
        >>> BaiduSpider().search_web('query', pn=2)
        {
            'results': [ ... ],
            'total': ...
        }
        Args:
            proxies: proxies forwarded to the underlying HTTP request
            query (str): the query to scrape
            pn (int, optional): the page number to scrape. Defaults to 1.
        Returns:
            dict: the scraped return values and search results
        """
error = None
try:
text = quote(query, 'utf-8')
url = 'https://www.baidu.com/s?wd=%s&pn=%d' % (text, (pn - 1) * 10)
print('proxies: ', proxies)
content = self._get_response(url, proxies = proxies)
results = self.parser.parse_web(content)
except Exception as err:
error = err
finally:
self._handle_error(error)
return {
'results': results['results'],
'total': results['pages']
}
    def search_pic(self, query: str, pn: int = 1) -> dict:
        """Baidu image search.
        - Example:
        >>> BaiduSpider().search_pic('query')
        {
            'results': [
                {
                    'host': 'str, domain the image comes from',
                    'title': 'str, image title',
                    'url': 'str, image link'
                },
                { ... },
                { ... },
                { ... },
                ...
            ],
            'total': int, total number of result pages, may change
        }
        - Search with a page number:
        >>> BaiduSpider().search_pic('query', pn=2)
        {
            'results': [ ... ],
            'total': ...
        }
        Args:
            query (str): the query to scrape
            pn (int, optional): the page number to scrape. Defaults to 1.
        Returns:
            dict: the scraped search results
        """
error = None
try:
url = 'http://image.baidu.com/search/flip?tn=baiduimage&word=%s&pn=%d' % (
quote(query), (pn - 1) * 20)
source = requests.get(url, headers=self.headers)
content = source.text
result = self.parser.parse_pic(content)
except Exception as err:
error = err
finally:
self._handle_error(error)
return {
'results': result['results'],
'total': result['pages']
}
    def search_zhidao(self, query: str, pn: int = 1) -> dict:
        """Baidu Zhidao (Q&A) search.
        - Basic search:
        >>> BaiduSpider().search_zhidao('query')
        {
            'results': [
                {
                    'count': int, total number of answers,
                    'date': 'str, publication date',
                    'des': 'str, summary',
                    'title': 'str, title',
                    'url': 'str, link'
                },
                { ... },
                { ... },
                { ... },
                ...
            ],
            'total': int, maximum number of result pages, may change
        }
        - Search with a page number:
        >>> BaiduSpider().search_zhidao('query', pn=2)  # `pn` !!
        {
            'results': [ ... ],
            'total': ...
        }
        Args:
            query (str): the query to search for
            pn (int, optional): the page number of the results. Defaults to 1.
        Returns:
            dict: the search results and the total page count
        """
url = 'https://zhidao.baidu.com/search?pn=%d&tn=ikaslis&word=%s' % (
(pn - 1) * 10, quote(query))
source = requests.get(url, headers=self.headers)
        # Convert the encoding
source.encoding = 'gb2312'
code = source.text
bs = BeautifulSoup(self._minify(code), 'html.parser')
        # All search results
list_ = bs.find('div', class_='list').findAll('dl')
results = []
for item in list_:
            # Skip sponsored/business answers
if 'ec-oad' in item['class']:
continue
            # Title
title = item.find('dt').text
            # Link
url = item.find('dt').find('a')['href']
            # Summary
des = item.find('dd').text.strip('答:')
tmp = item.find('dd', class_='explain').findAll(
'span', class_='mr-8')
            # Publication date
date = tmp[0].text
            # Total number of answers
count = int(str(tmp[-1].text).strip('个回答'))
            # Build the result
result = {
'title': title,
'des': des,
'date': date,
'count': count,
'url': url
}
            results.append(result)  # Append the result
        # Get the pagination
wrap = bs.find('div', class_='pager')
pages_ = wrap.findAll('a')[:-2]
pages = []
for _ in pages_:
            # Brute force: just try the conversion
try:
pages.append(int(_.text))
except ValueError:
pass
return {
'results': results,
            # Take the largest page number
'total': max(pages)
}
    def search_video(self, query: str, pn: int = 1) -> dict:
        """Baidu video search.
        - Basic search:
        >>> BaiduSpider().search_video('query')
        {
            'results': [
                {
                    'img': 'str, video cover image link',
                    'time': 'str, video duration',
                    'title': 'str, video title',
                    'url': 'str, video link'
                },
                { ... },
                { ... },
                { ... },
                ...
            'total': int, maximum number of result pages, may change with the page searched
        }
        - With a page number:
        >>> BaiduSpider().search_video('query', pn=2)  # <=== `pn`
        {
            'results': [ ... ],
            'total': ...
        }
        Args:
            query (str): the query to search for
            pn (int, optional): the page number of the results. Defaults to 1.
        Returns:
            dict: the search results and the total page count
        """
url = 'http://v.baidu.com/v?no_al=1&word=%s&pn=%d' % (
quote(query), (60 if pn == 2 else (pn - 1) * 20))
        # Fetch the page source
source = requests.get(url, headers=self.headers)
code = self._minify(source.text)
bs = BeautifulSoup(code, 'html.parser')
        # Locate the result list items
data = bs.findAll('li', class_='result')
results = []
for res in data:
            # Title
title = res.find('a')['title']
            # Link
url = 'https://v.baidu.com' + res.find('a')['href']
            # Cover image link
img = res.find('img', class_='img-normal-layer')['src']
            # Duration
time = res.find('span', class_='info').text
            # Build the result
result = {
'title': title,
'url': url,
'img': img,
'time': time
}
            results.append(result)  # Append the result
        # Pagination
wrap = bs.find('div', class_='page-wrap')
pages_ = wrap.findAll('a', class_='filter-item')[:-1]
pages = []
for _ in pages_:
pages.append(int(_.text))
return {
'results': results,
            # Take the maximum
'total': max(pages)
}
    def search_news(self, query: str, pn: int = 1) -> dict:
        """Baidu news search.
        - Get news search results:
        >>> BaiduSpider().search_news('query')
        {
            'results': [
                {
                    'author': 'str, news source (author)',
                    'date': 'str, news publication time',
                    'des': 'str, news summary',
                    'title': 'str, news title',
                    'url': 'str, news link'
                },
                { ... },
                { ... },
                { ... },
                ...
            ],
            'total': int, maximum result page number, may change with the current page
        }
        - With a page number:
        >>> BaiduSpider().search_news('query', pn=2)
        {
            'results': [ ... ],
            'total': ...
        }
        Args:
            query (str): the query to search for
            pn (int, optional): the page number of the results. Defaults to 1.
        Returns:
            dict: the scraped search results and the total page count.
        """
url = 'https://www.baidu.com/s?rtt=1&bsst=1&tn=news&word=%s&pn=%d' % (
quote(query), (pn - 1) * 10)
        # Page source
source = requests.get(url, headers=self.headers)
        # Minify
code = self._minify(source.text)
bs = BeautifulSoup(self._format(code), 'html.parser')
        # Search result container
data = bs.find('div', id='content_left').findAll('div')[1].findAll('div', class_='result-op')
# print(len(data))
results = []
for res in data:
            # Title
title = self._format(
res.find('h3').find('a').text)
            # Link
url = res.find('h3').find('a')['href']
            # Summary
des = res.find('div', class_='c-span-last').find('span', class_='c-color-text').text
            # author
author = res.find('div', class_='c-span-last').find('div', class_='news-source').find('span', class_='c-gap-right').text
            # publication date
date = res.find('div', class_='c-span-last').find('div', class_='news-source').find('span', class_='c-color-gray2').text
            # build the result
result = {
'title': title,
'author': author,
'date': date,
'des': des,
'url': url
}
            results.append(result)  # append to results
        # get all page numbers
pages_ = bs.find('div', id='page').findAll('a')
        # filter out the previous/next page links
if '< 上一页' in pages_[0].text:
pages_ = pages_[1:]
if '下一页 >' in pages_[-1].text:
pages_ = pages_[:-1]
pages = []
for _ in pages_:
pages.append(int(_.find('span', class_='pc').text))
return {
'results': results,
            # maximum page number
'total': max(pages)
}
    def search_wenku(self, query: str, pn: int = 1) -> dict:
        """Baidu Wenku (document library) search
        - Basic search:
            >>> BaiduSpider().search_wenku('search term')
            {
                'results': [
                    {
                        'date': 'str, publication date of the document',
                        'des': 'str, document summary',
                        'downloads': int, number of downloads,
                        'pages': int, number of pages,
                        'title': 'str, document title',
                        'type': 'str, document format, in uppercase letters',
                        'url': 'str, document URL'
                    },
                    { ... },
                    { ... },
                    { ... },
                    ...
                ],
                'total': int, total number of result pages
            }
        - Search with a page number:
            >>> BaiduSpider().search_wenku('search term', pn=2)
            {
                'results': [ ... ],
                'total': ...
            }
        Args:
            query (str): the query to search for
            pn (int, optional): page number of the search results. Defaults to 1.
        Returns:
            dict: search results and the total number of pages
"""
url = 'https://wenku.baidu.com/search?word=%s&pn=%d' % (
quote(query), (pn - 1) * 10)
source = requests.get(url, headers=self.headers)
source.encoding = 'gb2312'
code = self._minify(source.text)
bs = BeautifulSoup(code, 'html.parser')
data = bs.findAll('dl')
results = []
for res in data:
dt = res.find('dt')
type_ = self._format(dt.find('p', class_='fl').find(
'span', class_='ic')['title']).upper()
tmp = dt.find('p', class_='fl').find('a')
title = self._format(tmp.text)
url = tmp['href']
try:
quality = float(self._format(
res.find('p', class_='fr').findAll('span', class_='ib')[1].text))
            except Exception:  # the quality badge may be missing or non-numeric
quality = None
dd = res.find('dd', class_='clearfix').find(
'div', class_='summary-box')
des = self._format(dd.find('p', class_='summary').text)
try:
dd_tags = res.find('dd', class_='tag-tips')
tags = []
for a in dd_tags.findAll('a'):
tags.append(self._format(a.text))
except AttributeError:
tags = []
detail = dd.find('div', class_='detail').find(
'div', class_='detail-info')
date = self._format(detail.text.split('|')[0])
pages = int(self._format(detail.text.split('|')[
1].replace('共', '').replace('页', '')))
downloads = int(self._format(
detail.text.split('|')[2].strip('次下载')))
result = {
'title': title,
'type': type_,
'url': url,
'des': des,
'date': date,
'pages': pages,
'downloads': downloads
}
results.append(result)
pages_ = bs.find('div', class_='page-content').findAll('a')
if '尾页' in pages_[-1].text:
total = int(int(pages_[-1]['href'].split('&')
[-1].strip('pn=')) / 10 + 1)
else:
total = int(
bs.find('div', class_='page-content').find('span', class_='cur').text)
return {
'results': results,
'total': total
}
    def search_jingyan(self, query: str, pn: int = 1) -> dict:
        """Baidu Jingyan (how-to guide) search
        - For example:
            >>> BaiduSpider().search_jingyan('keyword')
            {
                'results': [
                    {
                        'title': 'str, guide title',
                        'url': 'str, guide URL',
                        'des': 'str, guide summary',
                        'date': 'str, publication date of the guide',
                        'category': 'str, guide category',
                        'votes': int, number of supporting votes
                    },
                    { ... },
                    { ... },
                    { ... },
                    ...
                ],
                'total': int, total number of result pages
            }
        - With a page number:
            >>> BaiduSpider().search_jingyan('search term', pn=2)  # `pn` is the page number
            {
                'results': [ ... ],
                'total': ...
            }
        Args:
            query (str): the keyword to search for
            pn (int, optional): page number of the search results. Defaults to 1.
        Returns:
            dict: search results and the total number of pages.
"""
url = 'https://jingyan.baidu.com/search?word=%s&pn=%d&lm=0' % (
quote(query), (pn - 1) * 10)
        # fetch the page source
source = requests.get(url, headers=self.headers)
        # minify the HTML
code = self._minify(source.text)
bs = BeautifulSoup(code, 'html.parser')
        # load the search results
data = bs.find('div', class_='search-list').findAll('dl')
results = []
for res in data:
            # title
title = self._format(res.find('dt').find('a').text)
            # link
url = 'https://jingyan.baidu.com/' + \
res.find('dt').find('a')['href']
            # summary
des = self._format(res.find('dd').find(
'div', class_='summary').find('span', class_='abstract').text)
            # get the publication date and category, both inside the `<span class="cate"/>` element
tmp = self._format(res.find('dd').find('div', class_='summary').find(
'span', class_='cate').text).split('-')
            # publication date
date = self._format(tmp[1])
            # category
category = self._format(tmp[-1]).strip('分类:')
            # number of supporting votes
votes = int(self._format(res.find('dt').find(
'span', class_='succ-times').text).strip('得票'))
            # build the result
result = {
'title': title,
'url': url,
'des': des,
'date': date,
'category': category,
'votes': votes
}
            results.append(result)  # append to the results list
        # get pagination
pages_ = bs.find('div', id='pg').findAll('a')[-1]
        # neither on the last page nor past it
if '尾页' in pages_.text:
            # compute the last page number from the href and add one
total = int(int(pages_['href'].split(
'&')[-1].strip('pn=')) / 10) + 1
        # on the last page, or past it
else:
            # re-read the pagination links
pages_ = bs.find('div', id='pg').findAll('a')[1]
            # compute the last page number from the href and add one
total = int(int(pages_['href'].split(
'&')[-1].strip('pn=')) / 10) + 1
return {
'results': results,
'total': total
}
    def search_baike(self, query: str) -> dict:
        """Baidu Baike (encyclopedia) search
        - Usage:
            >>> BaiduSpider().search_baike('search term')
            {
                'results': [
                    {
                        'title': 'str, entry title',
                        'des': 'str, entry summary',
                        'date': 'str, last update time of the entry',
                        'url': 'str, entry URL'
                    },
                    { ... },
                    { ... },
                    { ... }
                ],
                'total': int, total number of search results
            }
        Args:
            query (str): the keyword to search for
        Returns:
            dict: search results and the total number of results
"""
        # fetch the page source
source = requests.get(
'https://baike.baidu.com/search?word=%s' % quote(query), headers=self.headers)
code = self._minify(source.text)
        # create the BeautifulSoup object
soup = BeautifulSoup(code, 'html.parser').find(
'div', class_='body-wrapper').find('div', class_='searchResult')
        # get the total number of encyclopedia entries
total = int(
soup.find('div', class_='result-count').text.strip('百度百科为您找到相关词条约').strip('个'))
        # get all results
container = soup.findAll('dd')
results = []
for res in container:
            # link
url = 'https://baike.baidu.com' + \
self._format(res.find('a', class_='result-title')['href'])
            # title
title = self._format(res.find('a', class_='result-title').text)
            # summary
des = self._format(res.find('p', class_='result-summary').text)
            # last update date
date = self._format(res.find('span', class_='result-date').text)
            # build the result
results.append({
'title': title,
'des': des,
'date': date,
'url': url
})
return {
'results': results,
'total': total
}
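
# A minimal usage sketch (not part of the original module, added for illustration):
# it assumes the class above is constructed with no arguments, as its docstrings
# show, and that Baidu is reachable; the query string below is a placeholder.
if __name__ == '__main__':
    spider = BaiduSpider()
    news = spider.search_news('python')
    for entry in news['results']:
        print(entry['date'], entry['title'], entry['url'])
    print('total pages:', news['total'])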
| 33.701635
| 767
| 0.402353
| 27,191
| 0.980174
| 0
| 0
| 0
| 0
| 0
| 0
| 17,579
| 0.633683
|
b15153401c65e82722c6b9906d4e09d6524f4e20
| 1,200
|
py
|
Python
|
HY_Plotter/windReader/reader/cfosat.py
|
BigShuiTai/HY-CFOSAT-ASCAT-Wind-Data-Plotter
|
5be90e5d35151d4c056c77344bf5075e144c3113
|
[
"MIT"
] | 1
|
2021-08-22T06:30:58.000Z
|
2021-08-22T06:30:58.000Z
|
HY_Plotter/windReader/reader/cfosat.py
|
Dapiya/HY-CFOSAT-L2B-Wind-Data-Plotter
|
5be90e5d35151d4c056c77344bf5075e144c3113
|
[
"MIT"
] | 1
|
2021-10-30T07:25:17.000Z
|
2021-10-30T16:22:17.000Z
|
HY_Plotter/windReader/reader/cfosat.py
|
Dapiya/HY-CFOSAT-L2B-Wind-Data-Plotter
|
5be90e5d35151d4c056c77344bf5075e144c3113
|
[
"MIT"
] | 1
|
2021-08-21T12:51:39.000Z
|
2021-08-21T12:51:39.000Z
|
import netCDF4
import numpy as np
class CFOSAT(object):
    @staticmethod
    def extract(fname):
try:
init = netCDF4.Dataset(fname)
except Exception:
lats, lons, data_spd, data_dir, data_time, sate_name, res = [], [], [], [], "", "", ""
return lats, lons, data_spd, data_dir, data_time, sate_name, res
if init.platform == "CFOSAT":
# get values
lons, lats = init.variables["wvc_lon"][:], init.variables["wvc_lat"][:]
data_spd, data_dir = init.variables["wind_speed_selection"][:], init.variables["wind_dir_selection"][:]
# data_spd, data_dir = data_spd[:,:,band_index], data_dir[:,:,band_index]
data_time = init.time_coverage_end
sate_name = f"{init.platform} Scatterometer Level 2B"
if init.dimensions["numrows"].size == 3248:
res = "0.125°"
else:
res = "0.25°"
            # convert wind speed from m/s to knots (1 knot ≈ 0.514 m/s)
data_spd = data_spd / 0.514
else:
lats, lons, data_spd, data_dir, data_time, sate_name, res = [], [], [], [], "", "", ""
return lats, lons, data_spd, data_dir, data_time, sate_name, res
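
# A minimal usage sketch (illustrative, not part of the original reader): the file
# name below is a hypothetical CFOSAT L2B wind product; extract() returns empty
# fields when the file cannot be opened, so the call itself is safe to run.
if __name__ == "__main__":
    lats, lons, spd, wdir, obs_time, sate_name, res = CFOSAT.extract("CFOSAT_L2B_wind_sample.nc")
    print(sate_name, obs_time, res)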
| 41.37931
| 115
| 0.549167
| 1,166
| 0.97005
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.206323
|
b15405b5c4a9b35dd5bdc84b62d31229a91e7265
| 17,228
|
py
|
Python
|
example_snippets.py
|
kimberscott/ffmpeg-stimuli-generation
|
54bce134a3236d9e7d2fefe4538378d76f2db798
|
[
"MIT"
] | null | null | null |
example_snippets.py
|
kimberscott/ffmpeg-stimuli-generation
|
54bce134a3236d9e7d2fefe4538378d76f2db798
|
[
"MIT"
] | null | null | null |
example_snippets.py
|
kimberscott/ffmpeg-stimuli-generation
|
54bce134a3236d9e7d2fefe4538378d76f2db798
|
[
"MIT"
] | 1
|
2020-08-14T17:15:29.000Z
|
2020-08-14T17:15:29.000Z
|
"""
Examples of using the functions in videotools.py to generate videos.
This file will not run as-is - it is just intended to provide reference commands you might copy and edit.
"""
import os
from videotools import *
this_path = os.path.dirname(os.path.abspath(__file__))
input_path = os.path.join(this_path, "example_input")
output_path = os.path.join(this_path, "example_output")
# Put two videos side-by-side
makeSideBySide(os.path.join(input_path, "cropped_book.mp4"), os.path.join(input_path, "cropped_box.mp4"), "right", os.path.join(output_path, "side_by_side.mp4"))
# Make a collage of the object-introduction videos
vids = [
"apple",
"cup",
"lotion",
"spray",
"whiteball",
"orangeball",
"train",
"toycar",
"sunglasses",
"marker",
"flashlight",
"block",
]
vids = ["cropped_" + v + ".mp4" for v in vids]
make_collage(input_path, vids, 4, os.path.join(output_path, "0_introsA"), True, 1920, vidHeight=640)
# Replace the audio in VIDEO_1 with a different mp3 file NEW_AUDIO
sp.call([
"ffmpeg",
"-i",
VIDEO_1,
"-i",
NEW_AUDIO,
"-map",
"0:v",
"-map",
"1:a",
"-shortest",
OUTPUT_VIDEO_NAME,
])
# Make a video where the input video plays forwards and then backwards
sp.call(
[
"ffmpeg",
"-i",
INPUT_VIDEO,
"-i",
INPUT_VIDEO,
"-filter_complex",
"[1:v]reverse[secondhalf];[0:v][secondhalf]concat[out]",
"-map",
"""[out]""",
"-loglevel",
"error",
OUTPUT_VIDEO,
]
)
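
# A further reference snippet (not in the original set, included as a hedged example):
# trim INPUT_VIDEO to a 10-second clip starting at 5 s without re-encoding.
# INPUT_VIDEO and TRIMMED_VIDEO are placeholder names, like the ones above.
sp.call(
    [
        "ffmpeg",
        "-ss",
        "5",
        "-i",
        INPUT_VIDEO,
        "-t",
        "10",
        "-c",
        "copy",
        "-loglevel",
        "error",
        TRIMMED_VIDEO,
    ]
)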
# The following are included for reference about potentially useful ffmpeg commands only - they are very specialized
# for particular stimuli!
def combineVideos(croppedVideoDir, sidebysideDir, regularOrderDict, whichVersions, minimal=False):
    '''Generate all versions of side-by-side videos needed for the Lookit physics study,
    i.e. A / B, flippedA / B, A / flippedB, flippedA / flippedB.'''
make_sure_path_exists(sidebysideDir)
commands = ["""[0:v]setpts=PTS-STARTPTS,pad=iw*3:ih:color=white[a];[1:v]setpts=PTS-STARTPTS[z];[a][z]overlay=x=2*w:repeatlast=1:shortest=1:eof_action=repeat[out]""", \
"""[0:v]setpts=PTS-STARTPTS,hflip,pad=iw*3:ih:color=white[b];[1:v]setpts=PTS-STARTPTS[z];[b][z]overlay=x=2*w:repeatlast=1:shortest=1:eof_action=repeat[out]""", \
"""[0:v]setpts=PTS-STARTPTS,pad=iw*3:ih:color=white[b];[1:v]setpts=PTS-STARTPTS[z];[z]hflip[c];[b][c]overlay=x=2*w:repeatlast=1:shortest=1:eof_action=repeat[out]""", \
"""[0:v]setpts=PTS-STARTPTS,hflip,pad=iw*3:ih:color=white[b];[1:v]setpts=PTS-STARTPTS[z];[z]hflip[c];[b][c]overlay=x=2*w:repeatlast=1:shortest=1:eof_action=repeat[out]"""]
suffixes = ['NN', 'RN', 'NR', 'RR']
allfiles = os.listdir(croppedVideoDir)
for iVid1, video1 in enumerate(allfiles):
(shortname1, ext1) = os.path.splitext(video1)
if not(os.path.isdir(os.path.join(croppedVideoDir, video1))) and ext1 == VIDEXT:
for iVid2 in range(len(allfiles)):
if iVid2 == iVid1:
continue
if minimal and iVid2 <= iVid1:
continue
else:
video2 = allfiles[iVid2]
(shortname2, ext2) = os.path.splitext(video2)
if not(os.path.isdir(os.path.join(croppedVideoDir, video2))) and ext2 == VIDEXT:
labels = [parse_video_filename(v, regularOrderDict) for v in [video1, video2]]
if labels[0][0] == labels[1][0] and \
labels[0][2] == labels[1][2] and \
labels[0][3] == labels[1][3] and \
labels[0][4] == labels[1][4]:
outfilenameBase = 'sbs_' + labels[0][0] + '_' + labels[0][1] + '_' + labels[1][1] + '_' + \
labels[0][2] + '_' + labels[0][3] + '_' + labels[0][4] + '_'
for iVid in range(len(commands)):
if suffixes[iVid] in whichVersions:
sp.call(["ffmpeg", "-i", os.path.join(croppedVideoDir, video1), \
"-i", os.path.join(croppedVideoDir, video2), \
"-filter_complex", \
commands[iVid], \
"-map", """[out]""", "-loglevel", "error", \
os.path.join(sidebysideDir, outfilenameBase + suffixes[iVid] + '.mp4')])
def flipVideos(rawVideoDir, origVideoDir, unflippedOrderDict):
vidLengths = {
"apple": 66,
"cup": 86,
"lotion": 68,
"orangeball": 76,
"spray": 90,
"whiteball": 64,
}
fadeFrames = 10
make_sure_path_exists(origVideoDir)
for f in os.listdir(rawVideoDir):
if not (os.path.isdir(os.path.join(rawVideoDir, f))):
(shortname, ext) = os.path.splitext(f)
if ext in ORIGEXT:
(event, outcome, object, camera,
background) = parse_video_filename(
shortname, unflippedOrderDict
)
if outcome == "up":
continue
sp.call(
[
"ffmpeg",
"-i",
os.path.join(rawVideoDir, f),
"-vf",
"""vflip,fade=type=in:start_frame=1:nb_frames={}:color=0x009EFC,fade=type=out:start_frame={}:color=0x009EFC""".format(
fadeFrames, vidLengths[object] - fadeFrames
),
"-loglevel",
"error",
os.path.join(
origVideoDir,
event
+ "_up_"
+ object
+ "_"
+ background
+ "_"
+ camera
+ ".mp4",
),
]
)
sp.call(
[
"ffmpeg",
"-i",
os.path.join(rawVideoDir, f),
"-vf",
"""fade=type=in:start_frame=1:nb_frames={}:color=0x009EFC,fade=type=out:start_frame={}:color=0x009EFC""".format(
fadeFrames, vidLengths[object] - fadeFrames
),
"-loglevel",
"error",
os.path.join(
origVideoDir,
event
+ "_down_"
+ object
+ "_"
+ background
+ "_"
+ camera
+ ".mp4",
),
]
)
return 0
### Crops and rescales videos to 640 px wide.
def cropVideos(
origVideoDir,
croppedVideoDir,
regularOrderDict,
originalSizes=[],
cropStrings=[],
which=[],
cropByName=[],
timecrop=[],
fadeParams=[],
doCrossFade=False,
):
    """Crop and rescale videos, optionally trimming them in time and adding fades.
timecrop: list of (ID, start, stop, padStart, padStop) tuples.
ID: dict containing any keys in ['object', 'event', 'outcome', 'camera', 'background'] and values.
This time cropping will be applied to any videos that match the values for all
the specified keys.
start, stop: start and stop times in s.
padStart, padStop: amount of time to extend first and last frames by, in s.
fadeParams: (fadeFrames, fadeColor)
"""
make_sure_path_exists(croppedVideoDir)
for f in os.listdir(origVideoDir):
if not (os.path.isdir(os.path.join(origVideoDir, f))):
(shortname, ext) = os.path.splitext(f)
if ext in ORIGEXT:
if regularOrderDict:
(event, outcome, object, camera, background) = parse_video_filename(
shortname, regularOrderDict
)
thisID = {
"event": event,
"outcome": outcome,
"object": object,
"camera": camera,
"background": background,
}
if len(which) == 2 and not (object, event) == which:
continue
if len(which) == 3 and not (object, event, outcome) == which:
continue
timecropCommand = []
doTimeCrop = False
if timecrop:
for (ID, s, e, pS, pE) in timecrop:
if all([thisID[key] == val for (key, val) in ID.items()]):
startTime = s
endTime = e
padStart = pS
padEnd = pE
doTimeCrop = True
if doTimeCrop:
if not startTime == -1:
timecropCommand = ["-ss", str(startTime)]
if not endTime == -1:
timecropCommand = timecropCommand + [
"-t",
str(endTime - startTime),
]
else:
warnings.warn("No time cropping for this video")
if cropByName:
for (vidNames, cropStrForNames) in cropByName:
if f in vidNames:
cropStr = cropStrForNames
else:
if originalSizes == "*":
cropStr = cropStrings[0]
else:
res = findVideoResolution(os.path.join(origVideoDir, f))
if res in originalSizes:
cropStr = cropStrings[originalSizes.index(res)]
else:
cropStr = """scale=640:-2"""
cropStr = cropStr + ",setpts=PTS-STARTPTS"
if doTimeCrop:
croppedVid = os.path.join(
croppedVideoDir, shortname + "_middle.mp4"
)
croppedVidFinal = os.path.join(croppedVideoDir, shortname + ".mp4")
else:
croppedVid = os.path.join(croppedVideoDir, shortname + ".mp4")
croppedVidFinal = croppedVid
command = (
["ffmpeg", "-i", os.path.join(origVideoDir, f), "-vf", cropStr]
+ timecropCommand
+ ["-loglevel", "error", croppedVid]
)
sp.call(command)
if doTimeCrop:
firstImg = os.path.join(croppedVideoDir, shortname + "_first.png")
lastImg = os.path.join(croppedVideoDir, shortname + "_last.png")
firstVid = os.path.join(croppedVideoDir, shortname + "_first.mp4")
lastVid = os.path.join(croppedVideoDir, shortname + "_last.mp4")
sp.call(
[
"ffmpeg",
"-i",
croppedVid,
"-vframes",
"1",
"-f",
"image2",
firstImg,
"-loglevel",
"error",
]
)
[nF, dur, x, y] = get_video_details(
croppedVid, ["nframes", "vidduration", "width", "height"]
)
sp.call(
[
"ffmpeg",
"-i",
croppedVid,
"-vf",
"select='eq(n,{})'".format(nF - 1),
"-vframes",
"1",
"-f",
"image2",
lastImg,
"-loglevel",
"error",
]
)
sp.call(
[
"ffmpeg",
"-loop",
"1",
"-i",
firstImg,
"-t",
str(padStart),
firstVid,
"-loglevel",
"error",
]
)
sp.call(
[
"ffmpeg",
"-loop",
"1",
"-i",
lastImg,
"-t",
str(padEnd),
lastVid,
"-loglevel",
"error",
]
)
if not doCrossFade:
concat_mp4s(croppedVidFinal, [firstVid, croppedVid, lastVid])
else:
unfaded = os.path.join(
croppedVideoDir, shortname + "_beforecrossfade.mp4"
)
concat_mp4s(unfaded, [croppedVid, lastVid])
# see crossfade advice at http://superuser.com/a/778967
sp.call(
[
"ffmpeg",
"-i",
unfaded,
"-i",
firstVid,
"-f",
"lavfi",
"-i",
"color=white:s={}x{}".format(int(x), int(y)),
"-filter_complex",
"[0:v]format=pix_fmts=yuva420p,fade=t=out:st={}:d={}:alpha=1,setpts=PTS-STARTPTS[va0];\
[1:v]format=pix_fmts=yuva420p,fade=t=in:st=0:d={}:alpha=1,setpts=PTS-STARTPTS+{}/TB[va1];\
[2:v]scale={}x{},trim=duration={}[over];\
[over][va0]overlay=format=yuv420[over1];\
[over1][va1]overlay=format=yuv420[outv]".format(
dur + padEnd,
padEnd,
padEnd,
dur,
int(x),
int(y),
dur + padStart + padEnd,
),
"-vcodec",
"libx264",
"-map",
"[outv]",
croppedVidFinal,
"-loglevel",
"error",
]
)
os.remove(unfaded)
os.remove(firstImg)
os.remove(lastImg)
os.remove(firstVid)
os.remove(lastVid)
os.remove(croppedVid)
if fadeParams:
(fadeFrames, fadeColor) = fadeParams
nF = get_video_details(croppedVidFinal, "nframes")
unfaded = os.path.join(croppedVideoDir, shortname + "_unfaded.mp4")
os.rename(croppedVidFinal, unfaded)
sp.call(
[
"ffmpeg",
"-i",
unfaded,
"-vf",
"""fade=type=in:start_frame=1:nb_frames={}:color={},fade=type=out:start_frame={}:color={}""".format(
fadeFrames, fadeColor, nF - fadeFrames, fadeColor
),
"-loglevel",
"error",
croppedVidFinal,
]
)
os.remove(unfaded)
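
# A hedged reference call for cropVideos (not in the original snippets). It follows the
# placeholder convention used above: ORIG_DIR, CROPPED_DIR, and ORDER_DICT stand in for
# real paths and for the filename-order dict that parse_video_filename expects, and the
# timecrop entry is purely illustrative.
cropVideos(
    ORIG_DIR,
    CROPPED_DIR,
    ORDER_DICT,
    originalSizes="*",
    cropStrings=["scale=640:-2"],
    # (ID, start, stop, padStart, padStop): trim matching videos to 0.5-3.0 s and
    # extend the first and last frames by 1 s each
    timecrop=[({"object": "apple", "event": "drop"}, 0.5, 3.0, 1.0, 1.0)],
    fadeParams=(10, "0x009EFC"),
)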
| 40.252336
| 187
| 0.390585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,994
| 0.231832
|
b1542cd589e62fb7173b027c1b40c713b7897ca2
| 615
|
py
|
Python
|
sample_project/env/lib/python3.9/site-packages/qtpy/tests/test_qtprintsupport.py
|
Istiakmorsalin/ML-Data-Science
|
681e68059b146343ef55b0671432dc946970730d
|
[
"MIT"
] | 4
|
2021-11-19T03:25:13.000Z
|
2022-02-24T15:32:30.000Z
|
sample_project/env/lib/python3.9/site-packages/qtpy/tests/test_qtprintsupport.py
|
Istiakmorsalin/ML-Data-Science
|
681e68059b146343ef55b0671432dc946970730d
|
[
"MIT"
] | null | null | null |
sample_project/env/lib/python3.9/site-packages/qtpy/tests/test_qtprintsupport.py
|
Istiakmorsalin/ML-Data-Science
|
681e68059b146343ef55b0671432dc946970730d
|
[
"MIT"
] | 3
|
2020-08-04T02:48:32.000Z
|
2020-08-17T01:20:09.000Z
|
from __future__ import absolute_import
import pytest
from qtpy import QtPrintSupport
def test_qtprintsupport():
"""Test the qtpy.QtPrintSupport namespace"""
assert QtPrintSupport.QAbstractPrintDialog is not None
assert QtPrintSupport.QPageSetupDialog is not None
assert QtPrintSupport.QPrintDialog is not None
assert QtPrintSupport.QPrintPreviewDialog is not None
assert QtPrintSupport.QPrintEngine is not None
assert QtPrintSupport.QPrinter is not None
assert QtPrintSupport.QPrinterInfo is not None
assert QtPrintSupport.QPrintPreviewWidget is not None
| 32.368421
| 59
| 0.782114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.071545
|
b155f55e9f976d163537ef6daaa4dfc7e72b3594
| 2,004
|
py
|
Python
|
logbook/auth.py
|
nicola-zanardi/personal-logbook
|
d44989825ec82437ffd50572c23ef7c2ddf00e30
|
[
"Unlicense"
] | null | null | null |
logbook/auth.py
|
nicola-zanardi/personal-logbook
|
d44989825ec82437ffd50572c23ef7c2ddf00e30
|
[
"Unlicense"
] | 7
|
2019-08-28T18:22:40.000Z
|
2020-01-15T09:10:13.000Z
|
logbook/auth.py
|
nicola-zen/personal-logbook
|
d44989825ec82437ffd50572c23ef7c2ddf00e30
|
[
"Unlicense"
] | null | null | null |
from flask import Blueprint, flash, redirect, render_template, request, url_for
from werkzeug.security import check_password_hash, generate_password_hash
from flask_login import login_required, login_user, logout_user
from logbook.models import User, db
from peewee import fn
auth = Blueprint("auth", __name__)
@auth.route("/login")
def login():
return render_template("login.html")
@auth.route("/login", methods=["POST"])
def login_post():
username = request.form.get("username")
password = request.form.get("password")
remember = True if request.form.get("remember") else False
user = User.get_or_none(fn.Lower(User.username) == username.lower())
# inform the user if the username/password is wrong
if user is None or not check_password_hash(user.password, password):
flash("Please check your login details and try again.")
return redirect(url_for("logbook.index_next_pages"))
login_user(user, remember=remember)
return redirect(url_for("logbook.index_next_pages"))
@auth.route("/signup")
def signup():
return render_template("signup.html")
@auth.route("/signup", methods=["POST"])
def signup_post():
username = request.form.get("username").lower()
password = request.form.get("password")
user = User.get_or_none(
fn.Lower(User.username) == username
) # if this returns a user, then the email already exists in database
if user is not None:
flash("Username already exists")
return redirect(url_for("auth.signup"))
    # create a new user with the form data, hashing the password so the plaintext version isn't saved
new_user = User(
username=username, password=generate_password_hash(password, method="sha256")
)
new_user.save()
return redirect(url_for("auth.login"))
@auth.route("/logout", methods=["POST"])
@login_required
def logout():
logout_user()
return redirect(url_for("auth.login"))
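
# A minimal wiring sketch (not part of this module, shown only for reference): it assumes
# an application factory elsewhere in the package; the `create_app` name, the secret key,
# and the integer `User.id` lookup are illustrative assumptions.
#
#   from flask import Flask
#   from flask_login import LoginManager
#
#   def create_app():
#       app = Flask(__name__)
#       app.config["SECRET_KEY"] = "change-me"
#       login_manager = LoginManager(app)
#       login_manager.login_view = "auth.login"
#
#       @login_manager.user_loader
#       def load_user(user_id):
#           return User.get_or_none(User.id == int(user_id))
#
#       app.register_blueprint(auth)
#       return app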
| 30.363636
| 94
| 0.686627
| 0
| 0
| 0
| 0
| 1,653
| 0.82485
| 0
| 0
| 522
| 0.260479
|