blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e43c62f38f42153c1eeb029ab4ae25e4e477a4a8 | 48db5f8cf922c27d1a1ebab4b98206ca31336c65 | /trainer.py | 8afb7ca41825d2f4225dbb15d85fc659fdfb4136 | [] | no_license | yatendernitk/Machine-Learning-Python | fc45f8339c08d34ebcb8b901491d9251102b1fc7 | 654b75e4d3c4b99ea6e1d48b216fe2e925e994ff | refs/heads/master | 2020-05-23T22:17:27.452184 | 2017-03-14T13:10:34 | 2017-03-14T13:10:34 | 84,795,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | from scipy import optimize
class trainer(object):
    """Fit a neural network's parameters with scipy's BFGS optimizer.

    The wrapped network ``N`` must expose ``getParams``/``setParams``,
    ``costFunction(X, y)`` and ``computeGradients(X, y)``.
    """

    def __init__(self, N):
        # Keep a local reference to the network being trained.
        self.N = N

    def callbackF(self, params):
        """Record the current cost after each optimizer iteration."""
        self.N.setParams(params)
        self.J.append(self.N.costFunction(self.X, self.y))

    def costFunctionWrapper(self, params, X, y):
        """Return (cost, gradient) in the form scipy.optimize expects."""
        self.N.setParams(params)
        return self.N.costFunction(X, y), self.N.computeGradients(X, y)

    def train(self, X, y):
        """Optimize the network on (X, y) and store the result object."""
        # Stash the training data so the per-iteration callback can see it.
        self.X = X
        self.y = y
        self.J = []  # cost history, one entry per BFGS iteration
        initial_params = self.N.getParams()
        _res = optimize.minimize(
            self.costFunctionWrapper,
            initial_params,
            jac=True,  # costFunctionWrapper returns (cost, grad)
            method='BFGS',
            args=(X, y),
            options={'maxiter': 200, 'disp': True},
            callback=self.callbackF,
        )
        self.N.setParams(_res.x)
        self.optimizationResults = _res
| [
"ok@Yatenders-MacBook-Pro-2.local"
] | ok@Yatenders-MacBook-Pro-2.local |
6c4260cb4c1cd8605e45e07848249e782def76d3 | e3d6f803beece2ecc2cde8de795fdd20291213ff | /nova/tests/unit/scheduler/test_utils.py | e92dd111cd4e2d319bac244ad5f43e733c2f47ab | [
"Apache-2.0"
] | permissive | panguan737/nova | 437c1adb81f3e9ef82c28ad957144623db13ba52 | 0d177185a439baa228b42c948cab4e934d6ac7b8 | refs/heads/main | 2023-01-07T00:08:44.069599 | 2020-11-01T14:00:42 | 2020-11-01T14:00:42 | 309,332,719 | 0 | 0 | Apache-2.0 | 2020-11-02T10:17:13 | 2020-11-02T10:17:13 | null | UTF-8 | Python | false | false | 27,014 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from nova.api.openstack.placement import lib as plib
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests import uuidsentinel as uuids
@ddt.ddt
class TestUtils(test.NoDBTestCase):
def setUp(self):
    """Create the admin RequestContext shared by every test in this class."""
    super(TestUtils, self).setUp()
    self.context = nova_context.get_admin_context()
def assertResourceRequestsEqual(self, expected, observed):
    """Assert that two ResourceRequest objects describe the same request.

    Compares the set of request-group identifiers, every group's
    attributes, and the overall result limit.
    """
    expected_groups = expected._rg_by_id
    observed_groups = observed._rg_by_id
    # Same group identifiers first, then attribute-level equality per group.
    self.assertEqual(set(expected_groups), set(observed_groups))
    for group_id, group in expected_groups.items():
        self.assertEqual(vars(group), vars(observed_groups[group_id]))
    self.assertEqual(expected._limit, observed._limit)
def _test_resources_from_request_spec(self, flavor, expected):
    # Helper: wrap *flavor* in a RequestSpec, run it through
    # resources_from_request_spec(), and compare against *expected*.
    fake_spec = objects.RequestSpec(flavor=flavor)
    resources = utils.resources_from_request_spec(fake_spec)
    self.assertResourceRequestsEqual(expected, resources)
def test_resources_from_request_spec(self):
    # A plain flavor maps to a single unnumbered request group whose
    # DISK_GB is the sum of root (10) and ephemeral (5) disk.
    flavor = objects.Flavor(vcpus=1,
                            memory_mb=1024,
                            root_gb=10,
                            ephemeral_gb=5,
                            swap=0)
    expected_resources = utils.ResourceRequest()
    expected_resources._rg_by_id[None] = plib.RequestGroup(
        use_same_provider=False,
        resources={
            'VCPU': 1,
            'MEMORY_MB': 1024,
            'DISK_GB': 15,
        }
    )
    self._test_resources_from_request_spec(flavor, expected_resources)
def test_resources_from_request_spec_with_no_disk(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=0,
ephemeral_gb=0,
swap=0)
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_custom_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 15,
"CUSTOM_TEST_CLASS": 1,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_override_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 99,
"resources:MEMORY_MB": 99,
"resources:DISK_GB": 99})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 99,
"MEMORY_MB": 99,
"DISK_GB": 99,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_remove_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 0,
"resources:DISK_GB": 0})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
"MEMORY_MB": 1024,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_vgpu(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=0,
swap=0,
extra_specs={
"resources:VGPU": 1,
"resources:VGPU_DISPLAY_HEAD": 1})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 10,
"VGPU": 1,
"VGPU_DISPLAY_HEAD": 1,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_bad_std_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:DOESNT_EXIST": 0})
fake_spec = objects.RequestSpec(flavor=flavor)
with mock.patch("nova.scheduler.utils.LOG.warning") as mock_log:
utils.resources_from_request_spec(fake_spec)
mock_log.assert_called_once()
args = mock_log.call_args[0]
self.assertEqual(args[0], "Received an invalid ResourceClass "
"'%(key)s' in extra_specs.")
self.assertEqual(args[1], {"key": "DOESNT_EXIST"})
def test_get_resources_from_request_spec_granular(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=0, swap=0,
extra_specs={'resources1:VGPU': '1',
'resources1:VGPU_DISPLAY_HEAD': '2',
# Replace
'resources3:VCPU': '2',
# Stay separate (don't sum)
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom in the unnumbered group (merge with DISK_GB)
'resources:CUSTOM_THING': '123',
# Traits make it through
'trait3:CUSTOM_SILVER': 'required',
'trait3:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': '0',
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': '0',
'resources:CUSTOM_FOO': '0',
# Bogus values don't make it through
'resources1:MEMORY_MB': 'bogus'})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
'DISK_GB': 10,
'CUSTOM_THING': 123,
}
)
expected_resources._rg_by_id['1'] = plib.RequestGroup(
resources={
'VGPU': 1,
'VGPU_DISPLAY_HEAD': 2,
}
)
expected_resources._rg_by_id['3'] = plib.RequestGroup(
resources={
'VCPU': 2,
},
required_traits={
'CUSTOM_GOLD',
'CUSTOM_SILVER',
}
)
expected_resources._rg_by_id['24'] = plib.RequestGroup(
resources={
'SRIOV_NET_VF': 2,
},
)
expected_resources._rg_by_id['42'] = plib.RequestGroup(
resources={
'SRIOV_NET_VF': 1,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
@mock.patch("nova.scheduler.utils.ResourceRequest.from_extra_specs")
def test_process_extra_specs_granular_called(self, mock_proc):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
fake_spec = objects.RequestSpec(flavor=flavor)
utils.resources_from_request_spec(fake_spec)
mock_proc.assert_called_once()
@mock.patch("nova.scheduler.utils.ResourceRequest.from_extra_specs")
def test_process_extra_specs_granular_not_called(self, mock_proc):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor)
utils.resources_from_request_spec(fake_spec)
mock_proc.assert_not_called()
def test_process_missing_extra_specs_value(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": ""})
fake_spec = objects.RequestSpec(flavor=flavor)
utils.resources_from_request_spec(fake_spec)
def test_process_no_force_hosts_or_force_nodes(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor)
expected = utils.ResourceRequest()
resources = utils.resources_from_request_spec(fake_spec)
self.assertEqual(expected._limit, resources._limit)
def test_process_use_force_nodes(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_nodes=['test'])
expected = utils.ResourceRequest()
expected._limit = None
resources = utils.resources_from_request_spec(fake_spec)
self.assertEqual(expected._limit, resources._limit)
def test_process_use_force_hosts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
expected = utils.ResourceRequest()
expected._limit = None
resources = utils.resources_from_request_spec(fake_spec)
self.assertEqual(expected._limit, resources._limit)
@ddt.data(
# Test single hint that we are checking for.
{'group': [uuids.fake]},
# Test hint we care about and some other random hint.
{'same_host': [uuids.fake], 'fake-hint': ['fake-value']},
# Test multiple hints we are checking for.
{'same_host': [uuids.server1], 'different_host': [uuids.server2]})
def test_resources_from_request_spec_no_limit_based_on_hint(self, hints):
"""Tests that there is no limit applied to the
GET /allocation_candidates query string if a given scheduler hint
is in the request spec.
"""
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(
flavor=flavor, scheduler_hints=hints)
expected = utils.ResourceRequest()
expected._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
expected._limit = None
resources = utils.resources_from_request_spec(fake_spec)
self.assertResourceRequestsEqual(expected, resources)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
def test_resources_from_flavor_no_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 16,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
def test_resources_from_flavor_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 6, # No root disk...
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
def test_resources_from_flavor_with_override(self, mock_is_bfv):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=1024,
extra_specs={
# Replace
'resources:VCPU': '2',
# Sum up
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom
'resources:CUSTOM_THING': '123',
# Ignore
'trait:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': 0,
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': 0,
'resources:CUSTOM_FOO': 0})
instance = objects.Instance()
expected = {
'VCPU': 2,
'DISK_GB': 16,
'CUSTOM_THING': 123,
'SRIOV_NET_VF': 3,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
def test_resource_request_from_extra_specs(self):
extra_specs = {
'resources:VCPU': '2',
'resources:MEMORY_MB': '2048',
'trait:HW_CPU_X86_AVX': 'required',
# Key skipped because no colons
'nocolons': '42',
'trait:CUSTOM_MAGIC': 'required',
# Resource skipped because invalid resource class name
'resources86:CUTSOM_MISSPELLED': '86',
'resources1:SRIOV_NET_VF': '1',
# Resource skipped because non-int-able value
'resources86:CUSTOM_FOO': 'seven',
# Resource skipped because negative value
'resources86:CUSTOM_NEGATIVE': '-7',
'resources1:IPV4_ADDRESS': '1',
# Trait skipped because unsupported value
'trait86:CUSTOM_GOLD': 'preferred',
'trait1:CUSTOM_PHYSNET_NET1': 'required',
'resources2:SRIOV_NET_VF': '1',
'resources2:IPV4_ADDRESS': '2',
'trait2:CUSTOM_PHYSNET_NET2': 'required',
'trait2:HW_NIC_ACCEL_SSL': 'required',
# Groupings that don't quite match the patterns are ignored
'resources_5:SRIOV_NET_VF': '7',
'traitFoo:HW_NIC_ACCEL_SSL': 'required',
# Solo resource, no corresponding traits
'resources3:DISK_GB': '5',
}
# Build up a ResourceRequest from the inside to compare against.
expected = utils.ResourceRequest()
expected._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'HW_CPU_X86_AVX',
'CUSTOM_MAGIC',
}
)
expected._rg_by_id['1'] = plib.RequestGroup(
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 1,
},
required_traits={
'CUSTOM_PHYSNET_NET1',
}
)
expected._rg_by_id['2'] = plib.RequestGroup(
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 2,
},
required_traits={
'CUSTOM_PHYSNET_NET2',
'HW_NIC_ACCEL_SSL',
}
)
expected._rg_by_id['3'] = plib.RequestGroup(
resources={
'DISK_GB': 5,
}
)
self.assertResourceRequestsEqual(
expected, utils.ResourceRequest.from_extra_specs(extra_specs))
def test_merge_resources(self):
resources = {
'VCPU': 1, 'MEMORY_MB': 1024,
}
new_resources = {
'VCPU': 2, 'MEMORY_MB': 2048, 'CUSTOM_FOO': 1,
}
doubled = {
'VCPU': 3, 'MEMORY_MB': 3072, 'CUSTOM_FOO': 1,
}
saved_orig = dict(resources)
utils.merge_resources(resources, new_resources)
# Check to see that we've doubled our resources
self.assertEqual(doubled, resources)
# and then removed those doubled resources
utils.merge_resources(resources, saved_orig, -1)
self.assertEqual(new_resources, resources)
def test_merge_resources_zero(self):
"""Test 0 value resources are ignored."""
resources = {
'VCPU': 1, 'MEMORY_MB': 1024,
}
new_resources = {
'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 0,
}
# The result should not include the zero valued resource.
doubled = {
'VCPU': 3, 'MEMORY_MB': 3072,
}
utils.merge_resources(resources, new_resources)
self.assertEqual(doubled, resources)
def test_merge_resources_original_zeroes(self):
"""Confirm that merging that result in a zero in the original
excludes the zeroed resource class.
"""
resources = {
'VCPU': 3, 'MEMORY_MB': 1023, 'DISK_GB': 1,
}
new_resources = {
'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 1,
}
merged = {
'VCPU': 2, 'MEMORY_MB': 511,
}
utils.merge_resources(resources, new_resources, -1)
self.assertEqual(merged, resources)
def test_claim_resources_on_destination_no_source_allocations(self):
"""Tests the negative scenario where the instance does not have
allocations in Placement on the source compute node so no claim is
attempted on the destination compute node.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
@mock.patch.object(reportclient,
'get_allocations_for_consumer_by_provider',
return_value={})
@mock.patch.object(reportclient,
'claim_resources',
new_callable=mock.NonCallableMock)
def test(mock_claim, mock_get_allocs):
utils.claim_resources_on_destination(
self.context, reportclient, instance, source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, uuids.source_node, instance.uuid)
test()
def test_claim_resources_on_destination_claim_fails(self):
"""Tests the negative scenario where the resource allocation claim
on the destination compute node fails, resulting in an error.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but we're lazy.
'DISK_GB': instance.root_gb
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': source_res_allocs
}
}
}
@mock.patch.object(reportclient,
'get_allocations_for_consumer_by_provider',
return_value=source_res_allocs)
@mock.patch.object(reportclient,
'claim_resources', return_value=False)
def test(mock_claim, mock_get_allocs):
# NOTE(danms): Don't pass source_node_allocations here to test
# that they are fetched if needed.
self.assertRaises(exception.NoValidHost,
utils.claim_resources_on_destination,
self.context, reportclient, instance,
source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, uuids.source_node, instance.uuid)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.12')
test()
def test_claim_resources_on_destination(self):
"""Happy path test where everything is successful."""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but we're lazy.
'DISK_GB': instance.root_gb
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': source_res_allocs
}
}
}
@mock.patch.object(reportclient,
'get_allocations_for_consumer_by_provider')
@mock.patch.object(reportclient,
'claim_resources', return_value=True)
def test(mock_claim, mock_get_allocs):
utils.claim_resources_on_destination(
self.context, reportclient, instance, source_node, dest_node,
source_res_allocs)
self.assertFalse(mock_get_allocs.called)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.12')
test()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
def test_claim_resources(self, mock_is_rebuild, mock_client):
"""Tests that when claim_resources() is called, that we appropriately
call the placement client to claim resources for the instance.
"""
mock_is_rebuild.return_value = False
ctx = mock.Mock(user_id=uuids.user_id)
spec_obj = mock.Mock(project_id=uuids.project_id)
instance_uuid = uuids.instance
alloc_req = mock.sentinel.alloc_req
mock_client.claim_resources.return_value = True
res = utils.claim_resources(ctx, mock_client, spec_obj, instance_uuid,
alloc_req)
mock_client.claim_resources.assert_called_once_with(
ctx, uuids.instance, mock.sentinel.alloc_req, uuids.project_id,
uuids.user_id, allocation_request_version=None)
self.assertTrue(res)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
def test_claim_resouces_for_policy_check(self, mock_is_rebuild,
mock_client):
mock_is_rebuild.return_value = True
ctx = mock.Mock(user_id=uuids.user_id)
res = utils.claim_resources(ctx, None, mock.sentinel.spec_obj,
mock.sentinel.instance_uuid, [])
self.assertTrue(res)
mock_is_rebuild.assert_called_once_with(mock.sentinel.spec_obj)
self.assertFalse(mock_client.claim_resources.called)
| [
"147360410@qq.com"
] | 147360410@qq.com |
34629a6d69c14efaa2f5389ad697c3260d71ecd0 | ab1891d96edf63354926158a2a96481b5ab4587f | /app.py | 894802b303109fabdcbdf2204f97f39c6053f3c7 | [] | no_license | Ravikumar-Orsu/BMI-calculator-using-Flask | 2989357ae3e3f6a2ffd133030a3d7dba47d5ed32 | 35b94bbacc46b4b612589f3492f263352dfe013f | refs/heads/main | 2023-03-22T14:31:36.047122 | 2021-03-16T04:54:21 | 2021-03-16T04:54:21 | 348,218,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from flask import Flask, render_template,request
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def index():
    """Render the BMI form; on POST, compute BMI from weight (kg) and height (m).

    Renders index.html with ``bmi`` set to the rounded result, or to an
    empty string when no (valid) form data was submitted.
    """
    bmi = ""
    if request.method == "POST" and 'weight' in request.form:
        try:
            weight = float(request.form.get('weight'))
            height = float(request.form.get('height'))
            # Guard against zero/negative height, which previously raised
            # ZeroDivisionError and produced a 500 error page.
            if height > 0:
                bmi = round(weight / (height * height), 2)
        except (TypeError, ValueError):
            # Non-numeric or missing input: re-render with an empty result
            # instead of crashing.
            bmi = ""
    return render_template("index.html", bmi=bmi)
# Enable the interactive debugger, then start the development server.
# NOTE(review): app.run() blocks until the server shuts down, so the
# trailing app.run(debug=True) call after this one is unreachable dead
# code and should be removed.
app.debug=True
app.run()
app.run(debug=True) | [
"noreply@github.com"
] | noreply@github.com |
4bad0a9d74fdc33c1b08594b16c3ae6ae2d4ad36 | 26b6a35e2415d94fbc1c9fc43814309a5d6f443b | /tests/test_openapi_basic.py | f18074c73970570a97135bc4faab94c39ee95a93 | [
"BSD-3-Clause",
"MIT"
] | permissive | BigRLab/apiflask | 57e0c036aa5d284da5340dcecd49108eea651bcd | d6dd5595009be5de6a7741a5a887276c3ac011bf | refs/heads/main | 2023-05-30T21:30:17.930046 | 2021-07-11T04:07:15 | 2021-07-11T04:07:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,700 | py | import json
import pytest
from openapi_spec_validator import validate_spec
from .schemas import BarSchema
from .schemas import BazSchema
from .schemas import FooSchema
from apiflask import doc
from apiflask import input
from apiflask import output
from apiflask import Schema as BaseSchema
from apiflask.fields import Integer
def test_spec(app):
    """The generated spec is non-empty and has a top-level 'openapi' key."""
    assert app.spec
    assert 'openapi' in app.spec
def test_spec_processor(app, client):
@app.spec_processor
def edit_spec(spec):
assert spec['openapi'] == '3.0.3'
spec['openapi'] = '3.0.2'
assert app.title == 'APIFlask'
assert spec['info']['title'] == 'APIFlask'
spec['info']['title'] = 'Foo'
return spec
rv = client.get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert rv.json['openapi'] == '3.0.2'
assert rv.json['info']['title'] == 'Foo'
@pytest.mark.parametrize('spec_format', ['json', 'yaml', 'yml'])
def test_get_spec(app, spec_format):
    # _get_spec returns a dict for the JSON format and a YAML string for
    # both 'yaml' and 'yml'.
    spec = app._get_spec(spec_format)
    if spec_format == 'json':
        assert isinstance(spec, dict)
    else:
        assert 'title: APIFlask' in spec
def test_get_spec_force_update(app):
app._get_spec()
@app.route('/foo')
@output(FooSchema)
def foo():
pass
spec = app._get_spec()
assert '/foo' not in spec['paths']
new_spec = app._get_spec(force_update=True)
assert '/foo' in new_spec['paths']
def test_spec_attribute(app):
spec = app._get_spec()
@app.route('/foo')
@output(FooSchema)
def foo():
pass
assert '/foo' not in spec['paths']
assert '/foo' in app.spec['paths']
def test_spec_schemas(app):
@app.route('/foo')
@output(FooSchema(partial=True))
def foo():
pass
@app.route('/bar')
@output(BarSchema(many=True))
def bar():
pass
@app.route('/baz')
@output(BazSchema)
def baz():
pass
class Spam(BaseSchema):
id = Integer()
@app.route('/spam')
@output(Spam)
def spam():
pass
class Schema(BaseSchema):
id = Integer()
@app.route('/schema')
@output(Schema)
def schema():
pass
with app.app_context():
spec = app.spec
assert len(spec['components']['schemas']) == 5
assert 'FooUpdate' in spec['components']['schemas']
assert 'Bar' in spec['components']['schemas']
assert 'Baz' in spec['components']['schemas']
assert 'Spam' in spec['components']['schemas']
assert 'Schema' in spec['components']['schemas']
def test_servers_and_externaldocs(app):
assert app.external_docs is None
assert app.servers is None
app.external_docs = {
'description': 'Find more info here',
'url': 'https://docs.example.com/'
}
app.servers = [
{
'url': 'http://localhost:5000/',
'description': 'Development server'
},
{
'url': 'https://api.example.com/',
'description': 'Production server'
}
]
rv = app.test_client().get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert rv.json['externalDocs'] == {
'description': 'Find more info here',
'url': 'https://docs.example.com/'
}
assert rv.json['servers'] == [
{
'url': 'http://localhost:5000/',
'description': 'Development server'
},
{
'url': 'https://api.example.com/',
'description': 'Production server'
}
]
def test_auto_200_response(app, client):
@app.get('/foo')
def bare():
pass
@app.get('/bar')
@input(FooSchema)
def only_input():
pass
@app.get('/baz')
@doc(summary='some summary')
def only_doc():
pass
@app.get('/eggs')
@output(FooSchema, 204)
def output_204():
pass
@app.get('/spam')
@doc(responses={204: 'empty'})
def doc_responses():
pass
rv = client.get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert '200' in rv.json['paths']['/foo']['get']['responses']
assert '200' in rv.json['paths']['/bar']['get']['responses']
assert '200' in rv.json['paths']['/baz']['get']['responses']
assert '200' not in rv.json['paths']['/eggs']['get']['responses']
assert '200' not in rv.json['paths']['/spam']['get']['responses']
assert rv.json['paths']['/spam']['get']['responses'][
'204']['description'] == 'empty'
def test_sync_local_json_spec(app, client, tmp_path):
local_spec_path = tmp_path / 'openapi.json'
app.config['SYNC_LOCAL_SPEC'] = True
app.config['LOCAL_SPEC_PATH'] = local_spec_path
app.config['SPEC_FORMAT'] = 'json'
rv = client.get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
with open(local_spec_path) as f:
spec_content = f.read()
assert json.loads(spec_content) == app.spec
assert '{\n "info": {' in spec_content
assert '"title": "APIFlask",' in spec_content
def test_sync_local_yaml_spec(app, client, tmp_path):
local_spec_path = tmp_path / 'openapi.json'
app.config['SYNC_LOCAL_SPEC'] = True
app.config['LOCAL_SPEC_PATH'] = local_spec_path
app.config['SPEC_FORMAT'] = 'yaml'
rv = client.get('/openapi.json')
assert rv.status_code == 200
with open(local_spec_path) as f:
spec_content = f.read()
assert spec_content == str(app.spec)
assert 'title: APIFlask' in spec_content
def test_sync_local_spec_no_path(app):
app.config['SYNC_LOCAL_SPEC'] = True
with pytest.raises(TypeError):
app.spec
| [
"withlihui@gmail.com"
] | withlihui@gmail.com |
bfe6d834e8798d3475fd843db6ea34bbfcd75c37 | 94cb06d6a89b3518ab103fab3bcc03634283bde1 | /doc/conf.py | 66028559cfc5b5e931cb05980597b20be38f05b7 | [] | no_license | krakphp/lava | 886bf108fa9ce86e284a070569c883ed8a8d6b1b | 88490c5b9bb577289139f73e91ef996362eb6bf1 | refs/heads/master | 2023-08-12T00:35:49.017101 | 2017-09-17T03:08:31 | 2017-09-17T03:08:31 | 84,716,297 | 1 | 0 | null | 2017-09-17T03:08:32 | 2017-03-12T10:08:09 | PHP | UTF-8 | Python | false | false | 9,752 | py | # -*- coding: utf-8 -*-
#
# Lava documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 27 18:12:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Lava'
copyright = u'2017, RJ Garcia'
author = u'RJ Garcia'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.3'
# The full version, including alpha/beta/rc tags.
release = u'0.3.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Mw Http v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# NOTE(review): Sphinx emits a build warning if the '_static' directory
# does not exist -- confirm it is checked in next to this conf.py.
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# (Project name + "doc", the sphinx-quickstart convention.)
htmlhelp_basename = 'MwHttpdoc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX options below are left at their Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# `master_doc` is defined earlier in this configuration file.
latex_documents = [
    (master_doc, 'MwHttp.tex', u'Mw Http Documentation',
     u'RJ Garcia', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'mwhttp', u'Mw Http Documentation',
     [author], 1)  # man section 1: user commands
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description is still the sphinx-quickstart placeholder
# 'One line description of project.' -- replace it with a real summary.
texinfo_documents = [
    (master_doc, 'MwHttp', u'Mw Http Documentation',
     author, 'MwHttp', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| [
"rj@bighead.net"
] | rj@bighead.net |
e8cff7405331705ecde8b0a9722786a9a9e6d615 | 11ff14c118240e87c4804d0373e4656d0683d479 | /RatToolAgent/test/firefox_test.py | 63c7ccf8fd97890cb406cd2616cc6efaffa93c1d | [] | no_license | wxmmavis/OS3.1 | e3028d9c79d5a1a17449fea6380fcdda902bdec7 | 26d954344207a82d2298821c3c4f01302393dc7e | refs/heads/master | 2020-03-25T20:07:11.225493 | 2018-08-13T03:20:57 | 2018-08-13T03:20:57 | 144,115,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import sys
sys.path += ['../../RatToolAgent']
import RatToolAgent as rta
id = rta.init_and_start_browser()
conf = {
'validation_url': "http://172.16.10.252/authenticated/",
'download_loc': r"//a[@id='logo']",
'file_name': "logo.zip",
'page_title': "Ruckus Automation Test",
}
try:
rta.download_file_on_web_server(id, conf.pop('validation_url'),
conf.pop('download_loc'),
conf.pop('file_name'),
**conf
)
except Exception, e:
print '........................................'
print 'Raise:' + e.message
rta.close_browser(id)
| [
"1475806321@qq.com"
] | 1475806321@qq.com |
37e1e2f43b6abf4adb8f7a1bbb7af5db0d5c8355 | 3e640f5c59e6cbd1741afb08212aa0e3627f9752 | /deps/bcRead.py | 296744bfcbb0f25518a6b6d59317e399202bc7a8 | [
"MIT"
] | permissive | CapPow/VARP_supplimental | 36a845cf5713c8d9243d7a1a2ac3afc35472f078 | 1db65f7447ec066232a8cb00c9b86bff9ee11b3f | refs/heads/master | 2023-02-21T11:13:05.537202 | 2021-01-28T05:19:33 | 2021-01-28T05:19:33 | 296,470,678 | 0 | 0 | null | 2021-01-27T23:45:36 | 2020-09-18T00:15:02 | Jupyter Notebook | UTF-8 | Python | false | false | 25,397 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
HerbASAP - Herbarium Application for Specimen Auto-Processing
performs post processing steps on raw format images of natural history
specimens. Specifically designed for Herbarium sheet images.
"""
import re
import numpy as np
import cv2
from pylibdmtx.pylibdmtx import decode as libdmtx_decode
# import the pyzbar fork (local)
from .pyzbar.pyzbar import decode as zbar_decode
###
# Developer note: the libraries: re, pyzbar, and pylibdmtx all have a "decode"
# method which are used in this class. This can cause difficult to debug issues
###
class bcRead():
    """A barcode reader class

    Args:
        patterns (str): A string of uncompiled, "|" concatenated regex patterns.
        backend (str): Either "zbar" or "libdmtx", to determine which library
            should be used for decoding. Default is 'zbar'.
        rotation_list (iterable, optional): iterable containing a series of int
            representing image rotations (in degrees) to attempt if no barcode
            is found. Default values are [9, 25, 18]. Rotation attempts stop
            after any results are found. The list's rotations are cumulative.
            Short or empty lists will decrease the time before giving up on
            finding a barcode.

    Attributes:
        rePattern (obj): A compiled regex pattern
        backend (str): a string to determine which decoder was imported.
        rotation_list (list): The saved rotation list
    """

    def __init__(self, patterns, backend='zbar',
                 rotation_list=[9, 25, 18], parent=None, *args):
        # NOTE(review): rotation_list is a mutable default argument; it is
        # never mutated here, but a copy (or None sentinel) would be safer.
        # NOTE(review): *args is accepted but ignored.
        super(bcRead, self).__init__()
        self.parent = parent
        self.compileRegexPattern(patterns)
        # This might need promoted to a user preference in mainwindow
        self.rotation_list = rotation_list
        # NOTE(review): self.decode is only bound by set_backend(); it is not
        # set here. decodeBC() dispatches on self.backend instead, so this
        # works, but calling self.decode before set_backend() would fail.
        self.backend = backend

    def decode_zbar(self, im):
        # Thin wrapper over the pyzbar fork's decode().
        return zbar_decode(im)

    def decode_libdmtx(self, im):
        # Thin wrapper over pylibdmtx decode; timeout is in milliseconds per
        # the pylibdmtx API -- TODO confirm the intended budget.
        return libdmtx_decode(im, timeout=1500)

    def set_backend(self, backend='zbar'):
        """
        Sets which library should be used for decoding. Default is 'zbar'.

        :param backend: string, either 'zbar' or 'libdmtx'. libdmtx is useful
            for datamatrix decoding.
        :type backend: str
        :return:
        """
        self.backend = backend
        if backend == 'zbar':
            self.decode = self.decode_zbar
        elif backend == 'libdmtx':
            self.decode = self.decode_libdmtx

    def compileRegexPattern(self, patterns):
        """ compiles a collection specific regex pattern """
        # assume an empty pattern is a confused user, match everything.
        if patterns == '':
            patterns = '^(.*)'
        try:
            rePattern = re.compile(patterns)
            self.rePattern = rePattern
        except re.error:
            # propagate so the caller can surface the malformed pattern
            raise

    def decodeBC(self, img, verifyPattern=True, return_details=False):
        """ attempts to decode barcodes from an image array object.

        Given a np array image object (img), decodes BCs and optionally
        returns those which match self.rePattern
        verifies if the bcData matches the compiled rePattern.

        Args:
            img (numpy.ndarray): a numpy image array object
            return_details (bool, optional): default = False. Whether or not
                to return the barcode(s) bounding box coordinate(s) and
                format(s) in addition to the barcode value(s).
                NOTE(review): this parameter is currently accepted but
                unused -- the method always returns plain string values.

        Returns (list): a list of matched barcode value(s) found in the image.
        """
        # the complete output from pyzbar which matches checkPattern
        backend = self.backend
        if backend == 'zbar':
            # the direct decoder is bypassed in favor of the square-based
            # vector extraction pipeline below.
            #code_reader = self.decode_zbar
            code_reader = self.extract_by_squares
        elif backend == 'libdmtx':
            code_reader = self.decode_libdmtx
        # decode each code found from bytes to utf-8
        bcRawData = [x.data.decode('utf-8') for x in code_reader(img)]
        if verifyPattern:  # limit the results to those matching rePattern
            bcRawData = [x for x in bcRawData if self.rePattern.match(x)]
        return bcRawData

    def rotateImg(self, img, angle, reversible=False):
        """
        given a np array image object (img), and an angle rotates the img
        without cropping the corners. If reversible == True, also calculate
        the matrix that maps points back to the original orientation.
        """
        # see: https://stackoverflow.com/questions/48479656/how-can-i-rotate-an-ndarray-image-properly
        # https://www.pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/
        (height, width) = img.shape[:2]
        (cent_x, cent_y) = (width // 2, height // 2)
        mat = cv2.getRotationMatrix2D((cent_x, cent_y), -angle, 1.0)
        cos = np.abs(mat[0, 0])
        sin = np.abs(mat[0, 1])
        # expand the canvas so the rotated corners are not cropped
        n_width = int((height * sin) + (width * cos))
        n_height = int((height * cos) + (width * sin))
        mat[0, 2] += (n_width / 2) - cent_x
        mat[1, 2] += (n_height / 2) - cent_y
        rotated_img = cv2.warpAffine(img, mat, (n_width, n_height))
        if reversible:  # now calculate the reverse matrix
            (r_height, r_width) = rotated_img.shape[:2]
            (cent_x, cent_y) = (r_width // 2, r_height // 2)
            rev_mat = cv2.getRotationMatrix2D((cent_x, cent_y), angle, 1.0)
            rev_mat[0, 2] += (width / 2) - cent_x
            rev_mat[1, 2] += (height / 2) - cent_y
            return rotated_img, rev_mat
        else:  # return none so the results can be parsed similarly
            return rotated_img, None

    def det_bc_center(self, rect, rev_mat):
        """
        Used to determine the center point of a rotation corrected bounding box

        :param rect: a pyzbar rectangle array structured as:
            (left, top, width, height)
        :type rect: Rect, array
        :param rev_mat: the reverse rotation matrix produced by rotateImg()
            (or None when no rotation was applied).
        :return: Returns the center point of the barcode before rotation.
        :rtype: tuple, (x, y)
        """
        px = rect.left + (rect.width/2)
        py = rect.top + (rect.height/2)
        if not isinstance(rev_mat, np.ndarray):
            # no rotation, so current centerpoint is correct centerpoint
            return (int(px), int(py))
        # otherwise convert current centerpoint using reverse matrix
        nx, ny = rev_mat.dot(np.array((px, py) + (1,))).astype(int)
        return (nx, ny)

    def angle_cos(self, p0, p1, p2):
        """
        Utilized in find_squares; from the opencv samples. Returns the
        absolute cosine of the angle at p1 formed by p0-p1-p2.
        """
        d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
        return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))

    def adjust_gamma(self, image, gamma=1.0):
        # from https://www.pyimagesearch.com/2015/10/05/opencv-gamma-correction/
        # gamma < 1.0 darkens, gamma > 1.0 brightens.
        invGamma = 1.0 / gamma
        table = np.array([((i / 255.0) ** invGamma) * 255
                          for i in np.arange(0, 256)]).astype("uint8")
        # apply gamma correction using the lookup table
        return cv2.LUT(image, table)

    def find_squares(self, img):
        """
        Heavily modified from opencv samples, attempts to identify squares
        in an img (candidate barcode/finder regions).
        """
        ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        img = cv2.GaussianBlur(img, (3, 3), 3)
        img = cv2.erode(img, None)
        img = cv2.dilate(img, None, iterations=2)
        squares = []
        # sweep several binarization thresholds (0 uses Canny edges instead)
        for thrs in range(0, 255, 51):
            if thrs == 0:
                # NOTE(review): `bin` shadows the builtin of the same name.
                bin = cv2.Canny(img, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(img, thrs, 255, cv2.THRESH_BINARY)
            contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                contourArea = cv2.contourArea(cnt)
                # keep convex quadrilaterals within an area band
                if len(cnt) == 4 and contourArea > 25 and contourArea < 10000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([self.angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    # all corner angles must be close to 90 degrees
                    if max_cos < 0.1:
                        squares.append(cnt)
        return squares

    def merge_proposals(self, images):
        """
        given a list of 1-D extracted scan lines, merges them into one image.

        NOTE(review): despite the original wording ("merges them vertically"),
        each extracted line is written as a single one-pixel-wide *column* of
        the output, so the lines end up side by side -- confirm intent.
        """
        min_pix_length = 10
        # drop degenerate (too-short) scan lines
        images = [x for x in images if x.shape[0] > min_pix_length]
        height = max(image.shape[0] for image in images) + 1
        width = len(images) + 1
        output = np.zeros((height, width)).astype('uint8')
        y = 0
        for image in images:
            h = image.shape[0] - 1
            w = 1
            output[0:h+1, y+w] = image
            y += w
        return output

    def det_midpoint(self, p1, p2):
        """
        called by det_centroid_intersection(); midpoint of two (x, y) points.
        """
        return int((p1[0]+p2[0])/2), int((p1[1]+p2[1])/2)

    def det_centroid_intersection(self, square):
        """
        given a square contour, returns the endpoints of the 2 vectors that
        intersect at its centroid (midpoints of opposing edges).
        """
        a, b, c, d = square
        ab_mid = self.det_midpoint(a, b)
        cd_mid = self.det_midpoint(c, d)
        da_mid = self.det_midpoint(d, a)
        bc_mid = self.det_midpoint(b, c)
        return ab_mid, cd_mid, da_mid, bc_mid

    def extend_vector(self, p1, p2, h, w, extend=500):
        """
        given 2 points of a vector, extends it an arbitrary amount in both
        directions, clamped to the image bounds [0, w] x [0, h].
        """
        theta = np.arctan2(p1[1]-p2[1], p1[0]-p2[0])
        endpt_x = max(0, min(p1[0] - extend*np.cos(theta), w))
        endpt_y = max(0, min(p1[1] - extend*np.sin(theta), h))
        # repeat in the opposite direction for the start point
        theta = np.arctan2(p2[1]-p1[1], p2[0]-p1[0])
        startpt_x = max(0, min(p2[0] - extend*np.cos(theta), w))
        startpt_y = max(0, min(p2[1] - extend*np.sin(theta), h))
        return startpt_x, startpt_y, endpt_x, endpt_y

    def extract_vector_coords(self, x1, y1, x2, y2, h, w):
        """
        given 2 points of a vector, returns coordinates for the nearest pixels
        traversed by that vector, as a (rows, cols) index pair suitable for
        fancy indexing.

        NOTE(review): the h and w parameters are unused here.

        Modified from:
        https://stackoverflow.com/questions/7878398/how-to-extract-an-arbitrary-line-of-values-from-a-numpy-array
        """
        length = int(np.hypot(x2-x1, y2-y1))
        x = np.linspace(x1, x2, length)
        x = np.rint(x).astype(int)
        y = np.linspace(y1, y2, length)
        y = np.rint(y).astype(int)
        pix_coords = y, x
        return pix_coords

    def extract_by_squares(self, gray, retry=True, extension=6):
        """
        given a numpy array image attempts to identify all barcodes using
        vector extraction: find square-ish contours, shoot scan lines through
        their centroids, stack the lines into a small composite, and decode
        that composite with zbar.
        """
        img = gray.copy()
        # `gray` is assumed to arrive as a BGR color image -- TODO confirm.
        gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
        # apparently this does not generalize well for very large resolutions
        h, w = gray.shape[0:2]
        if max(w, h) > 6800:
            new_size = (int(w*0.8), int(h*0.8))
            w, h = new_size
            gray = cv2.resize(gray, new_size, interpolation=cv2.INTER_NEAREST)
        # ID squares
        squares = self.find_squares(gray)
        cv2.drawContours(img, squares, -1, (0,255,0), 2)
        #cv2.imwrite("squares.jpg", img[4897:5682, 1352:3009, ...])
        #print(f'found {len(squares)} squares.')
        if len(squares) < 1:
            # no candidate regions: fall back to a plain full-image scan
            z = zbar_decode(gray, y_density=3, x_density=3)
        else:
            # iterate over each and det their midpoint intersects
            h -= 1
            w -= 1
            line_data = []
            # extension happens in both directions, therefore effectively doubled.
            extend = min(h, w) // extension
            for square in squares:
                a, b, c, d = square
                ab_mid = self.det_midpoint(a, b)
                cd_mid = self.det_midpoint(c, d)
                x1, y1, x2, y2 = self.extend_vector(ab_mid, cd_mid, h, w, extend=extend)
                pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
                zi = gray[pix_coords]
                line_data.append(zi)
                da_mid = self.det_midpoint(d, a)
                bc_mid = self.det_midpoint(b, c)
                x1, y1, x2, y2 = self.extend_vector(da_mid, bc_mid, h, w, extend=extend)
                pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
                zi = gray[pix_coords]
                line_data.append(zi)
            merged_lines = self.merge_proposals(line_data)
            #print(f'merged_lines shape = {merged_lines_shape}')
            z = zbar_decode(merged_lines, y_density=0, x_density=1)
            # fallback methods if no results are found.
            if len(z) < 1 and retry:
                # first try darkening it
                merged_lines = self.adjust_gamma(merged_lines, 0.8)
                z = zbar_decode(merged_lines, y_density=0, x_density=1)
                if len(z) < 1:
                    very_gamma_lines = self.adjust_gamma(merged_lines, 0.4)
                    z = zbar_decode(very_gamma_lines, y_density=0, x_density=1)
                if len(z) < 1:
                    # if that fails try sharpening it
                    blurred = cv2.GaussianBlur(merged_lines, (0, 0), 10)
                    merged_lines = cv2.addWeighted(merged_lines, 2, blurred, -1, 0)
                    z = zbar_decode(merged_lines, y_density=0, x_density=1)
                # NOTE(review): `len(z) < 1 & retry` parses as
                # `len(z) < (1 & retry)` because `&` binds tighter than `<`.
                # With a boolean `retry` this happens to match the intended
                # `len(z) < 1 and retry`, but it should be spelled with `and`.
                if len(z) < 1 & retry:
                    # if all that fails squares again but with a darker img
                    gray = self.adjust_gamma(gray, 0.4)
                    z = self.extract_by_squares(gray, retry=False)
                if len(z) < 1 & retry:
                    # if that fails, try squares on shrunk img
                    o_h, o_w = gray.shape[0:2]
                    new_size = (int(o_h * 0.8), int(o_w * 0.8))
                    gray = cv2.resize(gray, new_size)
                    #print(f'retrying with size {new_size}')
                    z = self.extract_by_squares(gray, retry=False)
        return z

    def extract_by_squares_with_annotation(self, gray, fname, retry=True,
                                           extension=6):
        """
        This method only exists to produce a visual representation of the
        VARP process: it mirrors extract_by_squares() but also writes the
        annotated squares, vectors, and composite images next to *fname*.
        """
        base_fn = fname.rsplit(".", 1)[0]
        retry_img = gray.copy()
        img = gray.copy()
        gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
        # apparently this does not generalize well for very large resolutions
        h, w = gray.shape[0:2]
        if max(w, h) > 6800:
            new_size = (int(w*0.8), int(h*0.8))
            w, h = new_size
            gray = cv2.resize(gray, new_size, interpolation=cv2.INTER_NEAREST)
        # ID squares
        squares = self.find_squares(gray)
        # save the annotated squares (subfig "A")
        cv2.drawContours(img, squares, -1, (0,255,0), 2)
        squares_fn = base_fn + "_squares.jpg"
        cv2.imwrite(squares_fn, img)
        #print(f'found {len(squares)} squares.')
        if len(squares) < 1:
            z = zbar_decode(gray, y_density=3, x_density=3)
        else:
            # iterate over each and det their midpoint intersects
            h -= 1
            w -= 1
            line_data = []
            # extension happens in both directions, therefore effectively doubled.
            extend = min(h, w) // extension
            for square in squares:
                a, b, c, d = square
                ab_mid = self.det_midpoint(a, b)
                cd_mid = self.det_midpoint(c, d)
                x1, y1, x2, y2 = self.extend_vector(ab_mid, cd_mid, h, w, extend=extend)
                # annotate the extension
                cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), thickness=1, lineType=8)
                pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
                zi = gray[pix_coords]
                line_data.append(zi)
                da_mid = self.det_midpoint(d, a)
                bc_mid = self.det_midpoint(b, c)
                x1, y1, x2, y2 = self.extend_vector(da_mid, bc_mid, h, w, extend=extend)
                # annotate the extension
                cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), thickness=1, lineType=8)
                pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
                zi = gray[pix_coords]
                line_data.append(zi)
            # save the annotated vectors (subfig "B")
            lines_fn = base_fn + "_vectors.jpg"
            cv2.imwrite(lines_fn, img)
            merged_lines = self.merge_proposals(line_data)
            # save the resulting composite (subfig "C")
            comp_fn = base_fn + "_composite.jpg"
            cv2.imwrite(comp_fn, merged_lines)
            #print(f'merged_lines shape = {merged_lines_shape}')
            z = zbar_decode(merged_lines, y_density=0, x_density=1)
            if len(z) < 1 and retry:
                print("Implimenting fallback methods:")
                # first try darkening it
                print(" [fallback method]: darkening composite image")
                merged_lines = self.adjust_gamma(merged_lines, 0.8)
                z = zbar_decode(merged_lines, y_density=0, x_density=1)
                if len(z) < 1:
                    very_gamma_lines = self.adjust_gamma(merged_lines, 0.4)
                    z = zbar_decode(very_gamma_lines, y_density=0, x_density=1)
                if len(z) < 1:
                    print(" [fallback method]: sharpening composite image")
                    # if that fails try sharpening it
                    blurred = cv2.GaussianBlur(merged_lines, (0, 0), 10)
                    merged_lines = cv2.addWeighted(merged_lines, 2, blurred, -1, 0)
                    z = zbar_decode(merged_lines, y_density=0, x_density=1)
                if len(z) < 1:
                    # if all that fails squares again but with a darker img
                    print(" [fallback method]: darkening input image")
                    gray = self.adjust_gamma(retry_img, 0.4)
                    z = self.extract_by_squares_with_annotation(gray, fname, retry=False, extension=extension)
                if len(z) < 1:
                    # if that fails, try squares on shrunk img
                    print(" [fallback method]: shrunk input image")
                    o_w, o_h = gray.shape[0:2]
                    new_size = (int(o_h * 0.8), int(o_w * 0.8))
                    gray = cv2.resize(gray, new_size)
                    #print(f'retrying with size {new_size}')
                    z = self.extract_by_squares_with_annotation(gray, fname, retry=False, extension=extension)
        return z

    def reduction_determination_extract_by_squares(self, gray, retry=True, extension=6):
        """
        This method only exists to determine a typical resolution reduction.

        given a numpy array image attempts to identify all barcodes using
        vector extraction, and also returns the relative pixel-count change
        of the composite vs. the input (see det_res_reduction).
        """
        # apparently this does not generalize well for very large resolutions
        h, w = gray.shape[0:2]
        if max(w, h) > 6800:
            new_size = (int(w*0.8), int(h*0.8))
            w, h = new_size
            gray = cv2.resize(gray, new_size, interpolation=cv2.INTER_NEAREST)
        # ID squares
        squares = self.find_squares(gray)
        if len(squares) < 1:
            z = zbar_decode(gray, y_density=3, x_density=3)
            # no composite was built, so no reduction to report
            reduction = 0
        else:
            # iterate over each and det their midpoint intersects
            h -= 1
            w -= 1
            line_data = []
            # extension happens in both directions, therefore effectively doubled.
            extend = min(h, w) // extension
            for square in squares:
                a, b, c, d = square
                ab_mid = self.det_midpoint(a, b)
                cd_mid = self.det_midpoint(c, d)
                x1, y1, x2, y2 = self.extend_vector(ab_mid, cd_mid, h, w, extend=extend)
                pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
                zi = gray[pix_coords]
                line_data.append(zi)
                da_mid = self.det_midpoint(d, a)
                bc_mid = self.det_midpoint(b, c)
                x1, y1, x2, y2 = self.extend_vector(da_mid, bc_mid, h, w, extend=extend)
                pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
                zi = gray[pix_coords]
                line_data.append(zi)
            merged_lines = self.merge_proposals(line_data)
            z = zbar_decode(merged_lines, y_density=0, x_density=1)
            o_h, o_w = gray.shape[0:2]
            m_h, m_w = merged_lines.shape[0:2]
            reduction = self.det_res_reduction(o_h, o_w, m_h, m_w)
            if len(z) < 1:
                # first try darkening it
                merged_lines = self.adjust_gamma(merged_lines, 0.8)
                z = zbar_decode(merged_lines, y_density=0, x_density=1)
                if len(z) < 1:
                    very_gamma_lines = self.adjust_gamma(merged_lines, 0.4)
                    z = zbar_decode(very_gamma_lines, y_density=0, x_density=1)
                if len(z) < 1:
                    # if that fails try sharpening it
                    blurred = cv2.GaussianBlur(merged_lines, (0, 0), 10)
                    merged_lines = cv2.addWeighted(merged_lines, 2, blurred, -1, 0)
                    z = zbar_decode(merged_lines, y_density=0, x_density=1)
                # NOTE(review): same `<` / `&` precedence trap as in
                # extract_by_squares(); should be `len(z) < 1 and retry`.
                if len(z) < 1 & retry:
                    # if all that fails squares again but with a darker img
                    gray = self.adjust_gamma(gray, 0.4)
                    z, reduction = self.reduction_determination_extract_by_squares(gray, retry=False, extension=extension)
                if len(z) < 1 & retry:
                    # if that fails, try squares on shrunk img
                    o_h, o_w = gray.shape[0:2]
                    new_size = (int(o_h * 0.8), int(o_w * 0.8))
                    gray = cv2.resize(gray, new_size, interpolation=cv2.INTER_NEAREST)
                    #print(f'retrying with size {new_size}')
                    z, reduction = self.reduction_determination_extract_by_squares(gray, retry=False, extension=extension)
        return z, reduction

    def det_res_reduction(self, o_h, o_w, m_h, m_w):
        # relative change in pixel count: (merged / original) - 1, so a
        # negative value means the composite is smaller than the input.
        orig_px = o_h * o_w
        merged_px = m_h * m_w
        reduction = round((merged_px / orig_px) - 1, 8)
        return reduction

    def testFeature(self, img):
        """Returns bool condition, if this module functions on a test input."""
        try:
            # set aside current pattern and check for ANYTHING
            # NOTE(review): the comment above contradicts the call below,
            # which passes verifyPattern=True and therefore DOES filter by
            # the compiled pattern -- confirm which behavior is intended.
            decodedData = self.decodeBC(img, verifyPattern=True)
            # return current pattern
            if isinstance(decodedData, list):
                return True
            else:
                return False
        except Exception as e:
            print(e)
            # some unknown error, assume test failed
            return False
| [
"calebadampowell@gmail.com"
] | calebadampowell@gmail.com |
f22e1fdd4d81cea8b179bd88fa184e853f2d3167 | 133643780ba3ee33291471261e9d365c3b0bb9ae | /Includes/reset-model.py | e1543f7f67d1c4b0ec89948999389b0ee30c9c68 | [] | no_license | rajeshcummins/streaming-lakehouse | b884919af33a0af8a367dcfbfca469a0c997980c | c5533627a472fae25fcb85662eb11f434efd85d4 | refs/heads/master | 2023-06-02T06:41:47.162425 | 2021-06-15T17:03:23 | 2021-06-15T17:03:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # Databricks notebook source
from mlflow.tracking.client import MlflowClient
from mlflow.utils.rest_utils import RestException
dbutils.widgets.text("model_name", "foo")
model_name = dbutils.widgets.get("model_name")
client = MlflowClient()
try:
for version in [model.version for stage in ["production", "staging"] for model in client.get_latest_versions(model_name, stages=[stage])]:
client.transition_model_version_stage(model_name, version, "archived")
client.delete_registered_model(model_name)
except RestException as E:
print(E)
| [
"dstrodtman-db"
] | dstrodtman-db |
537d0566875454072446c9da67810baccfa301fc | b46e3837b5fa2e877ad0370aaa62a1c82d01408c | /old_files/akp/akp.py | 79a392d299c8580cab8e206a823db16e33a0ac38 | [] | no_license | alisever/WebCrawling | 13c3236ac1319ef9d7f23012a3d4f72f5dec73b7 | 4de03f9af3b2dec6647f01957fbe92bc4774040c | refs/heads/main | 2023-09-05T19:04:59.069459 | 2021-11-10T16:07:33 | 2021-11-10T16:07:33 | 399,955,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,686 | py | import json
import scrapy
base = 'https://www.akparti.org.tr/haberler/kategori/genel-baskan'
class AllSpider(scrapy.Spider):
    """Collects article links: page 1 from the HTML listing, the rest from
    the site's JSON pagination endpoint (NewsAjaxOperations/Get)."""
    name = 'akp_all'
    start_urls = [base]
    # NOTE(review): page_no is a class attribute mutated per response; fine
    # for a single spider instance, shared state otherwise.
    page_no = 0

    def parse(self, response, **kwargs):
        self.page_no += 1
        # Browser-like headers required by the AJAX endpoint.
        headers = {
            "accept": "application/json, text/javascript, */*; q=0.01",
            "accept-language": "en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7",
            "content-type": "application/json",
            "sec-ch-ua": "\"Chromium\";v=\"94\", \"Google Chrome\";v=\"94\", \";Not A Brand\";v=\"99\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "x-requested-with": "XMLHttpRequest"
        }
        # NOTE(review): the payload snapshots page_no at response time; it is
        # assumed this lines up with the endpoint's page numbering -- TODO
        # confirm there is no off-by-one between fetched and requested pages.
        payload = {
            "category": "27d14268-9f90-4830-b011-c48df5b97843",
            "page": self.page_no,
            "culture": "tr",
            "newsRootId": 1164,
            "tag": ""
        }
        if self.page_no == 1:
            # First response is the HTML listing: scrape its article links.
            for link in response.css('div.col-md-4.'
                                     'col-sm-6 a::attr(href)').getall():
                yield {
                    'link': 'https://www.akparti.org.tr' + link
                }
            # Then switch over to the JSON pagination endpoint.
            yield scrapy.Request(
                'https://www.akparti.org.tr/cms/surface/NewsAjaxOperations/Get',
                method='POST',
                headers=headers,
                body=json.dumps(payload),
                callback=self.parse)
        else:
            # Subsequent responses are JSON pages from the AJAX endpoint.
            hasan = json.loads(response.text)
            for item in hasan['Items']:
                yield {
                    'link': 'https://www.akparti.org.tr' + item['Url']
                }
            if hasan['HasNext']:
                yield scrapy.Request('https://www.akparti.org.tr/cms/surface/NewsAjaxOperations/Get',
                                     method='POST',
                                     headers=headers,
                                     body=json.dumps(payload),
                                     callback=self.parse)
# Read the link list produced by AllSpider at import time.
# NOTE(review): module-level file I/O -- importing this module fails if
# akp.json is absent from the working directory.
with open('akp.json') as json_file:
    news_pages = json.load(json_file)
class SingleSpider(scrapy.Spider):
    """Scrapes title, date, and body text from the first ten article links
    previously collected into akp.json."""
    name = 'akp'
    start_urls = [a.get('link') for a in news_pages[:10]]

    def parse(self, response, **kwargs):
        def _clean(text):
            # Strip carriage returns / newlines, map NBSP to plain space.
            return text.replace("\r", "").replace("\n", "").replace("\xa0", " ")

        title = response.css('div.content.clearfix.newsDetail > h1 ::text').get()
        raw_date = response.css('div.col-md-6 span ::text').get()
        body_parts = response.css('div.content.clearfix.newsDetail > p ::text').getall()
        yield {
            'Url': response.request.url,
            'Baslik': _clean(title),
            'Tarih': raw_date[:-9],  # drop the trailing time portion
            'Detay': _clean(' '.join(body_parts)),
        }
# class ExceptionSpider(scrapy.Spider):
# name = 'yeni_safak_except'
# start_urls = ['https://www.yenisafak.com/dusunce-gunlugu/'
# '15-temmuz-2016da-turkiyede-ne-oldu-2499421']
#
# def parse(self, response, **kwargs):
# yield {
# 'Url': response.request.url,
# 'Baslik': response.css('h1.title::text').get(),
# 'Tarih': response.css('time.item.time::text').get().partition(
# ',')[0],
# 'Detay': ''.join(response.css('[class^="text text"]::text'
# ).getall()).strip(),
# 'Yazar': response.css('div.text.text-666666 > strong::text').get()[1:],
# 'Haber / Kose Yazisi / Konusma': 'Kose Yazisi'
# }
| [
"alisever96@hotmail.com"
] | alisever96@hotmail.com |
4394a0d430e67e0281718259baad893a3d33cdd3 | ab67dd529f45972b14e9a42fab5531861dbc582d | /Computer Science GCSE/Project/dobVerify.py | 20173cc912a82ee267f034620ac95d93a95e8402 | [] | no_license | hirurana/Past-Projects | e7f2e767625ca9df175c542f672a9f8ba5734875 | 5c29f59ce4a395f9a2b2b9b65b1d2594e5e5d6fa | refs/heads/master | 2021-08-27T23:47:47.146865 | 2017-12-10T20:27:22 | 2017-12-10T20:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | dob = []
#ask for date of birth and keep prompting until the answer passes the
#dd/mm/yy sanity checks (the original re-prompted only once per error branch
#and never re-validated the new input; non-digit input also crashed int()).

def _is_valid_dob(text):
    """Return True if *text* looks like a dd/mm/yy date (e.g. '10/08/99')."""
    if len(text) != 8 or text[2] != '/' or text[5] != '/':
        return False
    day, month, year = text[:2], text[3:5], text[6:]
    if not (day.isdigit() and month.isdigit() and year.isdigit()):
        return False
    # Same bounds the original branches enforced: day <= 31, month <= 12.
    return 1 <= int(day) <= 31 and 1 <= int(month) <= 12

newDob = input("Enter you date of birth in this format (e.g 10/08/99): \n")
while not _is_valid_dob(newDob):
    print("ERROR")
    newDob = input("Enter you date of birth in this format (e.g 10/08/99): \n")
dob.append(newDob)
| [
"zcabhra@ucl.ac.uk"
] | zcabhra@ucl.ac.uk |
fc362768e4ec1bd2b2882b5a20af0d37ee5f822a | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow_nightly/source2.7/tensorflow/contrib/model_pruning/python/layers/core_layers.py | 764ab620bc2227ff5e8e3f473d689e0e133e83d4 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 19,691 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the core layer classes for model pruning and its functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
# Graph-collection names through which the pruning machinery finds each
# layer's mask, threshold and (masked) kernel variables.
MASK_COLLECTION = 'masks'                    # binary masks applied to kernels
THRESHOLD_COLLECTION = 'thresholds'          # per-layer pruning thresholds
MASKED_WEIGHT_COLLECTION = 'masked_weights'  # mask * kernel products
WEIGHT_COLLECTION = 'kernel'                 # unmasked kernel variables
# The 'weights' part of the name is needed for the quantization library
# to recognize that the kernel should be quantized.
MASKED_WEIGHT_NAME = 'weights/masked_weight'
class _MaskedConv(base.Layer):
  """Abstract nD convolution layer (private, used as implementation base).

  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. The weight tensor of this layer is masked.

  If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  Arguments:
    rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of n integers, specifying the
      length of the convolution window.
    strides: An integer or tuple/list of n integers,
      specifying the stride length of the convolution.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.
    dilation_rate: An integer or tuple/list of n integers, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any `strides` value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """

  def __init__(self,
               rank,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format='channels_last',
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(_MaskedConv, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.rank = rank
    self.filters = filters
    # normalize_tuple expands scalar arguments to per-dimension tuples.
    self.kernel_size = utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = utils.normalize_tuple(strides, rank, 'strides')
    self.padding = utils.normalize_padding(padding)
    self.data_format = utils.normalize_data_format(data_format)
    self.dilation_rate = utils.normalize_tuple(dilation_rate, rank,
                                               'dilation_rate')
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    # Inputs must be rank + 2 tensors: batch and channel axes plus the
    # `rank` spatial dimensions.
    self.input_spec = base.InputSpec(ndim=self.rank + 2)

  def build(self, input_shape):
    """Creates kernel, mask, threshold and (optionally) bias variables."""
    input_shape = tensor_shape.TensorShape(input_shape)
    channel_axis = 1 if self.data_format == 'channels_first' else -1
    if input_shape[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis].value
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    # Pruning mask with the same shape as the kernel.  Not trained by the
    # optimizer (trainable=False); it is exposed through MASK_COLLECTION
    # below so external pruning code can update it.
    self.mask = self.add_variable(
        name='mask',
        shape=kernel_shape,
        initializer=init_ops.ones_initializer(),
        trainable=False,
        dtype=self.dtype)
    self.kernel = self.add_variable(
        name='kernel',
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        trainable=True,
        dtype=self.dtype)
    # Scalar pruning threshold, likewise managed outside normal training.
    self.threshold = self.add_variable(
        name='threshold',
        shape=[],
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=self.dtype)
    # Add masked_weights in the weights namescope so as to make it easier
    # for the quantization library to add quant ops.
    self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
                                           MASKED_WEIGHT_NAME)
    # Register all pruning-related tensors in the module-level collections.
    ops.add_to_collection(MASK_COLLECTION, self.mask)
    ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
    ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
    ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
    if self.use_bias:
      self.bias = self.add_variable(
          name='bias',
          shape=(self.filters,),
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          trainable=True,
          dtype=self.dtype)
    else:
      self.bias = None
    self.input_spec = base.InputSpec(
        ndim=self.rank + 2, axes={channel_axis: input_dim})
    self.built = True

  def call(self, inputs):
    """Applies the convolution using the masked (pruned) kernel."""
    outputs = nn.convolution(
        input=inputs,
        filter=self.masked_kernel,
        dilation_rate=self.dilation_rate,
        strides=self.strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, self.rank + 2))
    if self.bias is not None:
      if self.data_format == 'channels_first':
        if self.rank == 1:
          # nn.bias_add does not accept a 1D input tensor.
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
          outputs += bias
        if self.rank == 2:
          outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
        if self.rank == 3:
          # As of Mar 2017, direct addition is significantly slower than
          # bias_add when computing gradients. To use bias_add, we collapse Z
          # and Y into a single dimension to obtain a 4D input tensor.
          outputs_shape = outputs.shape.as_list()
          outputs_4d = array_ops.reshape(outputs, [
              outputs_shape[0], outputs_shape[1],
              outputs_shape[2] * outputs_shape[3], outputs_shape[4]
          ])
          outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
          outputs = array_ops.reshape(outputs_4d, outputs_shape)
      else:
        outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
    if self.activation is not None:
      return self.activation(outputs)
    return outputs

  def compute_output_shape(self, input_shape):
    """Computes the spatial output shape for either data format."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_last':
      space = input_shape[1:-1]
      new_space = []
      for i in range(len(space)):
        new_dim = utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0]] + new_space +
                                      [self.filters])
    else:
      space = input_shape[2:]
      new_space = []
      for i in range(len(space)):
        new_dim = utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0], self.filters] +
                                      new_space)
class MaskedConv2D(_MaskedConv):
  """2D convolution layer (e.g. spatial convolution over images).

  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
      height and width of the 2D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 2 integers,
      specifying the strides of the convolution along the height and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """

  # This subclass only pins rank=2; all behavior lives in _MaskedConv.
  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format='channels_last',
               dilation_rate=(1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(MaskedConv2D, self).__init__(
        rank=2,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        trainable=trainable,
        name=name,
        **kwargs)
class MaskedFullyConnected(base.Layer):
  """Fully-connected layer class with masked weights.

  This layer implements the operation:
  `outputs = activation(inputs.kernel + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).

  Note: if the input to the layer has a rank greater than 2, then it is
  flattened prior to the initial matrix multiply by `kernel`.

  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such cases.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer instance (or name) for the weight matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    kernel_regularizer: Regularizer instance for the weight matrix (callable)
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable)
    kernel: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """

  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(MaskedFullyConnected, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.input_spec = base.InputSpec(min_ndim=2)

  def build(self, input_shape):
    """Creates kernel, mask, threshold and (optionally) bias variables."""
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape[-1].value is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    self.input_spec = base.InputSpec(
        min_ndim=2, axes={-1: input_shape[-1].value})
    self.kernel = self.add_variable(
        'kernel',
        shape=[input_shape[-1].value, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        dtype=self.dtype,
        trainable=True)
    # Pruning mask and threshold; both are non-trainable and exposed through
    # the module-level graph collections registered below.
    self.mask = self.add_variable(
        name='mask',
        shape=[input_shape[-1].value, self.units],
        initializer=init_ops.ones_initializer(),
        trainable=False,
        dtype=self.dtype)
    self.threshold = self.add_variable(
        name='threshold',
        shape=[],
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=self.dtype)
    # Add masked_weights in the weights namescope so as to make it easier
    # for the quantization library to add quant ops.
    self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
                                           MASKED_WEIGHT_NAME)
    ops.add_to_collection(MASK_COLLECTION, self.mask)
    ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
    ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
    ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
    if self.use_bias:
      self.bias = self.add_variable(
          'bias',
          shape=[
              self.units,
          ],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    """Applies the dense transform using the masked (pruned) kernel."""
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    output_shape = shape[:-1] + [self.units]
    if len(output_shape) > 2:
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.masked_kernel,
                                       [[len(shape) - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      outputs.set_shape(output_shape)
    else:
      outputs = standard_ops.matmul(inputs, self.masked_kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def compute_output_shape(self, input_shape):
    """Output shape keeps all leading dims and replaces the last with units."""
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if input_shape[-1].value is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
8fab3a6177d60d8c3c725dbf64490e37be63d9d3 | 0f70a3eee204d7450fc955f10376bd45585b225c | /relational_operator.py | 53644ec930c9346b1ecf57434b82c2da97ac1f7f | [] | no_license | WillyChen123/2015Summer | 958aa4b6b969eff9f9b04052bbea43cf64b60ce8 | a8a3ea886d88db42299fe59880c99323516b2457 | refs/heads/master | 2021-01-01T05:38:35.912470 | 2015-07-15T03:10:55 | 2015-07-15T03:10:55 | 39,056,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | print 3==5
# Demonstrate Python's relational (comparison) operators on the pair 3, 5.
# The parenthesized form prints identically under Python 2 and Python 3,
# whereas the bare `print expr` statement is a SyntaxError on Python 3.
print(3 != 5)  # inequality       -> True
print(3 > 5)   # greater than     -> False
print(3 >= 5)  # greater or equal -> False
print(3 < 5)   # less than        -> True
print(3 <= 5)  # less or equal    -> True
"st40809@gmail.com"
] | st40809@gmail.com |
a9c5032284b09d96cf4da891a6fccb74215aa549 | 72fe58285dbfefb9151ebb7be11279fd55a8f5b2 | /chapter-04-trees-and-graphs/src/find_common.py | 99507b985f0e5a1e699c7800ffaadf6bf7720497 | [] | no_license | bearzk/cracking-the-coding-interview | 65f48c4352651c20ce6faba11d91c641bdfdfb88 | 461ce1887766f45eea57e3383db2c0fc4506e5ca | refs/heads/master | 2021-01-16T18:43:45.664633 | 2013-05-10T01:48:32 | 2013-05-10T01:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | # coding: utf-8
class Result:  # fix: original `class Result` was missing the colon (SyntaxError)
    """Carrier for the recursive search: the node found so far and whether it
    is already the confirmed common ancestor."""

    def __init__(self, node, is_anc):
        self.node = node
        self.is_anc = is_anc


def common_anc(root, p, q):
    """Return a Result whose .node is the first common ancestor of p and q in
    the binary tree rooted at *root* (.is_anc True once confirmed).

    Returns Result(None, False) when neither p nor q occurs in the tree.
    Nodes only need `.left` / `.right` attributes; identity (`is`) is used
    to match p and q.
    """
    if root is None:
        return Result(None, False)
    if root is p and root is q:
        return Result(root, True)  # fix: was `true` (undefined name)
    rx = common_anc(root.left, p, q)
    if rx.is_anc:
        # Ancestor already confirmed somewhere in the left subtree.
        return rx
    ry = common_anc(root.right, p, q)
    if ry.is_anc:
        # Ancestor already confirmed somewhere in the right subtree.
        return ry
    if rx.node is not None and ry.node is not None:
        # p and q live in different subtrees, so this node is their ancestor.
        return Result(root, True)
    elif root is p or root is q:
        # This node is one of the targets; it is the ancestor iff the other
        # target was found beneath it.
        is_anc = True if rx.node is not None or ry.node is not None else False
        return Result(root, is_anc)
    else:
        # Propagate whichever target (if any) has been found so far.
        return Result(rx.node if rx.node is not None else ry.node, False)
| [
"sugihara@gmail.com"
] | sugihara@gmail.com |
62130bd3030fa0dac1dd8acc26e97984f4f5efaa | fcd000497f179b1f43fead5a3198b0cbc85a5f0a | /src/smile_to_world_xiaofeng/hello.py | 08c2291ec06d2f8a2a828a4f8667aecddafe99e5 | [
"MIT"
] | permissive | XiaofengZhu/pypi-hello-world | 649480b524a2e9fa6bd347afdeb8ce4c53728a95 | ed980582362774c0b6350e0d0ade358022a14bbd | refs/heads/main | 2023-05-02T14:34:53.842517 | 2021-05-13T20:34:19 | 2021-05-13T20:34:19 | 366,445,915 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import time
class Hello:
    """Tiny greeter that records its author and prints a timestamped hello."""

    def __init__(self):
        # Fixed author string stamped into every greeting.
        self._author = 'Xiaofeng Zhu'

    def print_message(self):
        # Compose the greeting with the current Unix timestamp, then emit it.
        message = 'Hello world from {} at {}'.format(self._author, time.time())
        print(message)
"xiaofengzhu@Xiaofengs-MBP.hsd1.wa.comcast.net"
] | xiaofengzhu@Xiaofengs-MBP.hsd1.wa.comcast.net |
5a452e46158ffd07b091c7de1a10427e119ef214 | 956b239b94b090931232f5f974366ee82bb89fef | /app/auth/email.py | 9eaedbed9bb867d3ae9c8fc1436b50726e8eba49 | [] | no_license | victor-aunon/microblog | 57b3650cf9f999dba32ea4a6d3843dc00ed41bdc | b74107d5a0df4649ee6a8e2e055fba03c01bb09e | refs/heads/master | 2022-12-16T10:36:51.045300 | 2019-04-21T01:22:15 | 2019-04-21T01:22:15 | 180,673,044 | 2 | 0 | null | 2022-12-08T04:58:18 | 2019-04-10T22:25:32 | Python | UTF-8 | Python | false | false | 497 | py | from flask import render_template, current_app
from flask_babel import _
from app.email import send_email
def send_password_reset_email(user):
    """Email *user* a password-reset link containing a fresh signed token."""
    reset_token = user.get_reset_password_token()
    send_email(
        _('[Microblog] Reset Your Password'),
        sender=current_app.config['ADMINS'][0],
        recipients=[user.email],
        # Plain-text and HTML bodies are rendered from the same token.
        text_body=render_template('email/reset_password.txt', user=user, token=reset_token),
        html_body=render_template('email/reset_password.html', user=user, token=reset_token),
    )
"angel.aunon.garcia@gmail.com"
] | angel.aunon.garcia@gmail.com |
0ecb406dc4b005795c6d37aaa895fd106844ac7f | b1e7481f8b5bf40c2547c95b1863e25b11b8ef78 | /Kai/crab/NANOv7_NoveCampaign/2017/crab_script_2017_Mu_C.py | a8cd9f368837fbf5bec45d00d8e189ee53cc12fe | [
"Apache-2.0"
] | permissive | NJManganelli/FourTopNAOD | 3df39fd62c0546cdbb1886b23e35ebdc1d3598ad | c86181ae02b1933be59d563c94e76d39b83e0c52 | refs/heads/master | 2022-12-22T22:33:58.697162 | 2022-12-17T01:19:36 | 2022-12-17T01:19:36 | 143,607,743 | 1 | 1 | Apache-2.0 | 2022-06-04T23:11:42 | 2018-08-05T11:40:42 | Python | UTF-8 | Python | false | false | 6,794 | py | #!/usr/bin/env python
import os, time, collections, copy, json, multiprocessing
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import *
from PhysicsTools.NanoAODTools.postprocessing.framework.crabhelper import inputFiles,runsAndLumis
from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.btv.btagSFProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetmetHelperRun2 import *
from FourTopNAOD.Kai.modules.LeptonSkimmer import *
from FourTopNAOD.Kai.modules.JetMETSkimmer import *
# ---------------------------------------------------------------------------
# Per-sample job configuration: 2017 Run C single-muon data, EOY (non-UL)
# reconstruction.  The None fields are MC-normalization bookkeeping and are
# intentionally unset for data.
# ---------------------------------------------------------------------------
isData = True
isUltraLegacy = False
era = "2017"
subera = "C"  # data-taking period within the era; only meaningful for data
thePreselection = None  # no string-level preselection cut applied
crossSection = None
equivLumi = 41.53  # integrated luminosity (/fb)
nEventsPositive = None
nEventsNegative = None
sumWeights = None
TriggerChannel = "Mu"  # single-muon trigger selection channel
JESUnc = "Merged" # options: "All", "Merged", "Total"
theFiles = inputFiles()  # input file list supplied by the CRAB wrapper

# Golden-JSON certified-luminosity files, keyed by era and campaign.
GoldenJSON = {"2016": {"non-UL": "Cert_271036-284044_13TeV_ReReco_07Aug2017_Collisions16_JSON.txt",
                       "UL": "Cert_271036-284044_13TeV_Legacy2016_Collisions16_JSON.txt"
                       },
              "2017": {"non-UL": "Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON_v1.txt",
                       "UL": "Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt"
                       },
              "2018": {"non-UL": "Cert_314472-325175_13TeV_17SeptEarlyReReco2018ABC_PromptEraD_Collisions18_JSON.txt",
                       "UL": "Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt"
                       }
              }

if isData:
    # Locate the golden JSON in the installed python area first, then fall
    # back to the src checkout layout; fail loudly if neither exists.
    theLumis = os.path.join(os.environ["CMSSW_BASE"], "python/FourTopNAOD/Kai/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
    print("Loading Golden Json: {}".format(theLumis))
    if not os.path.isfile(theLumis):
        theLumis = os.path.join(os.environ["CMSSW_BASE"], "src/FourTopNAOD/Kai/python/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
        if not os.path.isfile(theLumis):
            raise RuntimeError("Valid GoldenJSON file not found, if running on CRAB try a new scram build before resubmitting")
else:
    theLumis = None

# Ordered chain of post-processing modules applied to every event.
moduleCache = []
if not isData:
    # MC-only corrections: pileup reweighting and b-tagging scale factors.
    if era == "2016":
        moduleCache.append(puWeight_2016())
    elif era == "2017":
        moduleCache.append(puWeight_2017())
    elif era == "2018":
        moduleCache.append(puWeight_2018())
    else:
        raise RuntimeError("Unexpected era identifier {}".format(era))
    if JESUnc in ["All", "Merged"]: #btag POG provides all JEC unc sources, except for RelativeSample
        btagjes_sources = ['jes', 'jesAbsoluteMPFBias', 'jesAbsoluteScale', 'jesAbsoluteStat', 'jesFlavorQCD', 'jesFragmentation', 'jesPileUpDataMC', 'jesPileUpPtBB', 'jesPileUpPtEC1', 'jesPileUpPtEC2', 'jesPileUpPtHF', 'jesPileUpPtRef', 'jesRelativeBal', 'jesRelativeFSR', 'jesRelativeJEREC1', 'jesRelativeJEREC2', 'jesRelativeJERHF', 'jesRelativePtBB', 'jesRelativePtEC1', 'jesRelativePtEC2', 'jesRelativePtHF', 'jesRelativeStatEC', 'jesRelativeStatFSR', 'jesRelativeStatHF', 'jesSinglePionECAL', 'jesSinglePionHCAL', 'jesTimePtEta']
    # if JESUnc == "Merged": #no btag shape unc for regrouped JEC available, so use the total one ("jes") and the remaining single ones that are not grouped (see also: https://docs.google.com/spreadsheets/d/1Feuj1n0MdotcPq19Mht7SUIgvkXkA4hiB0BxEuBShLw/edit#gid=1345121349)
    #     btagjes_sources = ['jes', 'jesFlavorQCD','jesPileUpPtEC2', 'jesRelativeBal']
    else:
        btagjes_sources = ['jes']
    moduleCache.append(btagSFProducer(era,
                                      algo="deepjet",
                                      selectedWPs=['M', 'shape_corr'],
                                      sfFileName=None, #Automatically deduced
                                      verbose=0,
                                      jesSystsForShape=btagjes_sources
                                      )
                       )
    moduleCache.append(btagSFProducer(era,
                                      algo="deepcsv",
                                      selectedWPs=['M', 'shape_corr'],
                                      sfFileName=None, #Automatically deduced
                                      verbose=0,
                                      jesSystsForShape=btagjes_sources
                                      )
                       )
#Need to make it into a function, so extra () pair...
# JME corrections apply to both data and MC (runPeriod is only set for data).
jmeModule = createJMECorrector(isMC=(not isData),
                               dataYear=int(era),
                               runPeriod=subera if isData else None,
                               jesUncert=JESUnc,
                               jetType="AK4PFchs",
                               noGroom=False,
                               metBranchName="METFixEE2017" if era == "2017" else "MET",
                               applySmearing=True,
                               isFastSim=False,
                               applyHEMfix=True if era == "2018" and isUltraLegacy else False,
                               splitJER=False,
                               saveMETUncs=['T1', 'T1Smear']
                               )
moduleCache.append(jmeModule())
# Flag (rather than filter) events passing the trigger + lepton baseline.
moduleCache.append(TriggerAndLeptonSkimmer('baseline',
                                           era=era,
                                           subera=subera,
                                           isData=isData,
                                           TriggerChannel=TriggerChannel,
                                           fillHists=False,
                                           mode="Flag",
                                           )
                   )
moduleCache.append(JetMETSkimmer(jetMinPt=20.0,
                                 jetMaxEta=2.4 if era == "2016" else 2.5,
                                 jetMinID=0b010,
                                 jetMinCount=4,
                                 minPseudoHT=350,
                                 fillHists=False
                                 )
                   )

# Run the NanoAOD post-processor over all inputs with the module chain.
p=PostProcessor(".",
                theFiles,
                modules=moduleCache,
                cut=thePreselection,
                provenance=True,
                fwkJobReport=True,
                jsonInput=theLumis,
                histFileName="hist.root",
                histDirName="plots",
                branchsel=None,
                outputbranchsel=None,
                compression="LZMA:9",
                friend=False,
                postfix=None,
                noOut=False,
                justcount=False,
                haddFileName="tree.root",
                maxEntries=None,
                firstEntry=0,
                prefetch=True,
                longTermCache=False
                )
p.run()
| [
"nicholas.james.manganelli@cern.ch"
] | nicholas.james.manganelli@cern.ch |
d7212911ec66d3aef973e7604e6b39d5d3ee0e66 | 38242a2852f166267bf65b050ec95ed8cb877852 | /python_tips/manage.py | 7a69fcaa0b68220124d0d1e0fcbc5f915d12f9c7 | [
"MIT"
] | permissive | charlesDavid009/tweety | 33cb9307aba2f131caf6184de785117c6027aa53 | 52d1dcda47c12596a4d37e7e253a41b0130a6a61 | refs/heads/main | 2023-05-31T08:34:06.950645 | 2021-07-14T08:25:19 | 2021-07-14T08:25:19 | 383,505,671 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administration utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'python_tips.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while preserving the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"zuesghost1@gmail.com"
] | zuesghost1@gmail.com |
9e2c439b85cafbc58738b03142b5eb7d4ab22848 | f0c44fbc139404011f55edc0c6a4f3c07f63a89f | /Assignment_6_Sort_Dataframe_Columns.py | 08be0b7cd396afa624b77cdc2ab28cbe72c46da6 | [] | no_license | A-Kryston/ISM-4402-BI-Public | 9a2e24102da19776b7d54a13b1bf51e621e0cf72 | ffe78e2dbc2c0e1bd184dc59b0710a61d28f80c1 | refs/heads/master | 2020-07-22T23:35:29.094728 | 2019-12-02T21:22:03 | 2019-12-02T21:22:03 | 207,369,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Andrea Kryston
# Assignment #6 - Sort columns in a dataframe (pg 60)
# Due Date - October 14, 2019
# Load the grade data and sort it by last name and age (ascending) and
# grade (descending), then peek at the result.
import pandas as pd

csv_location = "Datasets/gradedata.csv"  # exercise input file
df = pd.read_csv(csv_location)
df.head()  # sanity-check the loaded dataframe

df = df.sort_values(by=['lname', 'age', 'grade'],
                    ascending=[True, True, False])
df.head()  # verify the sort order
| [
"noreply@github.com"
] | noreply@github.com |
809f94d693db05b2538535e2564bda1e23251399 | fc76bfd8519ad194a07d21130b74fe5794517056 | /PicoCTF 2017/Level 3/Cryptography/smallRSA/main.py | 65ddd2a955ddfa77ed7e1c613a96d45fb304dade | [] | no_license | t00lbox/CTF-Writeups | 7fe63bbe7a5af3f30d2d9542e2ac51ff828bc0e9 | 62b45e679dee40f75bd4cc914bc2e893a85f20a4 | refs/heads/master | 2020-03-21T14:48:58.859341 | 2018-06-26T00:06:01 | 2018-06-26T00:06:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | e = 165528674684553774754161107952508373110624366523537426971950721796143115780129435315899759675151336726943047090419484833345443949104434072639959175019000332954933802344468968633829926100061874628202284567388558408274913523076548466524630414081156553457145524778651651092522168245814433643807177041677885126141
# RSA public modulus and ciphertext from the challenge (the public exponent e
# is defined above).
n = 380654536359671023755976891498668045392440824270475526144618987828344270045182740160077144588766610702530210398859909208327353118643014342338185873507801667054475298636689473117890228196755174002229463306397132008619636921625801645435089242900101841738546712222819150058222758938346094596787521134065656721069
c = 60109758698128083867894286068285517856121577775873732971271767838094375540242140682860856525076716857853484762310661349595705965454241788627490154678487289327504291223547525832864143253412180183596307295520420578906308624860023542143928885210079178897416418810270090406582415840515326539954964020452551186119
import RSAwienerHacker
# The unusually large e hints at a small private exponent d -- exactly the
# weakness Wiener's continued-fraction attack recovers.
d = RSAwienerHacker.hack_RSA(e,n)
print(d)
# Decrypt: m = c^d mod n.
m = pow(c,d,n)
# Interpret m's hex digits as ASCII to reveal the flag.
# NOTE(review): bytearray.fromhex would fail on an odd-length hex string;
# presumably the challenge plaintext always yields an even number of digits.
print(bytearray.fromhex(hex(m).split('x')[1]).decode())
"19695201+BOAKGP@users.noreply.github.com"
] | 19695201+BOAKGP@users.noreply.github.com |
636959b4b870889c2713bd4b762912ae01610e75 | b75d6359ae8166287ad3e2cf0cf0115facfc4e48 | /stonks/stonks_view/urls.py | f26091ebd08a383fe3e698bebfaee4c83446615f | [] | no_license | webclinic017/stonks-5 | 1e9f60da74fb74f4e09bb3f840de0fe1e5141ae8 | 4e5e59c98c420a88eb4836b2316c3820b64b8484 | refs/heads/master | 2023-04-18T07:31:04.963716 | 2021-04-25T17:24:47 | 2021-04-25T17:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | from django.urls import path, include
from stonks_view import views
from rest_framework.routers import DefaultRouter
# Register every viewset on a single DRF router; the router derives the
# standard list/detail routes for each prefix.
router = DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'companies', views.CompanyViewSet)
# Explicit basename given here — presumably CompanyEarningsViewSet has no
# `queryset` attribute for DRF to infer one from; TODO confirm.
router.register(r'company-earnings', views.CompanyEarningsViewSet, basename="companyearnings")
router.register(r'company-balance', views.CompanyBalanceViewSet)
router.register(r'company-cash-flow', views.CompanyCashFlowViewSet)
router.register(r'watch-list', views.WatchListViewSet)
router.register(r'watch-items', views.WatchItemsViewSet)

# NOTE(review): leftover debug output — these pprint calls run (and print)
# every time the URLconf is imported; consider removing them.
from pprint import pprint
pprint(router.urls)
pprint(router.get_default_basename(views.WatchItemsViewSet))
pprint(router.get_default_basename(views.WatchListViewSet))

# user_list
urlpatterns = [
    path('', include(router.urls)),
    path('register/', views.UserCreate.as_view()),
]
"bulvikk@gmail.ccom"
] | bulvikk@gmail.ccom |
5e3af5114585babcf6308a0eed07cab9358841d2 | 7edebeae484480e9ecd786308846b434f3ace53c | /python/clu/websocket.py | 7cfe3da86f84c10d7fa762a32a19e9cb9f065dc8 | [
"BSD-3-Clause"
] | permissive | sdss/clu | fb7b220df092447bbbd6de996dc0fa1785b5ff87 | 086e80a6baa783e25f8c7ca73cd4ba5c42234e08 | refs/heads/main | 2023-08-30T00:30:42.191915 | 2023-08-25T19:07:55 | 2023-08-25T19:07:55 | 183,817,446 | 6 | 0 | BSD-3-Clause | 2023-05-25T06:56:06 | 2019-04-27T20:12:55 | Python | UTF-8 | Python | false | false | 3,648 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2023-05-25
# @Filename: websocket.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from __future__ import annotations
import json
from typing import TYPE_CHECKING
from websockets.legacy.protocol import broadcast
from websockets.server import serve
from clu.client import AMQPClient
if TYPE_CHECKING:
from websockets.server import WebSocketServerProtocol
from clu.client import AMQPReply
class WebsocketServer:
    """A websocket pass-through between websocket clients and a RabbitMQ exchange.

    Every `.AMQPReply` received by the internal `.AMQPClient` is serialised
    to JSON and broadcast to all connected websocket clients. Websocket
    clients can submit commands as JSON messages of the form ::

        {
            "consumer": ...,
            "command_string": ...,
            "command_id": ...
        }

    which are forwarded to the corresponding actor command queue. Command
    completion is not tracked here; including a ``command_id`` lets the
    client correlate replies itself.

    Parameters
    ----------
    whost
        The host where to run the websocket server.
    wport
        The TCP port on which to run the websocket server.
    client_kwargs
        Arguments to pass to the `.AMQPClient` connection to RabbitMQ.

    """

    def __init__(self, whost: str = "0.0.0.0", wport: int = 9876, **client_kwargs):
        self.client = AMQPClient(**client_kwargs)
        self.wparams = (whost, wport)

        # Currently connected websocket clients; the broadcast target.
        self.wclients: set[WebSocketServerProtocol] = set()

    async def start(self):
        """Start the AMQP client and the websocket server."""

        self.client.add_reply_callback(self._handle_reply)
        await self.client.start()

        self.websocket_server = await serve(
            self._handle_websocket,
            *self.wparams,
        )

        return self

    async def stop(self):
        """Stop the websocket server and the AMQP client."""

        await self.client.stop()
        self.websocket_server.close()

    async def _handle_websocket(self, websocket: WebSocketServerProtocol):
        """Handle one websocket connection for its whole lifetime."""

        # Register the client so it receives broadcasts.
        self.wclients.add(websocket)

        try:
            async for data in websocket:
                try:
                    message = json.loads(data)
                    if not isinstance(message, dict):
                        continue
                except ValueError:
                    # Ignore frames that are not valid JSON.
                    continue

                if "consumer" not in message or "command_string" not in message:
                    continue

                command_id = message.get("command_id", None)

                await self.client.send_command(
                    message["consumer"],
                    message["command_string"],
                    command_id=command_id,
                    await_command=False,
                )
        finally:
            # FIX: always deregister the client. The original removed it only
            # after a clean exit of the async-for; an abnormal close raises
            # from the iterator (e.g. ConnectionClosedError) and the entry
            # leaked in ``wclients`` forever. ``discard`` is also safe if the
            # client was already removed.
            self.wclients.discard(websocket)

    async def _handle_reply(self, reply: AMQPReply):
        """Serialise an AMQP reply and broadcast it to all websocket clients."""

        message = reply.message

        data = dict(
            headers=message.headers,
            exchange=message.exchange,
            message_id=message.message_id,
            routing_key=message.routing_key,
            timestamp=message.timestamp.isoformat() if message.timestamp else None,
            body=reply.body,
        )

        broadcast(self.wclients, json.dumps(data))
| [
"noreply@github.com"
] | noreply@github.com |
def display_message():
    """Print a short note (in Japanese) about learning to use and write functions."""
    message = "関数の使い方、作成の仕方について学ぶ"
    print(message)


display_message()
| [
"xue@cs.meiji.ac.jp"
] | xue@cs.meiji.ac.jp |
8eb0ddd533b6242fa21b29701e10215b497fcd90 | d93901e7ff019c7c929594c17b9ed0c575dd1165 | /NumPyNet/box.py | 506948ebbb806413bf3c0380425a8914f0f69669 | [
"MIT"
] | permissive | Nico-Curti/NumPyNet | 0e673ad3da4120cd761a5b1f4c1f0c429cfd20a9 | c5e217751e28f0812282333b83964b7fee217cfb | refs/heads/master | 2022-05-04T04:51:50.076629 | 2022-03-28T10:02:15 | 2022-03-28T10:02:15 | 199,490,280 | 57 | 10 | null | null | null | null | UTF-8 | Python | false | false | 7,109 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import operator
from functools import wraps
__author__ = ['Mattia Ceccarelli', 'Nico Curti']
__email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
class Box (object):
    '''
    Axis-aligned detection box stored as a center point plus extents.

    Parameters
    ----------
    coords : tuple (default=None)
        Box coordinates as (x, y, w, h), where (x, y) is the box center and
        (w, h) are the full width and height. When None, all four
        coordinates are set to None.
    '''

    def __init__(self, coords=None):
        if coords is not None:
            try:
                self.x, self.y, self.w, self.h = coords
            except ValueError:
                class_name = self.__class__.__name__
                raise ValueError('{0}: inconsistent input shape. Expected a 4D (x, y, w, h) shapes and given {1}'.format(class_name, coords))
        else:
            self.x, self.y, self.w, self.h = (None, None, None, None)

    def _is_box(func):
        '''
        Decorator: ensure the argument of a binary box operation is a Box.
        '''
        @wraps(func)
        def _(self, b):
            if isinstance(b, self.__class__):
                return func(self, b)
            else:
                raise ValueError('Box functions can be applied only on other Box objects')
        return _

    @property
    def box(self):
        '''
        Box coordinates as a (x, y, w, h) tuple.
        '''
        return (self.x, self.y, self.w, self.h)

    def __iter__(self):
        '''
        Iterate over the coordinates in (x, y, w, h) order.
        '''
        yield self.x
        yield self.y
        yield self.w
        yield self.h

    def __eq__(self, other):
        '''
        Two boxes are equal when all four coordinates match.
        '''
        return isinstance(other, Box) and tuple(self) == tuple(other)

    def __ne__(self, other):
        '''
        Negation of __eq__.
        '''
        return not (self == other)

    def __repr__(self):
        '''
        Object representation, e.g. Box(0.5, 0.3, 0.2, 0.1).
        '''
        return type(self).__name__ + repr(tuple(self))

    def _overlap(self, x1, w1, x2, w2):
        '''
        Length of the 1D overlap between two center/extent intervals
        (may be negative when the intervals are disjoint).
        '''
        half_w1, half_w2 = w1 * .5, w2 * .5
        l1, l2 = x1 - half_w1, x2 - half_w2
        r1, r2 = x1 + half_w1, x2 + half_w2

        return min(r1, r2) - max(l1, l2)

    @_is_box
    def intersection(self, other):
        '''
        Area shared by the two boxes (0 when they do not overlap).
        '''
        w = self._overlap(self.x, self.w, other.x, other.w)
        h = self._overlap(self.y, self.h, other.y, other.h)

        # A negative 1D overlap means the boxes are disjoint on that axis.
        w = w if w > 0. else 0.
        h = h if h > 0. else 0.

        return w * h

    __and__ = intersection

    @_is_box
    def union(self, other):
        '''
        Total area covered by the two boxes, counting the overlap once.
        '''
        return self.area + other.area - self.intersection(other)

    __add__ = union

    @_is_box
    def iou(self, other):
        '''
        Intersection over union; NaN when the union area is zero.
        '''
        union = self.union(other)

        return self.intersection(other) / union if union != 0. else float('nan')

    __sub__ = iou

    @_is_box
    def rmse(self, other):
        '''
        Euclidean distance between the two (x, y, w, h) tuples.
        '''
        diffs = tuple(map(operator.sub, self, other))
        dot = sum(map(operator.mul, diffs, diffs))

        return dot**(.5)

    @property
    def center(self):
        '''
        Center of the box as (x, y).

        FIX: the original read ``self._object.box`` — an attribute that is
        never set — so accessing this property always raised AttributeError.
        '''
        x, y, _, _ = self.box
        return (x, y)

    @property
    def dimensions(self):
        '''
        Width and height of the box as (w, h).

        FIX: same ``self._object`` bug as ``center``.
        '''
        _, _, w, h = self.box
        return (w, h)

    @property
    def area(self):
        '''
        Area of the box (w * h).
        '''
        return self.w * self.h

    @property
    def coords(self):
        '''
        Corner coordinates in clock order as (left, top, right, bottom).
        '''
        x, y, w, h = self.box
        half_w, half_h = w * .5, h * .5
        return (x - half_w, y - half_h, x + half_w, y + half_h)

    def __str__(self):
        '''
        Printable corner representation.
        '''
        fmt = '(left={0:.3f}, bottom={1:.3f}, right={2:.3f}, top={3:.3f})'.format(*self.coords)
        return fmt
if __name__ == '__main__':

    # Manual smoke test / visual demo: build two overlapping boxes, print
    # their pairwise metrics, and draw them with matplotlib.
    import pylab as plt
    from matplotlib.patches import Rectangle

    b1 = Box((.5, .3, .2, .1))
    x_1, y_1, w_1, h_1 = b1.box
    left_1, top_1, right_1, bottom_1 = b1.coords

    print('Box1: {}'.format(b1))

    b2 = Box((.4, .5, .2, .5))
    x_2, y_2, w_2, h_2 = b2.box
    left_2, top_2, right_2, bottom_2 = b2.coords

    print('Box2: {}'.format(b2))

    print('Intersection: {:.3f}'.format(b1.intersection(b2)))
    print('Union: {:.3f}'.format(b1.union(b2)))
    print('IOU: {:.3f}'.format(b1.iou(b2)))
    print('rmse: {:.3f}'.format(b1.rmse(b2)))

    # Draw each box from its top-left corner with its (w, h) extents.
    plt.figure()
    axis = plt.gca()
    axis.add_patch(Rectangle(xy=(left_1, top_1), width=w_1, height=h_1, alpha=.5, linewidth=2, color='blue'))
    axis.add_patch(Rectangle(xy=(left_2, top_2), width=w_2, height=h_2, alpha=.5, linewidth=2, color='red'))
    plt.show()
| [
"nico.curti2@unibo.it"
] | nico.curti2@unibo.it |
0cbc26a7c531c9e66e72aff03e1ef1e05d090406 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2542/60761/235001.py | 0f6cce935b31eb1a6dc6d3e0854022eb80c48159 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import math
# Read a list literal such as "[100,4,200,1,3,2]" from stdin and print the
# length of the longest run of consecutive integers it contains.
arr = input("")
arr = arr.replace("[", "")
arr = arr.replace("]", "")
# Skip empty fields so that "[]" or a trailing comma does not crash int().
arr = [int(tok) for tok in arr.split(",") if tok.strip()]
arr.sort()
if not arr:
    # No numbers at all: the longest consecutive run has length 0.
    print(0)
else:
    maxlen = 1
    templen = 1
    for i in range(1, len(arr)):
        if arr[i] == arr[i - 1] + 1:
            templen = templen + 1
        elif arr[i] == arr[i - 1]:
            # Duplicate value: it neither extends nor breaks the run.
            # (The original reset templen here, undercounting e.g. [1,2,2,3].)
            continue
        else:
            maxlen = max(templen, maxlen)
            templen = 1
    # Fold in the final run; the original printed maxlen without this step,
    # so a fully consecutive input like [1,2,3] wrongly reported 1.
    print(max(maxlen, templen))
"1069583789@qq.com"
] | 1069583789@qq.com |
abcd9cf3a6a72e23d78bf410cfbdac852343d238 | eb40dce4039d528b9cd06dbeda75da09d09d7fc5 | /need_install/Django-1.8.17/tests/basic/models.py | 0ebe3e0b4af812d92177a78a86fa007380fb0e16 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MulticsYin/MulticsSH | 39b62189446787c7f0f037b1640c9c780bd1dddd | 5837a0bff0e7da0e8535e4e0b31ef6baf24274b4 | refs/heads/master | 2021-08-28T07:53:51.759679 | 2017-12-11T15:31:03 | 2017-12-11T15:31:03 | 82,428,902 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # -*- coding: utf-8 -*-
"""
Bare-bones model
This is a basic model with only two non-primary-key fields.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
    # Headline used as the string representation; falls back to a default
    # when not provided.
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()

    class Meta:
        # Deterministic queryset ordering: oldest first, then by headline.
        ordering = ('pub_date', 'headline')

    def __str__(self):
        return self.headline
class ArticleSelectOnSave(Article):
    # Proxy over Article (same table) that opts into SELECT-before-save
    # behaviour via Meta.select_on_save.
    class Meta:
        proxy = True
        select_on_save = True
@python_2_unicode_compatible
class SelfRef(models.Model):
    # Optional self-reference; related_name='+' disables the reverse accessor.
    selfref = models.ForeignKey('self', null=True, blank=True,
                                related_name='+')
    # Optional link to an Article, nulled out when the Article is deleted.
    article = models.ForeignKey(Article, on_delete=models.SET_NULL, null=True, blank=True)

    def __str__(self):
        # This method intentionally doesn't work for all cases - part
        # of the test for ticket #20278
        return SelfRef.objects.get(selfref=self).pk
| [
"multics_luo@163.com"
] | multics_luo@163.com |
13e7dfb79f5a9e988593ddae9d68927018ac1463 | f070c3acba7da2254adc2c12f80e54b830396d40 | /test/venv/bin/futurize | 65e98b939532e827e94109ba696ca6402ce2bfc3 | [] | no_license | liruidesysu/cloudCluster | 241a6ac472ecce9c6b4c966a44304128d258fc9b | fc558b464c3052f59cb1e6326aa22bade556b0c8 | refs/heads/master | 2022-11-06T03:51:31.954607 | 2019-08-22T12:47:53 | 2019-08-22T12:47:53 | 200,144,454 | 0 | 1 | null | 2022-03-29T21:56:02 | 2019-08-02T01:42:17 | Python | UTF-8 | Python | false | false | 252 | #!/home/liruide/Desktop/cloudCluster/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from libfuturize.main import main
if __name__ == '__main__':
    # Strip the setuptools console-script suffix ('-script.py', '-script.pyw'
    # or '.exe') from argv[0] before delegating to libfuturize's CLI, and
    # propagate its return code as the process exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"liruide_read@outlook.com"
] | liruide_read@outlook.com | |
309503445ede6d2acdd13fee339865beec91d5f7 | 17625b317bdd2111453bdcf05f0e7cdb140b888e | /keepitpossible/backup/unity_lib.py | 82f854bcb2e98ae3d45d901386ba15f6466f3970 | [
"Apache-2.0"
] | permissive | ChenKuanSun/TheObstacleTowerChallenge | e93bdeb74dc0790d965111c9ee41c03472ba7a35 | c2de16930dd88949c0bc6a460f378beae3a04204 | refs/heads/master | 2021-02-27T06:31:10.861317 | 2020-03-07T07:35:25 | 2020-03-07T07:35:25 | 245,587,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,491 | py | # coding=utf-8
# Copyright 2018 The Dopamine Authors.
# Modifications copyright 2019 Unity Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Obstacle Tower-specific utilities including Atari-specific network architectures.
This includes a class implementing minimal preprocessing, which
is in charge of:
. Converting observations to greyscale.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from obstacle_tower_env import ObstacleTowerEnv
import gym
from gym.spaces.box import Box
import numpy as np
import tensorflow as tf
import gin.tf
import cv2
slim = tf.contrib.slim
NATURE_DQN_OBSERVATION_SHAPE = (84, 84) # Size of downscaled Atari 2600 frame.
NATURE_DQN_DTYPE = tf.uint8 # DType of Atari 2600 observations.
NATURE_DQN_STACK_SIZE = 4 # Number of frames in the state stack.
@gin.configurable
def create_otc_environment(environment_path=None):
  """Wraps an Obstacle Tower Gym environment with some basic preprocessing.

  Args:
    environment_path: str, path to the Obstacle Tower binary. Must be
      provided (asserted below); gin may inject it.

  Returns:
    An Obstacle Tower environment with some standard preprocessing.
  """
  assert environment_path is not None
  # worker_id=0; retro=False keeps the rich (image, keys, time) observation
  # tuple that OTCPreprocessing unpacks.
  env = ObstacleTowerEnv(environment_path, 0, retro=False)
  env = OTCPreprocessing(env)
  return env
def nature_dqn_network(num_actions, network_type, state):
  """The convolutional network used to compute the agent's Q-values.

  Args:
    num_actions: int, number of actions.
    network_type: namedtuple, collection of expected values to return.
    state: `tf.Tensor`, contains the agent's current state.

  Returns:
    net: _network_type object containing the tensors output by the network.
  """
  net = tf.cast(state, tf.float32)
  # Scale raw pixel values from [0, 255] into [0, 1].
  net = tf.div(net, 255.)
  # Nature-DQN convolutional stack: 32@8x8/4, 64@4x4/2, 64@3x3/1.
  net = slim.conv2d(net, 32, [8, 8], stride=4)
  net = slim.conv2d(net, 64, [4, 4], stride=2)
  net = slim.conv2d(net, 64, [3, 3], stride=1)
  net = slim.flatten(net)
  net = slim.fully_connected(net, 512)
  # Linear output head: one Q-value per action.
  q_values = slim.fully_connected(net, num_actions, activation_fn=None)
  return network_type(q_values)
def rainbow_network(num_actions, num_atoms, support, network_type, state):
  """The convolutional network used to compute agent's Q-value distributions.

  Args:
    num_actions: int, number of actions.
    num_atoms: int, the number of buckets of the value function distribution.
    support: tf.linspace, the support of the Q-value distribution.
    network_type: namedtuple, collection of expected values to return.
    state: `tf.Tensor`, contains the agent's current state.

  Returns:
    net: _network_type object containing the tensors output by the network.
  """
  weights_initializer = slim.variance_scaling_initializer(
      factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)

  net = tf.cast(state, tf.float32)
  # Scale raw pixel values from [0, 255] into [0, 1].
  net = tf.div(net, 255.)
  net = slim.conv2d(
      net, 32, [8, 8], stride=4, weights_initializer=weights_initializer)
  net = slim.conv2d(
      net, 64, [4, 4], stride=2, weights_initializer=weights_initializer)
  net = slim.conv2d(
      net, 64, [3, 3], stride=1, weights_initializer=weights_initializer)
  net = slim.flatten(net)
  net = slim.fully_connected(
      net, 512, weights_initializer=weights_initializer)
  # One logit per (action, atom) pair.
  net = slim.fully_connected(
      net,
      num_actions * num_atoms,
      activation_fn=None,
      weights_initializer=weights_initializer)

  logits = tf.reshape(net, [-1, num_actions, num_atoms])
  # Softmax over atoms gives a categorical value distribution per action;
  # its expectation over the support yields scalar Q-values.
  probabilities = tf.contrib.layers.softmax(logits)
  q_values = tf.reduce_sum(support * probabilities, axis=2)
  return network_type(q_values, logits, probabilities)
def implicit_quantile_network(num_actions, quantile_embedding_dim,
                              network_type, state, num_quantiles):
  """The Implicit Quantile ConvNet.

  Args:
    num_actions: int, number of actions.
    quantile_embedding_dim: int, embedding dimension for the quantile input.
    network_type: namedtuple, collection of expected values to return.
    state: `tf.Tensor`, contains the agent's current state.
    num_quantiles: int, number of quantile inputs.

  Returns:
    net: _network_type object containing the tensors output by the network.
  """
  weights_initializer = slim.variance_scaling_initializer(
      factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)

  state_net = tf.cast(state, tf.float32)
  # Scale raw pixel values from [0, 255] into [0, 1].
  state_net = tf.div(state_net, 255.)
  state_net = slim.conv2d(
      state_net, 32, [8, 8], stride=4,
      weights_initializer=weights_initializer)
  state_net = slim.conv2d(
      state_net, 64, [4, 4], stride=2,
      weights_initializer=weights_initializer)
  state_net = slim.conv2d(
      state_net, 64, [3, 3], stride=1,
      weights_initializer=weights_initializer)
  state_net = slim.flatten(state_net)
  state_net_size = state_net.get_shape().as_list()[-1]
  # Replicate the state embedding once per sampled quantile.
  state_net_tiled = tf.tile(state_net, [num_quantiles, 1])

  batch_size = state_net.get_shape().as_list()[0]
  quantiles_shape = [num_quantiles * batch_size, 1]
  # Sample quantile fractions tau ~ U(0, 1).
  quantiles = tf.random_uniform(
      quantiles_shape, minval=0, maxval=1, dtype=tf.float32)

  quantile_net = tf.tile(quantiles, [1, quantile_embedding_dim])
  pi = tf.constant(math.pi)
  # Cosine embedding of tau: cos(pi * i * tau) for i = 1..embedding_dim.
  quantile_net = tf.cast(tf.range(
      1, quantile_embedding_dim + 1, 1), tf.float32) * pi * quantile_net
  quantile_net = tf.cos(quantile_net)
  quantile_net = slim.fully_connected(
      quantile_net,
      state_net_size,
      weights_initializer=weights_initializer)
  # Hadamard product.
  net = tf.multiply(state_net_tiled, quantile_net)

  net = slim.fully_connected(
      net, 512, weights_initializer=weights_initializer)
  # One quantile value per action for every sampled tau.
  quantile_values = slim.fully_connected(
      net,
      num_actions,
      activation_fn=None,
      weights_initializer=weights_initializer)

  return network_type(quantile_values=quantile_values, quantiles=quantiles)
@gin.configurable
class OTCPreprocessing(object):
  """A class implementing image preprocessing for OTC agents.

  Specifically, this converts observations to greyscale. It doesn't
  do anything else to the environment.
  """

  def __init__(self, environment):
    """Constructor for an Obstacle Tower preprocessor.

    Args:
      environment: Gym environment whose observations are preprocessed.
    """
    self.environment = environment

    self.game_over = False
    self.lives = 0  # Will need to be set by reset().
    # Shaped-reward bookkeeping carried between step() calls.
    self.stage_reward = 0.0
    self.previous_stage_time_remaining = 3000
    self.previous_reward = 0
    self.previous_keys = 0
    self.previous_time_remaining = 3000
    self.tableAction = self.createActionTable()

  def createActionTable(self):
    # Enumerate the 3 * 3 * 2 = 18 discrete action combinations; the 4th
    # component is always 0.
    # NOTE(review): presumably the components map to the Obstacle Tower
    # multi-discrete action space (movement / camera / jump / strafe) —
    # confirm against the environment's action spec.
    tableAction = []
    for a in range(0, 3):
        for b in range(0, 3):
            for c in range(0, 2):
                tableAction.append([a, b, c, 0])
    # print("Action option: ", tableAction[6:12])
    return tableAction

  @property
  def observation_space(self):
    # Delegate to the wrapped environment.
    return self.environment.observation_space

  @property
  def action_space(self):
    # Delegate to the wrapped environment.
    return self.environment.action_space

  @property
  def reward_range(self):
    # Delegate to the wrapped environment.
    return self.environment.reward_range

  @property
  def metadata(self):
    # Delegate to the wrapped environment.
    return self.environment.metadata

  def reset(self):
    """Resets the environment. Converts the observation to greyscale,
    if it is not.

    Returns:
      observation: numpy array, the initial observation emitted by the
        environment.
    """
    observation = self.environment.reset()
    # With retro=False the observation is a tuple; index 0 holds the image
    # (see step(), which unpacks (image, keys, time_remaining)).
    observation = observation[0]
    # Reset the reward-shaping state for the new episode.
    self.stage_reward = 0.0
    self.previous_stage_time_remaining = 3000
    self.previous_reward = 0
    self.previous_keys = 0
    self.previous_time_remaining = 3000
    # NOTE(review): duplicate of the assignment four lines above — harmless.
    self.previous_stage_time_remaining = 3000
    if(len(observation.shape) > 2):
        observation = cv2.cvtColor(cv2.convertScaleAbs(observation, alpha=(255.0 / 1.0)), cv2.COLOR_RGB2GRAY)
    observation = cv2.resize(observation, (84, 84))

    return observation

  def render(self, mode):
    """Renders the current screen, before preprocessing.

    This calls the Gym API's render() method.

    Args:
      mode: Mode argument for the environment's render() method.
        Valid values (str) are:
          'rgb_array': returns the raw ALE image.
          'human': renders to display via the Gym renderer.

    Returns:
      if mode='rgb_array': numpy array, the most recent screen.
      if mode='human': bool, whether the rendering was successful.
    """
    return self.environment.render(mode)

  def step(self, action):
    """Applies the given action in the environment. Converts the observation to
    greyscale, if it is not.

    Remarks:

      * If a terminal state (from life loss or episode end) is reached, this may
        execute fewer than self.frame_skip steps in the environment.
      * Furthermore, in this case the returned observation may not contain valid
        image data and should be ignored.

    Args:
      action: The action to be executed.

    Returns:
      observation: numpy array, the observation following the action.
      reward: float, the reward following the action.
      is_terminal: bool, whether the environment has reached a terminal state.
        This is true when a life is lost and terminal_on_life_loss, or when the
        episode is over.
      info: Gym API's info data structure.
    """
    # Translate the scalar action index into the multi-discrete action vector.
    # NOTE(review): the "-1" implies callers pass 1-based action indices —
    # confirm against the agent producing `action`.
    observation, reward, game_over, info = self.environment.step(np.array(self.tableAction[int(action)-1]))
    observation, keys, time_remaining = observation
    # NOTE(review): the second return value is bound to a *local* and then
    # dropped; self.previous_stage_time_remaining is never updated, so the
    # stage-clear bonus is always computed against the initial 3000. It
    # looks like this should be assigned to the attribute — verify.
    self.stage_reward, previous_stage_time_remaining = self.reward_compute(
        done=game_over,
        reward_total=self.stage_reward,
        keys=keys,
        previous_keys=self.previous_keys,
        reward=reward,
        previous_reward=self.previous_reward,
        time_remaining=time_remaining,
        previous_time_remaining=self.previous_time_remaining,
        previous_stage_time_remaining=self.previous_stage_time_remaining)
    self.previous_reward = reward
    self.previous_keys = keys
    self.previous_time_remaining = time_remaining

    self.game_over = game_over
    if(len(observation.shape) > 2):
        observation = cv2.cvtColor(cv2.convertScaleAbs(observation, alpha=(255.0 / 1.0)), cv2.COLOR_RGB2GRAY)
    observation = cv2.resize(observation, (84, 84))
    return observation, self.stage_reward, game_over, info

  def reward_compute(
          self,
          done,
          reward_total,
          keys,
          previous_keys,
          reward,
          previous_reward,
          time_remaining,
          previous_time_remaining,
          previous_stage_time_remaining):
    # Reward-shaping formula (comments translated from Chinese):
    # `reward` is the stage-clear count reported by the environment,
    # `keys` the number of keys held, `time_remaining` the time left.
    # A stage clear is worth a large bonus, a key 5, and a time orb only
    # 0.5 because remaining time is settled at episode end and would
    # otherwise be double-counted. On a clear: 100x the clear delta minus
    # (stage start time - remaining time).
    # print("time_remaining ", time_remaining,
    #       " previous_time_remaining ", previous_time_remaining,
    #       " reward ", reward)
    if reward < 0.2:
        reward = 0
    if (reward - previous_reward) > 0.8:
        # Translated note: if the remaining time exceeds the stage time this
        # becomes a bonus, which may strongly encourage the agent to chase
        # time orbs. An alternative is to add remaining_time / 1000 directly,
        # avoiding the stacking effect.
        print("Pass ", reward, " Stage!")
        reward_total += (reward - previous_reward) * 100 - \
            (previous_stage_time_remaining - time_remaining)
        # Carry the clock into the next stage: remember this round's time for
        # the next stage-clear computation.
        previous_time_remaining = time_remaining
        previous_stage_time_remaining = time_remaining
    # Key/orb bonuses are evaluated outside the stage-clear branch on the
    # assumption that a clear can coincide with picking one up (translated).
    if previous_keys > keys:
        # NOTE(review): fires when the key count *decreases*; if picking up a
        # key increases `keys`, this comparison looks inverted — confirm the
        # environment's key semantics.
        print("Get Key")
        reward_total += 5
    if previous_time_remaining < time_remaining and previous_time_remaining != 0:
        print("Get time power up")
        reward_total += 0.5
    else:
        # Small per-step time penalty to discourage idling.
        reward_total -= 0.1
    if done and previous_time_remaining > 100:
        print("Agent died")
        # The more time left at death, the larger the penalty (translated).
        reward_total -= (10 + time_remaining / 100)
    return reward_total, previous_stage_time_remaining
| [
"fgriasa123@gmail.com"
] | fgriasa123@gmail.com |
577a5e847afa46777a5a089b90511294bade82fa | 64261553066b91dd774f3e051658f83fd41dc415 | /MediumLevel/LongestSubstringWithoutRepeatingCharacters.py | f38736bb7f38f3fb696a229a234ae0b6ab97781e | [] | no_license | sahilshah1610/LeetCode | 1b5ec827cce3c4d66eda24fd16a1d7266ff8fc47 | 28e65a2caba7cf0c757195f76bcaf17e8dd28419 | refs/heads/master | 2023-03-02T15:36:15.325209 | 2021-02-12T01:48:15 | 2021-02-12T01:48:15 | 314,371,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
if len(s)==0:
return 0
mapping = {}
maxLenght=start=0
for i in range(len(s)):
if s[i] in mapping and start <= mapping[s[i]]:
start= mapping[s[i]] +1
else:
maxLenght = max(maxLenght, i-start+1)
mapping[s[i]] = i
#print(start, maxLenght, mapping)
return maxLenght
if __name__ == "__main__":
s = "abcabcbb"
objSol = Solution()
print(objSol.lengthOfLongestSubstring(s)) | [
"sahil.shah56@gmail.com"
] | sahil.shah56@gmail.com |
b81df29cd5bc086a10b9d34a9da102aebefd5724 | 47766dd4a7fe325ff53db9754d9962d54c3bce20 | /week3/review/3_iterator_filter.py | 80ea40d7c9f41ed54fa37c8d4e41776379da7cfc | [
"Apache-2.0"
] | permissive | skku-overflow/python-2020-2 | 1ed327a075b4ddd6b1feecb5cabc75e8913de726 | def09d9a8ff32ee085edaa5eca89ccc03c29af2a | refs/heads/main | 2023-03-06T16:13:40.439521 | 2021-02-21T07:23:07 | 2021-02-21T07:23:07 | 316,898,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py |
def is_even(n):
return n % 2 == 0
l = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(l)
evens = list(filter(is_even, l))
print(evens)
def is_first_even(arg):
a, b = arg
return a % 2 == 0
l2 = [(1, 2), (2, 3)]
print(l2)
print(list(filter(is_first_even, l2)))
| [
"kdy1997.dev@gmail.com"
] | kdy1997.dev@gmail.com |
2905b09b116aa8620144bb52dcea36c4afdbe2c0 | f3065d37482cffadb5fe8caa397a8cc946710485 | /django_structure/src/profiles/migrations/0012_auto_20180321_1745.py | 279bfa92783124289a640b745c930b92b7f4703f | [] | no_license | AlfredMulder/Django_work | 6d006f8b1e0c679ed43377fb891d215e870114a9 | 5d347d6897f58f6977ab584a80edb111ed2dabaf | refs/heads/master | 2020-04-10T21:56:01.475953 | 2019-06-25T15:13:19 | 2019-06-25T15:13:19 | 124,296,776 | 0 | 0 | null | 2018-03-07T21:34:41 | 2018-03-07T21:29:05 | null | UTF-8 | Python | false | false | 440 | py | # Generated by Django 2.0.3 on 2018-03-21 17:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0011_auto_20180321_1744'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default='path/to/my/default/image.jpg', upload_to='profile_image'),
),
]
| [
"xanderhref@protonmail.com"
] | xanderhref@protonmail.com |
0b82a15f2668611f51876dc75eb02c6ccb69ac64 | d320dc1177bd40be1ace9e5b76f25b93a22a6e25 | /appium_test/app_base.py | f12af230cec4031049b0a89eb5d3a39812e07a82 | [] | no_license | qq1403332591/Hogwarts15 | 26c6424246ee3677920a5f44967d1ba8c611bc85 | d6cd852fff89b8f901e449276eb95d64ec3e14f3 | refs/heads/master | 2023-01-09T13:46:01.067470 | 2020-11-11T13:15:51 | 2020-11-11T13:28:15 | 304,650,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | from appium import webdriver
class Base():
    # Shared Appium fixture: builds the Android WebDriver before each test
    # and quits it afterwards. (Comments translated from Chinese.)

    def setup(self):
        desired_caps = {}
        desired_caps['platformName'] = 'Android'  # Target platform (fixed value) -> launch Android
        desired_caps['platformVersion'] = '10'  # Android OS version: adb shell getprop ro.build.version.release
        desired_caps['deviceName'] = 'V1938T'  # Phone/emulator model: adb shell getprop ro.product.model
        desired_caps['appPackage'] = 'com.tencent.wework'  # App package name: adb shell dumpsys package XXX
        desired_caps[
            'appActivity'] = '.launch.LaunchSplashActivity'  # Launch activity (found as above); alternatives: .pages.splash.SplashActivity / pages.main.MainActivity
        desired_caps['automationName'] = 'uiautomator2'
        desired_caps['unicodeKeyboard'] = True  # Needed to type Chinese text
        desired_caps['resetKeyboard'] = True  # Switch to Appium's own keyboard
        desired_caps['noReset'] = True  # Keep the app's cached state (no reset between sessions)
        desired_caps['skipDeviceInitialization'] = True  # Skip device initialization
        # desired_caps['autoLaunch'] = False  # Attach to the already-open app instead of relaunching
        # desired_caps['settings[settingsKey]'] = 0  # Max wait when locating dynamic elements
        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
        self.driver.implicitly_wait(5)

    def teardown(self):
        self.driver.quit()
| [
"1403332591@qq.com"
] | 1403332591@qq.com |
7c658b02af1216d35936435030ac30caedbcf48f | e79888cd68177e7ec5125270cdc52f888e211e78 | /hirao/chapter01/knock04.py | de4c0c4219da8267d76dd51e2e4cbcf9b31ea0fd | [] | no_license | cafenoctua/100knock2019 | ec259bee27936bdacfe0097d42f23cc7500f0a07 | 88717a78c4290101a021fbe8b4f054f76c9d3fa6 | refs/heads/master | 2022-06-22T04:42:03.939373 | 2019-09-03T11:05:19 | 2019-09-03T11:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | s = "Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can."
drop = ",."
# Show the original sentence, then strip the punctuation in `drop`.
print(s)
for punct in list(drop):
    s = s.replace(punct, "")
s = s.split()
# Words at these 1-based positions contribute their first letter only;
# every other word contributes its first two letters (element symbols).
display_list = [1, 5, 6, 7, 8, 9, 15, 16, 19]
ans_dict = {}
for position, word in enumerate(s, start=1):
    symbol = word[0] if position in display_list else word[:2]
    ans_dict[symbol] = position
print(ans_dict)
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
85c63654cacf60d20305f0e502209f0294028474 | 0734f6b90379ba0850426db18a9470a161151805 | /setup.py | 46572860be6de6e827439be07ebb60a686330c3e | [
"MIT"
] | permissive | alexhayes/schematics-xml | af69c6ed4437603bc478d0fc66a4f79d0a204d16 | 36ac15a5e2891d238bebe0b3bb3ef8d5e8425ed9 | refs/heads/develop | 2022-08-01T00:03:14.428519 | 2016-11-11T04:54:32 | 2016-11-11T04:54:32 | 72,046,221 | 2 | 3 | MIT | 2022-07-06T19:48:26 | 2016-10-26T21:32:10 | Python | UTF-8 | Python | false | false | 3,726 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
from setuptools.command.test import test
is_setuptools = True
except ImportError:
raise
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages # noqa
from setuptools.command.test import test # noqa
is_setuptools = False
import os
import sys
import codecs
NAME = 'schematics-xml'
entrypoints = {}
extra = {}
# -*- Classifiers -*-
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: MIT License
Topic :: Other/Nonlisted Topic
Topic :: Software Development :: Libraries :: Python Modules
Intended Audience :: Developers
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Operating System :: OS Independent
Operating System :: POSIX
Operating System :: Microsoft :: Windows
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
# -*- Distribution Meta -*-
import re
re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)')
re_vers = re.compile(r'VERSION\s*=.*?\((.*?)\)')
re_doc = re.compile(r'^"""(.+?)"""')
rq = lambda s: s.strip("\"'")
def add_default(m):
attr_name, attr_value = m.groups()
return ((attr_name, rq(attr_value)), )
def add_version(m):
v = list(map(rq, m.groups()[0].split(', ')))
return (('VERSION', '.'.join(v[0:3]) + ''.join(v[3:])), )
def add_doc(m):
return (('doc', m.groups()[0]), )
pats = {re_meta: add_default,
re_vers: add_version,
re_doc: add_doc}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'schematics_xml/__init__.py')) as meta_fh:
meta = {}
for line in meta_fh:
if line.strip() == '# -eof meta-':
break
for pattern, handler in pats.items():
m = pattern.match(line.strip())
if m:
meta.update(handler(m))
# -*- Installation Requires -*-
py_version = sys.version_info
def strip_comments(l):
return l.split('#', 1)[0].strip()
def _pip_requirement(req):
if req.startswith('-r '):
_, path = req.split()
return reqs(*path.split('/'))
return [req]
def _reqs(*f):
return [
_pip_requirement(r) for r in (
strip_comments(l) for l in open(
os.path.join(here, 'requirements', *f)).readlines()
) if r]
def reqs(*f):
return [req for subreq in _reqs(*f) for req in subreq]
install_requires = reqs('default.txt')
# -*- Tests Requires -*-
tests_require = reqs('test.txt')
# -*- Long Description -*-
if os.path.exists('README.rst'):
long_description = codecs.open('README.rst', 'r', 'utf-8').read()
else:
long_description = 'See http://pypi.python.org/pypi/schematics-xml'
setup(
name=NAME,
version=meta['VERSION'],
description=meta['doc'],
author=meta['author'],
author_email=meta['contact'],
url=meta['homepage'],
platforms=['any'],
license='MIT',
packages=find_packages(),
package_data={'schematics_xml': ['tests/templates/*.html']},
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
test_suite='nose.collector',
classifiers=classifiers,
entry_points=entrypoints,
long_description=long_description,
keywords=['schematics', 'xml', 'model', 'modelling', 'dicttoxml', 'xmltodict'],
)
| [
"alex@commoncode.io"
] | alex@commoncode.io |
3c22bf817ee148fbc70da528dfb8cff5991cedb0 | f12fac0dd5c9c9eeedff16377d1f57a3cd02ef32 | /Python游戏编程入门/02.初识Pygame:Pie游戏/绘制弧形.py | 8031255f9f3580e0e721331544bdda1f67ae9357 | [] | no_license | SesameMing/PythonPygame | 61fe09a38d1729963b86f348b349572760676195 | ca0554427cd30838d56630e8b1e04aa0e26834a1 | refs/heads/master | 2020-12-07T21:23:56.271193 | 2016-11-25T06:38:06 | 2016-11-25T06:38:06 | 66,639,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | #!/usr/bin/env python3
# -*-coding:utf-8-*-
# Author:SesameMing <blog.v-api.cn>
# Email:admin@v-api.cn
# Time:2016-11-25 12:51
import sys
import math
import pygame
from pygame.locals import *
pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Drawing Arcs")
while True:
for event in pygame.event.get():
if event.type in (QUIT, KEYDOWN):
sys.exit()
screen.fill((0, 0, 200))
color = 255, 0, 255
position = 200, 150, 200, 200
start_angle = math.radians(0)
end_angle = math.radians(180)
width = 8
pygame.draw.arc(screen, color, position, start_angle, end_angle, width)
pygame.display.update() | [
"admin@v-api.cn"
] | admin@v-api.cn |
9a0274111f19a917ed032d7aca9be73c9e930a24 | 02a9e1be198a82793a9e9aea1f0f46cde0e8c85e | /prac_01/broken_score.py | feaede5759932a9410dbe13cbc382d805aaeb5cf | [] | no_license | DaniRyland-Lawson/CP1404-cp1404practicals- | 04cfce69f55b26cf7cf30d2ea2a1198d9514c527 | 8b8776ca5a8dee8f8c21697c79dbbd2cd95320f7 | refs/heads/master | 2020-07-06T22:50:21.130873 | 2019-11-12T03:45:50 | 2019-11-12T03:45:50 | 203,162,819 | 0 | 0 | null | 2019-11-12T03:49:59 | 2019-08-19T11:52:56 | Python | UTF-8 | Python | false | false | 384 | py | """
CP1404/CP5632 - Practical
Broken program to determine score status
"""
# In this section, it took me a little while to realise it is best in order. Highest to lowest.
score = float(input("Enter score: "))
if score < 0 or score >100:
print("Invalid score")
elif score >= 90:
print("Excellent")
elif score >= 50:
print("Passable")
else:
print("Bad") | [
"noreply@github.com"
] | noreply@github.com |
04e3a1cfd126c0710557c5f5944b73240af4deec | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/vb25/plugins/TexSwirl.py | 9ca7e67f86475efdb3be99c3fa816a582b516141 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,575 | py | #
# V-Ray/Blender
#
# http://vray.cgdo.ru
#
# Author: Andrey M. Izrantsev (aka bdancer)
# E-Mail: izrantsev@cgdo.ru
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.
#
# Blender module
import bpy
from bpy.props import *
# V-Ray/Blender modules
from vb25.utils import *
from vb25.ui import ui
from vb25.plugins import *
from vb25.texture import *
from vb25.uvwgen import *
TYPE = 'TEXTURE'
ID = 'TexSwirl'
PLUG = 'TexSwirl'
NAME = 'Swirl'
DESC = "TexSwirl"
PID = 15
PARAMS = (
'uvwgen',
'color1',
'color2',
'swirl_intensity',
'color_contrast',
'swirl_amount',
'constant_detail',
'center_x',
'center_y',
'random_seed',
'twist',
)
def add_properties(rna_pointer):
class TexSwirl(bpy.types.PropertyGroup):
pass
bpy.utils.register_class(TexSwirl)
rna_pointer.TexSwirl= PointerProperty(
name= "TexSwirl",
type= TexSwirl,
description= "V-Ray TexSwirl settings"
)
TexSwirl.color1= FloatVectorProperty(
name= "Color 1",
description= "First color",
subtype= 'COLOR',
min= 0.0,
max= 1.0,
soft_min= 0.0,
soft_max= 1.0,
default= (1,1,1)
)
# color2
TexSwirl.color2= FloatVectorProperty(
name= "Color 2",
description= "Second color",
subtype= 'COLOR',
min= 0.0,
max= 1.0,
soft_min= 0.0,
soft_max= 1.0,
default= (0,0,0)
)
# swirl_intensity
TexSwirl.swirl_intensity= FloatProperty(
name= "Swirl Intensity",
description= "Swirl Intensity",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 2
)
# color_contrast
TexSwirl.color_contrast= FloatProperty(
name= "Color Contrast",
description= "Color Contrast",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 0.4
)
# swirl_amount
TexSwirl.swirl_amount= FloatProperty(
name= "Swirl Amount",
description= "Swirl Amount",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 1
)
# constant_detail
TexSwirl.constant_detail= IntProperty(
name= "Constant Detail",
description= "Constant Detail",
min= 0,
max= 100,
soft_min= 0,
soft_max= 10,
default= 4
)
# center_x
TexSwirl.center_x= FloatProperty(
name= "Center X",
description= "Center Position X",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= -0.5
)
# center_y
TexSwirl.center_y= FloatProperty(
name= "Center Y",
description= "Center Position Y",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= -0.5
)
# random_seed
TexSwirl.random_seed= FloatProperty(
name= "Random Seed",
description= "Random Seed",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 0
)
# twist
TexSwirl.twist= FloatProperty(
name= "Twist",
description= "Twist",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 1
)
'''
OUTPUT
'''
def write(bus):
scene= bus['scene']
ofile= bus['files']['textures']
slot= bus['mtex']['slot']
texture= bus['mtex']['texture']
tex_name= bus['mtex']['name']
uvwgen= write_uvwgen(bus)
TexSwirl= getattr(texture.vray, PLUG)
ofile.write("\n%s %s {"%(PLUG, tex_name))
PLUGINS['TEXTURE']['TexCommon'].write(bus)
for param in PARAMS:
if param == 'uvwgen':
value= uvwgen
else:
value= getattr(TexSwirl, param)
ofile.write("\n\t%s= %s;"%(param, a(scene, value)))
ofile.write("\n}\n")
return tex_name
'''
GUI
'''
class VRAY_TP_TexSwirl(ui.VRayTexturePanel, bpy.types.Panel):
bl_label = NAME
COMPAT_ENGINES = {'VRAY_RENDER','VRAY_RENDER_PREVIEW'}
@classmethod
def poll(cls, context):
tex = context.texture
return tex and tex.type == 'VRAY' and tex.vray.type == ID and ui.engine_poll(cls, context)
def draw(self, context):
wide_ui = context.region.width > ui.narrowui
layout = self.layout
tex= context.texture
TexSwirl= getattr(tex.vray, PLUG)
split= layout.split()
col= split.column()
col.prop(TexSwirl, 'color1', text="")
if wide_ui:
col= split.column()
col.prop(TexSwirl, 'color2', text="")
split= layout.split()
col= split.column(align=True)
col.prop(TexSwirl, 'swirl_amount', text="Amount")
col.prop(TexSwirl, 'swirl_intensity', text="Intensity")
col.prop(TexSwirl, 'color_contrast', text="Color Contrast")
if not wide_ui:
split= layout.split()
col= split.column(align=True)
col.prop(TexSwirl, 'twist')
col.prop(TexSwirl, 'constant_detail')
split= layout.split()
row= split.row(align=True)
row.prop(TexSwirl, 'center_x')
row.prop(TexSwirl, 'center_y')
split= layout.split()
col= split.column()
col.prop(TexSwirl, 'random_seed', text="Seed")
def GetRegClasses():
return (
VRAY_TP_TexSwirl,
)
def register():
for regClass in GetRegClasses():
bpy.utils.register_class(regClass)
def unregister():
for regClass in GetRegClasses():
bpy.utils.unregister_class(regClass)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
9368e2c5913f38ac451218f6b28dc42e68db257c | c7178837cf98e1bc89027d8462c57727ea05de58 | /elections/urls.py | 4474d57f79fb8941a6cafe1abfa416547c0b3f7c | [] | no_license | sook1421/django_j | a04c223a49fb6f7cb9426eb929722eeb70857ba5 | 7d11792fcf723156d6e3667c5df7ef0201ce415f | refs/heads/master | 2021-09-06T23:04:42.955695 | 2018-02-13T05:26:09 | 2018-02-13T05:26:09 | 103,805,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from django.conf.urls import url, include
from . import views
urlpatterns = [
url(r'^$', views.index),
]
| [
"sook1421@chol.com"
] | sook1421@chol.com |
c9e5718005ed29e21e424f7db8f4e8370bf44c9a | b23a9b67159c94d881513d3c19351d591329476f | /mc/OffPolicyMc.py | 2ef1b7efe5a29f7169ed8d14df05c038efa94954 | [] | no_license | zuofeng1997/rl | 1a0933296cd2319375fc10bf69fde74e0fe15be1 | 90d64bbc5335e255417b0d1a91b5555f17d4d8cb | refs/heads/master | 2020-03-22T11:28:41.107250 | 2018-08-21T07:15:40 | 2018-08-21T07:15:40 | 139,973,196 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,837 | py | from copy import deepcopy
import numpy as np
class FiniteMcModel:
def __init__(self,state_space,action_space,gamma=0.99,epsilon=0.3):
self.gamma = gamma
self.epsilon = epsilon
self.Q = None
if isinstance(action_space,int):
self.action_space = np.arange(action_space)
actions = [0]*action_space
self._act_rep = "list"
else:
self.action_space = action_space
actions = {k:0 for k in action_space}
self._act_rep = "dict"
if isinstance(state_space,int):
self.state_space = np.arange(state_space)
self.Q = [deepcopy(actions) for _ in range(state_space)]
else:
self.state_space = state_space
self.Q = {k:deepcopy(actions) for k in state_space}
self.count = deepcopy(self.Q)
self.C = deepcopy(self.Q)
def policy(self,action,state): #target policy
if self._act_rep == "list":
if action == np.argmax(self.Q[state]):
return 1
return 0
elif self._act_rep == "dict":
if action == max(self.Q[state], key=self.Q[state].get):
return 1
return 0
def behave(self, action, state): # behave policy
return self.epsilon / len(self.action_space) + (1 - self.epsilon) * self.policy(action, state)
def generate_returns(self,ep):
G = {} # return on state
cumC = 0 # cumulative reward
W = {}
w = 1
for tpl in reversed(ep):
observation, action, reward = tpl
G[(observation, action)] = cumC = reward + self.gamma * cumC
self.C[observation][action] += w
W[(observation,action)] = w
if self.policy(action,observation) == 0:
break
w = w*(1/self.behave(action,observation))
return G,W
def choose_action(self, policy, state):
probs = [policy(a, state) for a in self.action_space]
return np.random.choice(self.action_space, p=probs)
def update_Q(self,ep):
G,W = self.generate_returns(ep)
for s in G:
state,action = s
self.count[state][action] += 1
self.Q[state][action] += (W[(state,action)]/self.C[state][action])*(G[s]-self.Q[state][action])
def score(self,env,policy,n_samples=100):
rewards = []
for _ in range(n_samples):
observation = env.reset()
cum_rewards = 0
while True:
action = self.choose_action(policy, observation)
observation, reward, done, _ = env.step(action)
cum_rewards += reward
if done:
rewards.append(cum_rewards)
break
return np.mean(rewards)
| [
"1065504865@qq.com"
] | 1065504865@qq.com |
6388a3763a8737a12142d29fd6a3202fbd197144 | 31ff066c1e0b3d2e88326ca66584ef579484ba09 | /final/fractal/fractal/settings.py | ca82802c1d21423383704d39fceb886d1d6f8b8e | [] | no_license | Lightningbread76/YellowBrick | c82b757c4f2bf5fe6b6ce8c42efb231c1653575d | dd144da0e6676a0d6df669b8caeb7a9c5220c5e7 | refs/heads/main | 2023-04-15T15:30:48.179818 | 2021-04-23T16:39:07 | 2021-04-23T16:39:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,172 | py | """
Django settings for fractal project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'le%+hw+4r3v-(x@x_$j!l!28w#xar4fbm1$b5=b!r%+s+u3f$='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api.apps.ApiConfig',
'rest_framework',
'spotify.apps.SpotifyConfig']
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fractal.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fractal.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"quinnrelyea@gmail.com"
] | quinnrelyea@gmail.com |
299ff26f17f629c6eed7981129ec94c0daf43684 | f5f92ee9f6a4ae8c7a1c029ccab6386b338069d3 | /wk3/f8.py | 1e8bb4259716d19e5d3e8fe13d7fc39ecf21701f | [] | no_license | JustinAnthonyB/Python | 7300a6e25a314b202c098d08e49037557046fb4c | e647376cf43c6ccdceb38f5d7d34ccda7550ea27 | refs/heads/master | 2020-12-10T03:44:34.555223 | 2020-04-02T19:28:56 | 2020-04-02T19:28:56 | 233,493,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | """
Ask the user to enter a password
Determine if password is
Not acceptable
less than 5 chars
Weak
between 5 and 7
Medium
between 8 and 10 chars
Strong
11 chars or more
Using an if statement, output whether text
meets requirement
Input from user?
1: no, default = text
outputs(s):
message of whether text meets requirement
data structures / sanitation:
no. not really
"""
default = input("Enter a password. 8 char or more")
message = "Password is "
strength = "unacceptable"
password_length = len(default)
if password_length >= 5 and password_length < 8:
strength = "weak"
elif password_length > 7 and password_length < 11:
strength = "medium"
elif password_length > 10:
strength = "strong"
print(message + strength)
| [
"noreply@github.com"
] | noreply@github.com |
26030f347e76ad066fd9e10dc35bf688f5b153f8 | b8fd409ee054f68807e607b01f9c7e9bf9e3cbc8 | /newPollApp/asgi.py | 196033b636c49dbe18a7484aa38b2d7d265f3dc0 | [] | no_license | diptajit-dev-biswas/python-startapp | 1fa4e7b5ab9e75f0e139473e82eda1a31a98a90b | f6cd5000cf08a9dba0c5c6b903d70dd88e13449d | refs/heads/main | 2023-07-17T18:54:18.826199 | 2021-08-16T08:11:30 | 2021-08-16T08:11:30 | 396,665,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for newPollApp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'newPollApp.settings')
application = get_asgi_application()
| [
"diptajit@appstangodev.com"
] | diptajit@appstangodev.com |
c04479133e596d0015f9df6569bf7d2c2283e6d1 | b23c6c02d9b54c987bca2e36c3506cf80fa28239 | /python databse connectivity progs/bind variable.py | a9bf8a8d9dcc71bd722251121197416765b6ba4e | [] | no_license | nishikaverma/Python_progs | 21190c88460a79f5ce20bb25d1b35f732fadd642 | 78f0cadde80b85356b4cb7ba518313094715aaa5 | refs/heads/master | 2022-06-12T14:54:03.442837 | 2020-05-08T10:28:58 | 2020-05-08T10:28:58 | 262,293,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import cx_Oracle
try:
conn=cx_Oracle.connect("system/oracle123@localhost/orcl")
print("connection established")
cur=conn.cursor()
print("cursor created!")
print("***********************")
cur.execute("Select Book_name,Book_price from Books")
for x in cur:
print(x)
print("***********************")
name=input("enter book name : ")
price=int(input("enter book price"))
cur.execute("Insert into Books (Book_name,Book_price)values(:1,:2)",(name,price))
n=cur.rowcount
print(n,'rows inserted')
conn.commit()
cur.execute("Select Book_name,Book_price from Books")
for x in cur:
print(x)
print("************************")
except(cx_Oracle.DatabaseError)as e:
print("Error in connectin: ",e)
finally:
if conn is not None:
cur.close()
print("curser closed!")
conn.close()
print("connection closed!")
| [
"nishika.verma@live.com"
] | nishika.verma@live.com |
40705c7d2144ce3f9aa84b4ded8f1f39a82e78c7 | 60ebf96283c09886366b7b03cf13a53551a9e33a | /movie.py | 2f9a266ca19eba3f04aad4dd762a0a2c3e773059 | [] | no_license | angietibbs618/movies | 85abee6564c21f0d74821d87d8ee56d0d97d09b6 | 792e1ab824e1bff5abd36456b9ee83e1d98ff554 | refs/heads/master | 2021-01-19T11:35:19.897539 | 2017-04-26T14:19:00 | 2017-04-26T14:19:00 | 87,980,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | import webbrowser
class Movie():
""" This class provides a way to store movie related information"""
valid_ratings = ["G", "PG", "PG-13", "R"]
def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
self.title = movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
def show_trailer(self):
webbrowser.open(self.trailer_youtube_url)
| [
"noreply@github.com"
] | noreply@github.com |
7e389d1992dc7ee70f3f25ef939fa71ea01b2db2 | 3b64e5707567bfeed3bf69fdedc7702a75677115 | /List-2/count_evens.py | f9c71645d93dc0fd6fe9ed479bac07775f67eb85 | [] | no_license | fearlessfreap24/codingbatsolutions | 1b730f4e76da6c6dca9670dcea3f3092ba690d44 | 7d3bcd6f347db41794572700d05e87db3f0ca478 | refs/heads/master | 2020-03-25T04:03:01.802798 | 2018-08-18T11:47:17 | 2018-08-18T11:47:17 | 143,374,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | def count_evens(nums):
sum = 0
for i in range(len(nums)):
if nums[i] % 2 == 0:
sum += 1
return sum
if __name__ == "__main__":
print(count_evens([2, 1, 2, 3, 4])) # 3
print(count_evens([2, 2, 0])) # 3
print(count_evens([1, 3, 5])) # 0
| [
"dylanaperez@yahoo.com"
] | dylanaperez@yahoo.com |
5eb0a435e2e4872f4a9c5255d5028bd2f73e4b3d | 89bbb2e46f844b2be046185a4ee54b6d8986a53b | /instagram/urls.py | e97967c6593fe974c8aae481aecf75dd5aa03231 | [
"Beerware"
] | permissive | SL0KY/instagram-like | aedb0ac222fac3350915199c0979e568ca0314e9 | 1641d44d03b0fb51a51cb96520fed279bd6c2e86 | refs/heads/master | 2020-09-21T14:02:01.761038 | 2020-02-13T09:08:58 | 2020-02-13T09:08:58 | 224,810,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | """insta URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from rest_framework import routers
from photos.api import views as photo_api_views
from photos import views as photo_views
router = routers.DefaultRouter()
router.register(r'photos', photo_api_views.PhotoViewSet)
urlpatterns = [
path('api/v1', include(router.urls)),
path('^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path("", photo_views.index),
path('accounts/', include('django.contrib.auth.urls')),
path('admin/', admin.site.urls),
path('photos/', photo_views.index, name="list_photos"),
path("photos/create", photo_views.PhotoCreateView.as_view(), name='create_photo'),
path("photos/like", photo_views.PhotoLikeView.as_view(), name="like_photo"),
path("photos/<int:pk>", photo_views.PhotoDetailView.as_view(), name="detail_photo"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
# For django versions before 2.0:
# url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| [
"dominguesquentin@gmail.com"
] | dominguesquentin@gmail.com |
9336731a6cd88e7a8b99c2564c74415e27d8d968 | 1e697dc34e0e4ca3fc877209e21a99d15074e71c | /0x02-python-import_modules/1-calculation.py | f3f7b31bc85c69092d619f0b53f1055e8bd128f4 | [] | no_license | ZoltanMG/holbertonschool-higher_level_programming | 97e51030e2203efd0b9bc0539c9916e58e7b50fb | c50a7263531e115cdbd285bb67de34fed32a41a1 | refs/heads/master | 2023-03-03T03:17:28.447086 | 2021-02-13T02:40:33 | 2021-02-13T02:40:33 | 259,444,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | #!/usr/bin/python3
if __name__ == '__main__':
from calculator_1 import add, sub, mul, div
a = 10
b = 5
print('{} + {} = {}'.format(a, b, add(a, b)))
print('{} - {} = {}'.format(a, b, sub(a, b)))
print('{} * {} = {}'.format(a, b, mul(a, b)))
print('{} / {} = {}'.format(a, b, div(a, b)))
| [
"zoltanmoragarcia@gmail.com"
] | zoltanmoragarcia@gmail.com |
f267f2ee5db997cb0bd07c85bbca1589021cfe18 | 407d602bc989d0a7c49a441e75332c7f4d4db01d | /id3.py | 1c4d73e15e0caba9027a78fe65f17c9d08d7f7d1 | [] | no_license | thkm0620/goodWordAttack | 902e25080f844c6461bb5e4bcf803c1dd1e2b3eb | 9785290abae4cf60ab3652d7b00cb0471347b11e | refs/heads/master | 2023-02-01T05:47:49.381339 | 2020-12-19T05:59:06 | 2020-12-19T05:59:06 | 322,638,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,655 | py | # coding=utf-8
import openpyxl
import numpy as np
from cleanText import cleanString
from sklearn import tree
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
class id3Classifier:
check=0
model = tree.DecisionTreeClassifier(criterion="entropy")
vectorizer = TfidfVectorizer(stop_words='english', max_df=75)
# Get the original dataset
def store():
workBookOld = openpyxl.load_workbook('datasets/trainData.xlsx')
dataSheetOld = workBookOld['trainData']
xData = []
yData = []
rows = dataSheetOld.max_row
for i in range(2, rows+1):
if (str(dataSheetOld.cell(row = i, column = 2).value) != 'None'):
xData.append(str(cleanString(str(dataSheetOld.cell(row = i, column = 1).value))))
if (str(dataSheetOld.cell(row = i, column = 2).value) == "1"):
yData.append(1)
else:
yData.append(0)
# NOTE: to train data on the entire dataset, simply return xData and yData
# Splitting the data like this is to obtain test cases and calculate the F-score of the learning algorithm
xTrain, xTest, yTrain, yTest = train_test_split(xData, yData, test_size=0.2, random_state=0)
return xTrain, xTest, yTrain, yTest
# Calculating the F-score
def calcFScore(xTest, yTest, model, vectorizer):
xTestMatrix = vectorizer.transform(xTest)
yTestMatrix = np.asarray(yTest)
result = model.predict(xTestMatrix)
matrix = confusion_matrix(yTestMatrix, result)
fScore = f1_score(yTestMatrix, result, pos_label = 0)
precision = precision_score(yTestMatrix, result, pos_label=0)
recall = recall_score(yTestMatrix, result, pos_label=0)
return fScore, precision, recall, matrix
def predict(msg):
if id3Classifier.check==0:
# Create training data
xTrain, xTest, yTrain, yTest = id3Classifier.store()
id3Classifier.vectorizer = TfidfVectorizer(stop_words='english', max_df=75)
yTrainMatrix = np.asarray(yTrain)
xTrainMatrix = id3Classifier.vectorizer.fit_transform(xTrain)
# Training ID3 classifier
id3Classifier.model.fit(xTrainMatrix, yTrainMatrix)
fScore, precision, recall, matrix = id3Classifier.calcFScore(xTest, yTest, id3Classifier.model, id3Classifier.vectorizer)
print("fScore, precision, recall :")
print(fScore, precision, recall)
id3Classifier.check=1
return id3Classifier.predict2(msg,id3Classifier.vectorizer,id3Classifier.model)
# Test new data for Spam
def predict2(emailBody,vectorizer,model):
featureMatrix = vectorizer.transform([cleanString(emailBody)])
result = model.predict(featureMatrix)
if (1 in result):
#return "Spam"
return True
else:
#return "Not Spam"
return False
'''
print(id3Classifier.predict("FreeMsg: Claim ur 250 SMS messages-Text OK to 84025 now!Use web2mobile 2 ur mates etc. Join Txt250.com for 1.50p/wk. T&C BOX139, LA32WU. 16 . Remove txtX or stop"))
print(id3Classifier.predict("FREE for 1st week! No1 Nokia tone 4 ur mob every week just txt NOKIA to 87077 Get txting and tell ur mates. zed POBox 36504 W45WQ norm150p/tone 16+"))
print(id3Classifier.predict("I have a tad issue here about the thorough refining column"))
'''
| [
"thkm0620@naver.com"
] | thkm0620@naver.com |
9fc5c37346192644bc12a0b5a2a6817f98a26c5e | f384d0bd72edf56edc06b2bba84fc8ca8e73e6e5 | /calculos/salinidad.py | 61c1537b557a0c5704df400ab75a1f2a59373a73 | [] | no_license | jvaldesa/lia | ec44ed98f5efcd2cc1689040684a210dc6dadea1 | 7d362fb351142463f5e35bb0fbe8773efee5158c | refs/heads/master | 2020-04-04T20:33:56.740231 | 2019-05-07T14:53:01 | 2019-05-07T14:53:01 | 156,251,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,166 | py | from decimal import Decimal
def cationes(titulacionCaMg, normalidadEDTA, titulacionCa, alicuota, Na, K):
titulacionCaMg = Decimal(titulacionCaMg)
normalidadEDTA = Decimal(normalidadEDTA)
titulacionCa = Decimal(titulacionCa)
alicuota = Decimal(alicuota)
Na = Decimal(Na)
K = Decimal(K)
"""
Ca + Mg (meq/L) = (ml titulación Ca+Mg * Normalidad EDTA * 1000)/ml Alicuota
Ca = ml titulación Ca
Mg (meq/L) = (Ca + Mg (meq/L)) - Ca
Na (meq/L) = Na(ppm)/23 => peso equivalente de Na = 23
k (meq/L) = Na(ppm)/39.1 => peso equivalente de Na = 39.1
"""
CaMg = (titulacionCaMg * normalidadEDTA * Decimal(1000)) / alicuota
Ca = titulacionCa
Mg = CaMg -Ca
Nameq = Na / Decimal(23)
Kmeq = K / Decimal(39.1)
Ca = round(Ca, 2)
Mg = round(Mg, 2)
Nameq = round(Nameq, 2)
Kmeq = round(Kmeq, 2)
return {'Ca': Ca, 'Mg':Mg, 'Na':Nameq, 'K':Kmeq}
def aniones(titulacionCar, titulacionBlancoCar, normalidadH2SO4, alicuotaCar, titulacionBic, titulacionBlancoBic, alicuotaBic, titulacionClo, titulacionBlancoClo, normalidadAgNO3, alicuotaClo, conductividadEl, unidad):
titulacionCar = Decimal(titulacionCar)
titulacionBlancoCar = Decimal(titulacionBlancoCar)
normalidadH2SO4 = Decimal(normalidadH2SO4)
alicuotaCar = Decimal(alicuotaCar)
titulacionBic = Decimal(titulacionBic)
titulacionBlancoBic = Decimal(titulacionBlancoBic)
alicuotaBic = Decimal(alicuotaBic)
titulacionClo = Decimal(titulacionClo)
titulacionBlancoClo = Decimal(titulacionBlancoClo)
normalidadAgNO3 = Decimal(normalidadAgNO3)
alicuotaClo = Decimal(alicuotaClo)
conductividadEl = Decimal(conductividadEl)
if unidad == 'µS/cm':
Ce = conductividadEl / 1000
elif unidad == 'mS/cm':
Ce = conductividadEl
"""
x = Volumen gastado en titulación carbonatos - volumen gastado en titulación blanco carbonatos
Carbonatos (meq/L) = (2 * x * Normalidad del H2SO4 * 1000) / mililitros de Alicuota Carbonatos
y = Volumen gastado en titulación bicarbonatos - volumen gastado en titulación blanco bicarbonatos
Bicarbonatos = (y - (2 * x) * Normalidad del H2SO4 * 1000) / mililitros de Alicuota Bicarbonatos
z = volumen gastado en titulación cloruros - volumen gastado en titulación blanco cloruros
Cloruros = (z * Normalidad AgNO3 * 1000) / mililitros de Alicuota Cloruros
Sulfatos = Conductividad electrica (mS/cm) * 10 -(Carbonatos + Bicarbonatos + Cloruros)
"""
x = titulacionCar - titulacionBlancoCar
y = titulacionBic - titulacionBlancoBic
z = titulacionClo - titulacionBlancoClo
Carbonatos = (2 * x * normalidadH2SO4 * 1000) / alicuotaCar
Bicarbonatos = ((y - (2 * x)) * normalidadH2SO4 * 1000) / alicuotaBic
Cloruros = (z * normalidadAgNO3 * 1000) / alicuotaClo
Sulfatos = Ce * 10 -(Carbonatos + Bicarbonatos + Cloruros)
Carbonatos = round(Carbonatos, 2)
Bicarbonatos = round(Bicarbonatos, 2)
Cloruros = round(Cloruros, 2)
Sulfatos = round(Sulfatos, 2)
return {'Carbonatos':Carbonatos, 'Bicarbonatos': Bicarbonatos, 'Cloruros': Cloruros, 'Sulfatos':Sulfatos}
| [
"jvaldes_a@hotmail.com"
] | jvaldes_a@hotmail.com |
ced5ac21aec05c62ab3a5d490799f6547ae76833 | 40686808bb915db93c03069bfe4ab9ec73663c19 | /classification_testing.py | dac47c7d32750fcf6ad52f3914b4abf012619c31 | [] | no_license | AzogDefiler/classification_wav | 340f2be9352c284fc23d6f3df3d38a15d30d97a6 | 3a36d4533f13afda181a8f6a442b879cb7a8fa72 | refs/heads/master | 2020-03-22T03:25:43.989167 | 2018-07-02T19:21:09 | 2018-07-02T19:21:09 | 139,431,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,696 | py |
# coding: utf-8
# In[1]:
# Aminov Rezo
# Top-level setup for the audio-classification test script: imports,
# dataset metadata loading, and label <-> index mappings.
import numpy as np
import wave
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
from glob import glob
import random
import struct
from keras.models import *
from keras.layers import *
from keras.callbacks import *
import librosa
import soundfile as sf
from keras.models import load_model
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D
from keras.optimizers import SGD
from sklearn.model_selection import train_test_split
# In[2]:
# Dataset layout: <DATA_DIR>/meta/meta.txt is tab-separated; column index 4
# holds the class label for each training file.
DATA_DIR = 'data_v_7_stc'
meta_file = "{}/meta/meta.txt".format(DATA_DIR)
df = pd.read_csv(meta_file, sep='\t',header=None)
labels_name = df[4].unique()
# In[3]:
# label encoding: map each class name to its integer index
onehot_dict = {}
for ii, lab in enumerate(labels_name):
    # NOTE(review): y_ builds a one-hot vector but is never used afterwards;
    # only the label -> index mapping below is actually kept.
    y_ = np.zeros(len(labels_name))
    y_[ii] = 1
    onehot_dict.update({lab:ii})
# reverse mapping: integer index -> class name (used to decode predictions)
hot_to_one = {}
for k,v in onehot_dict.items():
    hot_to_one.update({v:k})
# Feature extractor: Mel-frequency cepstral coefficients (MFCC),
# see https://habr.com/post/140828/
def extract_feature(file_name):
    """Extract audio features (MFCC + mel spectrogram) from one audio file.

    The file is read with soundfile, collapsed to mono (first channel),
    and each feature matrix is averaged over its time frames.

    :param file_name: path to an audio file readable by soundfile
    :return: tuple ``(mfccs, mel)`` of 1-D numpy arrays
             (128 MFCC means, 128 mel-band means)
    """
    X, sample_rate = sf.read(file_name, dtype='float32')
    if X.ndim > 1:
        # Multi-channel input: keep only the first channel.
        X = X[:, 0]
    X = X.T
    # MFCC, averaged across time frames.
    mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=128).T, axis=0)
    # Mel spectrogram, averaged across time frames.
    mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T, axis=0)
    # NOTE: the original version always computed np.abs(librosa.stft(X)) even
    # though its only consumers (chroma, spectral contrast, tonnetz) are
    # disabled; the dead STFT computation is removed here. Recompute
    # stft = np.abs(librosa.stft(X)) if those features are re-enabled:
    # chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
    # contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0)
    # tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T,axis=0)
    return mfccs, mel
# In[5]:
# All test clips to classify.
files_test = glob(DATA_DIR+'/test/*.wav')
# In[6]:
# Load the trained model architecture and its weights.
model = load_model('weights/model.hdf5')
model.load_weights('weights/model_weights.hdf5')
# In[7]:
CNT=0 # number of all non-'unknown' files, subset 'A'
GOOD=0 # correctly classified files within subset 'A'
BAD=0 # misclassified files within subset 'A'
filew = open("result.txt","a")
features_test = np.empty((0,256))
for file in files_test:
    try:
        mfccs,mel = extract_feature(file)
    except Exception as e:
        # Skip unreadable/corrupt files rather than aborting the whole run.
        print("[Error] extract feature error. %s" % (e))
        continue
    # 128 MFCC means + 128 mel-band means -> 256-dim feature vector.
    ext_features_test = np.hstack([mfccs,mel])
    # features_test = np.vstack([features_test,ext_features_test])
    pred = model.predict(np.expand_dims([ext_features_test],axis=2))
    score = pred.max()
    class_ = hot_to_one[np.argmax(pred)]
    # NOTE(review): assumes paths shaped like 'data_v_7_stc/test/<name>.wav'
    # with '/' separators — verify on Windows.
    filename = file.split('/')[2]
    filew.write(filename+'\t'+str(score)+'\t'+class_+'\n')
    print(filename+' '+str(score)+' '+class_)
    # If the file is not named 'unknown', count matches between the predicted
    # label and the file name. This is an approximate score, since the true
    # class of files named 'unknown' is not known.
    if 'unknown' not in filename:
        CNT+=1
        if class_ in filename:
            GOOD+=1
        else:
            BAD+=1
filew.close()
# In[8]:
# Notebook-cell expressions: show the raw counts and the accuracy estimate.
CNT, GOOD, BAD
# In[9]:
GOOD/CNT
# In[ ]:
| [
"aminov@skytracking.ru"
] | aminov@skytracking.ru |
3e50eb432278799573f1eb0faf52a1893f21fc48 | 3fe6529e3733a0d703e3ce12790e90195037ada0 | /interview/interview/urls.py | 5859e55f97526d0abfd0db7dc90aa30c0124b0e5 | [] | no_license | vkit/zypol | cc536b66b45333211f7be3ee131a4325665fd2d6 | db7306c32f473b6852b3561f97ca9c8c50be2709 | refs/heads/master | 2021-07-09T08:11:49.920719 | 2017-10-06T11:40:37 | 2017-10-06T11:40:37 | 105,997,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | """interview URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Built-in Django admin site.
    url(r'^admin/', admin.site.urls),
    # Delegate all /student/... URLs to the student app's URLconf.
    url(r'^student/',include('student.urls')),
]
"noreply@github.com"
] | noreply@github.com |
b48a2e29d81c5d7ddbf5cc76cd714fe6c1483872 | 9e27f91194541eb36da07420efa53c5c417e8999 | /twilio/twiml/messaging_response.py | abb58ff2c6d33ad1d66998d8f9520dd3786f329a | [] | no_license | iosmichael/flask-admin-dashboard | 0eeab96add99430828306b691e012ac9beb957ea | 396d687fd9144d3b0ac04d8047ecf726f7c18fbd | refs/heads/master | 2020-03-24T05:55:42.200377 | 2018-09-17T20:33:42 | 2018-09-17T20:33:42 | 142,508,888 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,875 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
import json
from admin.twilio.twiml import (
TwiML,
format_language,
)
class MessagingResponse(TwiML):
    """ <Response> TwiML for Messages — the root element of a messaging
    TwiML document; child verbs are attached via message()/redirect(). """

    def __init__(self, **kwargs):
        super(MessagingResponse, self).__init__(**kwargs)
        self.name = 'Response'

    def message(self, body=None, to=None, from_=None, action=None, method=None,
                status_callback=None, **kwargs):
        """
        Create a <Message> element, nest it under this <Response>,
        and return it so nouns (body/media) can be appended.

        :param body: Message Body
        :param to: Phone Number to send Message to
        :param from: Phone Number to send Message from
        :param action: Action URL
        :param method: Action URL Method
        :param status_callback: Status callback URL. Deprecated in favor of action.
        :param kwargs: additional attributes

        :returns: <Message> element
        """
        return self.nest(Message(
            body=body,
            to=to,
            from_=from_,
            action=action,
            method=method,
            status_callback=status_callback,
            **kwargs
        ))

    def redirect(self, url, method=None, **kwargs):
        """
        Create a <Redirect> element and nest it under this <Response>.

        :param url: Redirect URL
        :param method: Redirect URL method
        :param kwargs: additional attributes

        :returns: <Redirect> element
        """
        return self.nest(Redirect(url, method=method, **kwargs))
class Redirect(TwiML):
    """ <Redirect> TwiML Verb — transfers control of the TwiML document
    to the given URL. """

    def __init__(self, url, **kwargs):
        super(Redirect, self).__init__(**kwargs)
        self.name = 'Redirect'
        # The URL is the element's text content.
        self.value = url
class Message(TwiML):
    """The <Message> TwiML verb: an outbound SMS/MMS reply."""

    def __init__(self, body=None, **kwargs):
        super(Message, self).__init__(**kwargs)
        self.name = 'Message'
        # A truthy body becomes the element's text; otherwise the text is
        # supplied later through nested <Body> nouns.
        if body:
            self.value = body

    def body(self, message, **kwargs):
        """Append a <Body> noun carrying *message* and return it.

        :param message: Message Body
        :param kwargs: additional attributes
        :returns: <Body> element
        """
        noun = Body(message, **kwargs)
        return self.nest(noun)

    def media(self, url, **kwargs):
        """Append a <Media> noun pointing at *url* and return it.

        :param url: Media URL
        :param kwargs: additional attributes
        :returns: <Media> element
        """
        noun = Media(url, **kwargs)
        return self.nest(noun)
class Media(TwiML):
    """ <Media> TwiML Noun — a media attachment URL inside <Message>. """

    def __init__(self, url, **kwargs):
        super(Media, self).__init__(**kwargs)
        self.name = 'Media'
        # The URL is the element's text content.
        self.value = url
class Body(TwiML):
    """ <Body> TwiML Noun — the text content of a <Message>. """

    def __init__(self, message, **kwargs):
        super(Body, self).__init__(**kwargs)
        self.name = 'Body'
        # The message text is the element's content.
        self.value = message
| [
"michaelliu@iresearch.com.cn"
] | michaelliu@iresearch.com.cn |
be3f6184a13b0ec8ef97011496b815a287a03c3b | 6a76709e8a7fbcdfc3372120a4086718a07a270a | /dataset/dataset.py | 8517ccd17567d2c124b6211acfeb950ee765e64f | [] | no_license | jungdaechul-coderepo/EfficientDet | 17e831f5146934e276e243529709b94bbfbfa3f1 | ff132eb985676edc6df8e6c5a629f4974df47010 | refs/heads/main | 2023-05-12T08:28:57.840999 | 2020-11-26T08:54:11 | 2020-11-26T08:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | import torch
import torch.nn as nn
import os
import glob
from PIL import Image
import time
import cv2
class Retina_dataset(object):
    """Image-folder dataset that loads images with PIL and applies a
    torchvision-style callable transform.

    ``__getitem__`` returns ``(image, transform_seconds)`` where the second
    element is the wall-clock time spent inside the transform.
    """

    def __init__(self, input_dir, transform, input_format='RGB', output_format='RGB'):
        """Collect every jpg/jpeg/png file under *input_dir*.

        :param input_dir: folder to scan; ``None`` yields an empty dataset
        :param transform: callable applied to each PIL image (or falsy to skip)
        :param input_format: declared input color format (stored, not enforced)
        :param output_format: declared output color format (stored, not enforced)
        """
        self.input_imgs = []
        self.input_format = input_format
        self.output_format = output_format
        self.transform = transform
        if input_dir is not None:
            for ext in ['jpeg', 'jpg', 'png', 'JPEG', 'PNG', 'JPG']:
                self.input_imgs += sorted(glob.glob('{}/*.{}'.format(input_dir, ext)))
        # self.transform_rot=[0, 90, 180, 270]

    def __getitem__(self, idx):
        img = Image.open(self.input_imgs[idx])
        start_time = time.time()
        if self.transform:
            img = self.transform(img)
        total_time = time.time() - start_time
        return img, total_time

    def __len__(self):
        # Bug fix: the original returned print(len(...)), i.e. None, which
        # breaks len(dataset) and any DataLoader that relies on it.
        return len(self.input_imgs)
class Retina_dataset_albumentation(object):
    """Image-folder dataset for albumentations-style transforms.

    Images are read with OpenCV (BGR) and converted to RGB before the
    transform is applied; ``__getitem__`` returns
    ``(image, transform_seconds)``.
    """

    def __init__(self, input_dir, transform, input_format='RGB', output_format='RGB'):
        """Collect every jpg/jpeg/png file under *input_dir*.

        :param input_dir: folder to scan; ``None`` yields an empty dataset
        :param transform: albumentations transform (called as
            ``transform(image=img)``) or falsy to skip
        :param input_format: declared input color format (stored, not enforced)
        :param output_format: declared output color format (stored, not enforced)
        """
        self.input_imgs = []
        self.input_format = input_format
        self.output_format = output_format
        self.transform = transform
        if input_dir is not None:
            for ext in ['jpeg', 'jpg', 'png', 'JPEG', 'PNG', 'JPG']:
                self.input_imgs += sorted(glob.glob('{}/*.{}'.format(input_dir, ext)))
        # self.transform_rot=[0, 90, 180, 270]

    def __getitem__(self, idx):
        img = cv2.imread(self.input_imgs[idx])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        start_time = time.time()
        if self.transform:
            # albumentations transforms take and return dicts keyed by 'image'.
            augmented = self.transform(image=img)
            img = augmented['image']
        total_time = time.time() - start_time
        return img, total_time

    def __len__(self):
        # Bug fix: the original returned print(len(...)), i.e. None, which
        # breaks len(dataset) and any DataLoader that relies on it.
        return len(self.input_imgs)
"bigpicture.jh@gmail.com"
] | bigpicture.jh@gmail.com |
a0f39619acc90f05c7ecc8ea66be9b18ee6058d7 | a63edabd559753582d464460afe0d8f2a3377b37 | /SpiderNode/preWork.py | 3ab2c0e734bfd7d77290f157c6363853e21188ad | [] | no_license | liangxs0/lagouCrawler | 6324d688a10fde86ecf813e696c164e4dbe72cf5 | d17d51e37f7078923e00baef40fc2440eeebf059 | refs/heads/master | 2020-03-13T17:32:31.645516 | 2018-04-15T14:09:15 | 2018-04-15T14:09:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | #-*-coding:utf-8-*-
import requests
from lxml import etree
import json
import urllib.parse
# Fetch the job categories listed on the home page.
def getIndexPageJob():
    """Scrape lagou.com's home-page sidebar and collect job-category names.

    Returns a (possibly empty) list of category strings; the list stays
    empty when the request does not come back with HTTP 200.
    """
    url = "https://www.lagou.com/"
    header = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36"}
    proxies = {"http": "http://127.0.0.1:50157"}
    response = requests.get(url, headers=header, proxies=proxies)
    jobs = []
    if response.status_code == 200:
        page = etree.HTML(response.text)
        # Sidebar container holding the <dl> category groups.
        sidebar = page.xpath('//*[@id="sidebar"]/div/div[1]/div[2]')[0]
        for group in sidebar.xpath('./dl'):
            # Each <dd><a> text is one category name.
            jobs.extend(group.xpath('./dd/a/text()'))
        print(jobs)
    return jobs
# Fetch all hot cities from the job-list page.
def getAllCity():
    """Scrape lagou.com's job-list page and return all city names, URL-quoted.

    Hot cities come first, then the other cities. Returns an empty list when
    the page cannot be fetched (bug fix: the original referenced ``cities``
    even when the request failed, raising NameError).
    """
    url = "https://www.lagou.com/"
    url_zhaopin = "https://www.lagou.com/jobs/list_C%2B%2B?px=default&city=%E5%85%A8%E5%9B%BD#filterBox"
    header = {'User-Agent': 'Mozilla/5.0(Macintosh;U;IntelMacOSX10_6_8;en-us)AppleWebKit/534.50(KHTML,likeGecko)Version/5.1Safari/534.50',
              'Host': 'www.lagou.com',
              'Referer': 'https://www.lagou.com/',
              }
    session = requests.Session()
    # Hit the home page first so the session picks up lagou's cookies.
    session.get(url, headers=header)
    proxies = {"http": "http://127.0.0.1:50157"}
    html = session.get(url_zhaopin, headers=header, proxies=proxies)
    cities = []
    if html.status_code == 200:
        html_parser = etree.HTML(html.text)
        hot = html_parser.xpath('//li[@class="hot"]/a[@class="more-city-name"]/text()')
        other = html_parser.xpath('//li[@class="other"]/a[@class="more-city-name"]/text()')
        cities = hot + other
        print(cities)
        # Percent-encode each city name so it can be embedded in URLs.
        cities = [urllib.parse.quote(city) for city in cities]
    return cities
"""if __name__=='__main__':
cities=getAllCity()
print(cities)"""
| [
"lrtxpra@163.com"
] | lrtxpra@163.com |
4450a20d24354e763675dde15bcfdb903c38b2d4 | f2982bb689fa9eecbf1503d0db5688923802d507 | /KRTimedDraw.py | efdfe94a8b8fce5b537587dcff98902d9ccd6bc1 | [] | no_license | KareshiKraise/timeDrawingHelper | 448d0340e1103f0a52f075137001c89c029b1247 | 809f2fa136300b04d8c27250d7f2165dfd73dd7f | refs/heads/main | 2023-06-04T06:50:17.993974 | 2021-06-22T21:34:38 | 2021-06-22T21:34:38 | 358,704,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,626 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\timedHelper.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
#
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import os
import random
import cv2
import qdarkstyle
# supported file extensions
# [".jpg", ".jpeg", ".png", ".gif"]
os.environ['QT_API'] = 'pyqt5'
max_res = (640,640)
class Ui_MainWindow(object):
    """Timed-drawing reference window.

    Shows a random image from a user-chosen folder and cycles to a new one
    every ``timer_`` seconds while running. The folder is persisted in a
    ``folders.config`` file next to the executable.
    """

    def setupUi(self, MainWindow):
        """Build all widgets, actions and the 1-second countdown clock."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(480, 640)
        # MainWindow.setStyleSheet("background-color: rgb(150, 150, 150);")
        MainWindow.setLocale(
            QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedKingdom)
        )
        MainWindow.setAnimated(True)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # My vars----------------------------------------------
        self.wndHandle = MainWindow          # kept so other methods can resize the window
        self.timer_ = 60                     # seconds each image stays on screen
        self.counter_ = self.timer_          # seconds remaining for the current image
        self.dir_ = "C:/"                    # image folder (overwritten by config/browse)
        self.imgList_ = []                   # absolute paths of the images to cycle
        self.lbl = QtWidgets.QLabel(self.centralwidget)
        self.lbl.setGeometry(QtCore.QRect(0, 0, 480, 640))
        # -----------------------------------------------------
        self.res_ = (480, 640)
        self.widRect_ = QtCore.QRect()
        self.spinBoxHidden = True
        # Spin box to edit the per-image duration; overlaps the countdown label.
        self.spinBox = QtWidgets.QSpinBox(self.centralwidget)
        self.spinBox.setGeometry(QtCore.QRect(410, 0, 71, 31))
        font = QtGui.QFont()
        font.setFamily("Calibri")
        font.setPointSize(24)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.spinBox.setFont(font)
        self.spinBox.setStyleSheet('font: 24pt "Calibri";')
        self.spinBox.setObjectName("spinBox")
        self.spinBox.setValue(self.counter_)
        self.spinBox.setMinimum(1)
        self.spinBox.setMaximum(600)
        self.spinBox.setStyleSheet("background-color: rgba(255, 255, 255, 0);")
        self.spinBox.show()
        # Label that shows the remaining seconds while the slideshow runs.
        self.showTimeLabel = QtWidgets.QLabel(self.centralwidget)
        self.showTimeLabel.setGeometry(QtCore.QRect(410, 0, 71, 31))
        self.showTimeLabel.setFont(font)
        self.showTimeLabel.setStyleSheet('font: 24pt "Calibri";')
        self.showTimeLabel.setObjectName("showTimeLbl")
        self.showTimeLabel.setText(str(self.counter_))
        self.showTimeLabel.hide()
        self.showTimeLabel.setStyleSheet("color: rgba(255, 255, 255, 255);")
        # -----------------------------------------------------
        MainWindow.setCentralWidget(self.centralwidget)
        self.toolBar = QtWidgets.QToolBar(MainWindow)
        font = QtGui.QFont()
        font.setFamily("Calibri")
        font.setPointSize(12)
        self.toolBar.setFont(font)
        self.toolBar.setObjectName("toolBar")
        self.toolBar.setMovable(False)
        self.toolBar.setFloatable(False)
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        self.actionPath = QtWidgets.QAction(MainWindow)
        self.actionPath.setObjectName("actionPath")
        self.actionStart_Stop = QtWidgets.QAction(MainWindow)
        self.actionStart_Stop.setObjectName("actionStart_Stop")
        self.actionSetTime = QtWidgets.QAction(MainWindow)
        # NOTE(review): this line re-names actionPath instead of actionSetTime —
        # looks like it should be self.actionSetTime.setObjectName("setTimeEnable").
        self.actionPath.setObjectName("setTimeEnable")
        self.toolBar.addAction(self.actionPath)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionStart_Stop)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionSetTime)
        self.toolBar.addSeparator()
        # additional logic--------------------------------------
        self.is_running_ = False
        self.clock = QTimer()
        self.clock.timeout.connect(self.onTimeout)
        # NOTE(review): start() is called with timer_ (60) as the interval in ms,
        # then immediately overridden to 1000 ms by setInterval below.
        self.clock.start(self.timer_)
        self.clock.setInterval((1000))
        self.actionPath.triggered.connect(self.browseFolder)
        self.actionStart_Stop.triggered.connect(self.start)
        self.actionSetTime.triggered.connect(self.enableSpinBox)
        self.spinBox.valueChanged.connect(self.setTime)
        self.checkConfig()
        # ------------------------------------------------------
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def computeWidRect(self):
        """Return ((x, w), (y, h)) placing the 71x31 timer widgets at the
        window's top-right corner for the current window size."""
        width = self.wndHandle.geometry().width()
        height = self.wndHandle.geometry().height()
        xpos = (width - 71, width)
        ypos = (0, 31)
        return xpos, ypos

    def checkConfig(self):
        """Load the image folder from folders.config if present.

        Returns True when the config file exists (and imgList_ was rebuilt),
        False otherwise.
        """
        exists = False
        if os.path.isfile("folders.config"):
            print("Found folders.config, will load images from there")
            with open("folders.config") as f:
                content = f.readlines()
            # First line of the config is the folder path; fall back to C:/.
            if len(content) > 0:
                self.dir_ = content[0]
            else:
                self.dir_ = "C:/"
            exists = True
            self.imgList_ = [
                self.dir_ + "/" + f
                for f in os.listdir(self.dir_)
                if f.endswith(".jpg") or f.endswith(".jpeg") or f.endswith(".png")
            ]
        else:
            print("File folders.config doesnt exist. Will create.")
        return exists

    def enableSpinBox(self):
        """Toggle between the editable duration spin box and the countdown
        label; pauses the slideshow while editing."""
        self.is_running_ = False
        if not self.spinBoxHidden:
            self.spinBox.show()
            self.showTimeLabel.hide()
        else:
            self.showTimeLabel.setText(str(self.timer_))
            self.showTimeLabel.show()
            self.spinBox.hide()
        self.spinBoxHidden = not self.spinBoxHidden

    def browseFolder(self):
        """Ask the user for an image folder, persist it to folders.config,
        and rebuild the image list. Stops the slideshow."""
        self.dir_ = QFileDialog.getExistingDirectory(
            None, "Select a folder:", "C:\\", QFileDialog.ShowDirsOnly
        )
        f = open("folders.config", "w")
        f.write(self.dir_)
        f.close()
        # Dialog was cancelled -> empty string; keep a sane default.
        if self.dir_ == "" or self.dir_ == None:
            self.dir_ = "C:/"
        else:
            self.imgList_ = [
                self.dir_ + "/" + f
                for f in os.listdir(self.dir_)
                if f.endswith(".jpg") or f.endswith(".jpeg") or f.endswith(".png")
            ]
        self.is_running_ = False
        return

    def start(self):
        """Toggle the slideshow; when starting, reset the countdown and show
        the first random image immediately."""
        self.counter_ = self.timer_
        self.is_running_ = not self.is_running_
        if self.is_running_ == True:
            if len(self.imgList_) == 0:
                print("couldnt find image list")
                self.is_running_ = False
            else:
                self.showTimeLabel.setText(str(self.timer_))
                print("drawing at start")
                randomVal = random.randint(0, len(self.imgList_) - 1)
                self.draw(randomVal)
        return

    def resize(self, w, h):
        """Resize window, image label and the top-right timer widgets to w x h."""
        self.wndHandle.resize(w, h)
        self.lbl.setGeometry(QtCore.QRect(0, 0, w, h))
        xpos, ypos = self.computeWidRect()
        self.spinBox.setGeometry(QtCore.QRect(xpos[0], ypos[0], xpos[1], ypos[1]))
        self.showTimeLabel.setGeometry(QtCore.QRect(xpos[0], ypos[0], xpos[1], ypos[1]))

    def setTime(self):
        """Slot for spinBox.valueChanged: adopt the new per-image duration
        and pause the slideshow."""
        if self.is_running_:
            self.is_running_ = False
        self.timer_ = self.spinBox.value()
        self.counter_ = self.timer_
        self.showTimeLabel.setText(str(self.counter_))

    def onTimeout(self):
        """1-second tick: count down and swap to a new random image when the
        counter reaches zero."""
        if len(self.imgList_) > 0:
            if self.is_running_ == True:
                if self.counter_ <= 0:
                    randomVal = random.randint(0, len(self.imgList_) - 1)
                    self.draw(randomVal)
                    self.counter_ = self.timer_
                self.showTimeLabel.setText(str(self.counter_))
                self.counter_ = self.counter_ - 1
        return

    def draw(self, randomVal):
        """Load imgList_[randomVal] with OpenCV, downscale it to fit max_res
        (keeping aspect), and show it in the label."""
        cvImg = cv2.imread(self.imgList_[randomVal])
        height, width, _ = cvImg.shape
        cvImg = cv2.cvtColor(cvImg, cv2.COLOR_RGBA2BGR)
        bytesPerLine = 3 * width
        aspect = float(height)/float(width)
        resh = 0
        resw = 0
        resize = False
        if height > max_res[1]:
            resh = max_res[1]
            resw = int(resh/aspect)
            # NOTE(review): typo — sets 'resized' instead of 'resize', so an
            # image that is only too *tall* is never actually downscaled.
            resized = True
        if width > max_res[0]:
            resw = max_res[0]
            resh = int(resw*aspect)
            resize= True
        if resize:
            cvImg = cv2.resize(cvImg, (resw,resh), interpolation = cv2.INTER_CUBIC)
            height = resh
            width = resw
            bytesPerLine = 3 * width
            resize = False
        qImg = QImage(
            cvImg.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888
        )
        self.resize(width, height)
        self.lbl.setPixmap(QtGui.QPixmap.fromImage(qImg))
        return

    def retranslateUi(self, MainWindow):
        """Set all user-visible texts, tooltips and shortcuts (pyuic pattern)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
        self.actionPath.setText(_translate("MainWindow", "Path"))
        self.actionPath.setToolTip(
            _translate(
                "MainWindow", "Path to your image folder, shortcut: Ctrl+Shift+F"
            )
        )
        self.actionPath.setShortcut(_translate("MainWindow", "Ctrl+Shift+F"))
        self.actionStart_Stop.setText(_translate("MainWindow", "Start/Stop"))
        self.actionStart_Stop.setToolTip(
            _translate("MainWindow", "Start the Timed Drawing, shortcut: Ctrl+Space")
        )
        self.actionStart_Stop.setShortcut(_translate("MainWindow", "Ctrl+Space"))
        self.actionSetTime.setText(_translate("MainWindow", "Set Time"))
        self.actionSetTime.setToolTip(
            _translate("MainWindow", "Enable Set Time spinbox")
        )
        self.actionSetTime.setShortcut(_translate("MainWindow", "Ctrl+T"))
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    # Apply the dark theme to the whole application.
    app.setStyleSheet(qdarkstyle.load_stylesheet())
    MainWindow = QtWidgets.QMainWindow()
    # Minimal always-on-top window: title bar with a close button only, so
    # the reference image floats above the user's drawing application.
    MainWindow.setWindowFlags(
        QtCore.Qt.Window
        | QtCore.Qt.CustomizeWindowHint
        | QtCore.Qt.WindowTitleHint
        | QtCore.Qt.WindowCloseButtonHint
        | QtCore.Qt.WindowStaysOnTopHint
    )
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| [
"vitormoraesaranha@gmail.com"
] | vitormoraesaranha@gmail.com |
498848a1ce67711fa364584705c0f90477f76fb5 | 90e049109be38889523b265d2683a4f29a57da30 | /flink-python/pyflink/table/tests/test_table_environment_api.py | 64080f1e53b36dd5df4f0c09993ae8772e33988c | [
"BSD-3-Clause",
"MIT",
"OFL-1.1",
"ISC",
"Apache-2.0",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC-BY-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-jdom",
"GCC-exception-3.1",
"EPL-1.0",
"CC-BY-2.5",
"MPL-2.0-no-copyleft-exception",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"CDDL-1.0",
"AGPL-3.0-only",
"CC0-1.0",
"BSD-2-Clause-Views",
"MPL-2.0",
"CC-PDDC",
"MIT-0",
"CDDL-1.1"
] | permissive | Jasonpengrui/flink | bc7cf1baced87a72a75e2bd0e326a137ed0ab529 | 81a5212cb99b860de9c7384fa14caaa3f5af1c1f | refs/heads/master | 2020-06-10T16:44:23.895203 | 2019-12-09T06:35:08 | 2019-12-09T06:35:08 | 193,673,904 | 0 | 0 | Apache-2.0 | 2019-06-25T09:09:15 | 2019-06-25T09:09:14 | null | UTF-8 | Python | false | false | 10,360 | py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
################################################################################
import datetime
import os
from py4j.compat import unicode
from pyflink.dataset import ExecutionEnvironment
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table.table_environment import BatchTableEnvironment, StreamTableEnvironment
from pyflink.table.table_config import TableConfig
from pyflink.table.types import DataTypes, RowType
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, PyFlinkBatchTableTestCase
class StreamTableEnvironmentTests(PyFlinkStreamTableTestCase):
    """Tests for the streaming TableEnvironment wrapper: source/sink
    registration, catalog listing, explain, SQL query/update, query config
    and environment creation."""

    def test_register_table_source_scan(self):
        t_env = self.t_env
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        source_path = os.path.join(self.tempdir + '/streaming.csv')
        csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
        t_env.register_table_source("Source", csv_source)
        result = t_env.scan("Source")
        # scan() must resolve the registered name through the default catalog.
        self.assertEqual(
            'CatalogTable: (path: [default_catalog, default_database, Source], fields: [a, b, c])',
            result._j_table.getQueryOperation().asSummaryString())

    def test_register_table_sink(self):
        t_env = self.t_env
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "Sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        t_env.from_elements([(1, "Hi", "Hello")], ["a", "b", "c"]).insert_into("Sinks")
        t_env.exec_env().execute()
        actual = source_sink_utils.results()
        expected = ['1,Hi,Hello']
        self.assert_equals(actual, expected)

    def test_from_table_source(self):
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        source_path = os.path.join(self.tempdir + '/streaming.csv')
        csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
        result = self.t_env.from_table_source(csv_source)
        # An unregistered source is summarized as a plain TableSource.
        self.assertEqual(
            'TableSource: (fields: [a, b, c])',
            result._j_table.getQueryOperation().asSummaryString())

    def test_list_tables(self):
        source_path = os.path.join(self.tempdir + '/streaming.csv')
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
        data = []
        csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
        t_env = self.t_env
        t_env.register_table_source("Orders", csv_source)
        t_env.register_table_sink(
            "Sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        t_env.register_table_sink(
            "Results",
            source_sink_utils.TestAppendSink(field_names, field_types))
        actual = t_env.list_tables()
        # list_tables() reports both sources and sinks, sorted by name.
        expected = ['Orders', 'Results', 'Sinks']
        self.assert_equals(actual, expected)

    def test_explain(self):
        schema = RowType()\
            .add('a', DataTypes.INT())\
            .add('b', DataTypes.STRING())\
            .add('c', DataTypes.STRING())
        t_env = self.t_env
        t = t_env.from_elements([], schema)
        result = t.select("1 + a, b, c")
        actual = t_env.explain(result)
        # explain() returns the plan as text (str on py3, unicode on py2).
        assert isinstance(actual, str) or isinstance(actual, unicode)

    def test_sql_query(self):
        t_env = self.t_env
        source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        result = t_env.sql_query("select a + 1, b, c from %s" % source)
        result.insert_into("sinks")
        t_env.exec_env().execute()
        actual = source_sink_utils.results()
        expected = ['2,Hi,Hello', '3,Hello,Hello']
        self.assert_equals(actual, expected)

    def test_sql_update(self):
        t_env = self.t_env
        source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        t_env.sql_update("insert into sinks select * from %s" % source)
        t_env.exec_env().execute("test_sql_job")
        actual = source_sink_utils.results()
        expected = ['1,Hi,Hello', '2,Hello,Hello']
        self.assert_equals(actual, expected)

    def test_sql_update_with_query_config(self):
        t_env = self.t_env
        source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        query_config = t_env.query_config()
        query_config.with_idle_state_retention_time(
            datetime.timedelta(days=1), datetime.timedelta(days=2))
        t_env.sql_update("insert into sinks select * from %s" % source, query_config)
        t_env.exec_env().execute("test_sql_job")
        actual = source_sink_utils.results()
        expected = ['1,Hi,Hello', '2,Hello,Hello']
        self.assert_equals(actual, expected)

    def test_query_config(self):
        query_config = self.t_env.query_config()
        query_config.with_idle_state_retention_time(
            datetime.timedelta(days=1), datetime.timedelta(days=2))
        # Retention times are reported in milliseconds.
        assert query_config.get_max_idle_state_retention_time() == 2 * 24 * 3600 * 1000
        assert query_config.get_min_idle_state_retention_time() == 24 * 3600 * 1000

    def test_create_table_environment(self):
        table_config = TableConfig()
        table_config.set_max_generated_code_length(32000)
        table_config.set_null_check(False)
        table_config.set_timezone("Asia/Shanghai")
        table_config.set_built_in_catalog_name("test_catalog")
        table_config.set_built_in_database_name("test_database")
        env = StreamExecutionEnvironment.get_execution_environment()
        t_env = StreamTableEnvironment.create(env, table_config)
        # The config read back from the created environment must reflect
        # everything that was set before creation.
        readed_table_config = t_env.get_config()
        self.assertFalse(readed_table_config.get_null_check())
        self.assertEqual(readed_table_config.get_max_generated_code_length(), 32000)
        self.assertEqual(readed_table_config.get_timezone(), "Asia/Shanghai")
        self.assertEqual(table_config.get_built_in_catalog_name(), "test_catalog")
        self.assertEqual(table_config.get_built_in_database_name(), "test_database")
class BatchTableEnvironmentTests(PyFlinkBatchTableTestCase):
    """Tests for the batch TableEnvironment wrapper: explain, TableConfig
    round-trips and environment creation."""

    def test_explain(self):
        source_path = os.path.join(self.tempdir + '/streaming.csv')
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
        data = []
        csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
        t_env = self.t_env
        t_env.register_table_source("Source", csv_source)
        source = t_env.scan("Source")
        result = source.alias("a, b, c").select("1 + a, b, c")
        actual = t_env.explain(result)
        # explain() returns the plan as text (str on py3, unicode on py2).
        self.assertIsInstance(actual, (str, unicode))

    def test_table_config(self):
        # Setters and getters on a standalone TableConfig must round-trip.
        table_config = TableConfig()
        table_config.set_timezone("Asia/Shanghai")
        table_config.set_max_generated_code_length(64000)
        table_config.set_null_check(True)
        table_config.set_built_in_catalog_name("test_catalog")
        table_config.set_built_in_database_name("test_database")
        self.assertTrue(table_config.get_null_check())
        self.assertEqual(table_config.get_max_generated_code_length(), 64000)
        self.assertEqual(table_config.get_timezone(), "Asia/Shanghai")
        self.assertEqual(table_config.get_built_in_catalog_name(), "test_catalog")
        self.assertEqual(table_config.get_built_in_database_name(), "test_database")

    def test_create_table_environment(self):
        table_config = TableConfig()
        table_config.set_max_generated_code_length(32000)
        table_config.set_null_check(False)
        table_config.set_timezone("Asia/Shanghai")
        table_config.set_built_in_catalog_name("test_catalog")
        table_config.set_built_in_database_name("test_database")
        env = ExecutionEnvironment.get_execution_environment()
        t_env = BatchTableEnvironment.create(env, table_config)
        # The config read back from the created environment must reflect
        # everything that was set before creation.
        readed_table_config = t_env.get_config()
        self.assertFalse(readed_table_config.get_null_check())
        self.assertEqual(readed_table_config.get_max_generated_code_length(), 32000)
        self.assertEqual(readed_table_config.get_timezone(), "Asia/Shanghai")
        self.assertEqual(readed_table_config.get_built_in_catalog_name(), "test_catalog")
        self.assertEqual(readed_table_config.get_built_in_database_name(), "test_database")
| [
"sunjincheng121@gmail.com"
] | sunjincheng121@gmail.com |
cbd142b626698fe1debd6ecef0822cc0d7b13f7f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_150/ch50_2020_04_13_03_25_44_929209.py | a262c1522f55ac719f56e8c2e06b6e69fde73ed5 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | def junta_nome_sobrenome(nome, sobrenome):
nome_e_sobrenome = []
i = 0
while i < len(nome) and i < len(sobrenome):
nome_e_sobrenome.append(nome[i] + ' ' +sobrenome[i])
i += 1
return nome_e_sobrenome | [
"you@example.com"
] | you@example.com |
1bbf137aa6b4ac31b07c38f1907aa7110050b58e | 045385ba95f62658d15688e7dade4e7e618ebb08 | /report.py | c7c92504a04ca5fc40db5f9ceb120fc9823c7131 | [] | no_license | tarunbodapati/player.py | eb04739737223ed0e185f6cf62389a444e61875f | 97587573587dfec1a3b3f75b41bcb4cb4f3f741d | refs/heads/master | 2020-06-21T05:33:25.378068 | 2020-02-25T10:00:16 | 2020-02-25T10:00:16 | 197,356,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,250 | py | A NOVELL APPROACH FOR DETECTION OF LUNG TUMOUR
Abstract:
Delineation of a lung tumour from adjacent tissue in computed tomography (CT) images is difficult because of the similarity between the tumour and its surroundings, and because the region is affected by respiration. Accurate segmentation and classification are therefore required to determine a patient's present condition and to apply radiation therapy precisely, without affecting the healthy tissue beside the tumour. We use neural networks for classification in order to obtain good accuracy, to improve the classification parameters, and to support proper diagnosis of the tumour, so that better classification can guide detection and treatment. The proposed pipeline applies the discrete wavelet transform (DWT) to enhance the image, a median filter to remove noise, the grey-level co-occurrence matrix (GLCM) for feature extraction, and a neural-network classifier to decide whether the tumour is in the initial stage (benign) or the final stage (malignant).
Introduction:
There are many un necessary cells that are grown in our body the growing of these cells in an un even way in our body can cause many kind of problems. If these kind of cells grows unevenly will make risk to the life of an living being. In bad conditions the growth will be uncertain . these lung tumors are mainly formed by cigarettes and also by using different kind of eatables so that these are the main causes to grow these cells in an uncertain and also in abnormal manner. these tumors can also mainly contain of different stages . they make the living beings to get in to a situation that weakens the functions of lungs. Of mixing the functions of oxygen from blood.it is the main causes of cancer in many human beings. The percentage of cancer that most effected by human beings is lung cancer because of unnecessary activities like smoking. the percentage of the people died because of the lung cancer is very high. Approximately 154,050 people died due to lung cancer in 2018. And also the rate has been decreased in 2019 due to the awareness about cancer in some of rural areas 159,292 is in the year 2005. So many people uses the different techniques in order to avoid lung cancer. These cancer occurs due to the uneven growth of tumor in lungs. It is very required for any people to detect lung cancer in the initial stages to save one’s lives. The main purpose of this is to classify it as benign or malignant. The main purpose of this technique is to use the segmentation and classifying the tumor that it is more accurate and to detect the tumor fast and to diagnose in the initial stage to cure it. There is no cure for final stage of cancer. to diagnose it we need to identify it in the initial stage itself. We are going to use Matlab so that to process the image and to enhance the image and to convert the image from gray to binary and also to use the process of discrete wavelet transform for it and also to use fuzzy means clustering for segmenting the image . 
we have used feature extraction technique called GLCM to extract the features of the images . and we are using neural networks for classification. Here in matlab software we are going to create a GUI and by placing different blocks in GUI. So that by using code we need to extract different processes on the GUI. To process this we need a CT scan image that is computed tomography images to perform operations on the image using coding format. So we are going to use these CT scan images and they are essential in image modelling .
LITERAURE SURVEY:
This section represents the survey for classification of the tumor from previous references. There are many planning and previous references that are given in order to develop a different set of parameters and different types of algorithms are used to perform different types of techniques are followed in order to get the required output in different ways
[1] Humera Shaziya et al. presented automatic lung segmentation on thoracic CT scans using a U-Net convolutional network; their work employed U-Net convolutional networks, deep learning, and automatic lung segmentation.
[2] ling zhang,etal used an algorithm such that self learning to detect and segment cysts in lung CT images without manual annotation it was also published in the year of 2018 using unsupervised segmentation,, segmentation networks.
[3] Lilik Anifah et al. used a technique for lung cancer detection in CT scan images using artificial neural network back-propagation based on gray-level co-occurrence matrix (GLCM) features; it was published in 2017.
[4] Tiany Zhao,et al uses a technique on lung segmentation in CT images using a fully convolutional neural networks Tiany Zhaol,etal it was published in the year 2018 . it was done with the help of convolutional neural network.
[5] sneha pothagam uses a technique named multilayer perception based lung tumor classification in the year 2018 algorithm used is k nearest neighbor algorithm
[6] k.gopi uses a technique named lung tumor area classification (recognition) using EKmean clustering and SVM and the technique used is SVM,EK thresholding.
[7] zigha zong uses a technique called 3D fully convolutional N/w’s for cosegmentation of tumors on PETCT images and algorithm used is 3DUnet and graph cut based cosegmentation.
[8] sheenan ratnam used a technique optimized lung cancer classification computed tomography the algorithm used is BAT algorithm and artificial neural networks.
Methodology:
We are going to use different set of images like normal,benign,malignant to check the image processing for image classification. We are going to use median filter for image preprocessing, and to remove any noise and we used glcm for feature extraction and also we are going to use DWT discrete wavelet transform is used for image enhancement. And we have used neural networks for classification.
| [
"noreply@github.com"
] | noreply@github.com |
9379fb1cf8f8a0ce4ebdd89c1b183018f97816d5 | 7b82cd6d0c8d7eb968ec4eb2dc7020316880e690 | /aswissues/migrations/0013_auto_20191119_2241.py | e3afc2b41389cc02b6ce0652b70c209ab9ffecb5 | [] | no_license | kennyangelfib/ASW-Issue-Tracker | abbeed5e5517f74beb66a06225df19de926fd250 | d9087ad422b6f97de8854bc936943001387d168e | refs/heads/master | 2022-04-11T21:27:46.933937 | 2020-01-14T13:43:37 | 2020-01-14T13:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | # Generated by Django 2.2.7 on 2019-11-19 22:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.7) for the ``aswissues`` app.

    Re-declares the user foreign keys — comment.owner, issue.assignee,
    issue.creator, vote.voter and watch.watcher — to reference
    ``settings.AUTH_USER_MODEL`` with ``on_delete=CASCADE``; the Issue
    FKs keep their ``Assignee``/``Creator`` reverse accessor names.
    """

    dependencies = [
        ('aswissues', '0012_merge_20191119_2215'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='issue',
            name='assignee',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Assignee', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='issue',
            name='creator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Creator', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='vote',
            name='voter',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='watch',
            name='watcher',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"andreugallofre@gmail.com"
] | andreugallofre@gmail.com |
aab147e4b5ac5e64ffb29c9993c9b62cc92184ba | 562d45351e4e23f5548a325b884bd57f93e5373d | /venv/bin/ckeygen | cf97c6fe71ee55d25b62eb485e30a320fc7c17a0 | [] | no_license | manjumugali/BridgeLabz_PythonPrograms | 401efc320804d74017b1e6cbe99b45fd75193243 | ff4093cc9b0256ede9f7ef3a0e042fe352263f82 | refs/heads/master | 2020-04-19T22:17:07.050403 | 2019-02-13T05:18:56 | 2019-02-13T05:18:56 | 168,465,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | #!/home/admin1/Desktop/chatApp/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==18.9.0','console_scripts','ckeygen'
# Pin the distribution that pkg_resources must resolve for this entry point.
__requires__ = 'Twisted==18.9.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a trailing "-script.py(w)" / ".exe" suffix so argv[0] shows
    # the plain command name in help/usage output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Resolve and invoke Twisted's 'ckeygen' console_scripts entry point;
        # its return value becomes the process exit status.
        load_entry_point('Twisted==18.9.0', 'console_scripts', 'ckeygen')()
    )
| [
"manjumugali111@gmail.com"
] | manjumugali111@gmail.com | |
2b9d7f843e20d0de53d6b81d06487d22edf5c41c | ea93c59912c4654a87c06eba3dd4b7eda989463b | /my_blog_project/my_blog_project/asgi.py | 85753b9ab25a95115448a630ebf196277b6e070a | [] | no_license | ruhulamin1998/django-blog-project | e88b3fd8ef457c7eb4ee3149fa0f1969f5489c73 | 1f11f9eb2015a0cfb98dfb720c0379eee107e51c | refs/heads/main | 2023-03-04T20:47:59.453834 | 2021-02-09T06:25:34 | 2021-02-09T06:25:34 | 337,301,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
ASGI config for my_blog_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings before the application object is built
# (setdefault lets an externally-set DJANGO_SETTINGS_MODULE win).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_blog_project.settings')

# ASGI callable picked up by servers such as Daphne or Uvicorn.
application = get_asgi_application()
| [
"ruhulamin.raj1998@gmail.com"
] | ruhulamin.raj1998@gmail.com |
708ef071fd1faafca78472a84eda933ffbb64c07 | 1702a4ffbc68266e4f232389f7a5f9cc44cb489b | /hello.py | 6575b82784245fb5e88b9822dc71c860e58085ec | [] | no_license | Caspeezie/TDDAssignments | 6d0c988b458797bc0826d1e1cf338df15d337f4c | c8830df877b85025c4064dc1c8933b8915d88aad | refs/heads/master | 2022-12-22T08:16:23.884642 | 2020-09-15T11:29:49 | 2020-09-15T11:29:49 | 276,622,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #This program says hello and asks for your name
# Greet the user, ask for a name on stdin, then echo it back.
print('Hello World')
print('What is your name?')
visitor = input()
print('It is good to meet you, ' + visitor)
| [
"noreply@github.com"
] | noreply@github.com |
a1816820f2b5b9ae3603a6eb633cc4c41cc5b75c | 4825a6b905d49c787996058e23e99f2d1785e3d4 | /Lab12.py | a7f8dbb2c50fe805cb29362c411bfac41519aff8 | [] | no_license | dylansidney25/Lab-12 | 510fe50ab3b67d06b5feb913c2cc4dd836524794 | 0d00ac2966afb53f4799af6fb0488beb4f834ebc | refs/heads/main | 2023-03-29T19:31:27.323802 | 2021-04-02T20:18:00 | 2021-04-02T20:18:00 | 354,121,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | import cv2 as cv
import sys

# Load the image; cv.imread returns None (it does not raise) when the file
# is missing or unreadable, so the existence check must come FIRST.
img = cv.imread('dragon2.png')

# BUG FIX: this guard previously ran after cv.resize, which raises on a
# None input — the friendly exit message was unreachable.
if img is None:
    sys.exit("The image could not be read.")

# Enlarge 3x in both axes with bicubic interpolation.
img = cv.resize(img, None, fx=3, fy=3, interpolation=cv.INTER_CUBIC)

cv.imshow("OpenCV Image", img)  # display in a window with the given title
cv.waitKey(0)                   # block until any key is pressed
cv.destroyAllWindows()          # close the display window
"noreply@github.com"
] | noreply@github.com |
deaa0857f040e4558c3a3aa27b0b1ff32bf995cc | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ_16_1/16_1_3_ka_ya_c.py | 7735ad455887347c1c5a1e4c3582e3531bafa93a | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,141 | py | def solve(n, fs):
    # Python 2 code (xrange, integer '/'). fs[i] is i's chosen partner.
    # Convert 1-based partner indices to 0-based.
    fs = [f-1 for f in fs]
    # lp[p] is set only for nodes that lie on a cycle:
    # (cycle_length, longest_tail_feeding_into_p).
    lp = [None for p in xrange(n)]
    # Pass 1: follow successor pointers from each node to discover cycles.
    for i in xrange(n):
        chk = [False for p in xrange(n)]
        p = i
        cnt = 0
        # Walk until we revisit a node from this walk or reach a known cycle.
        while not chk[p] and not lp[p]:
            chk[p] = True
            p = fs[p]
            cnt += 1
        if p == i:
            # The walk closed on i itself, so i is on a cycle of length cnt;
            # mark every node of that cycle.
            while not lp[p]:
                lp[p] = (cnt, 0)
                p = fs[p]
    # Pass 2: for each cycle node, record the longest chain that ends at it.
    for i in xrange(n):
        p = i
        cnt = 0
        while not lp[p]:
            p = fs[p]
            cnt += 1
        l, b = lp[p]
        if cnt > b:
            lp[p] = (l, cnt)
    res = 0
    tmp = 0
    for i in xrange(n):
        if lp[i]:
            l, b = lp[i]
            if l == 2:
                # Mutual pair: tail + pair + tail forms a placeable chain.
                # Each pair is summed from both of its ends, hence the
                # division by 2 below.
                j = fs[i]
                _, bj = lp[j]
                tmp += l + b + bj
            else:
                # A cycle longer than 2 must be seated alone; keep the largest.
                if l > res:
                    res = l
    # All pair-chains can sit side by side, so compare their total length.
    if tmp / 2 > res:
        res = tmp / 2
    return res
# Python 2 driver: T test cases; each gives N and then N partner indices.
T = input()
for i in xrange(1, T+1):
    N = input()
    Fs = map(int, raw_input().split())
    # Google Code Jam output format: "Case #k: answer".
    print 'Case #{}: {}'.format(i, solve(N, Fs))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
e36fa96a6b594bb9c54e0f68eb1cdc6daad6b8e1 | 441508533c0f46814e5c5248ca31858afc3a2f7e | /a/b.py | ecf6c8fc51e30df4ea2a6b660a31b92d01ab8553 | [] | no_license | phiresky/poetry-install-self-issue | 7e72ef08821959ed346163756e2ed51f00fc0148 | 47b2e7842555c94f9f07ed80096b48010e6a9b1e | refs/heads/master | 2020-04-10T16:29:32.768434 | 2018-12-10T15:32:03 | 2018-12-10T15:32:03 | 161,147,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | import c.x
print(c.x.test)
| [
"robin.ruede@philips.com"
] | robin.ruede@philips.com |
e9bdcafd637d894057834d086307fedfa9f62b56 | 17a1529d0403a8f2b5b1080305a0b61de61c1477 | /api/radiam/api/tests/permissionstests/datasetdatacollectionmethodpermissionstests.py | 0617cc4fd8c24ef85695bc483c62d91b49779f31 | [
"MIT"
] | permissive | usask-rc/radiam | 903dc6f21d17e371141a642d94e877ec993c3a66 | 6db6794fd1811b316dee6f6661986e027d8a594b | refs/heads/master | 2022-05-15T07:48:48.183526 | 2022-04-19T15:01:48 | 2022-04-19T15:01:48 | 237,302,758 | 2 | 1 | MIT | 2022-04-19T14:58:27 | 2020-01-30T20:48:56 | Python | UTF-8 | Python | false | false | 14,941 | py | import json
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
from rest_framework.test import force_authenticate
from django.urls import reverse
from radiam.api.models import (
User, Dataset, DataCollectionMethod, DatasetDataCollectionMethod
)
from radiam.api.views import DatasetDataCollectionMethodViewSet
class TestSuperuserDatasetDataCollectionMethodPermissions(APITestCase):
    """
    Test response codes for DatasetDataCollectionMethod endpoints for the
    Superuser role: superusers may both read and write.
    """
    fixtures = ['userpermissions']

    def setUp(self):
        # Requests are built by hand so each ViewSet action can be called directly.
        self.factory = APIRequestFactory()
        self.user = User.objects.get(username='admin')

    def test_superuser_read_datasetdatacollectionmethod_list(self):
        """
        Test Superuser can read DatasetDataCollectionMethod list
        """
        request = self.factory.get(reverse('datasetdatacollectionmethod-list'))
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'list'})(request)

        self.assertContains(
            response=response,
            text="",
            status_code=200)

    def test_superuser_write_datasetdatacollectionmethod_list(self):
        """
        Test Superuser can write DatasetDataCollectionMethod list
        """
        datacollectionmethod = DataCollectionMethod.objects.get(label='datacollection.method.other')
        dataset = Dataset.objects.get(title='Research Is Fun')

        body = {
            'dataset': str(dataset.id),
            'data_collection_method': str(datacollectionmethod.id)
        }

        request = self.factory.post(
            reverse('datasetdatacollectionmethod-list'),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'post': 'create'})(request)

        self.assertContains(response=response, text="", status_code=201)

    def test_superuser_read_datasetdatacollectionmethod_detail(self):
        """
        Test Superuser can read a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')

        request = self.factory.get(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'retrieve'})(request, pk=detail_datasetdatacollectionmethod.id)

        self.assertContains(
            response=response,
            text="",
            status_code=200)

    def test_superuser_write_datasetdatacollectionmethod_detail(self):
        """
        Test Superuser can write a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        data_collection_method = DataCollectionMethod.objects.get(label='datacollection.method.other')

        body = {
            "data_collection_method": reverse('datacollectionmethod-detail', kwargs={'pk': data_collection_method.id})
        }

        # BUG FIX: 'body' was built but never attached to the request, so the
        # view received an empty PATCH and the write path was never exercised.
        request = self.factory.patch(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_datasetdatacollectionmethod.id)

        self.assertContains(
            response=response,
            text="",
            status_code=200)
class TestAdminUserDatasetDataCollectionMethodPermissions(APITestCase):
    """
    Test response codes for DatasetDataCollectionMethod endpoints for the
    Admin User role: admins may both read and write.
    """
    fixtures = ['userpermissions']

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.get(username='testuser1')

    def test_admin_user_read_datasetdatacollectionmethod_list(self):
        """
        Test Admin User can read DatasetDataCollectionMethod list
        """
        request = self.factory.get(reverse('datasetdatacollectionmethod-list'))
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'list'})(request)

        self.assertContains(
            response=response,
            text="",
            status_code=200)

    def test_admin_user_write_datasetdatacollectionmethod_list(self):
        """
        Test Admin User can write DatasetDataCollectionMethod list
        (docstring fixed: it previously claimed "Member user cannot write"
        while asserting a 201 created response)
        """
        datacollectionmethod = DataCollectionMethod.objects.get(label='datacollection.method.other')
        dataset = Dataset.objects.get(title='Research Is Fun')

        body = {
            'dataset': str(dataset.id),
            'data_collection_method': str(datacollectionmethod.id)
        }

        request = self.factory.post(
            reverse('datasetdatacollectionmethod-list'),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'post': 'create'})(request)

        self.assertContains(
            response=response,
            text="",
            status_code=201)

    def test_adminuser_read_datasetdatacollectionmethod_detail(self):
        """
        Test Admin user can read a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')

        request = self.factory.get(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'retrieve'})(request, pk=detail_datasetdatacollectionmethod.id)

        self.assertContains(
            response=response,
            text="",
            status_code=200)

    def test_admin_user_write_datasetdatacollectionmethod_detail(self):
        """
        Test Admin User can write a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        data_collection_method = DataCollectionMethod.objects.get(label='datacollection.method.other')

        body = {
            "data_collection_method": reverse('datacollectionmethod-detail', kwargs={'pk': data_collection_method.id})
        }

        # BUG FIX: attach the JSON body; previously the PATCH went out empty.
        request = self.factory.patch(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_datasetdatacollectionmethod.id)

        self.assertContains(
            response=response,
            text="",
            status_code=200)
class TestManagerUserDatasetDataCollectionMethodPermissions(APITestCase):
    """
    Test response codes for DatasetDataCollectionMethod endpoints for the
    Manager User role: managers may read but not write.
    """
    fixtures = ['userpermissions']

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.get(username='testuser2')

    def test_manager_user_read_datasetdatacollectionmethod_list(self):
        """
        Test Manager User can read DatasetDataCollectionMethod list
        """
        request = self.factory.get(reverse('datasetdatacollectionmethod-list'))
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'list'})(request)

        self.assertContains(
            response=response,
            text="",
            status_code=200)

    def test_manager_user_write_datasetdatacollectionmethod_list(self):
        """
        Test Manager user cannot write DatasetDataCollectionMethod list
        (docstring fixed: it previously said "Member user")
        """
        datacollectionmethod = DataCollectionMethod.objects.get(label='datacollection.method.other')
        dataset = Dataset.objects.get(title='Research Is Fun')

        body = {
            'dataset': str(dataset.id),
            'data_collection_method': reverse('datacollectionmethod-detail', kwargs={'pk': datacollectionmethod.id})
        }

        request = self.factory.post(
            reverse('datasetdatacollectionmethod-list'),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'post': 'create'})(request)

        self.assertContains(
            response=response,
            text="",
            status_code=403)

    def test_manager_user_read_datasetdatacollectionmethod_detail(self):
        """
        Test Manager user can read a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')

        request = self.factory.get(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'retrieve'})(request, pk=detail_datasetdatacollectionmethod.id)

        self.assertContains(
            response=response,
            text="",
            status_code=200)

    def test_manager_user_write_datasetdatacollectionmethod_detail(self):
        """
        Test Manager User cannot write a DatasetDataCollectionMethod detail
        (docstring fixed: it previously said "can write" while asserting 403)
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        data_collection_method = DataCollectionMethod.objects.get(label='datacollection.method.other')

        body = {
            "data_collection_method": str(data_collection_method.id)
        }

        # BUG FIX: attach the JSON body; previously the PATCH went out empty,
        # so the 403 never covered an actual attempted write.
        request = self.factory.patch(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_datasetdatacollectionmethod.id)

        self.assertContains(
            response=response,
            text="",
            status_code=403)
class TestMemberUserDatasetDataCollectionMethodPermissions(APITestCase):
    """
    Test response codes for DatasetDataCollectionMethod endpoints for the
    Member User role: members may read but not write.
    """
    fixtures = ['userpermissions']

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.get(username='testuser3')

    def test_member_user_read_datasetdatacollectionmethod_list(self):
        """
        Test Member User can read DatasetDataCollectionMethod list
        """
        request = self.factory.get(reverse('datasetdatacollectionmethod-list'))
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'list'})(request)

        self.assertContains(
            response=response,
            text="",
            status_code=200)

    def test_member_user_write_datasetdatacollectionmethod_list(self):
        """
        Test Member user cannot write DatasetDataCollectionMethod list
        """
        datacollectionmethod = DataCollectionMethod.objects.get(label='datacollection.method.other')
        dataset = Dataset.objects.get(title='Research Is Fun')

        body = {
            'dataset': str(dataset.id),
            'data_collection_method': str(datacollectionmethod.id)
        }

        request = self.factory.post(
            reverse('datasetdatacollectionmethod-list'),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'post': 'create'})(request)

        self.assertContains(
            response=response,
            text="",
            status_code=403)

    def test_member_user_read_datasetdatacollectionmethod_detail(self):
        """
        Test Member user can read a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')

        request = self.factory.get(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'retrieve'})(request, pk=detail_datasetdatacollectionmethod.id)

        self.assertContains(
            response=response,
            text="",
            status_code=200)

    def test_member_user_write_datasetdatacollectionmethod_detail(self):
        """
        Test Member User cannot write a DatasetDataCollectionMethod detail
        (docstring fixed: it previously said "can write" while asserting 403)
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        data_collection_method = DataCollectionMethod.objects.get(label='datacollection.method.other')

        body = {
            "data_collection_method": str(data_collection_method.id)
        }

        # BUG FIX: attach the JSON body; previously the PATCH went out empty.
        request = self.factory.patch(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)

        response = DatasetDataCollectionMethodViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_datasetdatacollectionmethod.id)

        self.assertContains(
            response=response,
            text="",
            status_code=403)
| [
"todd.trann@usask.ca"
] | todd.trann@usask.ca |
2ee2ccec5dbf7843302c65bae409bb7fdcc29b2a | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy3325.py | ba3c7f7f745af20e6283d8398fd4aeb577461651 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,323 | py | # qubit number=4
# total number=44
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Element-wise XOR of bit strings s and t, returned in reversed order.

    Indexing is driven by len(s), so a shorter t raises IndexError,
    matching the original behavior.
    """
    result = ''
    # Walk the positions from last to first so the output is already reversed.
    for i in reversed(range(len(s))):
        result += str(int(s[i]) ^ int(t[i]))
    return result
def bitwise_dot(s: str, t: str) -> str:
    """Return the GF(2) inner product of bit strings s and t as '0' or '1'."""
    # Sum of pairwise products, reduced modulo 2; indexing by len(s)
    # preserves the original's IndexError on a shorter t.
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f as an (n+1)-qubit circuit.

    For every n-bit string ``rep`` with f(rep) == "1", a multi-controlled
    Toffoli flips the target qubit; X gates temporarily invert the controls
    that are '0' in ``rep`` so the MCT fires on exactly that bit pattern.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Map the '0' control bits to |1> so the MCT matches this input.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the X gates to restore the control qubits.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the auto-generated n-qubit benchmark circuit.

    The gate sequence is machine-generated (the ``# number=`` comments are
    the generator's bookkeeping); it wraps the oracle built from ``f`` and
    finishes by measuring every qubit into the classical register.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=13
    prog.h(input_qubit[3]) # number=23
    prog.cz(input_qubit[0],input_qubit[3]) # number=24
    prog.y(input_qubit[1]) # number=37
    prog.h(input_qubit[3]) # number=25
    prog.x(input_qubit[3]) # number=18
    prog.cx(input_qubit[3],input_qubit[1]) # number=40
    prog.cx(input_qubit[0],input_qubit[3]) # number=19
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5

    # Apply the truth-table oracle over the first n-1 qubits plus the target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=32
    prog.h(input_qubit[0]) # number=41
    prog.cz(input_qubit[3],input_qubit[0]) # number=42
    prog.h(input_qubit[0]) # number=43
    prog.cx(input_qubit[3],input_qubit[0]) # number=26
    prog.z(input_qubit[3]) # number=27
    prog.h(input_qubit[0]) # number=29
    prog.cz(input_qubit[3],input_qubit[0]) # number=30
    prog.h(input_qubit[0]) # number=31
    prog.h(input_qubit[0]) # number=33
    prog.cz(input_qubit[3],input_qubit[0]) # number=34
    prog.h(input_qubit[0]) # number=35
    prog.h(input_qubit[2]) # number=36
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9

    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.y(input_qubit[2]) # number=38
    prog.y(input_qubit[2]) # number=39
    # circuit end

    # Measure every qubit into the matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Hidden function f(x) = (a . x) XOR b, built from the bit-string helpers.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Noisy simulation against a mocked 5-qubit device model.
    backend = FakeVigo()
    sample_shot =8000

    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Persist the measurement histogram, circuit depth and transpiled circuit.
    writefile = open("../data/startQiskit_noisy3325.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
45d758380acbf60b955a49e002bc95f70394a173 | d919fff21345c9553bd3095f639a8049be53c3d6 | /OrderPizza/migrations/0004_subtoppings.py | efb945020d66d556e8ae6ac99e546a4aca2c0f70 | [] | no_license | parathan/Pizza-Place | 36ae461e828c1191db9829cd5d870b820236acde | 56511f02df5d28196422f85f25a8230e3e730713 | refs/heads/master | 2022-07-24T20:40:26.603536 | 2020-05-23T16:24:36 | 2020-05-23T16:24:36 | 263,726,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | # Generated by Django 3.0.6 on 2020-05-15 02:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.0.6) for the OrderPizza app.

    Creates the ``SubToppings`` model: a topping type plus a price for
    each of the small and large sizes.
    """

    dependencies = [
        ('OrderPizza', '0003_auto_20200514_2114'),
    ]

    operations = [
        migrations.CreateModel(
            name='SubToppings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=50)),
                ('smallprice', models.DecimalField(decimal_places=2, max_digits=10)),
                ('largeprice', models.DecimalField(decimal_places=2, max_digits=10)),
            ],
            options={
                'verbose_name_plural': 'SubToppings',
            },
        ),
    ]
| [
"parathan243@gmail.com"
] | parathan243@gmail.com |
69f0025f1980926d00241e4f3d009bc18b75f4f2 | dfca79dd44910df779eb33a5e9a5d2d6689eb23a | /gifs/migrations/0002_auto_20180818_2349.py | 993068e30a6674542c51f747984712ea60a1ad58 | [] | no_license | madevelascom/topgifs | 193094961fbdbc2773fa03cda1d0eade8e81b4dc | 94bfdc20eebda746f70148c0318f6ab23f31bbf6 | refs/heads/master | 2020-03-26T19:40:03.405553 | 2018-08-23T07:39:09 | 2018-08-23T07:39:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # Generated by Django 2.1 on 2018-08-19 04:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.1) for the gifs app.

    Adds ``contador`` (view counter), ``descripcion`` and ``url`` fields to
    the Gif model and re-declares ``id`` as a plain integer primary key.
    """

    dependencies = [
        ('gifs', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='gif',
            name='contador',
            # NOTE(review): default='0' is a string on an IntegerField;
            # default=0 was probably intended (historical migration, left as-is).
            field=models.IntegerField(default='0'),
        ),
        migrations.AddField(
            model_name='gif',
            name='descripcion',
            field=models.TextField(default=''),
        ),
        migrations.AddField(
            model_name='gif',
            name='url',
            field=models.TextField(default='#'),
        ),
        migrations.AlterField(
            model_name='gif',
            name='id',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
    ]
| [
"m_velasco93@hotmail.com"
] | m_velasco93@hotmail.com |
5d301c3d4a4175d621b5d2c952a421bcdec1afd2 | 8683b28b47acceb6fc7677dcea3248a926fb95dd | /cities/views.py | 66bc591e3bb5cd2b59f0018e82f5e910ccdf6d71 | [] | no_license | pavhrablis/find_routes | e2463714a94ee90411634934064cd56402d247ce | 228cdef0c553bab9f4c1328c3263209c974b8c57 | refs/heads/master | 2023-02-17T18:22:00.505675 | 2021-01-21T10:39:23 | 2021-01-21T10:39:23 | 330,508,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.views.generic import DetailView, CreateView, UpdateView, DeleteView, ListView
from cities.forms import CityForm
from cities.models import City
__all__ = (
#'home',
'CityDetailView',
'CityCreateView',
'CityUpdateView',
'CityDeleteView',
'CityListView',
)
# def home(request, pk=None):
# if request.method == "POST":
# form = CityForm(request.POST)
# if form.is_valid():
# form.save()
# form = CityForm()
# cities = City.objects.all()
# paginator = Paginator(cities, 2)
# page_number = request.GET.get('page')
# page_obj = paginator.get_page(page_number)
# context = {'page_obj': page_obj, 'form': form}
# return render(request, 'cities/home.html', context)
class CityDetailView(DetailView):
    """Read-only detail page for a single City (looked up by pk from the URL)."""
    queryset = City.objects.all()
    template_name = 'cities/detail.html'
class CityCreateView(SuccessMessageMixin, CreateView):
    """Create a City, then redirect to the list page with a flash message.

    NOTE(review): unlike the update/delete views this one has no
    LoginRequiredMixin -- confirm anonymous creation is intended.
    """
    model = City
    form_class = CityForm
    template_name = 'cities/create.html'
    success_url = reverse_lazy('cities:home')
    success_message = "City successfully created"
class CityUpdateView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
    """Edit an existing City (login required).

    No success_url is set here, so the post-save redirect presumably relies
    on the model's get_absolute_url() -- confirm City defines it.
    """
    model = City
    form_class = CityForm
    template_name = 'cities/update.html'
    success_message = "City successfully changed"
class CityDeleteView(SuccessMessageMixin, LoginRequiredMixin, DeleteView):
    """Delete a City without a confirmation page (login required).

    GET is forwarded straight to POST, so visiting the URL deletes the
    object immediately instead of rendering a confirmation template.
    """
    model = City
    success_url = reverse_lazy('cities:home')
    def get(self, request, *args, **kwargs):
        # Flash the message first, then perform the deletion.
        # Fixed typo in the user-facing message: "deleated" -> "deleted".
        messages.success(request, "City successfully deleted")
        return self.post(request, *args, **kwargs)
class CityListView(ListView):
    """Paginated list of cities; also exposes an inline creation form."""
    model = City
    paginate_by = 5
    template_name = 'cities/home.html'
    form = CityForm
    def get_context_data(self, **kwargs):
        # Add an unbound CityForm to the template context next to the page.
        context = super().get_context_data(**kwargs)
        context['form'] = CityForm()
        return context
| [
"palik1hrablis@gmail.com"
] | palik1hrablis@gmail.com |
6447271e439b59fe2951102126daafa3a88009bb | 36bc1dd6ebd845b8946a57851b4cc43dae9dc1ba | /accounts/urls.py | 083a65b74b9b8be5574d9c3aa24658e0ceb25ea3 | [] | no_license | bruce619/djangomedicalapp | eba6547092f95c843598c2ba88e2ac84d0b92c64 | e3a7f046677b5b6eadec5191842f372b4f0d259c | refs/heads/master | 2022-12-12T22:58:27.233939 | 2020-11-07T22:50:10 | 2020-11-07T22:50:10 | 254,091,758 | 0 | 0 | null | 2022-11-22T07:02:53 | 2020-04-08T13:13:12 | Python | UTF-8 | Python | false | false | 726 | py | from django.urls import path
from . import views
from accounts import views as user_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('patients/signup/', views.patients_registration_form, name='patients-signup'),
path('practitioners/signup/', views.practitioners_registration_form, name='practitioners-signup'),
path('profile/', user_views.profile, name='profile'),
]
# Saves static files in static folder
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Saves media files in media folder
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"chimuanyaibecheozor@gmail.com"
] | chimuanyaibecheozor@gmail.com |
be01e83be789080320012167b97227cca786a513 | 2488718297e47d726471f5aafb76369390912d5a | /Medicine predictor for The disease.py | 10ca4508a428e2c16874fae6221bb2084fe751b6 | [] | no_license | HarshaVardhanReddy18/HOUSE-MD | 9c9f887cdea34cf285e546ee01c9e60539bc2e6c | 659340a672ec9a89040ec614b8204b58c732339c | refs/heads/master | 2022-12-19T18:40:59.666603 | 2020-10-09T17:22:02 | 2020-10-09T17:22:02 | 296,778,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 19 12:26:24 2020
@author: shaur
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
os.chdir("D:\data sets")  # NOTE(review): hard-coded Windows path; breaks elsewhere
dataset = pd.read_csv('web_sacrapped.csv', delimiter = ',', quoting = 3)
# Column 'a' appears to be the text input and 'b' the target -- presumed
# from the usage below; confirm against the CSV.
a=dataset.iloc[:,:2]
x=a.iloc[:,:1]
y=a.iloc[:,1:2]
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# Clean the first 213 rows of column 'a': strip non-letters, lowercase.
corpus = []
for i in range(0, 213):
    review = re.sub('[^a-zA-Z]', ' ', x['a'][i])
    review = review.lower()
    review = review.split()
    # NOTE(review): ps and all_stopwords are built but never applied --
    # stemming/stop-word removal appears intended but is missing.
    ps = PorterStemmer()
    all_stopwords = stopwords.words('english')
    review = ' '.join(review)
    corpus.append(review)
# Same cleaning for the target column 'b'.
corpus1 = []
for i in range(0, 213):
    review1 = re.sub('[^a-zA-Z]', ' ', y['b'][i])
    review1 = review1.lower()
    review1 = review1.split()
    ps = PorterStemmer()
    review1 = ' '.join(review1)
    corpus1.append(review1)
# Bag-of-words features (top 1500 tokens), 80/20 split, Gaussian Naive Bayes.
# NOTE(review): corpus1 is built but the raw y frame is what gets split/fit.
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
X = cv.fit_transform(corpus).toarray()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
"noreply@github.com"
] | noreply@github.com |
739b69b48836c90b0897da6f89d31fa83298ca3a | ca683bdfd7e3a02056965f8f5452daa232455fe2 | /users/admin.py | e38ab5c53af47e20d9c8940e891f0af9da8c269a | [] | no_license | GuillermoRuizDev/DJango_RedSocial | 63e890b6ceae20ef78b382ab80850f94902ee67f | 19e77b3a2c924fec516426565843c87a6c3dc638 | refs/heads/main | 2023-06-27T00:21:13.059760 | 2021-08-01T12:17:46 | 2021-08-01T12:17:46 | 378,261,983 | 0 | 0 | null | 2021-07-26T09:05:02 | 2021-06-18T20:49:17 | Python | UTF-8 | Python | false | false | 1,964 | py | """ Admin Users classes. """
#Django
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
#Models
from django.contrib.auth.models import User
from users.models import Profile
from posts.models import Post
# Register your models here.
#admin.site.register(Profile)
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """ Profile admin. """
    # Columns in the changelist; phone/website/picture are editable inline.
    list_display = ('pk','user', 'phone_number', 'website', 'picture')
    list_display_links = ('pk', 'user')
    list_editable = ('phone_number','website','picture')
    # Search spans related User fields via the double-underscore lookups.
    search_fields = (
        'user__username',
        'user__email',
        'user__first_name',
        'user__last_name',
        'phone_number',
        'website'
    )
    list_filter = (
        'created',
        'modified',
        'user__is_active',
        'user__is_staff'
    )
    # Change-form layout, grouped into three collapsible sections.
    fieldsets = (
        ('Profile',{
            'fields': (
                ('user','picture'),
            ),
        }),
        ('Extra info',{
            'fields': (
                ('website', 'phone_number'),
                # NOTE(review): ('biography') is a plain string, not a
                # 1-tuple -- likely intended as ('biography',); confirm.
                ('biography')
            )
        }),
        ('Metadata',{
            'fields': (
                ('created','modified'),
            )
        })
    )
    readonly_fields = ('created','modified')
# NOTE(review): conventional spelling would be "ProfileInline"; the name is
# referenced by UserAdmin below, so renaming must be coordinated.
class ProfileInLine(admin.StackedInline):
    """ Profile in-line admin for users. """
    model = Profile
    can_delete = False
    verbose_name_plural = 'profiles'
class PostAdmin(admin.ModelAdmin):
    """Changelist configuration for Post entries."""
    list_display = ('user', 'title', 'photo', )
class UserAdmin(BaseUserAdmin):
    """ Add profile admin to base user admin. """
    # Edit the related Profile on the same page as the User.
    inlines = (ProfileInLine,)
    list_display = (
        'username',
        'email',
        'first_name',
        'last_name',
        'is_active',
        'is_staff'
    )
# Replace the stock User admin with the Profile-inlining variant above.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Post, PostAdmin)
"gui.ruiz.alvarado@gmail.com"
] | gui.ruiz.alvarado@gmail.com |
9646ac4cc55d9a5e30e41d7546f3ca1df7b888f9 | f0d9ba8456cdad2b2fa711fa8975b41da7af1784 | /worms/tests/__init__.py | 2b9503765bab2d60bb03f655ddf70c5209239ab5 | [
"Apache-2.0"
] | permissive | willsheffler/worms | f1d893d4f06b421abdd4d1e526b43c2e132e19a2 | 27993e33a43474d647ecd8277b210d4206858f0b | refs/heads/master | 2023-04-08T01:18:33.656774 | 2022-06-09T20:04:55 | 2022-06-09T20:04:55 | 118,678,808 | 6 | 5 | NOASSERTION | 2021-10-05T22:28:24 | 2018-01-23T22:30:45 | Python | UTF-8 | Python | false | false | 670 | py | # -*- coding: utf-8 -*-
"""Unit test package for worms."""
import os
import pytest
try:
import pyrosetta
HAVE_PYROSETTA = True
only_if_pyrosetta = lambda x: x
try:
import pyrosetta.distributed
HAVE_PYROSETTA_DISTRIBUTED = True
only_if_pyrosetta_distributed = lambda x: x
except ImportError:
HAVE_PYROSETTA_DISTRIBUTED = False
only_if_pyrosetta_distributed = pytest.mark.skip
except ImportError:
HAVE_PYROSETTA = HAVE_PYROSETTA_DISTRIBUTED = False
only_if_pyrosetta = only_if_pyrosetta_distributed = pytest.mark.skip
only_if_jit = lambda x: x
if "NUMBA_DISABLE_JIT" in os.environ:
only_if_jit = pytest.mark.skip
| [
"willsheffler@gmail.com"
] | willsheffler@gmail.com |
b48acca645c875be946e114f798a9563f27d31d1 | e56e7d398376e575d0b42fa36070f071277fae75 | /blog/models.py | 547c946c1179a5983e2a2428333286dd1a097a47 | [] | no_license | AdebambiComfort/My-Blog | 00ef8cce812a941c58483b1f9d01afe9b4067f72 | bd504390c983eed2ce7f308e9e00b6bcc676bf5c | refs/heads/master | 2020-07-01T04:27:36.641489 | 2019-08-07T13:14:32 | 2019-08-07T13:14:32 | 201,048,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    # A blog entry authored by a Django auth user.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Callable default: evaluated per instance at creation time.
    created_date = models.DateTimeField(
        default=timezone.now)
    # Remains null until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
| [
"olatundecomfort94@gmail.com"
] | olatundecomfort94@gmail.com |
45f343096530fa01c5f2708f14403031fa6baa1f | 5332fef91e044555e605bb37cbef7c4afeaaadb0 | /hy-data-analysis-with-python-2020/part06-e07_binding_sites/src/binding_sites.py | 6baad43f425d059dd9d258e457e1d88a1b708b0e | [] | no_license | nopomi/hy-data-analysis-python-2019 | f3baa96bbe9b6ee7f0b3e6f6b8b0f3adfc3b6cc8 | 464685cb377cfdeee890a008fbfbd9ed6e3bcfd0 | refs/heads/master | 2021-07-10T16:16:56.592448 | 2020-08-16T18:27:38 | 2020-08-16T18:27:38 | 185,044,621 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | #!/usr/bin/env python3
import pandas as pd
import numpy as np
import scipy
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import accuracy_score
from sklearn.metrics import pairwise_distances
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
import scipy.spatial as sp
import scipy.cluster.hierarchy as hc
def find_permutation(n_clusters, real_labels, labels):
    """Map each cluster id to the most common true label inside that cluster.

    Parameters
    ----------
    n_clusters : int
        Number of clusters; cluster ids are assumed to be 0..n_clusters-1.
    real_labels : np.ndarray
        Ground-truth labels, same length as ``labels``.
    labels : np.ndarray
        Cluster assignments produced by the clustering algorithm.

    Returns
    -------
    list
        permutation[i] is the majority true label within cluster i.
    """
    permutation = []
    for i in range(n_clusters):
        idx = labels == i
        # Majority vote via np.unique instead of scipy.stats.mode: the old
        # `mode(...)[0][0]` indexing breaks on SciPy >= 1.11, where mode()
        # returns a scalar for 1-D input.  np.unique returns sorted values,
        # so ties resolve to the smallest label, matching scipy's behavior.
        values, counts = np.unique(real_labels[idx], return_counts=True)
        permutation.append(values[np.argmax(counts)])
    return permutation
def toint(x):
    """Map a nucleotide to its index in 'ACGT'; -1 if it does not occur."""
    alphabet = 'ACGT'
    return alphabet.find(x)
def get_features_and_labels(filename):
    """Load a tab-separated file and numerically encode its "X" column.

    Returns a pair (X, y): X is an integer matrix where every character of
    each sequence in column "X" is mapped through toint(); y is the "y"
    column as a numpy array.
    """
    frame = pd.read_csv(filename, sep="\t")
    encoded = [[toint(ch) for ch in seq] for seq in frame["X"]]
    return np.array(encoded), np.array(frame["y"])
def plot(distances, method='average', affinity='euclidean'):
    """Show a seaborn clustermap of a square pairwise-distance matrix.

    `method` selects the linkage method; `affinity` is only used to label
    the figure title (the distances are supplied precomputed).
    """
    # Linkage expects a condensed distance matrix, hence squareform().
    mylinkage = hc.linkage(sp.distance.squareform(distances), method=method)
    g=sns.clustermap(distances, row_linkage=mylinkage, col_linkage=mylinkage )
    g.fig.suptitle(f"Hierarchical clustering using {method} linkage and {affinity} affinity")
    plt.show()
def cluster_euclidean(filename):
    """Cluster the sequences with average linkage + Euclidean affinity.

    Returns the accuracy of the cluster assignment against the true labels
    after relabeling clusters by majority vote.
    """
    X, y = get_features_and_labels(filename)
    model = AgglomerativeClustering(linkage="average", affinity="euclidean")
    model.fit(X)
    # Align cluster ids with the true labels before scoring.
    permutation = find_permutation(2, y, model.labels_)
    new_labels = [permutation[label] for label in model.labels_]
    score = accuracy_score(y, new_labels)
    # The distance matrix was only needed for the (disabled) visualization;
    # computing it unconditionally was dead work, so it moved into the comment.
    # distances = pairwise_distances(X, metric="euclidean")
    # plot(distances)
    return score
def cluster_hamming(filename):
    """Cluster the sequences using a precomputed Hamming distance matrix.

    Returns the accuracy against the true labels after relabeling clusters
    by majority vote.
    """
    features, truth = get_features_and_labels(filename)
    hamming = pairwise_distances(features, metric="hamming")
    clusterer = AgglomerativeClustering(affinity="precomputed", linkage="average")
    clusterer.fit_predict(hamming)
    # Align cluster ids with the true labels before scoring.
    mapping = find_permutation(2, truth, clusterer.labels_)
    relabeled = [mapping[c] for c in clusterer.labels_]
    #plot(hamming, method="average", affinity="hamming")
    return accuracy_score(truth, relabeled)
return score
def main():
    # Runs both clusterings on the bundled data file; the path is relative
    # to the project root, so run from there.
    print("Accuracy score with Euclidean affinity is", cluster_euclidean("src/data.seq"))
    print("Accuracy score with Hamming affinity is", cluster_hamming("src/data.seq"))
if __name__ == "__main__":
    main()
| [
"miska.noponen@gmail.com"
] | miska.noponen@gmail.com |
b9e1f9a4e83ce496a09c5efd5b204b28738b2214 | 4ab9b679881e80b1e277d4d08840e5e62cc91c5a | /learngh/settings/base.py | cd75583de0fc19fa427def4322f4fe2022f693e3 | [] | no_license | agbekofrank/learnghb | 8987fd4c543a85752c03f473124e8ca82b959545 | a43560a18d807aae3d3f507b4f37f07e6e889852 | refs/heads/main | 2023-07-22T19:06:07.654386 | 2021-08-28T01:39:15 | 2021-08-28T01:39:15 | 400,674,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,148 | py |
import os
import dotenv
from datetime import timedelta
# import django_heroku
# import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: three levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Load environment variables from a .env file at the project root, if present.
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
    dotenv.load_dotenv(dotenv_file)
# NOTE(review): a real-looking secret key is committed in the comment below;
# it should be treated as compromised and rotated.
# SECRET_KEY = '%p8#dko(q2l2d+9-k(f)6w-1p$(*3*y2v#+^ebjxka@og*oocd'
SECRET_KEY = os.environ['SECRET_KEY']  # raises KeyError at import time if unset
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
print('Using base')
# ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # sites framework -- presumably required by allauth (SITE_ID below).
    'django.contrib.sites',
    # third party
    'rest_framework',
    'rest_framework.authtoken',
    'crispy_forms',
    'corsheaders',
    # All auth
    'allauth',
    'allauth.account',
    'rest_auth',
    'rest_auth.registration',
    # local apps
    'accounts',
    'posts',
    'course_content',
    'questions',
    'solutions',
    'lessons',
    'heroes',
    'file_upload'
]
# allauth
SITE_ID = 1
ACCOUNT_EMAIL_VERIFICATION = 'none' # change on production
# JWT settings
REST_USE_JWT = True
# Middleware stack.  Removed a duplicated 'corsheaders.middleware.CorsMiddleware'
# entry (it appeared both before and after SessionMiddleware); the remaining
# entry stays above CommonMiddleware as django-cors-headers requires.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learngh.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <BASE_DIR>/templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
AUTHENTICATION_BACKENDS = (
    # Needed to login by username in Django admin, regardless of `allauth`
    'django.contrib.auth.backends.ModelBackend',
    # `allauth` specific authentication methods, such as login by e-mail
    'allauth.account.auth_backends.AuthenticationBackend',
)
WSGI_APPLICATION = 'learngh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'learngh',
        'USER': 'agbeko',
        # Password comes from the environment (loaded via dotenv above).
        'PASSWORD': os.environ['PASSWORD'],
        # NOTE(review): '*' is not a valid Postgres host -- 'localhost' was
        # probably intended; confirm against the deployment environment.
        'HOST': '*',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# static files source for the project during development
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static")
]
# static files source for the project during production
# STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_cdn', 'media_root')
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR),
                           "static_cdn", "static_root")
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# where uploaded files would be kept
MEDIA_URL = '/media/'
# MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media_cdn', 'media_root')
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR),
                          "static_cdn", "media_root")
CRISPY_TEMPLATE_PACK = 'uni_form'
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': [
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    ],
    'DEFAULT_PARSER_CLASSES': [
        'rest_framework.parsers.JSONParser',
    ],
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.BasicAuthentication'
    ],
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny',
    ],
}
# Long-lived JWTs (30 days) with refresh enabled.
JWT_AUTH = {
    'JWT_EXPIRATION_DELTA': timedelta(days=30),
    'JWT_REFRESH_EXPIRATION_DELTA': timedelta(days=30),
    'JWT_ALLOW_REFRESH': True,
    # 'JWT_ALGORITHM': 'RS256',
    'JWT_AUTH_HEADER_PREFIX': 'JWT',
}
CORS_ORIGIN_WHITELIST = [
    'http://localhost:4200'
]
ACCESS_CONTROL_ALLOW_ORIGIN = [
    'http://localhost:4200'
]
ACCESS_CONTROL_ALLOW_CREDENTIAL = True
# NOTE(review): CORS_ORIGIN_ALLOW_ALL = True presumably overrides the
# whitelist above (any origin accepted) -- confirm this is intended
# outside of local development.
CORS_ORIGIN_ALLOW_ALL = True
# DATABASES = {}
# DATABASES['default'] = dj_database_url.config(conn_max_age=600)
# DATABASES['default'] = dj_database_url.config(default='postgres://...')
# django_heroku.settings(locals())
# del DATABASES['default']['OPTIONS']['sslmode']
| [
"frank.agbeko@amalitech.org"
] | frank.agbeko@amalitech.org |
419c0aaf771b74098121ba21ec364f0ae708f144 | a672f92dba39ce7cab2bf6ce3276ee06ff20b3be | /classification/util.py | b8cb62de26385e36f144c3a59e5d6378b687ea07 | [
"MIT"
] | permissive | BobbyZhouZijian/AI-Algo-Implmentations | 6af10a23276492d735686aeacdfa29257d406295 | 5592d3c358cc1611a1bde61797b93c0d6eee10c6 | refs/heads/main | 2023-08-05T06:37:04.027003 | 2021-09-30T05:30:31 | 2021-09-30T05:30:31 | 370,375,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | import pandas as pd
import numpy as np
import math
def get_input_label_split(train_data, label_name=None):
    """Split a DataFrame into a feature matrix and, optionally, a label vector.

    Parameters
    ----------
    train_data : pd.DataFrame
        Input data, one row per sample.
    label_name : str or None
        Name of the label column.  If None the whole frame is returned as a
        single numpy array (no split).

    Returns
    -------
    np.ndarray or (np.ndarray, np.ndarray)
        Features only when label_name is None, otherwise (features, labels).
    """
    # `is None` rather than `== None`: identity is the correct idiom and is
    # not fooled by objects overriding __eq__.
    if label_name is None:
        return train_data.to_numpy()
    y = train_data[label_name].to_numpy()
    train = train_data.drop(columns=[label_name]).to_numpy()
    return train, y
def get_accuracy(pred, y, thres=0.5):
    """Fraction of thresholded predictions that match the labels.

    Args:
        pred: sequence of scores; a score > thres is treated as class 1.
        y: sequence of 0/1 labels, same length as pred.
        thres: decision threshold (default 0.5).

    Returns:
        Accuracy in [0, 1]; 0.0 for empty input (avoids ZeroDivisionError).

    Raises:
        Exception: if pred and y have different lengths (message unchanged
            for compatibility with existing callers).
    """
    if len(pred) != len(y):
        raise Exception(f"size of pred is inconsistent with y. Expected pred \
        to have size {len(y)} but got {len(pred)}")
    total = len(pred)
    if total == 0:
        return 0.0
    acc_cnt = sum(
        1 for score, label in zip(pred, y) if (1 if score > thres else 0) == label
    )
    return acc_cnt / total
def get_precision(pred, y, thres=0.5):
    """Accuracy restricted to the samples whose true label is non-zero.

    NOTE(review): despite the name, this measures correctness among actual
    positives (i.e. recall for binary labels), not precision.  Behaviour is
    kept unchanged so existing callers are unaffected.

    Args:
        pred: sequence of scores; a score > thres is treated as class 1.
        y: sequence of 0/1 labels, same length as pred.
        thres: decision threshold (default 0.5).

    Returns:
        Hit rate among non-zero labels; 0.0 when y contains no positives
        (previously this raised ZeroDivisionError).

    Raises:
        Exception: if pred and y have different lengths.
    """
    if len(pred) != len(y):
        raise Exception(f"size of pred is inconsistent with y. Expected pred \
        to have size {len(y)} but got {len(pred)}")
    positive_idx = [i for i in range(len(y)) if y[i] != 0]
    if not positive_idx:
        return 0.0
    hits = sum(
        1 for i in positive_idx if (1 if pred[i] > thres else 0) == y[i]
    )
    return hits / len(positive_idx)
def sigmoid(x):
    """Elementwise logistic function: 1 / (1 + e^(-x))."""
    negative_exp = np.exp(-x)
    return 1.0 / (1.0 + negative_exp)
def calculate_entropy(df):
    """Shannon entropy (base 2) of the 'Decision' column of df.

    Fixes two NameErrors in the original (`dicisions` vs `decisions` and
    `num_of_decisons` vs `num_of_decisions`) that made the function crash
    on every call.

    Args:
        df: DataFrame with a 'Decision' column of class labels.

    Returns:
        float: entropy in bits; 0.0 when only one class is present.
    """
    instances = df.shape[0]
    counts = df['Decision'].value_counts().tolist()
    entropy = 0.0
    for num_of_decisions in counts:
        class_probability = num_of_decisions / instances
        entropy -= class_probability * math.log(class_probability, 2)
    return entropy
def discretize(df_col):
    '''
    Discretize a column when it contains 7 or more distinct values.
    Returns:
        None if the column does NOT need to be discretized (< 7 distinct values)
        otherwise a numpy array of (low, high) interval tuples, one per row,
        bucketing each value into one of 8 std-dev-based intervals
    '''
    distinct = np.unique(df_col.to_numpy())
    if len(distinct) < 7:
        # if number of distinct elements is less than 7
        # do nothing and return None
        return None
    else:
        # get the mean, std, min and max of the df column
        # NOTE(review): minm and maxm are computed but never used below.
        mean = df_col.mean()
        std = df_col.std()
        minm = df_col.min()
        maxm = df_col.max()
        # sort values into buckets at mean +/- k*std for k in -3..3
        scaler = [-3, -2, -1, 0, 1, 2, 3]
        values = []
        for i, scale in enumerate(scaler):
            # The first iteration appends two intervals: (-inf, mean-3*std)
            # plus the regular (mean-3*std, mean-2*std) slice, giving 8 total.
            if i == 0:
                values.append((float('-inf'), scale * std + mean))
            if i == len(scaler)-1:
                values.append((scale * std + mean, float('inf')))
            else:
                next_scale = scaler[i+1]
                values.append((scale * std + mean, next_scale * std + mean))
        # assign the values to the discretized intervals
        # (boundaries are inclusive on both sides; the first match wins)
        to_replace = np.zeros(len(df_col), dtype=tuple)
        for i in range(len(df_col)):
            cur_val = df_col.iloc[i]
            for v in values:
                if cur_val >= v[0] and cur_val <= v[1]:
                    to_replace[i] = v
                    break
        return to_replace
| [
"zephyroszhou@gmail.com"
] | zephyroszhou@gmail.com |
100bda6658216fe7f659d6bfc212e9f3abf66c93 | 40b407afc90402e8374f44a504c286c962f72f76 | /run.py | 763bbea9b9844c06c4c16006597f6ee15e168b7d | [] | no_license | ThatTechGuy/RF-DCA | bce60ca5de196fa4408ff229cefb01b1ec06746b | ccfd25f6e51515ed47127f95c3dc7ac61f5e1c4c | refs/heads/master | 2021-01-17T20:02:40.371378 | 2016-05-07T14:59:02 | 2016-05-07T14:59:02 | 45,054,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | from dca import app
if __name__ == '__main__':
    # Start the application's built-in server when executed directly
    # (presumably a Flask app -- confirm in the dca package).
    app.run()
| [
"rmartin@fullsail.edu"
] | rmartin@fullsail.edu |
e25fd776db4cf8dfcdb7f6e854d3db92deb6dbc6 | 00da73f35308b860ef9a3c6eb6cdaf8c89608f57 | /deps/requests/adapters.py | cdaabdbee6f16c829f051891b4fe6ff7b718df96 | [
"MIT"
] | permissive | kylebebak/Requester | 32abf8a56ba0e9e42fdd25b13ce48d40a87f20e0 | 7f177bc417c45fd1792c6020543a4c6909e3ea21 | refs/heads/master | 2022-07-17T11:09:30.238568 | 2022-05-05T17:31:48 | 2022-05-05T17:38:56 | 89,746,594 | 333 | 16 | MIT | 2021-02-23T14:43:12 | 2017-04-28T21:37:08 | Python | UTF-8 | Python | false | false | 20,880 | py | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.response import HTTPResponse
from urllib3.util import Timeout as TimeoutSauce
from urllib3.util.retry import Retry
from urllib3.exceptions import ClosedPoolError
from urllib3.exceptions import ConnectTimeoutError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import NewConnectionError
from urllib3.exceptions import ProxyError as _ProxyError
from urllib3.exceptions import ProtocolError
from urllib3.exceptions import ReadTimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import ResponseError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
get_encoding_from_headers, prepend_scheme_if_needed,
get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema)
from .auth import _basic_auth_str
try:
from urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
    """The Base Transport Adapter"""
    # Abstract interface: concrete adapters must override send() and close().
    def __init__(self):
        super(BaseAdapter, self).__init__()
    def send(self, request, stream=False, timeout=None, verify=True,
             cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
    def close(self):
        """Cleans up adapter specific items.
        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        """Build the adapter and initialize its urllib3 pool manager.

        :param pool_connections: number of connection pools to cache.
        :param pool_maxsize: maximum connections to keep per pool.
        :param max_retries: retry policy; an int or a urllib3 ``Retry``.
        :param pool_block: whether pools block when exhausted.
        """
        # Sentinel comparison keeps the documented default of not retrying
        # failed connections (see the class docstring).
        if max_retries == DEFAULT_RETRIES:
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}
        super(HTTPAdapter, self).__init__()
        # Saved so the adapter can be pickled and rebuilt
        # (see __getstate__/__setstate__).
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
    def __setstate__(self, state):
        """Restore pickled attributes and rebuild the (unpicklable) pool manager."""
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        for attr, value in state.items():
            setattr(self, attr, value)
        # Recreate the pool manager from the restored pool settings.
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling (restored in __setstate__)
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: The SSL certificate to verify.
        :raises IOError: if the CA bundle, client cert or key path does not exist.
        """
        if url.lower().startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify
            # Fall back to the bundled CA store (certifi, possibly zipped).
            if not cert_loc:
                cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
            if not cert_loc or not os.path.exists(cert_loc):
                raise IOError("Could not find a suitable TLS CA certificate bundle, "
                              "invalid path: {0}".format(cert_loc))
            conn.cert_reqs = 'CERT_REQUIRED'
            # A directory is treated as a CA directory, a file as a CA bundle.
            if not os.path.isdir(cert_loc):
                conn.ca_certs = cert_loc
            else:
                conn.ca_cert_dir = cert_loc
        else:
            # Verification disabled or plain HTTP: clear any CA configuration.
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
            conn.ca_cert_dir = None
        if cert:
            # cert may be a (cert_file, key_file) pair or a single combined file.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
                conn.key_file = None
            if conn.cert_file and not os.path.exists(conn.cert_file):
                raise IOError("Could not find the TLS certificate file, "
                              "invalid path: {0}".format(conn.cert_file))
            if conn.key_file and not os.path.exists(conn.key_file):
                raise IOError("Could not find the TLS key file, "
                              "invalid path: {0}".format(conn.key_file))
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        # Intentional no-op hook: subclasses override this to inject headers.
        pass
    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The url of the proxy being used for this request.
        :rtype: dict
        """
        headers = {}
        # Only credentials embedded in the proxy URL produce a header.
        username, password = get_auth_from_url(proxy)
        if username:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)
        return headers
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple or urllib3 Timeout object
        :param verify: (optional) Either a boolean, in which case it controls whether
            we verify the server's TLS certificate, or a string, in which case it
            must be a path to a CA bundle to use
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        :rtype: requests.Response
        """
        # Resolve the connection pool, configure TLS, and pick the URL form
        # (absolute vs path-only, depending on proxying).
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)
        # A body without an explicit Content-Length must be sent chunked.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        # Normalise the timeout argument into a urllib3 Timeout object.
        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        elif isinstance(timeout, TimeoutSauce):
            pass
        else:
            # A single value applies to both connect and read.
            timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                # Simple path: hand the whole request to urllib3 in one call.
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            else:
                # Chunked path: drive the low-level httplib connection by hand
                # so each body chunk is framed per the chunked encoding.
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    # Emit each chunk as "<hex length>\r\n<data>\r\n".
                    for i in request.body:
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    # Zero-length chunk terminates the body.
                    low_conn.send(b'0\r\n\r\n')
                    # Receive the response from the server
                    try:
                        # For Python 2.7+ versions, use buffering of HTTP
                        # responses
                        r = low_conn.getresponse(buffering=True)
                    except TypeError:
                        # For compatibility with Python 2.6 versions and back
                        r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
        # Translate urllib3/socket errors into requests' exception types.
        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)
        except MaxRetryError as e:
            if isinstance(e.reason, ConnectTimeoutError):
                # TODO: Remove this in 3.0.0: see #2811
                if not isinstance(e.reason, NewConnectionError):
                    raise ConnectTimeout(e, request=request)
            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)
            if isinstance(e.reason, _ProxyError):
                raise ProxyError(e, request=request)
            if isinstance(e.reason, _SSLError):
                # This branch is for urllib3 v1.22 and later.
                raise SSLError(e, request=request)
            raise ConnectionError(e, request=request)
        except ClosedPoolError as e:
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                # This branch is for urllib3 versions earlier than v1.22
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
| [
"kylebebak@gmail.com"
] | kylebebak@gmail.com |
c694bd620048754305caaa671e59cd8415f16dab | 077e1d088b31e8858e6f2d7f855e18d9a7a0ac09 | /creditManage/views.py | 753f8c72207ffeb123a0ec7552b56cb4b619ded3 | [] | no_license | sagatachatterjee/Credit-Management | 9382a16cf4c10269a2d5f882d880c2647f40b5a4 | 6a7b57222f2e2f3b9891ce418cb325d420d64533 | refs/heads/master | 2020-05-03T11:41:21.998242 | 2019-03-21T09:37:53 | 2019-03-21T09:37:53 | 178,606,533 | 1 | 0 | null | 2019-03-30T20:11:25 | 2019-03-30T20:11:25 | null | UTF-8 | Python | false | false | 2,100 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from .models import user_detail,transaction
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
# Module-level mutable state: id of the user last selected via detail();
# read by detail1() as the transfer source. NOTE(review): shared across all
# requests/sessions -- confirm this is acceptable for concurrent users.
ids=0
def index(request):
    """Render the application landing page."""
    context = {}
    return render(request, 'creditManage/index.html', context)
def view_usr(request):
    """Render the page listing every registered user."""
    users = user_detail.objects.all()
    return render(request, 'creditManage/view_usr.html', {"data": users})
def view_trans(request):
    """Render the page listing every recorded transaction."""
    transactions = transaction.objects.all()
    return render(request, 'creditManage/view_trans.html', {"data": transactions})
def transfer(request):
    """Render the transfer page (pick the sending user)."""
    users = user_detail.objects.all()
    return render(request, 'creditManage/transfer.html', {"data": users})
def transfer_to(request):
    """Render the transfer-target page (pick the receiving user)."""
    users = user_detail.objects.all()
    return render(request, 'creditManage/transfer_to.html', {"data": users})
@csrf_exempt
def detail1(request):
    """Transfer credit from the user previously selected via detail() (the
    module-level ``ids`` global) to the user given in POST ``id``.

    POST parameters:
        id: primary key of the receiving user.
        credit: amount of credit to transfer.

    Returns HttpResponse('error') when the sender is missing or has
    insufficient credit, HttpResponse('success') otherwise. Non-POST
    requests fall through and return None, as before.
    """
    if not request.POST:
        return None
    receiver_id = int(request.POST.get('id'))
    amount = int(request.POST.get('credit'))
    # Targeted lookups replace the original full-table scan.
    sender = user_detail.objects.filter(id=ids).first()
    receiver = user_detail.objects.filter(id=receiver_id).first()
    # Validate the sender's balance *before* touching either row. The
    # original loop could credit the receiver and only then discover the
    # sender's shortfall and return 'error', leaving a partial transfer.
    if sender is None or sender.credit < amount:
        return HttpResponse('error')
    user_detail.objects.filter(id=sender.id).update(credit=sender.credit - amount)
    t_from = sender.name
    t_to = ""
    if receiver is not None:
        user_detail.objects.filter(id=receiver.id).update(credit=receiver.credit + amount)
        t_to = receiver.name
    transaction.objects.create(trac_from=t_from, trac_to=t_to, credit=amount)
    return HttpResponse('success')
@csrf_exempt
def detail(request):
    """Remember the id of the selected sender.

    Stores POST ``id`` into the module-level ``ids`` global (read later by
    detail1() as the transfer source), but only when it belongs to an
    existing user -- matching the original scan over all user_detail rows.
    Always returns HttpResponse('success').
    """
    global ids
    if request.POST:
        selected = int(request.POST.get('id'))
        # exists() replaces the original full-table scan and debug prints.
        if user_detail.objects.filter(id=selected).exists():
            ids = selected
    return HttpResponse('success')
| [
"noreply@github.com"
] | noreply@github.com |
440528b10e009c56ce24ed669064d788fd80bd40 | 5e6a21328057f91d489319533e1927b8107b9e0c | /Tests/test_mixture.py | 92eabbb87afe89edf29f0e699e6b252eaf5bcab2 | [
"BSD-3-Clause"
] | permissive | murrayrm/BioCRNPyler | d84437326742a04ac508a7e068c19a8c8816d7d7 | 2e7d4c521b1ebdf7cff6867b25cbee014e0ee1a3 | refs/heads/master | 2020-07-05T10:12:56.382470 | 2020-04-23T20:03:01 | 2020-04-23T20:03:01 | 202,620,151 | 1 | 0 | BSD-3-Clause | 2019-08-15T22:35:16 | 2019-08-15T22:35:16 | null | UTF-8 | Python | false | false | 3,540 | py | # Copyright (c) 2019, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
from unittest import TestCase
class TestMixture(TestCase):
    """Unit tests for biocrnpyler's Mixture container."""
    def test_add_species(self):
        # add_species should record the species; non-Species input must be
        # rejected with an AssertionError.
        from biocrnpyler import Mixture
        from biocrnpyler import Species
        species = Species('test_species')
        mixture = Mixture()
        mixture.add_species(species)
        self.assertEqual([species], mixture.added_species)
        with self.assertRaises(AssertionError):
            mixture.add_species(['ok', 'ok'])
    def test_add_components(self):
        # add_components accepts Component objects only; a Species must be
        # rejected with an AssertionError.
        from biocrnpyler import Mixture
        from biocrnpyler import Component
        from biocrnpyler import Species
        mixture = Mixture()
        self.assertTrue(len(mixture.components) == 0)
        component = Component('test_comp')
        mixture.add_components(component)
        self.assertTrue(component in mixture.components)
        species = Species('test_species')
        with self.assertRaises(AssertionError):
            mixture.add_components(species)
    def test_update_species(self):
        # Species passed at construction and species contributed by added
        # components must both appear in update_species().
        from biocrnpyler import Mixture
        from biocrnpyler import Species
        from biocrnpyler import DNA
        # from biocrnpyler import Dilution
        species = Species(name='H2O')
        mixture = Mixture(species=[species])
        self.assertTrue(species in mixture.update_species())
        dna = DNA(name='test_DNA')
        mixture.add_components(dna)
        crn_list = mixture.update_species()
        for s_dna in dna.update_species():
            self.assertTrue(s_dna in crn_list)
        # Currently, there is no global mechanism that creates new species
        # dilution_mechanism = Dilution()
        # global_mechanisms = {"dilution": dilution_mechanism}
        #
        # mixture = Mixture(global_mechanisms=global_mechanisms)
        # mixture.update_species()
    def test_update_reactions(self):
        # update_reactions requires update_species() to have run first, and
        # must collect the reactions produced by each component.
        from biocrnpyler import Mixture
        from biocrnpyler import Reaction
        from biocrnpyler import Component
        mixture = Mixture()
        with self.assertRaises(AttributeError):
            mixture.update_reactions()
        component = Component(name='test_component')
        def mock_update_reactions():
            rxn = Reaction(inputs=[], outputs=[], k=0.1)
            return [rxn]
        component.update_reactions = mock_update_reactions
        mixture.add_components(component)
        mixture.update_species()
        crn_rxn = mixture.update_reactions()
        crn_rxn_mock = mock_update_reactions()
        self.assertEqual(crn_rxn, crn_rxn_mock)
        # TODO add test for reactions added by global mechanisms
    def test_compile_crn(self):
        # compile_crn should produce a CRN equivalent to one constructed
        # directly from the same species and reactions.
        from biocrnpyler import ChemicalReactionNetwork
        from biocrnpyler import Species
        from biocrnpyler import Reaction
        from biocrnpyler import Mixture
        a = Species(name='a')
        b = Species(name='b')
        species_list = [a, b]
        def mock_update_reactions():
            rxn = Reaction(inputs=[a], outputs=[b], k=0.1)
            return [rxn]
        rxn = Reaction(inputs=[a], outputs=[b], k=0.1)
        CRN = ChemicalReactionNetwork(species_list, [rxn])
        mixture = Mixture(species=species_list)
        mixture.update_reactions = mock_update_reactions
        crn_from_mixture = mixture.compile_crn()
        self.assertEqual(CRN.species, crn_from_mixture.species)
        self.assertEqual(CRN.reactions, crn_from_mixture.reactions)
| [
"zoltuz@gmail.com"
] | zoltuz@gmail.com |
705b762aac98d0ee84e4cbc313bdf63e847cf8f6 | 79b2b24205f17ade5b41f3df5bd0869b87b3fa1e | /xfrmer.py | 659d75702950f2f0104abd94f46b802ffa978ae5 | [] | no_license | metrologist/Current-Transformer-Scale | 3539d129c31934053e286c621cf71748a5d00ee4 | 004987f058796de19cbc2efbed8ae305c1b74f5b | refs/heads/master | 2020-05-07T21:45:48.044136 | 2019-04-12T02:49:19 | 2019-04-12T02:49:19 | 180,917,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,867 | py | from __future__ import division
from __future__ import print_function
class TRANSFORMER(object):
    """
    Captures the essential structure of a transformer. It is specifically
    constructed for use with MSL's two-stage primary reference current
    transformers. These transformers have a main secondary and a second-core
    auxiliary secondary. Primary windings are around both cores, layered in
    groups, with each group having a series or parallel connection. It should
    also be usable for a single-core transformer with multiple secondary taps
    and fixed or window-wound primary windings.

    There will likely be some experimentation with lists and dictionaries as
    to how best to describe a transformer. It should be possible to include
    calibration constants.
    """
    def __init__(self, primaries, secondaries, cores, type):
        """
        :param primaries: a list of sets of primary windings
        :param secondaries: a list of sets of secondary windings
        :param cores: a list of cores (either 1 or 2 cores)
        :param type: either 'current' or 'voltage'
        """
        self.primaries = primaries
        self.secondaries = secondaries
        self.cores = cores
        # assert kept (rather than raise ValueError) so callers that catch
        # AssertionError keep working.
        assert type in ['voltage', 'current'], "transformer type must be voltage or current"
        self.type = type
    def nominal_ratio(self, primary, secondary):
        """Return the nominal ratio: primary/secondary for a voltage
        transformer, secondary/primary for a current transformer."""
        if self.type == 'voltage':
            rat = primary / secondary
        elif self.type == 'current':
            rat = secondary / primary
        return rat
    def series(self, primary):
        """Return the total turns of a series-connected group of windings."""
        return sum(primary)
    def parallel(self, primary):
        """Return the turns of a parallel-connected group; all windings in
        the group must have the same number of turns."""
        assert all(n == primary[0] for n in primary), \
            'parallel windings must all have identical number of turns'
        return primary[0]
"noreply@github.com"
] | noreply@github.com |
f81d9c26faead21f3a35dec21c9bbdcb0bf3d125 | 7d887c7faca559007eb2000cc5663267db70407d | /Intern Project/Importing Data AMFI/Importing data from API MF.py | 9c9947a7901fd7553663c063a12c776f592ab305 | [] | no_license | yuanmimi/Intern-V2 | b67175850273fe51036656cf33612a6e7b461ab3 | a167003121649f9f5bfd8fb5a3d6143ed70ac27c | refs/heads/master | 2020-07-03T10:47:08.886698 | 2018-05-03T13:01:18 | 2018-05-03T13:01:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 19 11:47:20 2018
@author: ashwin.monpur
"""
import requests
import pandas as pd
#from pymongo import MongoClient
from bs4 import BeautifulSoup
# NOTE(review): the MongoClient import above is commented out, so this line
# raises NameError at runtime -- confirm whether the Mongo path is still
# wanted. Also, `conn` would be a pymongo Database; insert_many() below is a
# *Collection* method, so this likely needs conn['<collection>'].insert_many.
conn = MongoClient()['mf']
from sqlalchemy import create_engine
# NOTE(review): the password 'pass@123' contains '@', which will confuse the
# URL parser (it splits credentials/host at '@'); URL-encode it as pass%40123.
engine = create_engine("mysql://neel:pass@123@localhost/mf")
# One date per day from April 2006 to today; pd.datetime is deprecated in
# newer pandas -- prefer datetime.datetime. TODO confirm pandas version.
dt_range = pd.date_range(start='04-01-2006', end=pd.datetime.today()).tolist()
while dt_range:
    # Fetch the AMFI NAV history report for a single day (frmdt == todt).
    dt = dt_range.pop(0).strftime('%d-%b-%Y')
    url_tmplt = 'http://portal.amfiindia.com/DownloadNAVHistoryReport_Po.aspx' \
                '?frmdt={0}&todt={1}'
    txt = requests.get(url_tmplt.format(dt, dt)).text
    # Data rows are ';'-separated; the first such row carries column names.
    dat = [i.strip() for i in txt.split('\n') if ';' in i]
    dat = [i.split(';') for i in dat]
    df = pd.DataFrame(dat)
    df.columns = [i.replace(' ', '_') for i in df.iloc[0]]
    df = df.drop(0)
    df.Date = pd.to_datetime(df.Date, format='%d-%b-%Y')
    # NOTE(review): pandas expects orient='records' (plural); 'record' is
    # likely to raise -- verify.
    conn.insert_many(df.to_dict(orient='record'))
    # One MySQL table per year, e.g. daily_data_2006.
    df.to_sql('daily_data_'+dt.split('-')[-1], engine, if_exists='append')
    print(dt)
# Exploratory scrape of a fund-card page; the find_all result is discarded.
txt = requests.get('http://fundpicker.thefundoo.com/FundCard/1916/Tata-Equity-PE--G-').text
soup = BeautifulSoup(txt,'lxml')
soup.find_all('tbody',{'id':'tbody_consist'})
tbody_consist | [
"noreply@github.com"
] | noreply@github.com |
d850a1c802074e8537dcabef5abf6157fab5c3a0 | db5e84b22b184242b406bb689b84dba4813b179d | /package_example/example_package/__init__.py | ea725f6c05a849f2cdf0a9597195459448fe1733 | [] | no_license | ErikBjare/python-examples | 46b23629d380efe94ae965256083060f616031c1 | 0f030392bfdf17b93dd14844cb5c35c09f6e8840 | refs/heads/master | 2021-01-16T18:19:14.676416 | 2015-05-08T11:28:40 | 2015-05-08T11:28:40 | 35,238,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # If you want to import all variables in the functions.py file directly into the example_package namespace, use this
from .functions import *
# If you want to import the functions.py file into its own sub-namespace (example_package.functions), use this
from . import functions
| [
"erik.bjareholt@gmail.com"
] | erik.bjareholt@gmail.com |
a270947c1b4f962a0d9e5be8ec990bbefd2b4a32 | 3a39ddc4a8600ffc5110453867370c1d8e2da121 | /x11-libs/libXcomposite/libXcomposite-0.4.3.py | 8ce4b041dc0124e9f86b8c9c3514052f3dd809a7 | [] | no_license | seqizz/hadron64 | f2276133786c62f490bdc0cbb6801491c788520f | ca6ef5df3972b925f38e3666ccdc20f2d0bfe87e | refs/heads/master | 2021-01-18T04:53:09.597388 | 2013-02-25T21:25:32 | 2013-02-25T21:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | metadata = """
summary @ X11 Composite extension library
homepage @ http://xorg.freedesktop.org/
license @ MIT
src_url @ http://xorg.freedesktop.org/releases/individual/lib/libXcomposite-$version.tar.bz2
arch @ ~x86
"""
depends = """
runtime @ x11-libs/libXfixes x11-proto/compositeproto
"""
#srcdir = "libXcomposite-%s" % version
def configure():
    # Build-system hook: `conf` is presumably injected by the packaging
    # framework at execution time (it is not imported here) -- confirm.
    conf(
        "--disable-static")
def install():
    # Build-system hook: `raw_install`, `install_dir` and `insdoc` are
    # presumably injected by the packaging framework -- confirm.
    raw_install("DESTDIR=%s" % install_dir)
    insdoc("COPYING")
| [
"bburaksezer@gmail.com"
] | bburaksezer@gmail.com |
661cac8acf0eadfcb8a1d63605e97bdbdb2e9740 | 2652fd6261631794535589427a384693365a585e | /trunk/workspace/Squish/src/TestScript/UI/suite_UI_62/tst_UI_62_Cellular_design/test.py | 4b116d08c137cfe84f4e37aea4edc7de3cf116e4 | [] | no_license | ptqatester1/ptqa | 88c652380167f64a953bfd7a65041e7d8ac48c90 | 5b5997ea459e9aac17db8da2041e2af331927104 | refs/heads/master | 2021-01-21T19:06:49.275364 | 2017-06-19T03:15:00 | 2017-06-19T03:15:00 | 92,115,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,323 | py | ######################
#Author: Alex Leung ##
######################
from API.Utility import UtilConst
from API.Utility.Util import Util
from API.ComponentBox import ComponentBoxConst
from API.Device.EndDevice.PC.PC import PC
from API.Device.CellTower.CellTower import CellTower
from API.Device.COServer.COServer import COServer
from API.Toolbar.GoldenPhysicalToolbar.GoldenPhysicalToolbarConst import GoldenPhysicalToolbarConst
from API.Toolbar.GoldenPhysicalToolbar.GoldenPhysicalToolbar import GoldenPhysicalToolbar
from API.SimulationPanel.EventList.EventList import EventList
from API.SimulationPanel.PlayControls.PlayControls import PlayControls
from API.functions import check
from API.Workspace.Physical import Physical
from API.Device.DeviceBase.ServicesBase.ServicesBaseConst import ServicesConst
#function initialization
util = Util()  # general workspace/test utilities
# Devices to be placed on the logical workspace: (model, x, y, display name).
pda0 = PC(ComponentBoxConst.DeviceModel.PDA, 200, 100, "Pda0")
pda1 = PC(ComponentBoxConst.DeviceModel.PDA, 200, 200, "Pda1")
ct = CellTower(ComponentBoxConst.DeviceModel.CELL_TOWER, 100, 100, "Cell Tower0")
cos = COServer(ComponentBoxConst.DeviceModel.CO_SERVER, 100, 200, "Central OfficeServer0")
# Physical-workspace toolbar driver and its Squish object-name constants.
gpt = GoldenPhysicalToolbar()
gptc = GoldenPhysicalToolbarConst()
def main():
    """Entry point: build the cellular topology, verify its configuration,
    then exercise the physical-workspace move scenario."""
    util.init()
    maketop()
    checksettings()
    movephysical()
def maketop():
    """Place the devices and wire the cell tower to the CO server."""
    pda0.create()
    pda1.create()
    ct.create()
    cos.create()
    # Coaxial link between tower port Coaxial0 and server port Coaxial0/0.
    ct.connect(cos, ComponentBoxConst.Connection.CONN_COAXIAL, "Coaxial0", "Coaxial0/0")
    util.speedUpConvergence()
def checksettings():
    """Verify the default cellular configuration on the tower and server.

    Step order matters: each click drives the GUI into the state the next
    checkpoint expects.
    """
    ct.select()
    ct.clickConfigTab()
    ct.close()
    cos.select()
    cos.clickConfigTab()
    # Cell Tower interface defaults: IPv4, IPv6 and link-local address.
    cos.config.selectInterface('Cell Tower')
    cos.config.interface.cellTower.check.ip("172.16.1.1")
    cos.config.interface.cellTower.check.subnet('255.255.255.0')
    cos.config.interface.cellTower.check.ipv6("2001::1")
    cos.config.interface.cellTower.check.subnetv6("64")
    cos.config.interface.cellTower.check.linkLocal("FE80::[A-F\d]{1,4}:[A-F\d]{1,4}:[A-F\d]{1,4}:[A-F\d]{1,4}")
    cos.clickServicesTab()
    # DHCP pool defaults: 172.16.1.100 onwards, 50 users max.
    cos.services.selectInterface('DHCP')
    cos.services.dhcp.check.ip("172.16.1.1")
    cos.services.dhcp.check.subnet("255.255.255.0")
    cos.services.dhcp.check.startIp1("172")
    cos.services.dhcp.check.startIp2('16')
    cos.services.dhcp.check.startIp3('1')
    cos.services.dhcp.check.startIp4('100')
    cos.services.dhcp.check.maxUsers('50')
    # DHCPv6 should be on by default, with one prefix and one local entry.
    cos.services.selectInterface('DHCPv6')
    #cos.services.dhcpv6.on()
    cos.services.dhcpv6.check.on(True)
    test.compare(findObject(cos.squishName + ServicesConst.dhcpv6.PREFIX_TABLE).rowCount, 1)
    test.compare(findObject(cos.squishName + ServicesConst.dhcpv6.LOCAL_TABLE).rowCount, 1)
    # CELL TOWER service page: one tower listed (also after refresh), and
    # selecting it shows the two attached PDAs.
    cos.services.selectInterface("CELL TOWER")
    test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_TOWER_LIST).rowCount, 1)
    cos.services.cellTower.refreshButton()
    test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_TOWER_LIST).rowCount, 1)
    cos.services.cellTower.clickItem("0/0")
    test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_DEVICE_LIST).rowCount, 2)
    cos.services.selectInterface("PAP/CHAP")
    cos.close()
def movephysical():
    """Move the smartphone out of, then back into, cell-tower range on the
    physical workspace and verify connectivity in each position."""
    util.clickOnPhysical()
    # Navigate to the smartphone and relocate it to the Intercity level.
    gpt.clickButton(gptc.NAVIGATION)
    gpt.clickItem(gptc.NAVIGATION_LIST, "Intercity_1.Home City.Corporate Office.Smartphone0")
    gpt.clickButton(gptc.JUMP_TO_SELECTED_LOCATION)
    # gpt.scrollTo(gptc.RACK_VIEW_V_SCROLL_BAR, 409)
    # gpt.scrollTo(gptc.RACK_VIEW_V_SCROLL_BAR, 818)
    gpt.clickButton(gptc.MOVE_OBJECT)
    util.clickOnPhysicalWorkspace(172, 215)
    #mouseClick(waitForObject(gptc.TABLE1_DEVICE1), 39, 848, 0, Qt.LeftButton)
    #sendEvent("QMouseEvent", waitForObject(gptc.TABLE1_DEVICE1), QEvent.MouseButtonRelease, 38, 95, Qt.LeftButton, 0, 0)
    activateItem(waitForObjectItem(gptc.MOVE_DROPDOWN, "Move to Intercity"))
    snooze(5)
    #gpt.clickButton(gptc.NAVIGATION)
    gpt.clickItem(gptc.NAVIGATION_LIST, "Intercity_1")
    gpt.clickButton(gptc.JUMP_TO_SELECTED_LOCATION)
    # Drag the phone far from the tower (out of coverage).
    smartphone = Physical().getObject('Smartphone0')
    util.dragAndDrop(smartphone, 10, 10, UtilConst.PHYSICAL_WORKSPACE, 500, 300)
    util.clickOnLogical()
    # Out of range: the ping to the CO server must get no replies.
    pda0.select()
    pda0.clickDesktopTab()
    pda0.desktop.applications.commandPrompt()
    pda0.desktop.commandPrompt.setText("ping 172.16.1.1")
    util.fastForwardTime()
    pda0.desktop.commandPrompt.textCheckPoint("Received = 0", 1)
    #checkpoint phone outside range
    #checkpoint phone not getting reception
    pda0.close()
    # Drag the phone back near the tower (into coverage).
    util.clickOnPhysical()
    smartphone = Physical().getObject('Smartphone0')
    util.dragAndDrop(smartphone, 10, 10, UtilConst.PHYSICAL_WORKSPACE, 200, 200)
    util.clickOnLogical()
    util.clickOnSimulation()
    # Broadcast ping in simulation mode: ICMP events must reach all three
    # cellular neighbours via the tower.
    pda0.select()
    pda0.clickTab('Desktop')
    pda0.desktop.applications.commandPrompt()
    pda0.desktop.commandPrompt.setText('ping 172.16.255.255')
    PlayControls().captureForward(10)
    foundEvent = []
    foundEvent.append(EventList().findEventAt('Smartphone0', 'Cell Tower0', 'ICMP'))
    foundEvent.append(EventList().findEventAt('Smartphone1', 'Cell Tower0', 'ICMP'))
    foundEvent.append(EventList().findEventAt('Central Office Server0', 'Cell Tower0', 'ICMP'))
    check(not False in foundEvent)
| [
"ptqatester1@gmail.com"
] | ptqatester1@gmail.com |
383fb7131e139dd7887a2437e60848af1d58580f | 70add60ba088146dd4984d232c6a7136f3f57765 | /hack/gopath_from_workspace.py | cd64f6b3532712864f3d8896b95453fbe8b4447d | [
"Apache-2.0"
] | permissive | ericchiang/cluster-registry | 1983a213ede3e23cd2ea3565912787ba706fbe49 | 3551dbcb0da06364fc8c0c1e9c3f1c9230d9f537 | refs/heads/master | 2021-03-30T17:12:13.950798 | 2017-10-20T20:06:49 | 2017-10-23T22:34:44 | 108,303,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Creates a populated GOPATH from the repositories in a bazel workspace.
# Used to ensure that code generation scripts are running against the versions
# of external libraries in the workspace.
#
# Requires an empty temporary directory to be provided as its first argument:
# ./gopath_from_workspace.sh <tmpdir>
#
# This populates the provided directory as a GOPATH.
import os.path
import argparse
import shutil
import string
import subprocess
import xml.etree.ElementTree as ElementTree
def main(tmpdir):
    """Populate *tmpdir* as a GOPATH from the bazel workspace.

    Copies every fetched go_repository into ``<tmpdir>/src/<importpath>``
    and finally the workspace itself into ``src/k8s.io/cluster-registry``.

    :param tmpdir: an empty directory to populate.
    """
    subprocess.check_call(["bazel", "fetch", "//:genfiles_deps"])
    # str.strip() replaces the deprecated string.strip() module function; the
    # behaviour is identical for the str output of check_output on Python 2.
    bazel_external_dir = os.path.join(
        subprocess.check_output(["bazel", "info", "output_base"]).strip(),
        "external")
    workspace_dir = subprocess.check_output(
        ["bazel", "info", "workspace"]).strip()
    # Ask bazel for all go_repository rules, as XML, so we can read their
    # attributes with ElementTree.
    query_result = subprocess.check_output([
        "bazel", "query", "kind(go_repository, //external:*)", "--output", "xml"
    ])
    xml = ElementTree.fromstring(query_result)
    for e in xml.findall("./rule"):
        name = e.find("./string[@name='name']").attrib["value"]
        importpath_element = e.find("./string[@name='importpath']")
        if importpath_element is None:
            # Rule has no Go import path; nothing to place in the GOPATH.
            continue
        import_path = importpath_element.attrib["value"]
        srcdir = os.path.join(bazel_external_dir, name)
        if os.path.exists(srcdir):
            shutil.copytree(
                srcdir, os.path.join(tmpdir, "src", import_path), symlinks=True)
    shutil.copytree(
        workspace_dir,
        os.path.join(tmpdir, "src", "k8s.io", "cluster-registry"),
        symlinks=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("tmpdir")
    args = parser.parse_args()
    # str.strip() instead of the deprecated string.strip() module function.
    main(args.tmpdir.strip())
| [
"dvorakviolinist@gmail.com"
] | dvorakviolinist@gmail.com |
8324ea9c3819e8342075531c35de89cf7e1ffaae | 41495ab6e6e646866d8fb26af504214384fa18f6 | /cloud_scheduler/filters/disk_filter.py | bbdb1c65b8c8256add7e8be0f0425efb5f8190a1 | [] | no_license | glfpes/cloud_scheduler | 8760c0c3be9e537ae2ce040f5fdffd18e8920c0a | 09d9a58a9c23f0794098381e4a9e38336fc69d90 | refs/heads/master | 2021-01-19T00:06:37.698895 | 2016-06-07T09:28:23 | 2016-06-07T09:28:23 | 54,368,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py |
from cloud_scheduler import filters
class DiskFilter(filters.BaseCloudFilter):
    """Cloud filter that passes clouds with enough available disk space."""
    def cloud_passes(self, cloud_state, filter_properties):
        # Cloud passes when its reported disk (GB) covers the requested amount.
        disk_limit_gb = filter_properties['disk_gb']
        return cloud_state['disk_gb'] >= disk_limit_gb
    # NOTE(review): declared @staticmethod yet takes ``self`` --
    # DiskFilter.get_mark() with no arguments raises TypeError, and an
    # instance call passes the instance as this parameter. Confirm how the
    # scheduler invokes it, then drop either the decorator or the parameter.
    @staticmethod
    def get_mark(self):
        return 'disk_gb'
| [
"glfpes@gmail.com"
] | glfpes@gmail.com |
410f0bef7e07bb907331e8704631f93f9140cc98 | edfc985df440d4f5fee3ebbf6a52dfd70baa06e4 | /Funktiot/average.py | 859e2faeca5e0127ba8a1d6ee43c9385377a97a6 | [] | no_license | Sanbu94/Python-kurssi2021 | 9fe5e832b1268a89b2fc9efcaa7062ad307163d4 | e8d5deb7b03eb6f15b5f846d899f221bd5568efe | refs/heads/master | 2023-04-19T04:22:23.281939 | 2021-03-25T15:14:31 | 2021-03-25T15:14:31 | 331,665,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | numbers = [1,2,3,4,5]
# Accumulate with += (the original used -=, which printed the negated
# average) and avoid shadowing the built-in ``sum``.
total = 0
for number in numbers:
    total += number
average = total / len(numbers)
print(average)
print("\n")
numbers = [9, 8, 7, 6, 5]
total = 0
for number in numbers:
    total += number
average = total / len(numbers)
print(average)
# A function is defined with the def keyword. The function's input (parameters) is given inside the parentheses.
def average(numbers):
    """Return the arithmetic mean of *numbers*.

    The original accumulated with -= (negating the result) and had no
    return statement, so it always returned None.
    Raises ZeroDivisionError for an empty sequence, as before.
    """
    total = 0
    for number in numbers:
        total += number
    return total / len(numbers)
"Seppälä"
] | Seppälä |
c39dd68cbb8cce48de736b436b57f5a98d8f5348 | cdbf35888ab546e44cb07e48ca538f1c3184554b | /project00--part1/farid/model/model.py | 0dea252c19a2be5b9217276edc59bc6bb5f30f05 | [] | no_license | farid01683/AI | 85350b85940a2d0c456a3262c7b14d9dfef42f12 | d472d5b14282dea6690cb46af329f90a5cdf09fa | refs/heads/master | 2022-01-22T19:55:44.655532 | 2019-07-24T17:16:34 | 2019-07-24T17:16:34 | 198,030,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from keras.models import Sequential
from keras.layers.core import Dense
def creat_mlp(dim, regress=False):
    """Build a small fully-connected network.

    :param dim: number of input features.
    :param regress: when True, append a single-unit output head.
    :return: an (uncompiled) keras Sequential model.
    """
    model = Sequential()
    # 'input_dim' is the Keras keyword; the original 'inpute_dim' typo made
    # Dense raise a TypeError for the unrecognised argument.
    model.add(Dense(8, input_dim=dim, activation='relu'))
    model.add(Dense(4, activation='relu'))
    if regress:
        # NOTE(review): a 'relu' output clamps predictions to >= 0; general
        # regression heads usually use 'linear' -- confirm intent.
        model.add(Dense(1, activation='relu'))
    return model
"noreply@github.com"
] | noreply@github.com |
3f61ca63f7dff361e526f2cf6d6398aa22d96de4 | 398815d5f6afc295a09badb4ea12f4911aaf39ba | /flasky/migrations/versions/f4efa5ae67c7_.py | 46d8ea1f5ec1fa3f06925c65221bb3646cf3138e | [] | no_license | mookrs/laboratory | 920cc92e116a26d632ec3e8d79438bdd80c4595c | 797191caaf4b15b8e87ec7bc103e25c7244c6b05 | refs/heads/master | 2021-01-17T01:57:10.123565 | 2018-10-02T09:55:51 | 2018-10-02T09:55:51 | 31,777,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | """empty message
Revision ID: f4efa5ae67c7
Revises: f4fb3aa6b327
Create Date: 2016-08-19 11:11:31.888282
"""
# revision identifiers, used by Alembic.
revision = 'f4efa5ae67c7'
down_revision = 'f4fb3aa6b327'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``posts`` table and its ``timestamp`` index (Alembic
    auto-generated migration)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('body', sa.Text(), nullable=True),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
    sa.Column('author_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the ``posts`` table and its index, reversing upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
    op.drop_table('posts')
    ### end Alembic commands ###
| [
"mookrs@gmail.com"
] | mookrs@gmail.com |
59534247ee1449496330021da54fc527d05a14e3 | 34a043e6961639657e36e7ac9fd459ad5b1f6de1 | /openpathsampling/experimental/storage/test_mdtraj_json.py | f3c57c4ad31a103b69866649884b52ccf8542b6a | [
"MIT"
] | permissive | dwhswenson/openpathsampling | edaddc91e443e7ffc518e3a06c99fc920ad9d053 | 3d02df4ccdeb6d62030a28e371a6b4ea9aaee5fe | refs/heads/master | 2023-02-04T12:31:17.381582 | 2023-01-30T21:17:01 | 2023-01-30T21:17:01 | 23,991,437 | 3 | 1 | MIT | 2022-08-12T17:48:04 | 2014-09-13T10:15:43 | Python | UTF-8 | Python | false | false | 2,273 | py | from .mdtraj_json import *
import pytest
import numpy as np
import numpy.testing as npt
from ..simstore.custom_json import bytes_codec, numpy_codec, custom_json_factory
from ..simstore.test_custom_json import CustomJSONCodingTest
from openpathsampling.tests.test_helpers import data_filename
class MDTrajCodingTest(CustomJSONCodingTest):
    """Shared base for mdtraj JSON-codec tests; subclasses set self.codec,
    self.objs and self.dcts in setup()."""
    def setup(self):
        # Skip the whole suite when mdtraj is not installed.
        if not HAS_MDTRAJ:
            pytest.skip()
        self.filename = data_filename('ala_small_traj.pdb')
    def test_default(self):
        # custom for handling numpy: array-valued attributes need
        # npt.assert_array_equal instead of plain ==.
        for (obj, dct) in zip(self.objs, self.dcts):
            default = self.codec.default(obj)
            numpy_attrs = [attr for attr, val in dct.items()
                           if isinstance(val, np.ndarray)]
            other_attrs = [attr for attr, val in dct.items()
                           if not isinstance(val, np.ndarray)]
            for attr in numpy_attrs:
                npt.assert_array_equal(default[attr], dct[attr])
            for attr in other_attrs:
                assert default[attr] == dct[attr]
    def test_round_trip(self):
        # Encode-then-decode with the full codec stack must reproduce objs.
        codecs = [numpy_codec, bytes_codec] + mdtraj_codecs
        encoder, decoder = custom_json_factory(codecs)
        self._test_round_trip(encoder, decoder)
class TestTopologyCoding(MDTrajCodingTest):
def setup(self):
super(TestTopologyCoding, self).setup()
self.codec = top_codec
top = md.load(self.filename).topology
dataframe, bonds = top.to_dataframe()
self.objs = [top]
self.dcts = [{
'__class__': 'Topology',
'__module__': 'mdtraj.core.topology',
'atoms': dataframe.to_json(),
'bonds': bonds
}]
class TestTrajectoryCoding(MDTrajCodingTest):
def setup(self):
super(TestTrajectoryCoding, self).setup()
self.codec = traj_codec
traj = md.load(self.filename)
self.objs = [traj]
self.dcts = [{
'__class__': 'Trajectory',
'__module__': 'mdtraj.core.trajectory',
'xyz': traj.xyz,
'topology': traj.topology,
'time': traj.time,
'unitcell_lengths': traj.unitcell_lengths,
'unitcell_angles': traj.unitcell_angles
}]
| [
"dwhs@hyperblazer.net"
] | dwhs@hyperblazer.net |
f59837294f8f44c5babd41a112e886e751a61e97 | 31401549d7a342b3fcb0f276f20e18f130730c69 | /utils/loadweight.py | 05c9d7ff211cd6d9235020fb2c41f2ffb3f1af14 | [] | no_license | takeitea/Attention-Echino | e79f207010ad9c57b31d39ba8681d2cb0e59643f | e157c99e5784c8dc2470b0d3f3ffa61b7921ce09 | refs/heads/master | 2020-05-21T00:01:06.170506 | 2019-03-06T13:27:52 | 2019-03-06T13:27:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | """
load part of the pre-trained parameters
"""
import os
import torch
import torch.utils.model_zoo as model_zoo
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
def loadcheckpoint(model, optimizer, args):
if args.resume:
if os.path.isfile(args):
print("load checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print(" loaded checkpoint '{}'({}) best_prec: {}".format(args.resume, checkpoint['epoch'], best_prec1))
else:
print("no checkpoint found at {}".format(args.resume))
def loadpartweight(model):
old_dict=model.state_dict()
new_dict=model_zoo.load_url(model_urls['vgg16_bn'])
count_feat=0
count_fetch=0
skip=0
for k,_ in new_dict.items():
if 'features' in k:
count_feat=count_feat+1
for i in range(count_feat):
for k in range(i,len(old_dict)):
if 'num_batches_tracked' in list(old_dict.keys())[k+skip]:
skip+=1
if new_dict[list(new_dict.keys())[i]].size()==old_dict[list(old_dict.keys())[k+skip]].size():
old_dict[list(old_dict.keys())[k+skip]]=list(new_dict.values())[i]
count_fetch+=1
break
old_dict.update()
model.load_state_dict(old_dict)
return model
| [
"945193029@qq.com"
] | 945193029@qq.com |
5c5a5c54b2a1b520926479a05fc3e52a64526556 | 76e6d039e5be0df7b100ee1f91587412b878279e | /Homeworks/HW2.py | a9092f80134194a8a48d3e9b3af6500da56bec9a | [] | no_license | vahaponur/GlobalAIHubPythonCourse | 6b5128807f81c6d0b4db99ff3a20e046552b57cb | f11b1187a5ec189b9656f03fac208b1926cd21c5 | refs/heads/main | 2023-03-02T05:24:01.409809 | 2021-02-08T15:31:43 | 2021-02-08T15:31:43 | 334,997,537 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | #GlobalAIHub Homework 2
#user name and password specified
#UserName and Password
user_name="vonuryil"
password="globalaihub46@"
#get from user
get_user_name=input("User Name: ")
get_password=input("password: ")
#check if it is true
if (user_name == get_user_name and password==get_password):
print("Access Granted")
else:
print("Access Denied")
user_info={"user_name":"vonuryil","password":"dumbpassword"}
get_d_username=input("Dictionary Username: ")
get_d_password=input("Dictionary Password: ")
if (user_info["user_name"] == get_d_username and user_info["password"]==get_d_password):
print("Access Granted")
else:
print("Access Denied")
| [
"noreply@github.com"
] | noreply@github.com |
d6ff585c2ffc693538bd8228c085b5e4a11c85cb | 7bdd1cbff549d403446b9a827b5e7436785885fe | /dashboard/views.py | d37476a0d37763dc0bca569357a46f954e2e2ed6 | [] | no_license | Vaishnavi109/MobileSensorCloud | 6594ee0de89cc7640f46927782e90a2954fb1db1 | 70b09c56b3b0fe48b95c604810fe346acdaf5d5c | refs/heads/master | 2021-01-17T23:58:51.120130 | 2016-04-13T06:59:53 | 2016-04-13T06:59:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from django.shortcuts import render
def dashboard(request):
return render(request, 'dashboard.html', {})
| [
"rishirajdigambar.randive@sjsu.edu"
] | rishirajdigambar.randive@sjsu.edu |
fe1fc30924e2f2f7aaddfc9e65cfe82ff258f58f | b27f5bff09fab83d2a5970034cd2c2351a8346f2 | /outsource/migrations/0007_auto_20170726_1838.py | 37c8a92c36f8a3c4cf274b86c7bb7e7850228f05 | [] | no_license | cafemoa/takeit_server | 079b4561c1c970a6fa5f508a54fb84d6c8d63610 | fa674ae25d8eb3671f2f73ef43fee7744d257814 | refs/heads/master | 2022-12-10T20:41:17.716496 | 2019-01-14T17:47:50 | 2019-01-14T17:47:50 | 100,565,936 | 1 | 0 | null | 2022-11-22T01:45:43 | 2017-08-17T05:44:01 | JavaScript | UTF-8 | Python | false | false | 592 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-26 09:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('outsource', '0006_mydevice'),
]
operations = [
migrations.AlterField(
model_name='mydevice',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='device', to=settings.AUTH_USER_MODEL),
),
]
| [
"canon0115@naver.com"
] | canon0115@naver.com |
2fe653f3c427c1407ff776b05974647bae83e94b | e5504d8c4880993b82d5583a11c5cc4623e0eac2 | /Arrays/twoSum2.py | dacf7a07e9511280bc0929061c05928bfd38bb93 | [] | no_license | noorulameenkm/DataStructuresAlgorithms | e5f87f426fc444d18f830e48569d2a7a50f5d7e0 | 7c3bb89326d2898f9e98590ceb8ee5fd7b3196f0 | refs/heads/master | 2023-06-08T19:29:42.507761 | 2023-05-28T16:20:19 | 2023-05-28T16:20:19 | 219,270,731 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | def pair_with_targetsum(arr, target_sum):
result = []
start, end = 0, len(arr) - 1
while start < end:
sum_ = arr[start] + arr[end]
# sum == target
if sum_ == target_sum:
result.append(start)
result.append(end)
break
# sum > target
elif sum_ > target_sum:
end -= 1
else:
start += 1
return result
def two_sum_pair(arr, target_sum):
nums = {}
for i, num in enumerate(arr):
if target_sum - num in nums:
return [nums[target_sum - num], i]
else:
nums[num] = i
return [-1, -1]
print(pair_with_targetsum([1, 2, 3, 4, 6], 6))
print(pair_with_targetsum([2, 5, 9, 11], 11))
print(two_sum_pair([1, 2, 3, 4, 6], 6))
print(two_sum_pair([2, 5, 9, 11], 11)) | [
"noorul.km@people10.com"
] | noorul.km@people10.com |
608b82ddbdfb60e287c7eefdc12c1720cb30fdaf | 497ead1ee1e09a2530aa771ae059989e341684d7 | /python/cuml/tests/test_preprocessing.py | 5f571d8fd3b65acdba8035c0565dded708eeb5ec | [
"Apache-2.0"
] | permissive | xieliaing/cuml | 193f5753696bbfd4de8e3eaef919c18da2fd1d1a | 78092ddde28d5a810e45d6186f049c1309121408 | refs/heads/master | 2022-11-10T16:45:38.818055 | 2022-11-03T23:12:07 | 2022-11-03T23:12:07 | 159,592,316 | 0 | 0 | Apache-2.0 | 2018-11-29T01:59:07 | 2018-11-29T01:59:07 | null | UTF-8 | Python | false | false | 36,389 | py | # Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml.preprocessing import \
Binarizer as cuBinarizer, \
FunctionTransformer as cuFunctionTransformer, \
KBinsDiscretizer as cuKBinsDiscretizer, \
KernelCenterer as cuKernelCenterer, \
MaxAbsScaler as cuMaxAbsScaler, \
MinMaxScaler as cuMinMaxScaler, \
MissingIndicator as cuMissingIndicator, \
Normalizer as cuNormalizer, \
PolynomialFeatures as cuPolynomialFeatures, \
PowerTransformer as cuPowerTransformer, \
QuantileTransformer as cuQuantileTransformer, \
RobustScaler as cuRobustScaler, \
SimpleImputer as cuSimpleImputer, \
StandardScaler as cuStandardScaler
from cuml.preprocessing import \
add_dummy_feature as cu_add_dummy_feature, \
binarize as cu_binarize, \
maxabs_scale as cu_maxabs_scale, \
minmax_scale as cu_minmax_scale, \
normalize as cu_normalize, \
power_transform as cu_power_transform, \
quantile_transform as cu_quantile_transform, \
robust_scale as cu_robust_scale, \
scale as cu_scale
from sklearn.preprocessing import \
Binarizer as skBinarizer, \
FunctionTransformer as skFunctionTransformer, \
KBinsDiscretizer as skKBinsDiscretizer, \
KernelCenterer as skKernelCenterer, \
MaxAbsScaler as skMaxAbsScaler, \
MinMaxScaler as skMinMaxScaler, \
Normalizer as skNormalizer, \
PolynomialFeatures as skPolynomialFeatures, \
PowerTransformer as skPowerTransformer, \
QuantileTransformer as skQuantileTransformer, \
RobustScaler as skRobustScaler, \
StandardScaler as skStandardScaler
from sklearn.preprocessing import \
add_dummy_feature as sk_add_dummy_feature, \
binarize as sk_binarize, \
maxabs_scale as sk_maxabs_scale, \
minmax_scale as sk_minmax_scale, \
normalize as sk_normalize, \
power_transform as sk_power_transform, \
quantile_transform as sk_quantile_transform, \
robust_scale as sk_robust_scale, \
scale as sk_scale
from sklearn.impute import \
MissingIndicator as skMissingIndicator, \
SimpleImputer as skSimpleImputer
from cuml.testing.test_preproc_utils import \
clf_dataset, int_dataset, blobs_dataset, \
nan_filled_positive, \
sparse_nan_filled_positive, \
sparse_clf_dataset, \
sparse_blobs_dataset, \
sparse_int_dataset, \
sparse_imputer_dataset, \
sparse_dataset_with_coo # noqa: F401
from cuml.testing.test_preproc_utils import assert_allclose
from cuml.metrics import pairwise_kernels
import numpy as np
import cupy as cp
import cupyx as cpx
import scipy
@pytest.mark.parametrize("feature_range", [(0, 1), (.1, 0.8)])
def test_minmax_scaler(failure_logger, clf_dataset, # noqa: F811
feature_range):
X_np, X = clf_dataset
scaler = cuMinMaxScaler(feature_range=feature_range, copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
assert type(t_X) == type(X)
assert type(r_X) == type(t_X)
scaler = skMinMaxScaler(feature_range=feature_range, copy=True)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("feature_range", [(0, 1), (.1, 0.8)])
def test_minmax_scale(failure_logger, clf_dataset, # noqa: F811
axis, feature_range):
X_np, X = clf_dataset
t_X = cu_minmax_scale(X, feature_range=feature_range, axis=axis)
assert type(t_X) == type(X)
sk_t_X = sk_minmax_scale(X_np, feature_range=feature_range, axis=axis)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
def test_standard_scaler(failure_logger, clf_dataset, # noqa: F811
with_mean, with_std):
X_np, X = clf_dataset
scaler = cuStandardScaler(with_mean=with_mean,
with_std=with_std,
copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
assert type(t_X) == type(X)
assert type(r_X) == type(t_X)
scaler = skStandardScaler(with_mean=with_mean,
with_std=with_std,
copy=True)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("with_std", [True, False])
def test_standard_scaler_sparse(failure_logger,
sparse_clf_dataset, # noqa: F811
with_std):
X_np, X = sparse_clf_dataset
scaler = cuStandardScaler(with_mean=False, with_std=with_std, copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
# assert type(t_X) == type(X)
# assert type(r_X) == type(t_X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
if cpx.scipy.sparse.issparse(t_X):
assert cpx.scipy.sparse.issparse(r_X)
if scipy.sparse.issparse(t_X):
assert scipy.sparse.issparse(r_X)
scaler = skStandardScaler(copy=True, with_mean=False, with_std=with_std)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
# The numerical warning is triggered when centering or scaling
# cannot be done as single steps. Its display can be safely disabled.
# For more information see : https://github.com/rapidsai/cuml/issues/4203
@pytest.mark.filterwarnings("ignore:Numerical issues::")
def test_scale(failure_logger, clf_dataset, axis, # noqa: F811
with_mean, with_std):
X_np, X = clf_dataset
t_X = cu_scale(X, axis=axis, with_mean=with_mean,
with_std=with_std, copy=True)
assert type(t_X) == type(X)
sk_t_X = sk_scale(X_np, axis=axis, with_mean=with_mean,
with_std=with_std, copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("with_std", [True, False])
def test_scale_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
with_std):
X_np, X = sparse_clf_dataset
t_X = cu_scale(X, with_mean=False, with_std=with_std, copy=True)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_scale(X_np, with_mean=False, with_std=with_std, copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("axis", [0, 1])
def test_maxabs_scale(failure_logger, clf_dataset, axis): # noqa: F811
X_np, X = clf_dataset
t_X = cu_maxabs_scale(X, axis=axis)
assert type(t_X) == type(X)
sk_t_X = sk_maxabs_scale(X_np, axis=axis)
assert_allclose(t_X, sk_t_X)
def test_maxabs_scaler(failure_logger, clf_dataset): # noqa: F811
X_np, X = clf_dataset
scaler = cuMaxAbsScaler(copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
assert type(t_X) == type(X)
assert type(r_X) == type(t_X)
scaler = skMaxAbsScaler(copy=True)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
def test_maxabs_scaler_sparse(failure_logger,
sparse_clf_dataset): # noqa: F811
X_np, X = sparse_clf_dataset
scaler = cuMaxAbsScaler(copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
# assert type(t_X) == type(X)
# assert type(r_X) == type(t_X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
if cpx.scipy.sparse.issparse(t_X):
assert cpx.scipy.sparse.issparse(r_X)
if scipy.sparse.issparse(t_X):
assert scipy.sparse.issparse(r_X)
scaler = skMaxAbsScaler(copy=True)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("norm", ['l1', 'l2', 'max'])
def test_normalizer(failure_logger, clf_dataset, norm): # noqa: F811
X_np, X = clf_dataset
normalizer = cuNormalizer(norm=norm, copy=True)
t_X = normalizer.fit_transform(X)
assert type(t_X) == type(X)
normalizer = skNormalizer(norm=norm, copy=True)
sk_t_X = normalizer.fit_transform(X_np)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("norm", ['l1', 'l2', 'max'])
def test_normalizer_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
norm):
X_np, X = sparse_clf_dataset
if X.format == 'csc':
pytest.skip("Skipping CSC matrices")
normalizer = cuNormalizer(norm=norm, copy=True)
t_X = normalizer.fit_transform(X)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
normalizer = skNormalizer(norm=norm, copy=True)
sk_t_X = normalizer.fit_transform(X_np)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("norm", ['l1', 'l2', 'max'])
@pytest.mark.parametrize("return_norm", [True, False])
def test_normalize(failure_logger, clf_dataset, axis, norm, # noqa: F811
return_norm):
X_np, X = clf_dataset
if return_norm:
t_X, t_norms = cu_normalize(X, axis=axis, norm=norm,
return_norm=return_norm)
sk_t_X, sk_t_norms = sk_normalize(X_np, axis=axis, norm=norm,
return_norm=return_norm)
assert_allclose(t_norms, sk_t_norms)
else:
t_X = cu_normalize(X, axis=axis, norm=norm, return_norm=return_norm)
sk_t_X = sk_normalize(X_np, axis=axis, norm=norm,
return_norm=return_norm)
assert type(t_X) == type(X)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("norm", ['l1', 'l2', 'max'])
def test_normalize_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
norm):
X_np, X = sparse_clf_dataset
axis = 0 if X.format == 'csc' else 1
t_X = cu_normalize(X, axis=axis, norm=norm)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_normalize(X_np, axis=axis, norm=norm)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent",
"constant"])
@pytest.mark.parametrize("missing_values", [0, 1, np.nan])
@pytest.mark.parametrize("add_indicator", [False, True])
def test_imputer(failure_logger, random_seed, int_dataset, # noqa: F811
strategy, missing_values, add_indicator):
zero_filled, one_filled, nan_filled = int_dataset
if missing_values == 0:
X_np, X = zero_filled
elif missing_values == 1:
X_np, X = one_filled
else:
X_np, X = nan_filled
np.random.seed(random_seed)
fill_value = np.random.randint(10, size=1)[0]
imputer = cuSimpleImputer(copy=True, missing_values=missing_values,
strategy=strategy, fill_value=fill_value,
add_indicator=add_indicator)
t_X = imputer.fit_transform(X)
assert type(t_X) == type(X)
imputer = skSimpleImputer(copy=True, missing_values=missing_values,
strategy=strategy, fill_value=fill_value,
add_indicator=add_indicator)
sk_t_X = imputer.fit_transform(X_np)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent",
"constant"])
def test_imputer_sparse(sparse_imputer_dataset, # noqa: F811
strategy):
missing_values, X_sp, X = sparse_imputer_dataset
if X.format == 'csr':
pytest.skip("Skipping CSR matrices")
fill_value = np.random.randint(10, size=1)[0]
imputer = cuSimpleImputer(copy=True, missing_values=missing_values,
strategy=strategy, fill_value=fill_value)
t_X = imputer.fit_transform(X)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
imputer = skSimpleImputer(copy=True, missing_values=missing_values,
strategy=strategy, fill_value=fill_value)
sk_t_X = imputer.fit_transform(X_sp)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("degree", [2, 3])
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("order", ['C', 'F'])
def test_poly_features(failure_logger, clf_dataset, degree, # noqa: F811
interaction_only, include_bias, order):
X_np, X = clf_dataset
polyfeatures = cuPolynomialFeatures(degree=degree, order=order,
interaction_only=interaction_only,
include_bias=include_bias)
t_X = polyfeatures.fit_transform(X)
assert type(X) == type(t_X)
cu_feature_names = polyfeatures.get_feature_names()
if isinstance(t_X, np.ndarray):
if order == 'C':
assert t_X.flags['C_CONTIGUOUS']
elif order == 'F':
assert t_X.flags['F_CONTIGUOUS']
polyfeatures = skPolynomialFeatures(degree=degree, order=order,
interaction_only=interaction_only,
include_bias=include_bias)
sk_t_X = polyfeatures.fit_transform(X_np)
sk_feature_names = polyfeatures.get_feature_names()
assert_allclose(t_X, sk_t_X, rtol=0.1, atol=0.1)
assert sk_feature_names == cu_feature_names
@pytest.mark.parametrize("degree", [2, 3])
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
def test_poly_features_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
degree, interaction_only, include_bias):
X_np, X = sparse_clf_dataset
polyfeatures = cuPolynomialFeatures(degree=degree,
interaction_only=interaction_only,
include_bias=include_bias)
t_X = polyfeatures.fit_transform(X)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
polyfeatures = skPolynomialFeatures(degree=degree,
interaction_only=interaction_only,
include_bias=include_bias)
sk_t_X = polyfeatures.fit_transform(X_np)
assert_allclose(t_X, sk_t_X, rtol=0.1, atol=0.1)
@pytest.mark.parametrize("value", [1.0, 42])
def test_add_dummy_feature(failure_logger, clf_dataset, value): # noqa: F811
X_np, X = clf_dataset
t_X = cu_add_dummy_feature(X, value=value)
assert type(t_X) == type(X)
sk_t_X = sk_add_dummy_feature(X_np, value=value)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("value", [1.0, 42])
def test_add_dummy_feature_sparse(failure_logger,
sparse_dataset_with_coo, # noqa: F811
value):
X_np, X = sparse_dataset_with_coo
t_X = cu_add_dummy_feature(X, value=value)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_add_dummy_feature(X_np, value=value)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("threshold", [0., 1.])
def test_binarize(failure_logger, clf_dataset, threshold): # noqa: F811
X_np, X = clf_dataset
t_X = cu_binarize(X, threshold=threshold, copy=True)
assert type(t_X) == type(X)
sk_t_X = sk_binarize(X_np, threshold=threshold, copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("threshold", [0., 1.])
def test_binarize_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
threshold):
X_np, X = sparse_clf_dataset
t_X = cu_binarize(X, threshold=threshold, copy=True)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_binarize(X_np, threshold=threshold, copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("threshold", [0., 1.])
def test_binarizer(failure_logger, clf_dataset, threshold): # noqa: F811
X_np, X = clf_dataset
binarizer = cuBinarizer(threshold=threshold, copy=True)
t_X = binarizer.fit_transform(X)
assert type(t_X) == type(X)
binarizer = skBinarizer(threshold=threshold, copy=True)
sk_t_X = binarizer.fit_transform(X_np)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("threshold", [0., 1.])
def test_binarizer_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
threshold):
X_np, X = sparse_clf_dataset
binarizer = cuBinarizer(threshold=threshold, copy=True)
t_X = binarizer.fit_transform(X)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
binarizer = skBinarizer(threshold=threshold, copy=True)
sk_t_X = binarizer.fit_transform(X_np)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25., 75.), (10., 90.)])
def test_robust_scaler(failure_logger, clf_dataset, # noqa: F811
with_centering, with_scaling, quantile_range):
X_np, X = clf_dataset
scaler = cuRobustScaler(with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
assert type(t_X) == type(X)
assert type(r_X) == type(t_X)
scaler = skRobustScaler(with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25., 75.), (10., 90.)])
def test_robust_scaler_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
with_scaling, quantile_range):
X_np, X = sparse_clf_dataset
if X.format != 'csc':
X = X.tocsc()
scaler = cuRobustScaler(with_centering=False,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
# assert type(t_X) == type(X)
# assert type(r_X) == type(t_X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
if cpx.scipy.sparse.issparse(t_X):
assert cpx.scipy.sparse.issparse(r_X)
if scipy.sparse.issparse(t_X):
assert scipy.sparse.issparse(r_X)
scaler = skRobustScaler(with_centering=False,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25., 75.), (10., 90.)])
def test_robust_scale(failure_logger, clf_dataset, # noqa: F811
with_centering, axis, with_scaling, quantile_range):
X_np, X = clf_dataset
t_X = cu_robust_scale(X, axis=axis,
with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
assert type(t_X) == type(X)
sk_t_X = sk_robust_scale(X_np, axis=axis,
with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25., 75.), (10., 90.)])
def test_robust_scale_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
axis, with_scaling, quantile_range):
X_np, X = sparse_clf_dataset
if X.format != 'csc' and axis == 0:
X = X.tocsc()
elif X.format != 'csr' and axis == 1:
X = X.tocsr()
t_X = cu_robust_scale(X, axis=axis,
with_centering=False,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_robust_scale(X_np, axis=axis,
with_centering=False,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("n_bins", [5, 20])
@pytest.mark.parametrize("encode", ['ordinal', 'onehot-dense', 'onehot'])
@pytest.mark.parametrize("strategy", [
pytest.param('uniform', marks=pytest.mark.xfail(
strict=False,
reason='Intermittent mismatch with sklearn'
' (https://github.com/rapidsai/cuml/issues/3481)'
)),
pytest.param('quantile', marks=pytest.mark.xfail(
strict=False,
reason='Intermittent mismatch with sklearn'
' (https://github.com/rapidsai/cuml/issues/2933)'
)),
'kmeans'
])
def test_kbinsdiscretizer(failure_logger, blobs_dataset, n_bins, # noqa: F811
encode, strategy):
X_np, X = blobs_dataset
transformer = cuKBinsDiscretizer(n_bins=n_bins,
encode=encode,
strategy=strategy)
t_X = transformer.fit_transform(X)
r_X = transformer.inverse_transform(t_X)
if encode != 'onehot':
assert type(t_X) == type(X)
assert type(r_X) == type(t_X)
transformer = skKBinsDiscretizer(n_bins=n_bins,
encode=encode,
strategy=strategy)
sk_t_X = transformer.fit_transform(X_np)
sk_r_X = transformer.inverse_transform(sk_t_X)
if strategy == 'kmeans':
assert_allclose(t_X, sk_t_X, ratio_tol=0.2)
else:
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("missing_values", [0, 1, np.nan])
@pytest.mark.parametrize("features", ['missing-only', 'all'])
def test_missing_indicator(failure_logger, int_dataset, # noqa: F811
missing_values, features):
zero_filled, one_filled, nan_filled = int_dataset
if missing_values == 0:
X_np, X = zero_filled
elif missing_values == 1:
X_np, X = one_filled
else:
X_np, X = nan_filled
indicator = cuMissingIndicator(missing_values=missing_values,
features=features)
ft_X = indicator.fit_transform(X)
assert type(ft_X) == type(X)
indicator.fit(X)
t_X = indicator.transform(X)
assert type(t_X) == type(X)
indicator = skMissingIndicator(missing_values=missing_values,
features=features)
sk_ft_X = indicator.fit_transform(X_np)
indicator.fit(X_np)
sk_t_X = indicator.transform(X_np)
assert_allclose(ft_X, sk_ft_X)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("features", ['missing-only', 'all'])
def test_missing_indicator_sparse(failure_logger,
sparse_int_dataset, # noqa: F811
features):
X_np, X = sparse_int_dataset
indicator = cuMissingIndicator(features=features,
missing_values=1)
ft_X = indicator.fit_transform(X)
# assert type(ft_X) == type(X)
assert cpx.scipy.sparse.issparse(ft_X) or scipy.sparse.issparse(ft_X)
indicator.fit(X)
t_X = indicator.transform(X)
# assert type(t_X) == type(X)
assert cpx.scipy.sparse.issparse(t_X) or scipy.sparse.issparse(t_X)
indicator = skMissingIndicator(features=features,
missing_values=1)
sk_ft_X = indicator.fit_transform(X_np)
indicator.fit(X_np)
sk_t_X = indicator.transform(X_np)
assert_allclose(ft_X, sk_ft_X)
assert_allclose(t_X, sk_t_X)
def test_function_transformer(clf_dataset): # noqa: F811
X_np, X = clf_dataset
transformer = cuFunctionTransformer(func=cp.exp,
inverse_func=cp.log,
check_inverse=False)
t_X = transformer.fit_transform(X)
r_X = transformer.inverse_transform(t_X)
assert type(t_X) == type(X)
assert type(r_X) == type(t_X)
transformer = skFunctionTransformer(func=np.exp,
inverse_func=np.log,
check_inverse=False)
sk_t_X = transformer.fit_transform(X_np)
sk_r_X = transformer.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
def test_function_transformer_sparse(sparse_clf_dataset): # noqa: F811
X_np, X = sparse_clf_dataset
transformer = cuFunctionTransformer(func=lambda x: x * 2,
inverse_func=lambda x: x / 2,
accept_sparse=True)
t_X = transformer.fit_transform(X)
r_X = transformer.inverse_transform(t_X)
assert cpx.scipy.sparse.issparse(t_X) or scipy.sparse.issparse(t_X)
assert cpx.scipy.sparse.issparse(r_X) or scipy.sparse.issparse(r_X)
transformer = skFunctionTransformer(func=lambda x: x * 2,
inverse_func=lambda x: x / 2,
accept_sparse=True)
sk_t_X = transformer.fit_transform(X_np)
sk_r_X = transformer.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("n_quantiles", [30, 100])
@pytest.mark.parametrize("output_distribution", ['uniform', 'normal'])
@pytest.mark.parametrize("ignore_implicit_zeros", [False, True])
@pytest.mark.parametrize("subsample", [100])
def test_quantile_transformer(failure_logger,
                              nan_filled_positive,  # noqa: F811
                              n_quantiles, output_distribution,
                              ignore_implicit_zeros, subsample):
    """cuML QuantileTransformer matches scikit-learn on dense data.

    Both the fitted state (quantiles_, references_) and the forward and
    inverse transforms are compared against the reference implementation.
    """
    X_np, X = nan_filled_positive
    # Identical constructor arguments for both implementations.
    params = dict(n_quantiles=n_quantiles,
                  output_distribution=output_distribution,
                  ignore_implicit_zeros=ignore_implicit_zeros,
                  subsample=subsample, random_state=42, copy=True)

    cu_transformer = cuQuantileTransformer(**params)
    cu_forward = cu_transformer.fit_transform(X)
    assert type(cu_forward) == type(X)
    cu_inverse = cu_transformer.inverse_transform(cu_forward)
    assert type(cu_inverse) == type(cu_forward)

    sk_transformer = skQuantileTransformer(**params)
    sk_forward = sk_transformer.fit_transform(X_np)
    sk_inverse = sk_transformer.inverse_transform(sk_forward)

    assert_allclose(cu_transformer.quantiles_, sk_transformer.quantiles_)
    assert_allclose(cu_transformer.references_, sk_transformer.references_)
    assert_allclose(cu_forward, sk_forward)
    assert_allclose(cu_inverse, sk_inverse)
@pytest.mark.parametrize("n_quantiles", [30, 100])
@pytest.mark.parametrize("output_distribution", ['uniform', 'normal'])
@pytest.mark.parametrize("ignore_implicit_zeros", [False, True])
@pytest.mark.parametrize("subsample", [100])
def test_quantile_transformer_sparse(failure_logger,
                                     sparse_nan_filled_positive,  # noqa: F811
                                     n_quantiles, output_distribution,
                                     ignore_implicit_zeros, subsample):
    """cuML QuantileTransformer matches scikit-learn on sparse CSC data.

    Inputs are converted to CSC first; outputs must stay in the same
    sparse universe (cupyx vs scipy) as the input.
    """
    X_np, X = sparse_nan_filled_positive
    X_np = X_np.tocsc()
    X = X.tocsr().tocsc()
    # Identical constructor arguments for both implementations.
    params = dict(n_quantiles=n_quantiles,
                  output_distribution=output_distribution,
                  ignore_implicit_zeros=ignore_implicit_zeros,
                  subsample=subsample, random_state=42, copy=True)

    cu_transformer = cuQuantileTransformer(**params)
    cu_forward = cu_transformer.fit_transform(X).tocsc()
    cu_inverse = cu_transformer.inverse_transform(cu_forward)
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(cu_forward)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(cu_forward)

    sk_transformer = skQuantileTransformer(**params)
    sk_forward = sk_transformer.fit_transform(X_np)
    sk_inverse = sk_transformer.inverse_transform(sk_forward)

    assert_allclose(cu_transformer.quantiles_, sk_transformer.quantiles_)
    assert_allclose(cu_transformer.references_, sk_transformer.references_)
    assert_allclose(cu_forward, sk_forward)
    assert_allclose(cu_inverse, sk_inverse)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("n_quantiles", [30, 100])
@pytest.mark.parametrize("output_distribution", ['uniform', 'normal'])
@pytest.mark.parametrize("ignore_implicit_zeros", [False, True])
@pytest.mark.parametrize("subsample", [100])
def test_quantile_transform(failure_logger, nan_filled_positive,  # noqa: F811
                            axis, n_quantiles, output_distribution,
                            ignore_implicit_zeros, subsample):
    """The cuML quantile_transform function matches its scikit-learn
    counterpart and preserves the input container type."""
    X_np, X = nan_filled_positive
    # Identical keyword arguments for both function calls.
    params = dict(axis=axis,
                  n_quantiles=n_quantiles,
                  output_distribution=output_distribution,
                  ignore_implicit_zeros=ignore_implicit_zeros,
                  subsample=subsample,
                  random_state=42, copy=True)

    cu_result = cu_quantile_transform(X, **params)
    assert type(cu_result) == type(X)

    sk_result = sk_quantile_transform(X_np, **params)
    assert_allclose(cu_result, sk_result)
@pytest.mark.parametrize("method", ['yeo-johnson', 'box-cox'])
@pytest.mark.parametrize("standardize", [False, True])
def test_power_transformer(failure_logger, nan_filled_positive,  # noqa: F811
                           method, standardize):
    """cuML PowerTransformer matches scikit-learn.

    Compares fit_transform, transform and inverse_transform against the
    scikit-learn reference and checks input container types are kept.
    """
    X_np, X = nan_filled_positive
    transformer = cuPowerTransformer(method=method,
                                     standardize=standardize,
                                     copy=True)
    ft_X = transformer.fit_transform(X)
    assert type(ft_X) == type(X)
    t_X = transformer.transform(X)
    assert type(t_X) == type(X)
    r_X = transformer.inverse_transform(t_X)
    assert type(r_X) == type(t_X)
    sk_transformer = skPowerTransformer(method=method,
                                        standardize=standardize,
                                        copy=True)
    sk_t_X = sk_transformer.fit_transform(X_np)
    # BUG FIX: the reference inverse must come from the scikit-learn
    # transformer. Previously the cuML `transformer` was reused here, so
    # the final comparison checked the cuML inverse against itself.
    sk_r_X = sk_transformer.inverse_transform(sk_t_X)
    assert_allclose(ft_X, sk_t_X)
    assert_allclose(t_X, sk_t_X)
    assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("method", ['yeo-johnson', 'box-cox'])
@pytest.mark.parametrize("standardize", [False, True])
def test_power_transform(failure_logger, nan_filled_positive,  # noqa: F811
                         method, standardize):
    """The cuML power_transform function matches its scikit-learn
    counterpart and preserves the input container type."""
    X_np, X = nan_filled_positive

    cu_result = cu_power_transform(X, method=method, standardize=standardize)
    assert type(cu_result) == type(X)

    sk_result = sk_power_transform(X_np, method=method,
                                   standardize=standardize)
    assert_allclose(cu_result, sk_result)
def test_kernel_centerer():
    """cuML KernelCenterer matches scikit-learn on a small linear kernel."""
    X = np.array([[1., -2., 2.],
                  [-2., 1., 3.],
                  [4., 1., -2.]])
    # Build a 3x3 Gram matrix to center.
    kernel = pairwise_kernels(X, metric='linear')

    cu_model = cuKernelCenterer()
    cu_model.fit(kernel)
    cu_centered = cu_model.transform(kernel, copy=True)
    assert type(cu_centered) == type(X)

    sk_centered = skKernelCenterer().fit_transform(kernel)
    assert_allclose(sk_centered, cu_centered)
def test__repr__():
    """Default repr of every cuML preprocessing estimator is 'ClassName()'."""
    expected_reprs = [
        (cuBinarizer, 'Binarizer()'),
        (cuFunctionTransformer, 'FunctionTransformer()'),
        (cuKBinsDiscretizer, 'KBinsDiscretizer()'),
        (cuKernelCenterer, 'KernelCenterer()'),
        (cuMaxAbsScaler, 'MaxAbsScaler()'),
        (cuMinMaxScaler, 'MinMaxScaler()'),
        (cuMissingIndicator, 'MissingIndicator()'),
        (cuNormalizer, 'Normalizer()'),
        (cuPolynomialFeatures, 'PolynomialFeatures()'),
        (cuQuantileTransformer, 'QuantileTransformer()'),
        (cuRobustScaler, 'RobustScaler()'),
        (cuSimpleImputer, 'SimpleImputer()'),
        (cuStandardScaler, 'StandardScaler()'),
    ]
    for estimator_cls, expected in expected_reprs:
        assert estimator_cls().__repr__() == expected
| [
"noreply@github.com"
] | noreply@github.com |
f63a1432724c3cac911ccad6422806edc4c92da0 | 0369761e54c2766ff2ce13ed249d462a12320c0f | /bubble-search/bubble-search-practice/exercise-09.py | de843c707b960f927b8aa8ee8b57bf0057cd539f | [] | no_license | JasoSalgado/algorithms | e54c739005cc47ee8a401912a77cc70865d28c87 | 8db7d2bedfe468c70e5191bc7873e4dd86e7f95a | refs/heads/master | 2023-04-25T23:41:10.655874 | 2021-06-11T17:35:49 | 2021-06-11T17:35:49 | 333,979,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | """
Bubble search exercise 09
"""
# Bubble sort exercise: sort the numbers in ascending order in place.
# (Renamed the variable from `list`, which shadowed the builtin, and
# replaced the temporary-variable swap with Python's tuple swap.)
numbers = [6514, 2352, 3984, 3596, 2445, 5535, 6332, 5346, 617, 3976, 1242, 2573, 7772, 9324, 4655, 3144, 6233, 2287, 6109, 4139, 2030, 6734, 1495, 9466, 6893, 9336, 963, 4412, 5347, 2565, 7590, 5932, 6747, 7566, 2456, 9982, 8880, 6816, 9415, 2426, 5892, 5074, 1501, 9445, 6921, 545, 4415, 9516, 6426, 7369]
print(f"List: {numbers}")
# Classic bubble sort: each outer pass bubbles the largest remaining
# value toward the end by swapping out-of-order adjacent pairs.
for _ in range(len(numbers)):
    for j in range(len(numbers) - 1):
        if numbers[j] > numbers[j + 1]:
            numbers[j], numbers[j + 1] = numbers[j + 1], numbers[j]
print(numbers)
| [
"jaso_98@hotmail.com"
] | jaso_98@hotmail.com |
3e2d5b25ba6cdc75221465e223ac9c6c0a7a9de6 | cf8b4b316b2b019ca6afef15937d1306b231cd73 | /feature_clean.py | ec4b30c0765ed4472ff89eb79270025992d7fc78 | [] | no_license | seridica/cs229project | e1372304aabf3f683147b33f208966bb7fb6c7cb | 311d82b20a6ae5c3b93810f5d7b6dc24d5145a74 | refs/heads/master | 2021-08-30T05:12:55.265581 | 2017-12-16T04:35:04 | 2017-12-16T04:35:04 | 114,216,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,393 | py | # -*- coding: utf-8 -*-
"""
SVM for Pan-Lung Data
November 30 2017
CS229 Project
File provides functions for cleaning up feature set.
1) Function removes features that have all the same value (typically 0)
2) PCA on features
3) Normalize features
@author: Calvin
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import imread
from random import *
import io
import sys
import pickle
from count_features import *
from generate_labels import *
from cleanup import *
import pdb
from sklearn.decomposition import PCA
import random
"""
Function removes features that have all the same value.
"""
def remove_useless_features(trainFeatureMatrix, testFeatureMatrix):
    """Drop features that are constant across all training examples.

    A constant column carries no discriminative information, so it is
    removed from both matrices. The columns to keep are chosen from the
    training data only and the same selection is applied to the test data.

    Parameters
    ----------
    trainFeatureMatrix : ndarray of shape (n_train, n_features)
    testFeatureMatrix : ndarray of shape (n_test, n_features)

    Returns
    -------
    (newTrainFeatureMatrix, newTestFeatureMatrix) : the input matrices
        restricted to the non-constant training columns.
    """
    # BUG FIX: the previous implementation kept a column only when its sum
    # was not an exact multiple of the number of rows. That does remove
    # constant columns, but it also wrongly removes any varying column
    # whose values happen to sum to a multiple of n_train. Test constancy
    # directly instead.
    keep = [i for i in range(trainFeatureMatrix.shape[1])
            if np.unique(trainFeatureMatrix[:, i]).size > 1]
    return trainFeatureMatrix[:, keep], testFeatureMatrix[:, keep]
"""
Function performs PCA on the features to identify the where all the variance
in the data lies.
"""
def pca_features(trainFeatureMatrix, testFeatureMatrix):
    """Project features onto the principal components that together
    explain 90% of the training-set variance.

    The PCA basis is fit on the training data only and the test data is
    projected onto that same basis. Diagnostic information is printed and
    a heat map of the component loadings is shown (presentation figure).
    """
    pca = PCA()
    train_scores = pca.fit_transform(trainFeatureMatrix)
    test_scores = pca.transform(testFeatureMatrix)

    variance_ratios = pca.explained_variance_ratio_
    print(variance_ratios)

    # Walk the components in order of explained variance until the
    # cumulative ratio crosses the 90% threshold.
    threshold = 0.9
    cumulative = 0
    for n_components in range(len(variance_ratios)):
        cumulative += variance_ratios[n_components]
        if cumulative > threshold:
            break

    print("Number of components: ")
    print(n_components)
    print("Number of original features: ")
    print(trainFeatureMatrix.shape[1])

    reduced_train = train_scores[:, :n_components]
    reduced_test = test_scores[:, :n_components]

    # Heat map of the component loadings, for presentation.
    loadings = pca.components_
    plt.figure(figsize=(12, 12))
    plt.imshow(loadings, cmap='bwr', interpolation='none')
    plt.colorbar()
    current_axes = plt.gca()
    current_axes.axes.get_xaxis().set_visible(False)
    current_axes.axes.get_yaxis().set_visible(False)
    plt.show()

    return reduced_train, reduced_test
"""
Function demeans and normalizes features
"""
def normalize_features(trainFeatureMatrix, testFeatureMatrix):
    """Standardize each feature to zero mean and unit variance.

    The mean and standard deviation are computed on the training data
    only and applied to both matrices, so the test set is transformed
    exactly the way the training set was.

    Parameters
    ----------
    trainFeatureMatrix : ndarray of shape (n_train, n_features)
    testFeatureMatrix : ndarray of shape (n_test, n_features)

    Returns
    -------
    (newTrainFeatureMatrix, newTestFeatureMatrix) : standardized copies
        of the inputs; the originals are not modified.
    """
    nDat, nFeat = trainFeatureMatrix.shape
    # Work on float copies: normalized values are fractional, and writing
    # them back into an integer-dtype copy would silently truncate.
    newTrainFeatureMatrix = np.array(trainFeatureMatrix, dtype=float)
    newTestFeatureMatrix = np.array(testFeatureMatrix, dtype=float)
    for i in range(nFeat):
        thisFeat = trainFeatureMatrix[:, i]
        mFeat = np.mean(thisFeat)
        mStd = np.std(thisFeat)
        # BUG FIX: the training branch previously divided by mStd
        # unconditionally, producing NaN/inf for constant features; only
        # the test branch guarded against mStd == 0. Guard both paths the
        # same way: a constant feature is just demeaned.
        if mStd == 0:
            newTrainFeatureMatrix[:, i] = thisFeat - mFeat
            newTestFeatureMatrix[:, i] = testFeatureMatrix[:, i] - mFeat
        else:
            newTrainFeatureMatrix[:, i] = (thisFeat - mFeat) / mStd
            newTestFeatureMatrix[:, i] = (testFeatureMatrix[:, i] - mFeat) / mStd
    return newTrainFeatureMatrix, newTestFeatureMatrix
"""
Function redistributes test and train data
"""
def redist_data( trainData, trainLabels, testData, testLabels ):
    # Re-split the combined train/test pool into a new random partition,
    # stratified by class: for every class label, roughly 1/newRatio of
    # that class's examples (taken from each original split) go to the
    # new test set and the remainder to the new training set.
    #
    # NOTE(review): labels must be numeric and convertible with int();
    # the returned label arrays are (n, 1) column vectors, and the
    # reshape(-1) calls below suggest the inputs are column vectors too
    # -- confirm against callers. np.random is not seeded, so the split
    # is non-deterministic.
    newRatio = 5  # 1 in every newRatio examples of each class -> new test set
    trainClassInds = {}  # class label -> list of row indices in trainData
    testClassInds = {}   # class label -> list of row indices in testData
    nTrainDat, nFeatures = trainData.shape
    nTestDat = testData.shape[0]
    # Partition data in train and test
    trainKeys = []
    for i in range(nTrainDat):
        currLabel = int( trainLabels[i] )
        if currLabel in trainKeys:
            trainClassInds[currLabel].append(i)
        else:
            # First occurrence of this class: start its index list.
            trainClassInds[currLabel] = [i]
            trainKeys.append(currLabel)
    testKeys = []
    for i in range(nTestDat):
        currLabel = int( testLabels[i] )
        if currLabel in testKeys:
            testClassInds[currLabel].append(i)
        else:
            testClassInds[currLabel] = [i]
            testKeys.append(currLabel)
    # Make sure there are the same number of class labels
    assert( len(testKeys) == len(trainKeys) )
    # Redistribute
    # Accumulators start empty with the right column count so that
    # np.concatenate along axis 0 works on the first iteration.
    newTrainData = np.array([[]])
    newTrainData.shape = (0, nFeatures)
    newTrainLabels = []
    newTestData = np.array([[]])
    newTestData.shape = (0, nFeatures)
    newTestLabels = []
    for i in range(len(testKeys)):
        # For original training data
        # Shuffle this class's rows; the first `cutoff` shuffled rows go
        # to the new test set, the rest to the new training set.
        inds = np.array(trainClassInds[testKeys[i]])
        p = np.random.permutation(len(inds))
        cutoff = int( np.floor( len(inds) / newRatio ) )
        newTrainData = np.concatenate( (newTrainData, trainData[inds[p[cutoff:]],:] ), axis=0 )
        newTestData = np.concatenate( (newTestData, trainData[inds[p[:cutoff]],:] ), axis=0 )
        newTrainLabels = np.concatenate( (newTrainLabels, trainLabels[inds[p[cutoff:]]].reshape(-1)) )
        newTestLabels = np.concatenate( (newTestLabels, trainLabels[inds[p[:cutoff]]].reshape(-1)) )
        # For original test data
        # Same redistribution applied to this class's rows of the
        # original test split.
        inds = np.array(testClassInds[testKeys[i]])
        p = np.random.permutation(len(inds))
        cutoff = int( np.floor( len(inds) / newRatio ) )
        newTrainData = np.concatenate( (newTrainData, testData[inds[p[cutoff:]],:] ), axis=0 )
        newTestData = np.concatenate( (newTestData, testData[inds[p[:cutoff]],:] ), axis=0 )
        newTrainLabels = np.concatenate( (newTrainLabels, testLabels[inds[p[cutoff:]]].reshape(-1)) )
        newTestLabels = np.concatenate( (newTestLabels, testLabels[inds[p[:cutoff]]].reshape(-1)) )
    # Debug output: shapes of the new partitions.
    print( newTrainData.shape )
    print( newTestData.shape )
    # Return labels as (n, 1) column vectors to match the data matrices.
    newTrainLabels = np.array([newTrainLabels]).T
    newTestLabels = np.array([newTestLabels]).T
    return newTrainData, newTrainLabels, newTestData, newTestLabels
"calvink@stanford.edu"
] | calvink@stanford.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.