| seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
36347951264
|
import random
import numpy as np
from scipy.optimize import fsolve
# velocity upper bound from Wu et al. (https://flow-project.github.io/papers/wu17a.pdf)
# This is an approximation
def v_eq_max_function(v, *args):
"""Return the error between the desired and actual equivalent gap."""
num_vehicles, length = args
# maximum gap in the presence of one rl vehicle
s_eq_max = (length - num_vehicles * 5) / (num_vehicles - 1)
v0 = 30
s0 = 2
tau = 1
gamma = 4
error = s_eq_max - (s0 + v * tau) * (1 - (v / v0) ** gamma) ** -0.5
return error
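# The root of v_eq_max_function is the equilibrium speed v* satisfying
#     s_eq_max = (s0 + v*tau) / sqrt(1 - (v/v0)**gamma),
# an IDM-style equilibrium-spacing relation (see the Wu et al. link above).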
def get_velocity_upper_bound(num_vehicles, length):
"""Return the velocity upper bound for the given number of vehicles."""
v_guess = 4
return fsolve(v_eq_max_function, np.array(v_guess), args=(num_vehicles, length))[0]
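# Illustrative call (hypothetical values, not taken from the paper):
#     v_max = get_velocity_upper_bound(22, 260)  # 22 vehicles on a 260 m ring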
def get_desired_velocity(num_vehicles, length, method_name = None):
"""
    Desired velocity is obtained as the uniform-flow equilibrium velocity.
    Only some controllers require this.
"""
# some known values are hard coded:
if length == 220:
# reduce to 2.7 for FS
if method_name == "fs":
return 2.7
else:
return 3.0
elif length == 230:
return 3.45
elif length == 260:
        # From trial and error:
return 4.82 # Value from LORR paper, other sources
elif length == 270:
return 5.2
else:
scaler = 0.93 # 93% of the upper bound may be desired?
print("Scaler: ", scaler)
return get_velocity_upper_bound(num_vehicles, length) * scaler
# Shock
# Define shock models
def get_shock_model(identifier, length = None, network_scaler=1, bidirectional=False, high_speed = False):
# Network scaler 6 used in the bottleneck
# Accel/ Decel value, duration, frequency (in the interval between shock start and shock end)
# Duration: In seconds, for which each shock is applied
# Frequency: In the interval, how many shocks are applied
# if identifier == 1:
# return (-1.4, 2, 10)
if identifier == 2:
        # These ranges are obtained from data
# sample frequency
        frequency = network_scaler*np.random.randint(5, 20) # a value of 10 means one shock every 3000/10 = 300 steps; 5 = 600 steps, 15 = 200 steps
intensity_collect = []
duration_collect = []
if high_speed:
intensity_abs_min = 1.5
intensity_abs_max = 4.0
else:
intensity_abs_min = 1
intensity_abs_max = 3.0
print("Frequency:", frequency)
for i in range(frequency):
if bidirectional:
# between (-abs_max to -abs_min) and (abs_min to abs_max) but not between (-abs_min to abs_min)
intensity = random.uniform(-intensity_abs_max, intensity_abs_max)
while intensity > -intensity_abs_min and intensity < intensity_abs_min:
intensity = random.uniform(-intensity_abs_max, intensity_abs_max)
else:
intensity = random.uniform(-intensity_abs_max, -intensity_abs_min)
print("Intensity:", intensity)
durations = np.linspace(0.1, 2.5, 20) # In seconds
abs_intensity = abs(intensity)
intensity_bucket = np.linspace(intensity_abs_min, intensity_abs_max,len(durations))
loc = np.searchsorted(intensity_bucket, abs_intensity)
left = loc
right = len(durations) - loc
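            # Triangular weighting over durations: the peak lies at the sampled
            # |intensity|'s bucket index, so stronger shocks tend to draw
            # longer durations.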
probabilities_left = np.linspace(0.0, 10, left)
# print("Probabilities left:", probabilities_left, probabilities_left.sum())
probabilities_right = np.linspace(10, 0.0, right)
# print("Probabilities right:", probabilities_right, probabilities_right.sum())
probabilities = np.concatenate((probabilities_left, probabilities_right))
probabilities /= probabilities.sum()
#print("Probabilities:", probabilities, probabilities.sum())
duration = round(np.random.choice(durations, 1, p=probabilities)[0], 1)
print("Duration:", duration)
intensity_collect.append(intensity)
duration_collect.append(duration)
        # Return: intensities, durations (seconds), frequency
return (np.asarray(intensity_collect), np.asarray(duration_collect), frequency)
# Stability test
elif identifier == -1:
# velocity, duration, frequency
# Stability tests have velocity manipulation, so the first param here is speed at the velocity dip
# Duration and frequency are also used
        # Applying the shock once is enough
if length ==220:
vel_set = 2.0
duration = 1
elif length == 270:
vel_set = 3.0
duration = 2
elif length == 260:
vel_set = 3.0
duration = 2
else:
vel_set = 5.0
duration = 2
print("\n\nVelocity set: ", vel_set)
return (vel_set, duration, 1)
#return (2, 10, 10)
else:
raise ValueError("Shock model identifier not recognized")
## Shock utils
def get_time_steps_stability(duration, frequency, shock_start_time, shock_end_time):
    # Convert duration (seconds) to env steps, assuming 0.1 s per step
duration = duration*10
# Based on this frequency, get the time steps at which the shock is applied
start_times = np.linspace(shock_start_time, shock_end_time - duration, frequency, dtype=int)
end_times = np.linspace(shock_start_time + duration, shock_end_time, frequency, dtype=int)
shock_time_steps = np.stack((start_times, end_times), axis=1)
print("Start times: ", start_times)
print("End times: ", end_times)
print("Shock times: \n", shock_time_steps)
# TODO: Perform overlap tests and warn if there is overlap
# if start_times[1] < end_times[0]:
# import sys
# sys.exit()
return shock_time_steps
def get_time_steps(durations, frequency, shock_start_time, shock_end_time):
    # Convert durations (seconds) to env steps, assuming 0.1 s per step
durations = durations*10
print("Durations: ", durations)
# Based on this frequency, get the time steps at which the shock is applied
start_times = np.linspace(shock_start_time, shock_end_time - durations[-1], frequency, dtype=int)
end_times = []
for i in range(frequency):
end_times.append(start_times[i] + durations[i])
shock_time_steps = np.stack((start_times, end_times), axis=1)
print("Start times: ", start_times)
print("End times: ", end_times)
print("Shock times: \n", shock_time_steps)
# TODO: Perform overlap tests and warn if there is overlap
# if start_times[1] < end_times[0]:
# import sys
# sys.exit()
return shock_time_steps
# Example usage:
# intensities, durations, frequency = get_shock_model(2)
# get_time_steps(durations, frequency, 8000, 10000)
# print(intensities[1])
|
poudel-bibek/Beyond-Simulated-Drivers
|
flow/density_aware_util.py
|
density_aware_util.py
|
py
| 7,049
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19886880930
|
from guardata.client.client_events import ClientEvent
import pytest
from unittest.mock import ANY
from pendulum import datetime
from guardata.api.data import UserManifest, WorkspaceEntry
from guardata.client.types import WorkspaceRole, LocalUserManifest, EntryID
from guardata.client.fs import (
FSError,
FSWorkspaceNotFoundError,
FSBackendOfflineError,
FSSharingNotAllowedError,
)
from backendService.realm import RealmGrantedRole, RealmRole
from tests.common import freeze_time, create_shared_workspace
@pytest.mark.trio
async def test_share_unknown(running_backend, alice_user_fs, bob):
wid = EntryID()
with pytest.raises(FSWorkspaceNotFoundError):
await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
@pytest.mark.trio
async def test_share_to_oneself(running_backend, alice_user_fs, alice):
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
with pytest.raises(FSError) as exc:
await alice_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.MANAGER)
assert str(exc.value) == "Cannot share to oneself"
@pytest.mark.trio
async def test_share_bad_recipient(running_backend, alice_user_fs, alice, mallory):
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
with pytest.raises(FSError) as exc:
await alice_user_fs.workspace_share(wid, mallory.user_id, WorkspaceRole.MANAGER)
assert str(exc.value) == "User `mallory` doesn't exist in backend"
@pytest.mark.trio
async def test_share_offline(running_backend, alice_user_fs, bob):
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
with running_backend.offline():
with pytest.raises(FSBackendOfflineError):
await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
@pytest.mark.trio
@pytest.mark.parametrize("presynced", (True, False))
async def test_share_ok(running_backend, alice_user_fs, bob_user_fs, alice, bob, presynced):
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
if presynced:
await alice_user_fs.sync()
await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
with bob_user_fs.event_bus.listen() as spy:
with freeze_time("2000-01-03"):
await bob_user_fs.process_last_messages()
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED,
{
"new_entry": WorkspaceEntry(
name="w1",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 2),
role_cached_on=datetime(2000, 1, 3),
role=WorkspaceRole.MANAGER,
),
"previous_entry": None,
},
)
aum = alice_user_fs.get_user_manifest()
bum = bob_user_fs.get_user_manifest()
assert len(aum.workspaces) == 1
assert len(bum.workspaces) == 1
awe = aum.get_workspace_entry(wid)
bwe = bum.get_workspace_entry(wid)
assert bwe.name == "w1"
assert bwe.id == awe.id
assert bwe.role == WorkspaceRole.MANAGER
aw = alice_user_fs.get_workspace(wid)
bw = bob_user_fs.get_workspace(wid)
aw_stat = await aw.path_info("/")
bw_stat = await bw.path_info("/")
assert aw_stat == bw_stat
@pytest.mark.trio
async def test_share_workspace_then_rename_it(
running_backend, alice_user_fs, bob_user_fs, alice, bob
):
# Share a workspace between Alice and Bob
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w")
await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
with freeze_time("2000-01-03"):
await bob_user_fs.process_last_messages()
    # Now Bob and Alice both rename the workspace to their own taste
await bob_user_fs.workspace_rename(wid, "from_alice")
await alice_user_fs.workspace_rename(wid, "to_bob")
await bob_user_fs.sync()
await alice_user_fs.sync()
    # This should not have changed the workspace in any way
bw = bob_user_fs.get_workspace(wid)
aw = alice_user_fs.get_workspace(wid)
await bw.touch("/ping_bob.txt")
await aw.mkdir("/ping_alice")
await bw.sync()
await aw.sync()
await bw.sync()
aw_stat = await aw.path_info("/")
bw_stat = await bw.path_info("/")
assert aw_stat == bw_stat
assert aw_stat["id"] == wid
@pytest.mark.trio
async def test_unshare_ok(running_backend, alice_user_fs, bob_user_fs, alice, bob):
# Share a workspace...
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.OWNER)
await bob_user_fs.process_last_messages()
# ...and unshare it
await bob_user_fs.workspace_share(wid, alice.user_id, None)
with alice_user_fs.event_bus.listen() as spy:
with freeze_time("2000-01-03"):
await alice_user_fs.process_last_messages()
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED,
{
"new_entry": WorkspaceEntry(
name="w1",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 2),
role_cached_on=datetime(2000, 1, 3),
role=None,
),
"previous_entry": WorkspaceEntry(
name="w1",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 2),
role_cached_on=datetime(2000, 1, 2),
role=WorkspaceRole.OWNER,
),
},
)
aum = alice_user_fs.get_user_manifest()
aw = aum.workspaces[0]
assert not aw.role
# TODO: check workspace access is no longer possible
@pytest.mark.trio
async def test_unshare_not_shared(running_backend, alice_user_fs, bob_user_fs, alice, bob):
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
await alice_user_fs.workspace_share(wid, bob.user_id, None)
with alice_user_fs.event_bus.listen() as spy:
await bob_user_fs.process_last_messages()
assert not spy.events
# Workspace unsharing should have been ignored
bum = bob_user_fs.get_user_manifest()
assert not bum.workspaces
@pytest.mark.trio
async def test_share_to_another_after_beeing_unshared(
running_backend, alice_user_fs, bob_user_fs, alice, bob
):
# Share a workspace...
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
await bob_user_fs.process_last_messages()
# ...and unshare it
await alice_user_fs.workspace_share(wid, bob.user_id, None)
await bob_user_fs.process_last_messages()
# Shouldn't be able to share the workspace anymore
with pytest.raises(FSSharingNotAllowedError):
await bob_user_fs.workspace_share(wid, alice.user_id, None)
@pytest.mark.trio
async def test_reshare_workspace(running_backend, alice_user_fs, bob_user_fs, alice, bob):
# Share a workspace...
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
with freeze_time("2000-01-03"):
await bob_user_fs.process_last_messages()
# ...and unshare it...
await alice_user_fs.workspace_share(wid, bob.user_id, None)
with freeze_time("2000-01-04"):
await bob_user_fs.process_last_messages()
    # ...and re-share it!
await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
with bob_user_fs.event_bus.listen() as spy:
with freeze_time("2000-01-05"):
await bob_user_fs.process_last_messages()
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED,
{
"new_entry": WorkspaceEntry(
name="w1",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 2),
role_cached_on=datetime(2000, 1, 5),
role=WorkspaceRole.MANAGER,
),
"previous_entry": WorkspaceEntry(
name="w1",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 2),
role_cached_on=datetime(2000, 1, 4),
role=None,
),
},
)
# Check access
aum = alice_user_fs.get_user_manifest()
bum = bob_user_fs.get_user_manifest()
assert len(aum.workspaces) == 1
assert len(bum.workspaces) == 1
aw = aum.workspaces[0]
bw = bum.workspaces[0]
assert bw.name == "w1"
assert bw.id == aw.id
assert bw.role == WorkspaceRole.MANAGER
@pytest.mark.trio
async def test_share_with_different_role(running_backend, alice_user_fs, bob_user_fs, alice, bob):
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
aum = alice_user_fs.get_user_manifest()
aw = aum.workspaces[0]
previous_entry = None
for role in WorkspaceRole:
# (re)share with rights
await alice_user_fs.workspace_share(wid, bob.user_id, role)
with bob_user_fs.event_bus.listen() as spy:
await bob_user_fs.process_last_messages()
new_entry = spy.partial_obj(WorkspaceEntry, name="w1", id=wid, role=role)
if not previous_entry:
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED, {"new_entry": new_entry, "previous_entry": None}
)
else:
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED,
{"new_entry": new_entry, "previous_entry": previous_entry},
)
previous_entry = new_entry
# Check access
bum = bob_user_fs.get_user_manifest()
assert len(bum.workspaces) == 1
bw = bum.workspaces[0]
assert bw.name == "w1"
assert bw.id == aw.id
assert bw.role == role
@pytest.mark.trio
async def test_share_no_manager_right(running_backend, alice_user_fs, alice, bob):
with freeze_time("2000-01-02"):
wid = await alice_user_fs.workspace_create("w1")
await alice_user_fs.sync()
# Drop manager right (and give to Bob the ownership)
await running_backend.backend.realm.update_roles(
alice.organization_id,
RealmGrantedRole(
realm_id=wid,
user_id=bob.user_id,
certificate=b"<dummy>",
role=RealmRole.OWNER,
granted_by=alice.device_id,
granted_on=datetime(2000, 1, 3),
),
)
await running_backend.backend.realm.update_roles(
alice.organization_id,
RealmGrantedRole(
realm_id=wid,
user_id=alice.user_id,
certificate=b"<dummy>",
role=RealmRole.CONTRIBUTOR,
granted_by=bob.device_id,
granted_on=datetime(2000, 1, 4),
),
)
with pytest.raises(FSSharingNotAllowedError) as exc:
await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
assert (
exc.value.message
== "Must be Owner or Manager on the workspace is mandatory to share it: {'status': 'not_allowed'}"
)
@pytest.mark.trio
async def test_share_with_sharing_name_already_taken(
running_backend, alice_user_fs, bob_user_fs, alice, bob
):
    # Bob and Alice both have workspaces with the same name
with freeze_time("2000-01-01"):
awid = await alice_user_fs.workspace_create("w")
bwid = await bob_user_fs.workspace_create("w")
bw2id = await bob_user_fs.workspace_create("w")
    # Sharing them shouldn't cause any trouble
await bob_user_fs.sync()
await alice_user_fs.workspace_share(awid, bob.user_id, WorkspaceRole.MANAGER)
# Bob should get a notification
with bob_user_fs.event_bus.listen() as spy:
with freeze_time("2000-01-02"):
await bob_user_fs.process_last_messages()
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED,
{
"new_entry": WorkspaceEntry(
name="w",
id=awid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 1),
role_cached_on=datetime(2000, 1, 2),
role=WorkspaceRole.MANAGER,
),
"previous_entry": None,
},
)
assert len(bob_user_fs.get_user_manifest().workspaces) == 3
b_aw_stat = await bob_user_fs.get_workspace(awid).path_info("/")
a_aw_stat = await alice_user_fs.get_workspace(awid).path_info("/")
b_aw_stat.pop("need_sync")
a_aw_stat.pop("need_sync")
assert b_aw_stat == a_aw_stat
b_bw_stat = await bob_user_fs.get_workspace(bwid).path_info("/")
assert b_bw_stat["id"] == bwid
b_bw2_stat = await bob_user_fs.get_workspace(bw2id).path_info("/")
assert b_bw2_stat["id"] == bw2id
@pytest.mark.trio
@pytest.mark.parametrize("first_to_sync", ("alice", "alice2"))
async def test_share_workspace_then_conflict_on_rights(
running_backend, alice_user_fs, alice2_user_fs, bob_user_fs, alice, alice2, bob, first_to_sync
):
# Bob shares a workspace with Alice...
with freeze_time("2000-01-01"):
wid = await bob_user_fs.workspace_create("w")
with freeze_time("2000-01-02"):
await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.MANAGER)
    # ...but only Alice's first device gets the information
with freeze_time("2000-01-03"):
await alice_user_fs.process_last_messages()
    # Now Bob changes the sharing rights...
with freeze_time("2000-01-04"):
await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.CONTRIBUTOR)
    # ...this time it's Alice's second device which gets the info
with freeze_time("2000-01-05"):
        # Note we will process the 2 sharing messages Bob sent us; this
        # attributes role_cached_on to the first message's timestamp even
        # though we cache the second message's role...
await alice2_user_fs.process_last_messages()
if first_to_sync == "alice":
first = alice_user_fs
second = alice2_user_fs
synced_timestamp = datetime(2000, 1, 7)
synced_version = 3
else:
first = alice2_user_fs
second = alice_user_fs
synced_timestamp = datetime(2000, 1, 6)
synced_version = 2
    # Finally Alice's devices try to reconcile
with freeze_time("2000-01-06"):
await first.sync()
with freeze_time("2000-01-07"):
await second.sync()
# Resync first device to get changes from the 2nd
with freeze_time("2000-01-08"):
await first.sync()
am = alice_user_fs.get_user_manifest()
a2m = alice2_user_fs.get_user_manifest()
expected_remote = UserManifest(
author=alice2.device_id,
timestamp=synced_timestamp,
id=alice2.user_manifest_id,
version=synced_version,
created=datetime(2000, 1, 1),
updated=datetime(2000, 1, 5),
last_processed_message=2,
workspaces=(
WorkspaceEntry(
name="w",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 1),
role_cached_on=datetime(2000, 1, 5),
role=WorkspaceRole.CONTRIBUTOR,
),
),
)
expected = LocalUserManifest(
base=expected_remote,
need_sync=False,
updated=expected_remote.updated,
last_processed_message=expected_remote.last_processed_message,
workspaces=expected_remote.workspaces,
)
assert am == expected
assert a2m == expected
a_w = alice_user_fs.get_workspace(wid)
a2_w = alice2_user_fs.get_workspace(wid)
a_w_stat = await a_w.path_info("/")
a2_w_stat = await a2_w.path_info("/")
a_w_entry = a_w.get_workspace_entry()
a2_w_entry = a2_w.get_workspace_entry()
assert a_w_stat == {
"type": "folder",
"is_placeholder": False,
"id": wid,
"created": ANY,
"updated": ANY,
"base_version": 1,
"need_sync": False,
"children": [],
"confined": False,
}
assert a_w_stat == a2_w_stat
assert a_w_entry == WorkspaceEntry(
name="w",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 1),
role_cached_on=datetime(2000, 1, 5),
role=WorkspaceRole.CONTRIBUTOR,
)
assert a2_w_entry == a_w_entry
@pytest.mark.trio
async def test_sharing_events_triggered_on_sync(
running_backend, alice_user_fs, alice2_user_fs, bob_user_fs, alice, bob
):
# Share a first workspace
with freeze_time("2000-01-02"):
wid = await create_shared_workspace("w", bob_user_fs, alice_user_fs)
with alice2_user_fs.event_bus.listen() as spy:
await alice2_user_fs.sync()
expected_entry_v1 = WorkspaceEntry(
name="w",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 2),
role_cached_on=datetime(2000, 1, 2),
role=WorkspaceRole.MANAGER,
)
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED, {"new_entry": expected_entry_v1, "previous_entry": None}
)
# Change role
await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.OWNER)
with freeze_time("2000-01-03"):
await alice_user_fs.process_last_messages()
await alice_user_fs.sync()
with alice2_user_fs.event_bus.listen() as spy:
await alice2_user_fs.sync()
expected_entry_v2 = WorkspaceEntry(
name="w",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 2),
role_cached_on=datetime(2000, 1, 3),
role=WorkspaceRole.OWNER,
)
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED,
{"new_entry": expected_entry_v2, "previous_entry": expected_entry_v1},
)
# Revoke
await bob_user_fs.workspace_share(wid, alice.user_id, None)
with freeze_time("2000-01-04"):
await alice_user_fs.process_last_messages()
await alice_user_fs.sync()
with alice2_user_fs.event_bus.listen() as spy:
await alice2_user_fs.sync()
expected_entry_v3 = WorkspaceEntry(
name="w",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 2),
role_cached_on=datetime(2000, 1, 4),
role=None,
)
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED,
{"new_entry": expected_entry_v3, "previous_entry": expected_entry_v2},
)
@pytest.mark.trio
async def test_no_sharing_event_on_sync_on_unknown_workspace(
running_backend, alice_user_fs, alice2_user_fs, bob_user_fs, alice, bob
):
# Share a workspace...
wid = await create_shared_workspace("w", bob_user_fs, alice_user_fs)
# ...and unshare it before alice2 even know about it
await bob_user_fs.workspace_share(wid, alice.user_id, None)
await alice_user_fs.process_last_messages()
await alice_user_fs.sync()
    # No sharing event should be triggered!
with alice2_user_fs.event_bus.listen() as spy:
await alice2_user_fs.sync()
spy.assert_events_exactly_occured([ClientEvent.FS_ENTRY_REMOTE_CHANGED])
@pytest.mark.trio
async def test_sharing_event_on_sync_if_same_role(
running_backend, alice_user_fs, alice2_user_fs, bob_user_fs, alice, bob
):
# Share a workspace, alice2 knows about it
with freeze_time("2000-01-02"):
wid = await create_shared_workspace("w", bob_user_fs, alice_user_fs, alice2_user_fs)
expected_entry_v1 = WorkspaceEntry(
name="w",
id=wid,
key=ANY,
encryption_revision=1,
encrypted_on=datetime(2000, 1, 2),
role_cached_on=datetime(2000, 1, 2),
role=WorkspaceRole.MANAGER,
)
# Then change alice's role...
await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.OWNER)
with freeze_time("2000-01-03"):
await alice_user_fs.process_last_messages()
await alice_user_fs.sync()
# ...and give back alice the same role
await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.MANAGER)
with freeze_time("2000-01-04"):
await alice_user_fs.process_last_messages()
expected_entry_v3 = expected_entry_v1.evolve(role_cached_on=datetime(2000, 1, 4))
await alice_user_fs.sync()
# A single sharing event should be triggered
with alice2_user_fs.event_bus.listen() as spy:
await alice2_user_fs.sync()
spy.assert_event_occured(
ClientEvent.SHARING_UPDATED,
{"new_entry": expected_entry_v3, "previous_entry": expected_entry_v1},
)
|
bitlogik/guardata
|
tests/client/fs/userfs/test_sharing.py
|
test_sharing.py
|
py
| 21,167
|
python
|
en
|
code
| 9
|
github-code
|
6
|
9264192052
|
import mne
import numpy as np
import pandas as pd
from mne.beamformer import make_lcmv, apply_lcmv, apply_lcmv_cov
from scipy.stats import pearsonr
import config
from config import fname, lcmv_settings
from time_series import simulate_raw, create_epochs
# Don't be verbose
mne.set_log_level(False)
fn_stc_signal = fname.stc_signal(vertex=config.vertex)
fn_simulated_raw = fname.simulated_raw(vertex=config.vertex)
fn_simulated_epochs = fname.simulated_epochs(vertex=config.vertex)
# fn_report_h5 = fname.report(vertex=config.vertex)
fn_report_h5 = None # Don't produce a report
###############################################################################
# Simulate raw data and create epochs
###############################################################################
print('simulate data')
info = mne.io.read_info(fname.sample_raw)
info = mne.pick_info(info, mne.pick_types(info, meg=True, eeg=False))
fwd_disc_true = mne.read_forward_solution(fname.fwd_discrete_true)
fwd_disc_true = mne.pick_types_forward(fwd_disc_true, meg=True, eeg=False)
er_raw = mne.io.read_raw_fif(fname.ernoise, preload=True)
raw, stc_signal = simulate_raw(info=info, fwd_disc_true=fwd_disc_true, signal_vertex=config.vertex,
signal_freq=config.signal_freq, n_trials=config.n_trials,
noise_multiplier=config.noise, random_state=config.random,
n_noise_dipoles=config.n_noise_dipoles_vol, er_raw=er_raw)
true_ori = fwd_disc_true['src'][0]['nn'][config.vertex]
# del info, fwd_disc_true, er_raw
epochs = create_epochs(raw)
###############################################################################
# Sensor-level analysis
###############################################################################
epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')
epochs_joint = epochs.copy().pick_types(meg=True)
# Make cov matrices
cov = mne.compute_covariance(epochs, tmin=-1, tmax=1, method='empirical')
signal_cov = mne.compute_covariance(epochs, tmin=0, tmax=1, method='empirical')
noise_cov = mne.compute_covariance(epochs, tmin=-1, tmax=0, method='empirical')
# Compute evokeds
evoked_grad = epochs_grad.average()
evoked_mag = epochs_mag.average()
evoked_joint = epochs_joint.average()
###############################################################################
# Compute LCMV beamformer results
###############################################################################
# Read in forward solution
fwd_disc_man = mne.read_forward_solution(fname.fwd_discrete_man)
dists = []
focs = []
corrs = []
ori_errors = []
for setting in lcmv_settings:
reg, sensor_type, pick_ori, inversion, weight_norm, normalize_fwd, use_noise_cov, reduce_rank, project_pca = setting
try:
if sensor_type == 'grad':
evoked = evoked_grad
elif sensor_type == 'mag':
evoked = evoked_mag
elif sensor_type == 'joint':
evoked = evoked_joint
else:
            raise ValueError('Invalid sensor type: %s' % sensor_type)
if project_pca and pick_ori != 'vector':
raise NotImplementedError('project_pca=True only makes sense when pick_ori="vector"')
filters = make_lcmv(evoked.info, fwd_disc_man,
cov if use_noise_cov else signal_cov,
reg=reg,
pick_ori=pick_ori, weight_norm=weight_norm,
inversion=inversion,
depth=1. if normalize_fwd else None,
noise_cov=noise_cov if use_noise_cov else None,
reduce_rank=reduce_rank)
stc_est = apply_lcmv(evoked, filters).crop(0.001, 1)
if pick_ori == 'vector':
            # Combine the vector source estimate into a scalar time course
if project_pca:
stc_proj, _ = stc_est.project('pca', fwd_disc_man['src'])
else:
stc_proj = stc_est.magnitude()
stc_est_power = (stc_proj ** 2).sum()
peak_vertex, peak_time = stc_est_power.get_peak(vert_as_index=True, time_as_index=True)
estimated_time_course = np.abs(stc_proj.data[peak_vertex])
else:
stc_est_power = (stc_est ** 2).sum()
peak_vertex, peak_time = stc_est_power.get_peak(vert_as_index=True, time_as_index=True)
estimated_time_course = np.abs(stc_est.data[peak_vertex])
# Compute distance between true and estimated source locations
pos_est = fwd_disc_man['source_rr'][peak_vertex]
pos_true = fwd_disc_man['source_rr'][config.vertex]
dist = np.linalg.norm(pos_est - pos_true)
# Ratio between estimated peak activity and all estimated activity.
focality_score = stc_est_power.data[peak_vertex, 0] / stc_est_power.data.sum()
# Correlation between true and reconstructed timecourse
true_time_course = stc_signal.copy().crop(0, 1).data[0]
corr = pearsonr(np.abs(true_time_course), estimated_time_course)[0]
# Angle between estimated and true source orientation
if pick_ori == 'max-power':
estimated_ori = filters['max_power_ori'][config.vertex]
ori_error = np.rad2deg(np.arccos(estimated_ori @ true_ori))
if ori_error > 90:
ori_error = 180 - ori_error
elif pick_ori == 'vector':
estimated_ori = stc_est.data[peak_vertex, :, peak_time]
estimated_ori /= np.linalg.norm(estimated_ori)
ori_error = np.rad2deg(np.arccos(estimated_ori @ true_ori))
if ori_error > 90:
ori_error = 180 - ori_error
else:
ori_error = np.nan
except Exception as e:
print(e)
dist = np.nan
focality_score = np.nan
corr = np.nan
ori_error = np.nan
print(setting, dist, focality_score, corr, ori_error)
dists.append(dist)
focs.append(focality_score)
corrs.append(corr)
ori_errors.append(ori_error)
###############################################################################
# Save everything to a pandas dataframe
###############################################################################
df = pd.DataFrame(lcmv_settings,
columns=['reg', 'sensor_type', 'pick_ori', 'inversion',
'weight_norm', 'normalize_fwd', 'use_noise_cov',
'reduce_rank', 'project_pca'])
df['dist'] = dists
df['focality'] = focs
df['corr'] = corrs
df['ori_error'] = ori_errors
df.to_csv(fname.lcmv_results(vertex=config.vertex, noise=config.noise))
print('OK!')
|
wmvanvliet/beamformer_simulation
|
lcmv.py
|
lcmv.py
|
py
| 6,703
|
python
|
en
|
code
| 4
|
github-code
|
6
|
7911525547
|
import nltk
from collections import Counter
nltk.download('vader_lexicon')
from nltk.sentiment import SentimentIntensityAnalyzer
# Read the file provided in the assignment
filename = "data.csv"
with open(filename, 'r') as f:
reviews = f.readlines()
# initialize SentimentIntensityAnalyzer (a library for sentiment analysis)
sia = SentimentIntensityAnalyzer()
# compute the overall sentiment of the reviews
compound_scores = [sia.polarity_scores(review)['compound'] for review in reviews]
overall_sentiment = sum(compound_scores) / len(compound_scores)
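# (VADER's compound score is normalized to [-1, 1]: > 0 positive, < 0 negative.)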
# classify reviews as positive, negative, or neutral (counts all reviews, skipping those with no numeric value in the "Stars" column)
positive_reviews = [review for review in reviews if sia.polarity_scores(review)['compound'] > 0]
negative_reviews = [review for review in reviews if sia.polarity_scores(review)['compound'] < 0]
neutral_reviews = [review for review in reviews if sia.polarity_scores(review)['compound'] == 0]
#positive_reviews = [review for review in reviews if review.strip() and int(review.split('Stars \n')[0]) >= 4]
#negative_reviews = [review for review in reviews if review.strip() and int(review.split('Stars \n')[0]) <= 2]
#neutral_reviews = [review for review in reviews if review.strip() and int(review.split('Stars \n')[0]) == 3]
# count repeated words
word_count = Counter(word for review in reviews for word in review.split())
most_common_words = word_count.most_common(5)
num_positive = len(positive_reviews)
num_negative = len(negative_reviews)
num_neutral = len(neutral_reviews)
with open('report.txt', 'w') as file:
file.write('\n Аналіз відгуків:\n')
file.write(f"Загальний настрій відгуків: ({overall_sentiment}):\n")
file.write(f"Позитивні: ({len(positive_reviews)}):\n")
file.write(f"Негативні: ({len(negative_reviews)}):\n")
file.write(f"Нейтральні: ({len(neutral_reviews)}):\n")
with open('repeating words.txt', 'w') as file:
file.write("\n П'ять найбільш вживаних слів: \n")
for word, count in most_common_words:
file.write(f"{word}: {count}\n")
file.write("Кількість повторюваних слів: \n")
for word, count in word_count.items():
file.write(f"{word}: {count}\n")
# For verification
print("Аналіз настроїв:")
print("Загальний настрій відгуків: {:.2f}".format(overall_sentiment))
print("")
print("Аналіз негативних, позитивних і природних відгуків:")
print("Кількість позитивних відгуків: {}".format(num_positive))
print("Кількість негативних відгуків: {}".format(num_negative))
print("Кількість нейтральних відгуків: {}".format(num_neutral))
print("")
|
Stepanxan/home_task-2
|
app.py
|
app.py
|
py
| 3,167
|
python
|
uk
|
code
| 0
|
github-code
|
6
|
44426849776
|
from test_framework import mininode
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
class BsvProtoconfViolationTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to test")
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.setup_nodes()
def run_test(self):
test_node = mininode.NodeConnCB()
connections = []
connections.append(
mininode.NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
test_node.add_connection(connections[0])
mininode.NetworkThread().start() # Start up network handling in another thread
# 1. Test that protoconf is sent after verack
test_node.wait_for_verack()
test_node.wait_for_protoconf()
logger.info("Received time of verack: {} ".format(test_node.msg_timestamp["verack"]))
logger.info("Received time of protoconf: {} ".format(test_node.msg_timestamp["protoconf"]))
logger.info("Received msg_index of verack: {} ".format(test_node.msg_index["verack"]))
logger.info("Received msg_index of protoconf: {} ".format(test_node.msg_index["protoconf"]))
assert_greater_than(test_node.msg_index["protoconf"], test_node.msg_index["verack"])
# 2. Test that protoconf can only be sent once (if is sent twice --> disconnection)
        assert_equal(len(self.nodes[0].listbanned()), 0)  # Before, there are zero banned nodes
# First protoconf was already sent from mininode.
# Another protoconf message will cause disconnection (but not banning).
test_node.send_message(mininode.msg_protoconf())
test_node.wait_for_disconnect()
assert(self.nodes[0].closed) # disconnected
        assert_equal(len(self.nodes[0].listbanned()), 0)  # After, there are still zero banned nodes
if __name__ == '__main__':
BsvProtoconfViolationTest().main()
|
bitcoin-sv/bitcoin-sv
|
test/functional/bsv-protoconf-violation.py
|
bsv-protoconf-violation.py
|
py
| 2,254
|
python
|
en
|
code
| 597
|
github-code
|
6
|
42891510827
|
#PYTHON CAMERA MODEL
import cv2
import numpy as np
i=0
def capturing(event,x,y,flags,param):
global i
if event==cv2.EVENT_LBUTTONUP:
name="photo_"+str(i)+".png"
wname="CAPTURED IMAGE"
cv2.imwrite(name,frame)
h=cv2.imread(name)
cv2.namedWindow(wname)
cv2.imshow(wname,h)
cv2.moveWindow(wname,700,50)
i+=1
cv2.waitKey(1000)
cv2.destroyWindow(wname)
cap=cv2.VideoCapture(0)
while True:
ret,frame = cap.read()
win="CAPTURE"
cv2.imshow("CAMERA",frame)
cv2.moveWindow("CAMERA",50,50)
cv2.namedWindow(win)
img=np.zeros((150,150,3))
cv2.putText(img,"CLICK",(35,65),cv2.FONT_HERSHEY_SIMPLEX,0.85,(255,255,255),2,cv2.LINE_AA)
cv2.putText(img,"HERE",(35,90),cv2.FONT_HERSHEY_SIMPLEX,0.85,(255,255,255),2,cv2.LINE_AA)
cv2.imshow(win,img)
cv2.moveWindow(win,250,560)
cv2.setMouseCallback(win,capturing)
if cv2.waitKey(1)==13:
break
cap.release()
cv2.destroyAllWindows()
|
NamrithaGirish/LiveCam
|
cam.py
|
cam.py
|
py
| 1,003
|
python
|
en
|
code
| 0
|
github-code
|
6
|
31366310671
|
from api.models import EventTypes
# temp models
class GithubBodyModel(object):
def __init__(self):
self.type = ''
self.preferred_labels = {}
self.alternative_labels = []
self.broader_labels = []
self.narrower_labels = []
self.related_labels = []
self.exact_matches = []
self.needed_for = None
self.description = None
self.reason = None
self.scope_note = None
self.groups = []
self.organization = None
self.yse_term = None
class GithubMeetingModel(object):
def __init__(self, name, created_date, meeting_date):
self.name = name
self.created_date = created_date
self.meeting_date = meeting_date
class GithubIssueModel(object):
def __init__(self, name, status, meeting, created, modified, closed, body):
self.name = name
self.status = status
self.meeting = meeting
self.created = created
self.modified = modified
self.closed = closed
self.body = body
self.tags = []
self.events = []
self.comments = []
class GithubCommentModel(object):
def __init__(self, created, modified, text):
self.created = created
self.modified = modified
self.user_id = None
self.event_type = EventTypes.COMMENT
self.suggestion_id = None
self.text = text
|
NatLibFi/Finto-suggestions
|
api/scripts/github_models.py
|
github_models.py
|
py
| 1,467
|
python
|
en
|
code
| 7
|
github-code
|
6
|
32467362643
|
"""
ID: jasonhu5
LANG: PYTHON3
TASK: transform
"""
def reflect(ar):
n = len(ar)
res = ar.copy()
for row in range(n):
res[row] = res[row][::-1]
return res
def solve(ar1, ar2):
def rot_cw_90(A, B):
for row in range(n):
for col in range(n):
if A[row][col] != B[col][n-1-row]:
return False
return True
def rot_cw_180(A, B):
for row in range(n):
for col in range(n):
if A[row][col] != B[n-1-row][n-1-col]:
return False
return True
def rot_cw_270(A, B):
for row in range(n):
for col in range(n):
if A[row][col] != B[n-1-col][row]:
return False
return True
def flipped(A, B):
return reflect(A) == B
def combination(A, B):
flipped = reflect(A)
return rot_cw_90(flipped, B) or rot_cw_180(flipped, B) or rot_cw_270(flipped, B)
n = len(ar1)
if rot_cw_90(ar1, ar2):
return 1
if rot_cw_180(ar1, ar2):
return 2
if rot_cw_270(ar1, ar2):
return 3
if flipped(ar1, ar2):
return 4
if combination(ar1, ar2):
return 5
if ar1 == ar2:
return 6
return 7
# ---- Unit Tests ----
def test_flip():
# Flip
assert reflect(['@-@','---','@@-']) == ['@-@','---','-@@']
assert reflect(['*-@-','@---','-@@-','**0-']) == ['-@-*','---@','-@@-', '-0**']
def test_90():
ar1 = ['@-@','---','@@-']
ar2 = ['@-@','@--','--@']
assert solve(ar1, ar2) == 1
def test_180():
ar1 = ['@-@','---','@@-']
ar2 = ['-@@','---','@-@']
assert solve(ar1, ar2) == 2
def test_270():
ar1 = ['@-@','---','@@-']
ar2 = ['@--','--@','@-@']
assert solve(ar1, ar2) == 3
def test_flipped():
ar1 = ['@-@','---','@@-']
ar2 = ['@-@','---','-@@']
assert solve(ar1, ar2) == 4
def test_combination():
ar1 = ['@-@','---','@@-']
ar2 = ['--@','@--','@-@']
assert solve(ar1, ar2) == 5
def test_no_change():
ar1 = ['@-@','---','@@-']
ar2 = ['@-@','---','@@-']
assert solve(ar1, ar2) == 6
def test_invalid():
ar1 = ['@-@','---','@@-']
ar2 = ['-@-','-@-','-@-']
assert solve(ar1, ar2) == 7
if __name__ == '__main__':
# test_flip()
# test_90()
# test_180()
# test_270()
# test_flipped()
# test_combination()
# test_no_change()
# test_invalid()
fin = open('transform.in','r')
fout = open('transform.out','w')
N = int(fin.readline())
ar1, ar2 = [], []
for _ in range(N):
s = fin.readline().strip()
ar1.append(s)
for _ in range(N):
s = fin.readline().strip()
ar2.append(s)
ans = solve(ar1, ar2)
fout.write('{}\n'.format(ans))
|
jasonhuh/UASCO-Solutions
|
transform/transform.py
|
transform.py
|
py
| 2,822
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35968448866
|
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import ui
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from collections import defaultdict
import time
import datetime
import csv
import unicodedata
import re
import hashlib
import os
from selenium.common.exceptions import ElementNotVisibleException
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
driver = webdriver.Chrome(chrome_options=options)
actions = ActionChains(driver)
today =datetime.date.today()
def check_exists_by_xpath(xpath):
try:
while (driver.find_element_by_xpath("%s"%(xpath,))) :
driver.find_element_by_xpath("%s"%(xpath,)).click()
time.sleep(5)
except ElementNotVisibleException:
print ("element not found")
wait = ui.WebDriverWait(driver, 10)
driver.get('http://www.cwtv.com/shows/')
print(driver.current_url)
time.sleep(8)
(driver.page_source).encode('ascii','ignore')
shows_count =driver.find_elements_by_xpath(".//*[@id='cw-main-footer-1']/div[1]/ul/li/a")
print ("Shows count :[%s]"%(len(shows_count)),)
launch_id =[]
service_videos = {}
href =[]
release_year=0
multiples =1
for s in range (len(shows_count)):
href.append(shows_count[s].get_attribute('href'))
print (href)
for h in range (len(href)):
try:
print (h)
driver.get (href[h])
episodes=driver.find_elements_by_xpath(".//*[@id='list_1']/div//li//a")
        multiples = len(episodes)//5  # integer division so range() receives an int
print (multiples)
for m in range (multiples) :
for e in range (len(episodes)):
print (len(episodes), e+1, m+1)
if e+1==(5*(m+1)) :
driver.find_element_by_xpath(".//*[contains(@id,'touchcarousel_1')]/button[2]").click()
time.sleep (3)
epi_href =episodes[e].get_attribute('href')
video_id =epi_href.split("=")[-1].encode('ascii', 'ignore')
epi_details =driver.find_element_by_xpath("(.//*[@id='list_1']/div//li//a//div[contains(@class,'videodetails')]/p[1])[%s]"%(e+1)).text.encode('ascii', 'ignore')
epi_title =epi_details.split("Ep.")[0].split("(")[0].strip()
epi_sea_num =epi_details.split("Ep.")[1].split(")")[0]
print (epi_details, epi_title, epi_sea_num)
if (len (epi_sea_num) == 3) :
epi_num=epi_details.split("Ep.")[1].split(")")[0][-2:]
season_num =epi_details.split("Ep.")[1].split(")")[0][0]
elif (len (epi_sea_num) == 4) :
epi_num=epi_details.split("Ep.")[1].split(")")[0][-2:]
season_num =epi_details.split("Ep.")[1].split(")")[0][0:2]
series_title =driver.find_element_by_xpath(".//*[@id='show-logo']/a").get_attribute('title').encode('ascii', 'ignore')
launch_id.append(video_id)
service_videos ["cwtv"] =launch_id
res=[today, "CWTV Shows", series_title, season_num, epi_num, epi_title, service_videos]
print (res)
with open(os.getcwd()+'/'+"cwtv_shows_output"+ '.csv', 'ab+') as mycsvfile:
thedatawriter =csv.writer(mycsvfile)
thedatawriter.writerow(res)
launch_id =[]
service_videos = {}
except Exception as e:
print(e)
continue
|
surbhikhandelwal/Python-Projects
|
CWTV/cwtv.py
|
cwtv.py
|
py
| 3,267
|
python
|
en
|
code
| 0
|
github-code
|
6
|
40483436324
|
import tkinter
import os
from PIL import Image, ImageTk
class OngletsPersonnage():
def __init__(self, main_onglets):
self.onglets_personnage = tkinter.ttk.Frame(main_onglets)
self.onglets_personnage.pack()
main_onglets.add(self.onglets_personnage, text='character')
self.create_canvas_character()
def set_character(self, character):
self.character = character
def create_canvas_character(self):
self.canvas_gfx_character = tkinter.Canvas(self.onglets_personnage)
self.create_charater("0","None","nul","nul")
self.canvas_gfx_character.place(relx=0.03, rely=0.1, relwidth=1, relheight=1)
self.canvas_vita = tkinter.Canvas(self.onglets_personnage)
self.print_image("stats\\vitaliter.png",self.canvas_vita)
self.canvas_vita.place(relx=0.75, rely=0.05, relwidth=0.1, relheight=0.12)
self.canvas_sagesse = tkinter.Canvas(self.onglets_personnage)
self.print_image("stats\\sagesse.png",self.canvas_sagesse)
self.canvas_sagesse.place(relx=0.75, rely=0.20, relwidth=0.1, relheight=0.12)
self.canvas_force = tkinter.Canvas(self.onglets_personnage)
self.print_image("stats\\force.png",self.canvas_force)
self.canvas_force.place(relx=0.75, rely=0.35, relwidth=0.1, relheight=0.12)
self.canvas_intel = tkinter.Canvas(self.onglets_personnage)
self.print_image("stats\\intelligence.png",self.canvas_intel)
self.canvas_intel.place(relx=0.75, rely=0.50, relwidth=0.1, relheight=0.12)
self.canvas_chance = tkinter.Canvas(self.onglets_personnage)
self.print_image("stats\\chance.png",self.canvas_chance)
self.canvas_chance.place(relx=0.75, rely=0.65, relwidth=0.1, relheight=0.12)
self.canvas_agi = tkinter.Canvas(self.onglets_personnage)
self.print_image("stats\\agilite.png",self.canvas_agi)
self.canvas_agi.place(relx=0.75, rely=0.80, relwidth=0.1, relheight=0.12)
def create_label_caracteristique(self,character):
self.label_vita = tkinter.Label(self.onglets_personnage, text = character.vie_max)
self.label_vita.place(relx=0.80, rely=0.05, relwidth=0.1, relheight=0.12)
self.label_sagesse = tkinter.Label(self.onglets_personnage, text = character.sagesse)
self.label_sagesse.place(relx=0.80, rely=0.20, relwidth=0.1, relheight=0.12)
self.label_force = tkinter.Label(self.onglets_personnage, text = character.force)
self.label_force.place(relx=0.80, rely=0.35, relwidth=0.1, relheight=0.12)
self.label_intel = tkinter.Label(self.onglets_personnage, text = character.intel)
self.label_intel.place(relx=0.80, rely=0.50, relwidth=0.1, relheight=0.12)
self.label_chance = tkinter.Label(self.onglets_personnage, text = character.chance)
self.label_chance.place(relx=0.80, rely=0.65, relwidth=0.1, relheight=0.12)
self.label_agi = tkinter.Label(self.onglets_personnage, text = character.agi)
self.label_agi.place(relx=0.80, rely=0.80, relwidth=0.1, relheight=0.12)
def print_image(self,path,canvas_):
dir_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),"resource\\" +path )
image = Image.open(dir_path)
photo = ImageTk.PhotoImage(image)
canvas_.create_image(photo.width(),photo.height(),image=photo)
canvas_.image = photo
def create_charater(self,gfx,speudo ,id_,lvl = ""):
dir_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),f"resource\\gfx\\{gfx}.png")
image = Image.open(dir_path)
photo = ImageTk.PhotoImage(image)
self.canvas_gfx_character.create_image(photo.width()/4.5,photo.height()/2,image=photo)
self.canvas_gfx_character.image = photo
self.canvas_gfx_character.place(relx=0.05, rely=0.9, relwidth=0.5, relheight=0.5)
self.canvas_gfx_character.place(relx=0.03, rely=0.1, relwidth=1, relheight=1)
speudo_and_id ="SPEUDO: "+ speudo +" ID: "+ id_ + " LEVEL: "+ lvl
name = tkinter.Label(self.onglets_personnage, text = speudo_and_id)
name.place(relx=0.01, rely=0.017,relwidth=0.4, relheight=0.09)
|
Azzary/LeafMITM
|
interface/onglets/onglets_personnage.py
|
onglets_personnage.py
|
py
| 4,255
|
python
|
en
|
code
| 3
|
github-code
|
6
|
34928362935
|
import mysql.connector
from mysql.connector import pooling
class Database:
def __init__(self, config):
self.config = config
self.cnxpool = self.create_cnxpool()
    def create_cnxpool(self):
        cnxpool = None  # stays None if the pool cannot be created
        try:
            cnxpool = pooling.MySQLConnectionPool(
                pool_name = "cnxpool",
                pool_size = 3,
                **self.config
            )
        except mysql.connector.Error as err:
            print(err)
        return cnxpool
def close(self, cursor, cnx):
cursor.close()
cnx.close()
    def execute_sql(self, sql, sql_data, commit=False):
        result = None  # stays None if the query fails
        try:
            cnx = self.cnxpool.get_connection()
            cursor = cnx.cursor(dictionary = True)
            cursor.execute(sql, sql_data)
            result = cursor.fetchall()
        except mysql.connector.Error:
            cnx.rollback()
        finally:
            if commit is True:
                cnx.commit()
            self.close(cursor, cnx)
        return result
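# Usage sketch (the config keys below are assumptions -- adapt to your setup):
#   db = Database({"host": "localhost", "user": "root",
#                  "password": "...", "database": "mydb"})
#   rows = db.execute_sql("SELECT * FROM member WHERE id = %s", (1,))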
|
alice1315/wehelp-third
|
app/models/database.py
|
database.py
|
py
| 1,070
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7998902064
|
import os
from bson.json_util import dumps
from dotenv import load_dotenv
# from flask import jsonify
import pymongo
load_dotenv() # use dotenv to hide sensitive credential as environment variables
DATABASE_URL = f'mongodb+srv://{os.environ.get("user")}:{os.environ.get("passwort")}' \
'@flask-mongodb-atlas.wicsm.mongodb.net/' \
'flaura?retryWrites=true&w=majority' # get connection url from environment
client = pymongo.MongoClient(DATABASE_URL) # establish connection with database
# plants.config['MONGO_DBNAME'] = 'restdb'
# plants.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'
# mongo = PyMongo(plants)
mydb = client.flaura
mycol = mydb.plants
def getPlantsByName(name):
cursor = mycol.find({"name": {"$regex": '.*'+name+'.*', "$options": 'i'}})
list_cur = list(cursor)
plants = dumps(list_cur)
return plants
def getAllPlants():
cursor = mycol.find()
list_cur = list(cursor)
plantList = dumps(list_cur)
return plantList
def setNewPlant(name, waterAmount, critMoist, sleepTime):
newPlant = {"name": name, "waterAmountML": waterAmount, "criticalMoisture": critMoist, "sleepTime": sleepTime}
mycol.insert_one(newPlant)
# function Get List of Plants that contain <name>
# function Get All Plants??
# function Add new Plant to DB
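# Usage sketch (hypothetical values):
#   setNewPlant("Monstera", 200, 30, 3600)
#   print(getPlantsByName("monstera"))  # the regex search is case-insensitive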
|
rosemaxio/flauraBackend
|
plants/db.py
|
db.py
|
py
| 1,326
|
python
|
en
|
code
| 0
|
github-code
|
6
|
811133362
|
import pygame
from pygame.locals import *
from entities import User, Enemy
from fonctions import *
from stage import *
from hud import *
import random
import time
import zmq
import threading
from stage import *
from tkinter import *
from playsound import playsound
def choix1():
global perso
perso=1
button1.configure(relief=SUNKEN)
button2.configure(relief=RAISED)
button3.configure(relief=RAISED)
def choix2():
global perso
perso=2
button1.configure(relief=RAISED)
button2.configure(relief=SUNKEN)
button3.configure(relief=RAISED)
def choix3():
global perso
perso=3
button1.configure(relief=RAISED)
button2.configure(relief=RAISED)
button3.configure(relief=SUNKEN)
perso=1
fen=Tk()
fen.geometry("250x300+200+0")
fen.configure(bg = "white")
user1=PhotoImage(file='images/user1.gif')
user2=PhotoImage(file='images/user2.gif')
user3=PhotoImage(file='images/user3.gif')
fen.title("LE JEU")
Label(fen,text=" ",bg="white").grid(row=1,column=0)
Label(fen,text="LE JEU \n\n ",bg="white").grid(row=0,column=2)
Button(fen,text="Jouer ",bg="white",command=fen.destroy).grid(row=1,column=2)
Label(fen,text="\n"*3,bg="white").grid(row=2,column=1)
button1=Button(fen, image=user1,bg="white",command=choix1, relief=SUNKEN)
button1.grid(row=3,column=1)
button2=Button(fen, image=user2,bg="white",command=choix2)
button2.grid(row=3,column=2)
button3=Button(fen, image=user3,bg="white",command=choix3)
button3.grid(row=3,column=3)
Label(fen,text="\n"*3,bg="white").grid(row=4,column=1)
Button(fen,text="Quitter",command=exit).grid(row=5,column=2)
jeu=0
playsound('musique_menu.mp3',block = False)
fen.mainloop()
gameOver = False
pygame.init()
screen = pygame.display.set_mode((620,480))
pygame.display.set_caption('User 1')
screen.fill((50,60,50))
pygame.display.update()
user = User(screen,1,perso)
coop = User(screen,2,3)
hud = HUD(screen)
context = zmq.Context()
usersChan = context.socket(zmq.PAIR)
usersChan.bind("tcp://127.0.0.1:1111".format(coop.id))
murs, enemies, potions, portes, eaus = classic(screen)
def recv(usersChan):
global coop, gameOver, points
while True:
if gameOver == True:
if points == 16:
print("WIN !")
else:
print("Game Over ! Vous avez {} points".format(points))
exit()
return
try:
data = usersChan.recv_pyobj(flags=zmq.NOBLOCK)
coop.pos = data["user"]["pos"]
coop.vie = data["user"]["vie"]
coop.attaque = data["user"]["attaque"]
coop.defense = data["user"]["defense"]
coop.level = data["user"]["level"]
coop.xp= data["user"]["xp"]
            ### Remove objects that are not shared between the two Python lists
for potion in potions:
ok = False
for p in data["potions"]:
if potion.pos == p["pos"]:
ok = True
if ok == False:
potions.remove(potion)
for enemy in enemies:
ok = False
for e in data["enemies"]:
if enemy.pos == e["pos"]:
enemy.vie = e["vie"]
ok = True
if ok == False:
enemies.remove(enemy)
refresh()
except zmq.ZMQError as err:
pass
def refresh():
screen.fill((50,60,50))
hud.show(user,coop)
user.show()
coop.show()
for enemy in enemies:
enemy.show()
for mur in murs:
mur.show()
for potion in potions:
potion.show()
for porte in portes:
porte.show()
for eau in eaus:
eau.show()
user.show()
coop.show()
pygame.display.flip()
pygame.display.update()
# Send the first data payload
usersChan.send_pyobj(setData(user,coop,murs,potions,portes,eaus,enemies,True))
points = 0
# Create the thread that receives data
threadRecv = threading.Thread(target=recv, args=(usersChan,))
threadRecv.start()
while not gameOver:
changement = False
if user.vie <= 0:
gameOver = True
if coop.vie <= 0:
gameOver = True
for event in pygame.event.get():
        # Alt+F4 or the window close button
if event.type == QUIT:
gameOver = True
        # If a key was pressed
if event.type == KEYDOWN:
action = 1
if event.key == K_UP:
coord = (0,-1)
elif event.key == K_DOWN:
coord = (0,1)
elif event.key == K_LEFT:
coord = (-1,0)
elif event.key == K_RIGHT:
coord = (1,0)
else:
action = 0
if action != 0:
user.mouvement(coord)
if user.pos == coop.pos:
user.mouvement((-coord[0],-coord[1]))
for enemy in enemies:
if enemy.pos == user.pos:
                    # Attack:
enemy.vie -= user.attaque + user.arme
user.vie -= enemy.defense
if user.vie <= 0:
user.vie = 0
                        gameOver = True
# print("Vie restante :", user.vie, "Vie enemmi :", enemy.vie)
if enemy.vie <= 0:
user.xp += enemy.level
enemies.remove(enemy)
                # Step back
else:
user.mouvement((-coord[0],-coord[1]))
if user.xp >= user.level * 2:
user.levelUP()
for mur in murs:
if mur.pos == user.pos :
if mur.genre == "lave":
user.vie -= 15
elif mur.genre == "pont":
pass
elif mur.genre == "levier":
pass
else:
user.mouvement((-coord[0],-coord[1]))
for eau in eaus:
if eau.pos == user.pos :
user.mouvement((-coord[0],-coord[1]))
for potion in potions:
if user.pos == potion.pos:
if potion.type == "heal":
user.heal()
elif potion.type == "atk":
user.atk()
elif potion.type == "atkboss":
for i in range (20):
user.atk()
elif potion.type == "xp":
user.levelUP()
potions.remove(potion)
for porte in portes:
if porte.pos == user.pos or porte.pos == coop.pos:
print("Changement de map")
points += 1
user.pos = [32,160]
coop.pos = [32,192]
if points == 1:
murs, enemies, potions, portes, eaus = deux(screen)
elif points == 2:
murs, enemies, potions, portes, eaus = troix(screen)
elif points == 15:
murs, enemies, potions, portes, eaus = six(screen)
elif points == 16:
gameOver = True
else:
murs, enemies, potions, portes, eaus = random.choice([quatre(screen), cinq(screen)])
changement = True
    ### Send the data back
try:
message = setData(user,coop,murs,potions,portes,eaus,enemies,changement)
usersChan.send_pyobj(message)
except zmq.ZMQError as err:
print ("Error while trying to send the value " + message + " : " + str(err))
refresh()
pygame.display.flip()
pygame.display.update()
pygame.time.wait(10)
|
ZeProf10T/projet-isn
|
server.py
|
server.py
|
py
| 8,030
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6193427862
|
"""
Main script: Autonomous Driving on Udacity Simulator
@author : nelsoonc
Undergraduate Thesis
Nelson Changgraini - Bandung Institute of Technology, Indonesia
"""
# Throttle 0 - 1 will produce speed 0 - 30 mph
# Steering -1 - 1 will produce angle -25 - 25 degrees
import os
import numpy as np
import socketio
import eventlet
from flask import Flask
import tensorflow as tf
from tensorflow.keras.models import load_model
import base64
from io import BytesIO
from PIL import Image
from train import rmse, get_lr_metric
from utils import preprocess
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# DIRECTORY PATH
MODEL_PATH = 'models/simulation_model.h5'
# VARIABLE
MAX_SPEED = 25
# FOR REAL TIME COMMUNICATION BETWEEN CLIENT AND SERVER
sio = socketio.Server()
# FLASK IS A MICRO WEB FRAMEWORK WRITTEN IN PYTHON
app = Flask(__name__) # '__main__'
# Executing in graph mode
@tf.function
def predict(input_tensor, model):
return model(input_tensor)
@sio.on('telemetry')
def telemetry(sid, data):
speed = float(data['speed'])
image = Image.open(BytesIO(base64.b64decode(data['image'])))
image = np.asarray(image)
image = preprocess(image)
image = np.array([image])
steering = float(predict(image, model))
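    # Heuristic throttle: ease off for sharp steering and as speed approaches
    # MAX_SPEED; the expression can go negative, effectively asking the car
    # to slow down.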
throttle = 1.0 - abs(steering) - speed / MAX_SPEED
print('{}, {}, {}'.format(steering, throttle, speed))
sendControl(steering, throttle)
@sio.on('connect')
def connect(sid, environ):
print('Connected', sid)
sendControl(0, 0)
@sio.on('disconnect')
def disconnect(sid):
print('Disconnect', sid)
def sendControl(steering, throttle):
sio.emit('steer', data={
'steering_angle': steering.__str__(),
'throttle': throttle.__str__()
}, skip_sid=True)
if __name__ == '__main__':
print('Setting up..')
model = load_model(MODEL_PATH, custom_objects={'rmse': rmse, 'lr': get_lr_metric})
if model:
print('Model loaded')
app = socketio.Middleware(sio, app)
# LISTEN TO PORT 4567
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
|
zhouzheny1/Conditional_Imitation_Learning
|
simulation/main.py
|
main.py
|
py
| 2,123
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37182795454
|
import os
import re
from typing import Tuple
from transformers import pipeline # type: ignore
MODEL_PATH = os.environ.get("MODEL_PATH", "./distilbert-base-cased-distilled-squad")
class CardSourceGeneratorMock:
def __call__(self, text: str, question: str) -> Tuple[int, int]:
return 0, len(text) // 2
class CardSourceGenerator:
def __init__(self) -> None:
self._qa_model = pipeline(
"question-answering", model=MODEL_PATH, tokenizer=MODEL_PATH
)
def __call__(self, text: str, question: str) -> Tuple[int, int]:
answer = self._qa_model(question=question, context=text) # type: ignore
start, end = self._find_sentence_indices(text, answer["start"], answer["end"])
return start, end
def _find_sentence_indices(
self, text: str, substring_start: int, substring_end: int
) -> Tuple[int, int]:
"""
Finds the starting and ending indices of the sentence that contains the substring.
"""
sentences = re.split(r"\n|(?<=[.!?])\s+", text)
substring = text[substring_start:substring_end]
for sentence in sentences:
index = sentence.lower().find(substring.lower())
if index != -1:
start = text.index(sentence)
end = start + len(sentence)
return start, end
return substring_start, substring_end
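# Example usage (sketch; the mock avoids loading the real QA model):
# generator = CardSourceGeneratorMock()
# start, end = generator("Some note text.", "What is this about?")  # -> (0, 7)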
|
MoShrank/card-generation-service
|
text/CardSourceGenerator.py
|
CardSourceGenerator.py
|
py
| 1,408
|
python
|
en
|
code
| 0
|
github-code
|
6
|
5203502596
|
# -*- coding: utf-8 -*-
"""
Spyderエディタ
これは一時的なスクリプトファイルです
"""
#WEBクローリング
import time
import re
import requests
import lxml.html
from pymongo import MongoClient
def main():
client = MongoClient('localhost', 27017)
    # Create the ebooks collection in the scraping database
    collection = client.scraping.ebooks
    # Set a unique index on the key field
    collection.create_index('key', unique=True)
    # Fetch the listing page; use a Session since we access the site repeatedly
    session = requests.Session()
    response = session.get('https://gihyo.jp/dp')
    # Get a generator of page URLs
    urls = scrape_list_page(response)
#url_list = [str(url) for url in urls]
for url in urls:
#url = url_list[0]
        # Extract the key
        key = extract_key(url)
        # Find the first document with the same key
        ebook = collection.find_one({'key': key})
        # Only proceed if no document with this key exists yet
        if not ebook:
            # Visit each URL, pausing politely between requests
            time.sleep(1)
            response = session.get(url)
            # Build a document for the ebooks collection
            ebook = scrape_detail_page(response)
            # Insert the document into the DB
            collection.insert_one(ebook)
            print(ebook)
# Takes the listing page (HTML response)
def scrape_list_page(response):
    # Parse the HTML
    root = lxml.html.fromstring(response.content)
    # Convert relative URLs to absolute ones
    root.make_links_absolute(response.url)
    # Under the element with id listBook, take <a> elements whose itemprop attribute is "url" (CSS selector)
    for a in root.cssselect('#listBook a[itemprop="url"]'):
        url = a.get('href')
        yield url
# Scrape each book's details (title, price, table of contents)
def scrape_detail_page(response):
root = lxml.html.fromstring(response.content)
ebook = {
'url': response.url,
'key': extract_key(response.url),
'title': root.cssselect('#bookTitle')[0].text_content(),
'price': root.cssselect('.buy')[0].text,
'content': [normalize_spaces(h3.text_content()) for h3 in root.cssselect('#content > h3')],
}
return ebook
def extract_key(url):
    # Take the substring from the last '/' to the end of the URL
    m = re.search(r'([^/]+)$', url)
    return m.group(1)
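# e.g. extract_key('https://gihyo.jp/dp/ebook/sample-id') returns 'sample-id'
# (the last path segment; the URL here is illustrative)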
# Replace runs of ideographic spaces (U+3000) with ': '
def normalize_spaces(s):
    #return re.sub(r'\s+', ' ', s).strip()
    return re.sub(r'\u3000+', ': ', s).strip()
if __name__ == '__main__':
main()
# Scratch: increment every value in a dict
chk = {'a': 0, 'b': 1, 'c': 3}
for val in chk:
    chk[val] += 1
|
inamasa12/cr-sc
|
python_crowler_4.py
|
python_crowler_4.py
|
py
| 2,855
|
python
|
ja
|
code
| 0
|
github-code
|
6
|
25993011459
|
import urllib
from flask import Blueprint, request, render_template, flash, redirect, url_for
from orders_tracker.blueprints.clients.service import add_client, update_client, remove_client, search_clients, \
get_form_fields, get_path_args, \
get_clients_count, render_empty, get_pagination_metadata, paginate_clients
from orders_tracker.forms import NewClientForm, DeleteConfirmForm
from orders_tracker.models import Client, Device
from orders_tracker.tables import ClientsTable
clients_blueprint = Blueprint('clients_bp', __name__, template_folder="templates")
@clients_blueprint.route('/clients/new', methods=['GET', 'POST'])
def new_client():
form = NewClientForm()
if request.method == 'POST':
if form.validate_on_submit():
created_client = Client(form.name.data, form.phone.data, form.address.data, form.notes.data)
add_client(created_client)
return redirect(url_for('clients_bp.clients'))
else:
            flash('Check the entered values.', category='warning')
return render_template('new_client.html', form=form)
@clients_blueprint.route('/clients', methods=['GET', 'POST'])
def clients():
if request.method == 'POST':
search_field = get_form_fields()
return redirect(url_for('clients_bp.clients',
search_query=search_field))
search_arg, page_arg = get_path_args()
stats = {'total': get_clients_count(), 'filter': -1}
clients_query = search_clients(search_arg)
stats['filter'] = clients_query.count()
if stats['filter'] == 0:
return render_empty(stats, search_arg)
pagination_metadata = get_pagination_metadata(page_arg, clients_query)
clients_list = paginate_clients(pagination_metadata, clients_query)
table = ClientsTable(clients_list)
return render_template('clients.html',
table=table,
stats=stats,
search_field_value=search_arg,
pagination_data=pagination_metadata)
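# Typical request shape (assuming get_path_args reads 'search_query' and a page argument):
# GET /clients?search_query=foo&page=2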
@clients_blueprint.route('/clients/<client_id>', methods=['GET', 'POST'])
def client(client_id):
address_link = None
selected_client = Client.query.filter_by(id=client_id).first_or_404()
if selected_client.address:
address_link = "https://www.google.com/maps/search/?api=1&query=" + \
urllib.parse.quote_plus(selected_client.address)
devices = Device.query.filter_by(client_id=client_id).all()
return render_template('client.html',
client=selected_client,
devices=devices,
address_link=address_link)
@clients_blueprint.route('/clients/<client_id>/edit', methods=['GET', 'POST'])
def edit_client(client_id):
edited_client = Client.query.filter_by(id=client_id).first()
modal_form = NewClientForm()
if request.method == 'POST':
if modal_form.validate_on_submit():
edited_client.name = modal_form.name.data
edited_client.phone = modal_form.phone.data
edited_client.address = modal_form.address.data
edited_client.notes = modal_form.notes.data
update_client(edited_client)
return redirect(url_for('clients_bp.client', client_id=edited_client.id))
else:
            flash('Client data was not updated.', category='warning')
    modal_form = NewClientForm(obj=edited_client)
return render_template('edit_client.html',
form=modal_form,
message_title="Редагування інформації про клієнта",
client_id=edited_client.id,
color="is-link")
@clients_blueprint.route('/clients/<client_id>/delete', methods=['GET', 'POST'])
def delete_client(client_id):
deleted_client = Client.query.filter_by(id=client_id).first()
form = DeleteConfirmForm()
if request.method == 'POST':
if form.validate_on_submit():
remove_client(deleted_client)
return redirect(url_for('clients_bp.clients'))
return render_template('delete_confirm.html',
form=form,
client_id=deleted_client.id,
message_title="Видалення клієнта",
message="Ви дійсно бажаєте видалити клієнта " + deleted_client.name + "?")
|
1Lorde/orders-tracker
|
orders_tracker/blueprints/clients/routes.py
|
routes.py
|
py
| 4,565
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8267132836
|
import logging
import os
import pytest
import yaml
from cekit.config import Config
from cekit.descriptor import Image, Overrides
from cekit.descriptor.resource import create_resource
from cekit.errors import CekitError
try:
from unittest.mock import call
except ImportError:
from mock import call
config = Config()
def setup_function(function):
config.cfg["common"] = {"work_dir": "/tmp"}
if os.path.exists("file"):
os.remove("file")
def test_repository_dir_is_constructed_properly(mocker):
mocker.patch("subprocess.run")
mocker.patch("os.path.isdir", ret="True")
mocker.patch("cekit.descriptor.resource.Chdir", autospec=True)
res = create_resource(
{"git": {"url": "http://host.com/url/repo.git", "ref": "ref"}}
)
assert res.copy("dir") == "dir/repo"
def test_repository_dir_uses_name_if_defined(mocker):
mocker.patch("subprocess.run")
mocker.patch("os.path.isdir", ret="True")
mocker.patch("cekit.descriptor.resource.Chdir", autospec=True)
res = create_resource(
{
"name": "some-id",
"git": {"url": "http://host.com/url/repo.git", "ref": "ref"},
}
)
assert res.copy("dir") == "dir/some-id"
def test_repository_dir_uses_target_if_defined(mocker):
mocker.patch("subprocess.run")
mocker.patch("os.path.isdir", ret="True")
mocker.patch("cekit.descriptor.resource.Chdir", autospec=True)
res = create_resource(
{
"target": "some-name",
"git": {"url": "http://host.com/url/repo.git", "ref": "ref"},
}
)
assert res.copy("dir") == "dir/some-name"
def test_git_clone(mocker):
mock = mocker.patch("subprocess.run")
mocker.patch("os.path.isdir", ret="True")
mocker.patch("cekit.descriptor.resource.Chdir", autospec=True)
res = create_resource(
{"git": {"url": "http://host.com/url/path.git", "ref": "ref"}}
)
res.copy("dir")
mock.assert_has_calls(
[
call(
["git", "clone", "http://host.com/url/path.git", "dir/path"],
stdout=None,
stderr=None,
check=True,
universal_newlines=True,
),
call(
["git", "checkout", "ref"],
stdout=None,
stderr=None,
check=True,
universal_newlines=True,
),
],
any_order=True,
)
def get_res(mocker):
res = mocker.Mock()
res.status_code = 200
res.iter_content = lambda chunk_size: [b"test"]
return res
def get_ctx(mocker):
ctx = mocker.Mock()
ctx.check_hostname = True
ctx.verify_mode = 1
return ctx
def get_mock_urlopen(mocker):
return mocker.patch("cekit.tools.urlopen", return_value=get_res(mocker))
def get_mock_ssl(mocker, ctx):
return mocker.patch("cekit.tools.ssl.create_default_context", return_value=ctx)
def test_fetching_with_ssl_verify(mocker):
config.cfg["common"]["ssl_verify"] = True
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
mock_urlopen = get_mock_urlopen(mocker)
res = create_resource({"name": "file", "url": "https:///dummy"})
try:
res.copy()
except Exception:
pass
mock_urlopen.assert_called_with("https:///dummy", context=ctx)
assert ctx.check_hostname is True
assert ctx.verify_mode == 1
def test_fetching_disable_ssl_verify(mocker):
config.cfg["common"]["ssl_verify"] = False
mock_urlopen = get_mock_urlopen(mocker)
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
res = create_resource({"name": "file", "url": "https:///dummy"})
try:
res.copy()
except Exception:
pass
mock_urlopen.assert_called_with("https:///dummy", context=ctx)
assert ctx.check_hostname is False
assert ctx.verify_mode == 0
def test_fetching_bad_status_code():
res = create_resource({"name": "file", "url": "http:///dummy"})
with pytest.raises(CekitError):
res.copy()
def test_fetching_file_exists_but_used_as_is(mocker):
"""
It should not download the file, because we didn't
specify any hash algorithm, so integrity checking is
implicitly disabled here.
"""
with open("file", "w") as f: # noqa: F841
pass
mock_urlopen = get_mock_urlopen(mocker)
res = create_resource(
{
"name": "file",
"url": "http:///dummy",
"md5": "d41d8cd98f00b204e9800998ecf8427e",
}
)
res.copy()
mock_urlopen.assert_not_called()
def test_fetching_file_exists_fetched_again(mocker):
"""
    It should download the file again, because the locally
    available file doesn't match the checksum.
"""
mock_urlopen = get_mock_urlopen(mocker)
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
with open("file", "w") as f: # noqa: F841
pass
res = create_resource({"name": "file", "url": "http:///dummy", "md5": "123456"})
with pytest.raises(CekitError):
        # Checksum will fail, because the "downloaded" file
        # will not have an md5 equal to 123456. We would need to
        # investigate mocking of the download calls to do it properly.
res.copy()
mock_urlopen.assert_called_with("http:///dummy", context=ctx)
def test_fetching_file_exists_no_hash_fetched_again(mocker):
"""
    It should download the file again, because no checksum was
    specified, so the local copy cannot be verified.
"""
mock_urlopen = get_mock_urlopen(mocker)
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
with open("file", "w") as f: # noqa: F841
pass
res = create_resource({"name": "file", "url": "http:///dummy"})
with pytest.raises(CekitError):
        # url is not valid so we get an error, but we are not interested
        # in it. We just need to check that a download was attempted.
res.copy()
mock_urlopen.assert_called_with("http:///dummy", context=ctx)
def test_generated_url_without_cacher():
res = create_resource({"url": "url"})
assert res._Resource__substitute_cache_url("url") == "url"
def test_resource_verify(mocker):
mock = mocker.patch("cekit.descriptor.resource.check_sum")
res = create_resource({"url": "dummy", "sha256": "justamocksum"})
res._Resource__verify("dummy")
mock.assert_called_with("dummy", "sha256", "justamocksum")
def test_generated_url_with_cacher():
config.cfg["common"]["cache_url"] = "#filename#,#algorithm#,#hash#"
res = create_resource({"url": "dummy", "sha256": "justamocksum"})
res.name = "file"
assert res._Resource__substitute_cache_url("file") == "file,sha256,justamocksum"
def test_path_resource_absolute():
res = create_resource({"name": "foo", "path": "/bar"}, directory="/foo")
assert res.path == "/bar"
def test_path_resource_relative():
res = create_resource({"name": "foo", "path": "bar"}, directory="/foo")
assert res.path == "/foo/bar"
def test_path_local_existing_resource_no_cacher_use(mocker):
config.cfg["common"]["cache_url"] = "#filename#,#algorithm#,#hash#"
mocker.patch("os.path.exists", return_value=True)
shutil_mock = mocker.patch("shutil.copy2")
res = create_resource({"name": "foo", "path": "bar"}, directory="/foo")
mocker.spy(res, "_download_file")
res.guarded_copy("target")
shutil_mock.assert_called_with("/foo/bar", "target")
assert res._download_file.call_count == 0
def test_path_local_non_existing_resource_with_cacher_use(mocker):
config.cfg["common"]["cache_url"] = "#filename#,#algorithm#,#hash#"
mocker.patch("os.path.exists", return_value=False)
mocker.patch("os.makedirs")
res = create_resource({"name": "foo", "path": "bar"}, directory="/foo")
mocker.spy(res, "_download_file")
download_file_mock = mocker.patch.object(res, "_download_file")
res.guarded_copy("target")
download_file_mock.assert_called_with("/foo/bar", "target")
def test_url_resource_download_cleanup_after_failure(mocker, tmpdir, caplog):
caplog.set_level(logging.DEBUG, logger="cekit")
mocker.patch("os.path.exists", return_value=False)
mocker.patch("os.makedirs")
os_remove_mock = mocker.patch("os.remove")
urlopen_class_mock = mocker.patch("cekit.tools.urlopen")
urlopen_mock = urlopen_class_mock.return_value
urlopen_mock.getcode.return_value = 200
urlopen_mock.read.side_effect = Exception
res = create_resource({"url": "http://server.org/dummy", "sha256": "justamocksum"})
targetfile = os.path.join(str(tmpdir), "targetfile")
with pytest.raises(CekitError) as excinfo:
res.guarded_copy(targetfile)
assert "Error copying resource: 'dummy'. See logs for more info" in str(
excinfo.value
)
assert (
"Removing incompletely downloaded '{}' file".format(targetfile) in caplog.text
)
urlopen_class_mock.assert_called_with("http://server.org/dummy", context=mocker.ANY)
os_remove_mock.assert_called_with(targetfile)
def test_copy_plain_resource_with_cacher(mocker, tmpdir):
config.cfg["common"]["cache_url"] = "#filename#,#algorithm#,#hash#"
config.cfg["common"]["work_dir"] = str(tmpdir)
urlopen_class_mock = mocker.patch("cekit.tools.urlopen")
mock_urlopen = urlopen_class_mock.return_value
mock_urlopen.getcode.return_value = 200
mock_urlopen.read.side_effect = [b"one", b"two", None]
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
with open("file", "w") as f: # noqa: F841
pass
res = create_resource({"name": "foo", "md5": "5b9164ad6f496d9dee12ec7634ce253f"})
substitute_cache_url_mock = mocker.patch.object(
res, "_Resource__substitute_cache_url", return_value="http://cache/abc"
)
res.copy(str(tmpdir))
substitute_cache_url_mock.assert_called_once_with(None)
urlopen_class_mock.assert_called_with("http://cache/abc", context=ctx)
def test_copy_plain_resource_from_brew(mocker, tmpdir):
config.cfg["common"]["work_dir"] = str(tmpdir)
config.cfg["common"]["redhat"] = True
urlopen_class_mock = mocker.patch("cekit.tools.urlopen")
mock_urlopen = urlopen_class_mock.return_value
mock_urlopen.getcode.return_value = 200
mock_urlopen.read.side_effect = [b"one", b"two", None]
ctx = get_ctx(mocker)
get_mock_ssl(mocker, ctx)
with open("file", "w") as f: # noqa: F841
pass
res = create_resource({"name": "foo", "md5": "5b9164ad6f496d9dee12ec7634ce253f"})
mocker.spy(res, "_Resource__substitute_cache_url")
mock_get_brew_url = mocker.patch(
"cekit.descriptor.resource.get_brew_url", return_value="http://cache/abc"
)
res.copy(str(tmpdir))
mock_get_brew_url.assert_called_once_with("5b9164ad6f496d9dee12ec7634ce253f")
assert res._Resource__substitute_cache_url.call_count == 0
urlopen_class_mock.assert_called_with("http://cache/abc", context=ctx)
def test_override_resource_remove_chksum():
image = Image(
yaml.safe_load(
"""
from: foo
name: test/foo
version: 1.9
artifacts:
- name: abs
path: /tmp/abs
md5: 'foo'
sha1: 'foo'
sha256: 'foo'
sha512: 'foo'
"""
),
"foo",
)
overrides = Overrides(
yaml.safe_load(
"""
artifacts:
- name: abs
path: /tmp/over
"""
),
"foo",
)
overrides.merge(image)
assert overrides["from"] == "foo"
assert overrides["artifacts"][0]["path"] == "/tmp/over"
assert "md5" not in overrides["artifacts"][0]
assert "sha1" not in overrides["artifacts"][0]
assert "sha256" not in overrides["artifacts"][0]
assert "sha512" not in overrides["artifacts"][0]
|
cekit/cekit
|
tests/test_unit_resource.py
|
test_unit_resource.py
|
py
| 11,760
|
python
|
en
|
code
| 70
|
github-code
|
6
|
6343086075
|
from .db import add_prefix_for_prod, db, environment, SCHEMA
from sqlalchemy.sql import func
community_users = db.Table(
"community_users",
db.Model.metadata,
db.Column("user_id", db.ForeignKey(
add_prefix_for_prod("users.id")), primary_key=True),
db.Column("business_id", db.ForeignKey(
add_prefix_for_prod("communities.id")), primary_key=True)
)
if environment == "production":
community_users.schema = SCHEMA
|
marcsmithr/Reddit-Clone
|
app/models/join_tables.py
|
join_tables.py
|
py
| 450
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7538481658
|
# You are given a list of integers. Write a Python function that finds and returns the largest element in the list.
# The integers in the list may not be sorted.
# You can assume that the list will not be empty.
def find_largest_element(input_list):
    positional_var = 0
    num = input_list[0]
    # Guard the index before reading it, so single-element lists don't raise IndexError
    while positional_var < len(input_list) - 1:
        positional_var += 1
        if input_list[positional_var] > num:
            num = input_list[positional_var]
    print("The largest element in the list is:", num)
    return num
find_largest_element([5, 7, 2, 4, 1, 9, 4, 12, 4, 3, 11])
find_largest_element([15, 13, 14, 5, 8, 3])
# Optimum Solution
def findLargestElement(inputList):
    largestElement = inputList[0]  # start from the first element so all-negative lists work
    for i in inputList:
        if i > largestElement:
            largestElement = i
    return largestElement
print(findLargestElement([12,5,1,78,32,122]))
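# With the first-element initialisation above, all-negative input also works:
print(findLargestElement([-5, -2, -9]))  # -2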
|
Shaunc99/Python
|
arrays/LargestElement.py
|
LargestElement.py
|
py
| 908
|
python
|
en
|
code
| 2
|
github-code
|
6
|
7965704838
|
from pathlib import Path
from promtail_ops_manager import PromtailOpsManager
# The promtail release file.
resource = "./promtail.zip"
manager = PromtailOpsManager()
# manager.install(resource)
# Setup for local tests such that installation of binaries etc.
# will not mess up your local client.
manager.promtail_home = Path('/tmp/promtail')
manager.promtail = Path('/tmp/promtail/promtail-linux-amd64')
manager.promtail_cfg = manager.promtail_home.joinpath('promtail-local-config.yaml')
manager.promtail_unitfile = Path('/tmp/promtail.service')
# Run tests.
manager._prepareOS()
manager._install_from_resource(resource)
manager._install_config()
manager._install_systemd_unitfile()
if manager.verify_config():
print("Config OK")
else:
print("Config is error")
print("Version:", manager.promtail_version() )
# manager._purge()
|
erik78se/promtail-vm-operator
|
tests/testlib.py
|
testlib.py
|
py
| 839
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4203368667
|
# Method to find all the legitimate words
def get_legitimate_words(letters, sowpods):
legitimate_words = []
# Iterate on each word of the dictionary
for word in sowpods:
# Set the flag as True
is_word_legitimate = True
# Iterate on each character of word
for character in word:
# If character not in letters break the loop
if character not in letters:
is_word_legitimate = False
break
# Append the word to output list
if is_word_legitimate:
legitimate_words.append(word)
# Return
return legitimate_words
sowpods = [
'FAST',
'FEST',
'PEST',
'PAST',
'PAT',
'FAD',
'FEAST',
'DIRT'
]
letters = ['A', 'P', 'F', 'D', 'S', 'T', 'E']
print(get_legitimate_words(letters, sowpods))
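# Expected output: every word above except 'DIRT' (its 'I' and 'R' are not in letters).
# Note: the check ignores letter multiplicity, so a letter may be reused within a word.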
|
Adnation/sowpods
|
yoptima.py
|
yoptima.py
|
py
| 841
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24037873801
|
import os.path
homedir = os.path.expanduser("~")
class Config:
    def __init__(self):
        # Instance attributes, so repeated parses don't share state
        self.bindsym_dict = {}
        self.set_dict = {}
        self.exec_list = []
        self.exec_always_list = []
def get_i3_config():
    config = Config()
    with open(homedir + "/.config/i3/config", "r") as i3_config_file:
        for line in i3_config_file:
            line = line.strip()
            phrases = line.split(sep=" ")
            if phrases[0].strip() == "bindsym":
                config.bindsym_dict[phrases[1]] = " ".join(phrases[2:])
            elif phrases[0].strip() == "set":
                config.set_dict[phrases[1]] = " ".join(phrases[2:])
            elif phrases[0].strip() == "exec":
                if phrases[1] == "--no-startup-id":
                    config.exec_list.append((" ".join(phrases[2:]), True))
                else:
                    config.exec_list.append((" ".join(phrases[1:]), False))
            elif phrases[0] == "exec_always":
                if phrases[1] == "--no-startup-id":
                    config.exec_always_list.append((" ".join(phrases[2:]), True))
                else:
                    config.exec_always_list.append((" ".join(phrases[1:]), False))
    return config
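# Example usage (sketch, assuming a standard i3 config exists):
# cfg = get_i3_config()
# print(cfg.bindsym_dict.get("$mod+Return"))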
|
flyingcakes85/i3wm-config-gui
|
config_parser.py
|
config_parser.py
|
py
| 1,139
|
python
|
en
|
code
| 1
|
github-code
|
6
|
18155298342
|
import customtkinter as ctk
from PIL import Image
root = ctk.CTk()
root.title("IRIS")
root.geometry("1080x720")
root._set_appearance_mode("dark")
frame = ctk.CTkFrame(master=root)
frame.pack(pady=20)
logo = ctk.CTkImage(Image.open(
"/home/nabendu/Documents/MCA/projects/python-speechRecongition-desktop-AI-project/main/img/walle.png"), size=(200, 180))
label = ctk.CTkLabel(frame, image=logo, text="")
label.grid(row=0, column=0, pady=0, padx=0)
aiTextBox = ctk.CTkTextbox(master=frame, height=100, width=500)
aiTextBox.grid(row=0, column=1, pady=10, padx=50)
frame2 = ctk.CTkFrame(master=root)
frame2.pack(pady=10)
userTextBox = ctk.CTkTextbox(master=frame2, height=50, width=500)
userTextBox.grid(row=0, column=0, padx=30, pady=10)
command = ctk.CTkButton(master=frame2, text="Enter Command",
height=50)
command.grid(row=0, column=1, padx=50, pady=10)
root.mainloop()
|
Nandy1002/python-speechRecongition-desktop-AI-project
|
main/gui.py
|
gui.py
|
py
| 906
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71040814269
|
import pandas as pd
n = 6
res = [[] for _ in range(0, 105, 5)]
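# checkLine compares the run-lengths of consecutive filled cells (non-zero
# values) in boardline against the expected constraints cons; [0] encodes
# an empty line.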
def checkLine(boardline, cons):
realCons = []
cnt = 0
for i in boardline:
if i==0:
if cnt!=0:
realCons.append(cnt)
cnt = 0
else:
cnt += 1
if cnt!=0:
realCons.append(cnt)
if len(realCons)==0:
realCons.append(0)
return realCons == cons
def checkBoard(board, constraints):
rightRes = 0
for k in range(n):
newLine = [board[k][l] for l in range(n)]
if checkLine(newLine, constraints[k]):
rightRes += 1
for k in range(n):
newLine = [board[l][k] for l in range(n)]
if checkLine(newLine, constraints[k + n]):
rightRes += 1
return rightRes
for i in range(0, 105, 5):
bpsumsim = 0
bptime = 0
bpsumright = 0
totalsumright = 0
for j in range(100):
f = open("Testcases/Test_6_"+str(i)+"/Test_"+str(j)+".txt", 'r')
lines = f.readlines()
f.close()
if len(lines)==0:
print("???")
continue
constraints = [list(map(int, lines[k].split())) for k in range(1, n*2+1)]
        ans = [list(map(int, lines[k + n*2 + 1].split())) for k in range(n)]  # answers follow the header line and 2n constraint lines
f = open("Results/Result_6_"+str(i)+"/Result_"+str(j)+".txt", 'r')
lines = f.readlines()
f.close()
if len(lines)==0:
print("???")
continue
bpans = [list(map(int, lines[k+n*2+1].split())) for k in range(n)]
bpsim = 0
for k in range(n):
for l in range(n):
if ans[k][l] == bpans[k][l]:
bpsim+=1
bpsumsim += bpsim/n/n
totalsumright += checkBoard(ans, constraints)/(2*n)
bpsumright += checkBoard(bpans, constraints)/(2*n)
bpsumsim /= 100
    res[i//5].append(bpsumsim)
    res[i//5].append(bpsumright/100)
    print(bpsumsim, bpsumright/100)
df = pd.DataFrame(res, columns=['sim', 'right'])
df.to_csv("BPResult8.csv")
# write_wb = Workbook()
# write_ws = write_wb.create_sheet('Result')
# for i in range(n):
# for j in range(len(res[0])):
# write_ws.cell(j+2, i+5, res[i][j])
# write_wb.save("Graph_15.xlsx")
|
ilesejin/ECSR_Nonogram
|
NonogramGrapher.py
|
NonogramGrapher.py
|
py
| 2,231
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3777146121
|
from django.shortcuts import render
from cowsay_app.models import Input
from cowsay_app.forms import InputForm
import subprocess
# I mainly used this source to figure out subprocess:
# https://linuxhint.com/execute_shell_python_subprocess_run_method/
# I also used Stackoverflow and Python docs
# Also found some useful stuff on Stackoverflow for doing the history:
# https://stackoverflow.com/questions/47428403/how-to-get-the-last-10-item-data-in-django
def index(request):
if request.method == "POST":
new_input = InputForm()
form = InputForm(request.POST)
if form.is_valid():
data = form.cleaned_data
Input.objects.create(
input=data.get('input')
)
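            # Passing the command as a list (without shell=True) keeps the
            # user-supplied text out of shell parsing, avoiding injection.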
cow = subprocess.run(
['cowsay', data['input']], capture_output=True
).stdout.decode("utf-8")
return render(request, "index.html", {'form': new_input, 'cow': cow})
form = InputForm()
return render(request, "index.html", {"title": "Welcome to Cowsay!", "form": form})
def history(request):
cowsay_history = Input.objects.order_by('-id')[:10]
return render(request, 'history.html', {'cowsay_history': cowsay_history})
|
pokeyjess/cowsay
|
cowsay_app/views.py
|
views.py
|
py
| 1,247
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6191154878
|
#! /usr/bin/env python
"""
Compute the transmission and reflection probabilities of a
particle with a given mass and energy encountering a potential step.
Leon Hostetler, Feb. 14, 2017
USAGE: python quantum_step.py
"""
from __future__ import division, print_function
# Constants
m = 9.11e-31 # Mass of particle (kg)
eV = 1.6022e-19 # Convert eV to Joules
E = 10.0*eV # Energy of incoming particle (J)
V = 9.0*eV # Height of potential step (J)
h = 1.0546e-34 # h-bar (m^2 kg / s)
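# Note: these formulas assume E > V. For E < V the wavenumber k2 becomes
# imaginary and the transmission probability for the step is zero.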
# Calculations
k1 = (2*m*E)**(1/2)/h
k2 = ((2*m*(E-V))**(1/2))/h
T = (4*k1*k2)/(k1+k2)**2 # Transmission probability
R = ((k1-k2)/(k1+k2))**2 # Reflection probability
# Print results
print("The transmission probability is ", "{0:.2f}".format(T), ".", sep="")
print("The reflection probability is ", "{0:.2f}".format(R), ".", sep="")
print("As a check, the total probability is the sum of the two: ", "{0:.2f}".format(T+R), ".", sep="")
|
leonhostetler/undergrad-projects
|
computational-physics/01_basic_calculations/quantum_step.py
|
quantum_step.py
|
py
| 960
|
python
|
en
|
code
| 0
|
github-code
|
6
|
16675031691
|
import os
import syslog
import time
import traceback
import support.cmd_exe
from vmlib import fwprint
gips_connects = {}
gips_state = {}
def pause_vm(uuid):
cmd = '/usr/bin/python /usr/vmd/glusterfs/connect_serial0.py /var/run/%s/monit.sock stop' % (uuid)
fwprint( cmd)
os.system(cmd)
def cont_vm(uuid):
cmd = '/usr/bin/python /usr/vmd/glusterfs/connect_serial0.py /var/run/%s/monit.sock cont' % (uuid)
    fwprint(cmd)
os.system(cmd)
def pause_vms():
cmd = '''find /var/run -name 'monit.sock' '''
output = support.cmd_exe.cmd_exe(cmd)
if not output[0]:
return
lines = output[1]['stdout']
for line in lines:
line = line.strip()
        if len(line) != 40:
continue
uuid = line[9:-11]
pause_vm(uuid)
def cont_vms():
cmd = '''find /var/run -name 'monit.sock' '''
output = support.cmd_exe.cmd_exe(cmd)
if not output[0]:
return
lines = output[1]['stdout']
for line in lines:
line = line.strip()
        if len(line) != 40:
continue
uuid = line[9:-11]
cont_vm(uuid)
def update_route_connect(host_ip, flag):
    fwprint('route rrrr state ' + str(host_ip) + ' ' + str(flag))
if host_ip not in gips_connects:
gips_connects[host_ip] = True
if flag != gips_connects.get(host_ip):
gips_connects[host_ip] = flag
        fwprint('route rrrr state change')
if flag:
gips_state[host_ip] = 'route starting'
cont_vms()
else:
gips_state[host_ip] = 'route downing'
pause_vms()
return
def route_connect(host_ip):
num = 0
while True:
num = num + 1
cmd = "ping %s -c 1 -W 1 > /dev/null" % host_ip
if 0 == os.system(cmd):
update_route_connect(host_ip, True)
break
time.sleep(2)
        if num >= 2:
            break
    if num >= 2:
        update_route_connect(host_ip, False)
return True
def loop_route_state(host_ip):
while True:
try:
route_connect(host_ip)
except:
syslog.syslog(syslog.LOG_ERR,'loop_route_state: '+str(traceback.format_exc()))
time.sleep(3)
|
sun7shines/GlusterFS
|
glusterfs/vm_route.py
|
vm_route.py
|
py
| 2,215
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73644652346
|
import structure.concrete.类型 as type
'''
Partial coefficients (部分系数)
'''
def 外形系数(t: type.钢筋种类) -> float:  # surface-shape coefficient by rebar type
s = type.钢筋种类
switch = {
s.带勾光面钢筋 : 0.16,
s.带肋钢筋 : 0.14,
s.螺旋肋钢丝 : 0.13,
s.三股钢绞线 : 0.16,
s.七股钢绞线 : 0.17
}
return switch[t]
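# Example usage (sketch):
# 外形系数(type.钢筋种类.带肋钢筋)  # -> 0.14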
|
TheVeryDarkness/structure
|
concrete/附录.py
|
附录.py
|
py
| 359
|
python
|
zh
|
code
| 0
|
github-code
|
6
|
38899572282
|
import pygame
import time
import random
pygame.init()
pygame.font.init()
myfont = pygame.font.SysFont('Comic Sans MS', 30)
screen = pygame.display.set_mode((1280,720))
done = False
p1_x=30
p1_y= screen.get_height()-60
#make player
class Player:
def __init__(self,x,y):
self.x=x
self.y=y
def moveLeft(self):
if self.x>0:
self.x-=2
def moveRight(self):
if self.x<screen.get_width()-60:
self.x+=2
def draw(self):
pygame.draw.rect(screen, (255,255,255), pygame.Rect(self.x,self.y,60,60))
class Egg:
def __init__(self):
self.x=random.randint(0,screen.get_width()-30)
self.y=0
self.incr=1
    def update(self):
        # y becomes a float after the 1.1x speed-up, so compare with >=
        # rather than ==, or the egg would never reset at the bottom
        if self.y >= screen.get_height()-30:
            self.__init__()
        self.y += self.incr
        self.incr *= 1.1
def draw(self):
pygame.draw.rect(screen, (255,255,255), pygame.Rect(self.x,self.y,30,30))
p1 = Player(p1_x,p1_y)
egg1 = Egg()
clock = pygame.time.Clock()
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pressed=pygame.key.get_pressed()
#movement
if pressed[pygame.K_a] : p1.moveLeft()
if pressed[pygame.K_d] : p1.moveRight()
screen.fill((0,0,0))
#screen.blit(score, ((screen.get_width()/2)-20,0))
p1.draw()
egg1.draw()
egg1.update()
    pygame.display.flip()
    clock.tick(60)  # cap the frame rate so movement speed is consistent across machines
|
mahi-pas/Egg-Catcher
|
catcher.py
|
catcher.py
|
py
| 1,381
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21397154599
|
import os
import backoff
import pytest
from racetrack_commons.dir import project_root
from racetrack_client.client.deploy import send_deploy_request
from racetrack_client.client_config.auth import set_user_auth
from racetrack_client.client_config.client_config import ClientConfig
from racetrack_client.utils.request import Requests, ResponseError
from racetrack_client.utils.auth import RT_AUTH_HEADER, is_auth_required
from racetrack_commons.entities.dto import EscDto
from racetrack_commons.entities.esc_client import EscRegistryClient
from racetrack_commons.entities.job_client import JobRegistryClient
from e2e.utils import ADMIN_AUTH_TOKEN, INTERNAL_AUTH_TOKEN, PYTHON_PLUGIN_VERSION, _configure_env, _create_esc, _delete_workload, _wait_for_components, _install_plugin
TEST_SUITE = os.getenv('TEST_SUITE')
suite_auth = pytest.mark.skipif(
TEST_SUITE != 'auth' and TEST_SUITE != 'full', reason='TEST_SUITE value != auth,full'
)
@suite_auth
def test_deploy_job_chain():
_configure_env()
_wait_for_components()
_install_plugin(f'github.com/TheRacetrack/plugin-python-job-type=={PYTHON_PLUGIN_VERSION}')
esc = _create_esc()
_delete_workload('adder')
_deploy_and_verify('sample/python-class', 'adder', esc)
_verify_deployed_job_adder_response('adder', ADMIN_AUTH_TOKEN)
_delete_workload('python-chain')
_deploy_and_verify('sample/python-chain', 'python-chain', esc)
_make_wrongly_authenticated_request('adder')
@suite_auth
def test_deploy_unauthenticated():
_configure_env()
_wait_for_components()
_install_plugin(f'github.com/TheRacetrack/plugin-python-job-type=={PYTHON_PLUGIN_VERSION}')
lifecycle_url = os.environ['LIFECYCLE_URL']
expect_fail = is_auth_required(lifecycle_url)
sample_path = 'sample/python-class'
print(f'Deploying unauthenticated {sample_path} job...')
workdir = str(project_root() / sample_path)
config = ClientConfig()
set_user_auth(config, lifecycle_url, 'invalid')
if expect_fail:
with pytest.raises(ResponseError):
send_deploy_request(workdir, lifecycle_url=lifecycle_url, client_config=config, force=True)
else:
send_deploy_request(workdir, lifecycle_url=lifecycle_url, client_config=config, force=True)
@suite_auth
def test_deploy_wrong_authentication():
_configure_env()
_wait_for_components()
_install_plugin(f'github.com/TheRacetrack/plugin-python-job-type=={PYTHON_PLUGIN_VERSION}')
lifecycle_url = os.environ['LIFECYCLE_URL']
sample_path = 'sample/python-class'
print(f'Deploying with wrong authentication {sample_path} job...')
expect_fail = is_auth_required(lifecycle_url)
workdir = str(project_root() / sample_path)
config = ClientConfig()
# wrong token
user_auth = "eyJ1c2VybmFtZSI6ICJmb28iLCAidG9rZW4iOiAiOGJjMDkzMGEtNTA2Mi00MWFiLWE4MWQtNDVhNjg0OWIyYjg4In1="
set_user_auth(config, lifecycle_url, user_auth)
if expect_fail:
with pytest.raises(ResponseError):
send_deploy_request(workdir, lifecycle_url=lifecycle_url, client_config=config, force=True)
else:
send_deploy_request(workdir, lifecycle_url=lifecycle_url, client_config=config, force=True)
def _deploy(sample_path: str):
lifecycle_url = os.environ['LIFECYCLE_URL']
config = ClientConfig()
set_user_auth(config, lifecycle_url, ADMIN_AUTH_TOKEN)
print(f'Deploying {sample_path} job...')
workdir = str(project_root() / sample_path)
send_deploy_request(workdir, lifecycle_url=lifecycle_url, client_config=config, force=True)
def _deploy_and_verify(sample_path: str, job_name: str, esc: EscDto):
_deploy(sample_path)
print(f'Allowing a job {job_name} to ESC...')
erc = EscRegistryClient(auth_token=INTERNAL_AUTH_TOKEN)
erc.esc_allow_job(esc_id=esc.id, job_name=job_name)
esc_token = erc.get_esc_auth_token(esc.id)
if job_name == 'adder':
_verify_deployed_job_adder_response(job_name, esc_token)
elif job_name == 'python-chain':
frc = JobRegistryClient(auth_token=INTERNAL_AUTH_TOKEN)
frc.job_allow_job('python-chain', 'adder')
        _verify_deployed_job_chain_adder_response(job_name, esc_token)
_verify_job_logs(job_name, ADMIN_AUTH_TOKEN)
@backoff.on_exception(backoff.fibo, AssertionError, max_value=3, max_time=60, jitter=None)
def _verify_deployed_job_adder_response(job_name: str, auth_token: str):
print(f'Verifying {job_name} job response...')
pub_url = os.environ['PUB_URL']
url = f'{pub_url}/job/{job_name}/latest/api/v1/perform'
headers = {RT_AUTH_HEADER: auth_token}
r = Requests.post(url, json={'numbers': [40, 2]}, headers=headers)
assert r.ok, f'Job response: {r.status_code} {r.status_reason} for url {r.url}, content: {str(r.content)}'
output = r.json()
assert output == 42, 'Unexpected output returned by Job'
@backoff.on_exception(backoff.fibo, AssertionError, max_value=3, max_time=30, jitter=None)
def _verify_deployed_job_chain_adder_response(job_name: str, auth_token: str):
print(f'Verifying {job_name} job response...')
pub_url = os.environ['PUB_URL']
url = f'{pub_url}/job/{job_name}/latest/api/v1/perform'
r = Requests.post(url, json={'numbers': [40, 2.7]}, headers={RT_AUTH_HEADER: auth_token})
assert r.ok, f'Job response: {r.status_code} {r.status_reason} for url {r.url}, content: {str(r.content)}'
output = r.json()
assert output == 43, 'Unexpected output returned by Job'
@backoff.on_exception(backoff.fibo, ResponseError, max_value=3, max_time=60, jitter=None)
def _verify_job_logs(job_name: str, user_auth: str):
print(f'Verifying {job_name} logs...')
frc = JobRegistryClient(auth_token=user_auth)
logs = frc.get_runtime_logs(job_name, 'latest')
assert len(logs) > 1, 'Unexpected short log from Job'
def _make_wrongly_authenticated_request(job_name: str):
print(f'Verifying requests without authentication to {job_name}...')
pub_url = os.environ['PUB_URL']
url = f'{pub_url}/job/{job_name}/latest/api/v1/perform'
lifecycle_url = os.environ['LIFECYCLE_URL']
auth_required = is_auth_required(lifecycle_url)
# wrong auth token value
r = Requests.post(url, json={'numbers': [40, 2]}, headers={RT_AUTH_HEADER: 'MrNobody'})
if auth_required:
assert r.status_code == 401
else:
assert r.status_code == 200
# lack of auth token
r = Requests.post(url, json={'numbers': [40, 2]}, headers={})
if auth_required:
assert r.status_code == 401
else:
assert r.status_code == 200
|
TheRacetrack/racetrack
|
tests/e2e/test_auth.py
|
test_auth.py
|
py
| 6,570
|
python
|
en
|
code
| 27
|
github-code
|
6
|
40323903072
|
# WHILE LOOP
# A repetition structure that runs a block of code while the condition is true
# Syntax:
# while (condition):
#     code block.
#
# e.g.:
#controle = ""
#while (controle != "s"):
# print("a.Pay")
# print("b.Receive")
# print("c.Transfer")
# print("s.Exit")
# controle = input('Enter the desired option: ')
#print('Activity finished')
#WHILE WITH BREAK
# The break statement terminates the current loop and resumes execution at the next statement after the loop.
# The most common use is when some external condition fires and requires an immediate exit from the loop.
# The break command can be used in both while loops and for loops (see the short example at the end of this file).
cont = 20
while (cont > 0):
    print(f"the value of the variable is {cont}")
    cont -= 1
    if (cont == 11):
        break
print('Loop interrupted at value 11')
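# break in a for loop, for comparison:
# for i in range(5):
#     if i == 3:
#         break
#     print(i)  # prints 0, 1, 2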
|
Herley25/algoritmo_python
|
While.py
|
While.py
|
py
| 887
|
python
|
pt
|
code
| 0
|
github-code
|
6
|
15864287326
|
import pandas as pd
import matplotlib.pyplot as plt
# Set up the output screen
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = [20, 12]
# Read dataset
trainData = pd.read_csv('./train.csv')
# With outliers
plt.scatter(trainData.GarageArea, trainData.SalePrice, color='red')
plt.xlabel('Garage Area')
plt.ylabel('Sale Price')
plt.show()
# Delete the outlier value of GarageArea
outlier_drop = trainData[(trainData.GarageArea < 999) & (trainData.GarageArea > 111)]
# Display the scatter plot of GarageArea and SalePrice after deleting
plt.scatter(outlier_drop.GarageArea, outlier_drop.SalePrice, color='green')
plt.xlabel('Garage Area')
plt.ylabel('Sale Price')
plt.show()
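# Alternative (sketch): derive the cutoffs from the IQR instead of the fixed 111/999 bounds.
# q1, q3 = trainData.GarageArea.quantile([0.25, 0.75])
# iqr = q3 - q1
# outlier_drop = trainData[trainData.GarageArea.between(q1 - 1.5 * iqr, q3 + 1.5 * iqr)]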
|
nikolozdz/Linear-Regression-Models-ICP5
|
Task 1.py
|
Task 1.py
|
py
| 690
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42479620713
|
"""Collection of common layers."""
import tensorflow as tf
class Layers(object):
"""Collection of computational NN layers."""
@staticmethod
def linear(prev_layer, out_dim, name="linear"):
"""Create a linear fully-connected layer.
Parameters
----------
prev_layer : tf.Tensor
Last layer's output tensor.
        out_dim : int
            Number of output units.
        name : str, optional (default = "linear")
            Name scope for the layer ops.
Returns
-------
tuple (
tf.Tensor : Linear output tensor
tf.Tensor : Linear weights variable
tf.Tensor : Linear biases variable
)
"""
with tf.name_scope(name):
in_dim = prev_layer.get_shape()[1].value
W = tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=0.1))
b = tf.Variable(tf.constant(0.1, shape=[out_dim]))
out = tf.add(tf.matmul(prev_layer, W), b)
return (out, W, b)
@staticmethod
def regularization(variables, regtype, regcoef, name="regularization"):
"""Compute the regularization tensor.
Parameters
----------
variables : list of tf.Variable
List of model variables.
regtype : str
Type of regularization. Can be ["none", "l1", "l2"]
regcoef : float,
Regularization coefficient.
name : str, optional (default = "regularization")
Name for the regularization op.
Returns
-------
        tf.Tensor : Regularization tensor, or None if regtype is "none".
"""
with tf.name_scope(name):
if regtype != 'none':
regs = tf.constant(0.0)
for v in variables:
if regtype == 'l2':
regs = tf.add(regs, tf.nn.l2_loss(v))
elif regtype == 'l1':
regs = tf.add(regs, tf.reduce_sum(tf.abs(v)))
return tf.multiply(regcoef, regs)
else:
return None
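# Example usage (sketch, TF1-style graph mode):
# x = tf.placeholder(tf.float32, [None, 784])
# out, W, b = Layers.linear(x, 10)
# reg = Layers.regularization([W, b], 'l2', 1e-4)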
class Evaluation(object):
"""Collection of evaluation methods."""
@staticmethod
def accuracy(mod_y, ref_y, summary=True, name="accuracy"):
"""Accuracy computation op.
Parameters
----------
mod_y : tf.Tensor
Model output tensor.
ref_y : tf.Tensor
Reference input tensor.
summary : bool, optional (default = True)
Whether to save tf summary for the op.
Returns
-------
tf.Tensor : accuracy op. tensor
"""
with tf.name_scope(name):
mod_pred = tf.argmax(mod_y, 1)
correct_pred = tf.equal(mod_pred, tf.argmax(ref_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
if summary:
tf.summary.scalar('accuracy', accuracy)
return accuracy
|
gabrieleangeletti/Deep-Learning-TensorFlow
|
yadlt/core/layers.py
|
layers.py
|
py
| 2,874
|
python
|
en
|
code
| 965
|
github-code
|
6
|
6106005547
|
"""
Usage:
python parser.py <filename>
example:
python parser.py 2012_12_15.txt
"""
class FileReader(object):
def __init__(self):
self.count = 0
    def process(self, filename):
        """
        This is a file reader.
        Arguments:
            filename: Name of the file that we are reading
        Returns the set of unique city values found in the file.
        """
        self.count = self.count + 1
        uniques = set()
        with open(filename) as fh:
            for line_no, line in enumerate(fh):
                if line_no == 0:
                    # Skip the header
                    continue
                line_data = line.split()
                city, state, shape = self.parse_record(line_no, line_data)
                uniques.add(city)
        return uniques
def parse_record(self, line_no, data):
record = {}
record['line_no'] = line_no
record['date'] = data[0]
record['time'] = data[1]
record['city'] = data[2]
record['state'] = data[3]
record['shape'] = data[4]
return record['city'], record['state'], record['shape']
def add(self, x, y):
self.count = self.count + 1
return x + y
|
justincely/miami-python
|
day_2/fp.py
|
fp.py
|
py
| 1,227
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14241805756
|
from sys import exit
from time import sleep, time
from random import randint
import pygame
from pygame.constants import RESIZABLE
# Tetramino definitions on a 4x4 grid. 1 means the tile exists.
TETRAMINO_I = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1), (0, 0, 0, 0)),
((0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1), (0, 0, 0, 0)),
((0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0)))
TETRAMINO_J = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 0), (0, 0, 1, 0)),
((0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 0, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (1, 0, 0, 0), (1, 1, 1, 0), (0, 0, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0), (1, 1, 0, 0)))
TETRAMINO_L = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 0), (1, 0, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 1, 0), (1, 1, 1, 0), (0, 0, 0, 0)),
((0, 0, 0, 0), (1, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0)))
TETRAMINO_O = (((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 1, 0)))
TETRAMINO_S = (((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (1, 1, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0), (0, 0, 1, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (1, 1, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0), (0, 0, 1, 0)))
TETRAMINO_T = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (1, 1, 0, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (1, 1, 1, 0), (0, 0, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0), (0, 1, 0, 0)))
TETRAMINO_Z = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 0, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 1, 0), (0, 1, 1, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 0, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 1, 0), (0, 1, 1, 0), (0, 1, 0, 0)))
# Array used for randomly picking tetraminos
TETRAMINOS = [(TETRAMINO_I, (0xFF, 0xFF, 0x00)), (TETRAMINO_J, (0xFF, 0x00, 0x00)),
(TETRAMINO_L, (0xFF, 0x00, 0xFF)), (TETRAMINO_O, (0x00, 0xFF, 0x00)),
(TETRAMINO_S, (0x00, 0xFF, 0xFF)), (TETRAMINO_T, (0x00, 0x00, 0xFF)),
(TETRAMINO_Z, (0x01, 0x82, 0x50))]
# Constant colors
COLOR_BACKGROUND = (0x22, 0x22, 0x22)
COLOR_SHADOW = (0x44, 0x44, 0x44)
COLOR_BORDER = (0xAA, 0xAA, 0xAA)
COLOR_FLASH = (0xFF, 0xFF, 0xFF)
COLOR_PAUSE = (0x00, 0x00, 0x00)
COLOR_TEXT = (0xFF, 0xFF, 0xFF)
# Max framerate
FRAMERATE = 1 / 60
# Time to show that a line has been cleared
FLASH_TIME = 0.5
PREVIEW_OFFSET = 4
KEYDOWN_TIME_CONST = 0.036
# Definition for a tile
class TetrisTile(pygame.Rect):
def __init__(self, left, top, width, height, empty, color):
super().__init__(left, top, width, height)
self.empty = empty
self.color = color
class TetrisGame:
def __init__(self):
self.width = 500
self.height = 500
self.rows = 22
self.cols = 10
self.speed = 0.7
self.scale = 11
self.tile_length = 15
self.fallspeed = 1
pygame.init()
self.screen = pygame.display.set_mode(
(self.width, self.height), RESIZABLE)
# Loop gameplay until the player closes the window
# Initialize grid
welcome = True
while True:
            self.grid = [
                [None] * self.cols for _ in range(self.rows)
            ]
for y in range(self.rows):
for x in range(self.cols):
dy = y * self.tile_length + self.tile_length
dx = x * self.tile_length + self.tile_length
self.grid[y][x] = TetrisTile(
dx, dy, self.tile_length, self.tile_length, True, COLOR_BACKGROUND
)
# Create the grid for the tetris tile preview
self.preview_grid = [[None] * 4 for _ in range(4)]
for y in range(4):
for x in range(4):
dy = y * self.tile_length
dx = x * self.tile_length + \
(self.cols + PREVIEW_OFFSET) * self.tile_length
self.preview_grid[y][x] = pygame.Rect(
dx, dy, self.tile_length, self.tile_length)
# Draw the board
self.draw_everything(init=True, resize=True, welcome=welcome)
pygame.display.flip()
# Initial wait for user to start the game
if welcome:
welcome = False
new_game = False
while not new_game:
frame_time = time()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.sysexit()
elif event.type == pygame.WINDOWRESIZED:
self.draw_everything(resize=True, welcome=True, init=True)
pygame.display.flip()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
new_game = True
delta = time() - frame_time
if delta < FRAMERATE:
sleep(FRAMERATE - delta)
self.draw_everything(init=True)
# Start the game
self.eventloop()
self.draw_everything(gameover=True)
pygame.display.flip()
new_game = False
while not new_game:
frame_time = time()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.sysexit()
elif event.type == pygame.WINDOWRESIZED:
self.draw_everything(resize=True, gameover=True)
pygame.display.flip()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
new_game = True
delta = time() - frame_time
if delta < FRAMERATE:
sleep(FRAMERATE - delta)
# Main event loop. Will block until the game ends.
def eventloop(self):
self.next_tetramino = None
self.cur_keydown = None
self.keydown_time = None
self.do_next_tetramino()
self.draw(color=COLOR_SHADOW, y=self.lowest_y())
self.draw(next=True)
self.draw()
pygame.display.flip()
gravity_time = time()
while True:
frame_time = time()
self.handle_events()
if time() - gravity_time >= self.fallspeed:
if self.can_be_placed(y=self.y + 1):
self.draw(color=COLOR_BACKGROUND, y=self.lowest_y())
self.draw(color=COLOR_BACKGROUND)
self.y += 1
self.draw(color=COLOR_SHADOW, y=self.lowest_y())
self.draw()
pygame.display.flip()
else:
self.place()
self.do_next_tetramino()
self.draw(next=True)
self.draw(color=COLOR_SHADOW, y=self.lowest_y())
self.draw()
pygame.display.flip()
if not self.can_be_placed():
return
gravity_time = time()
delta = time() - frame_time
if delta < FRAMERATE:
sleep(FRAMERATE - delta)
# Handle game and window controls
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.sysexit()
elif event.type == pygame.WINDOWRESIZED:
self.draw_everything(resize=True)
elif event.type == pygame.KEYUP:
self.cur_keydown = None
self.keydown_time = None
elif event.type == pygame.KEYDOWN:
self.cur_keydown = event.key
self.keydown_time = time()
if event.key in (pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT):
self.move(event.key)
if event.key == pygame.K_SPACE:
self.autoplace()
if event.key == pygame.K_RETURN:
self.pause()
if self.cur_keydown == pygame.K_DOWN and self.keydown_time and time() - self.keydown_time >= KEYDOWN_TIME_CONST:
self.keydown_time = time()
if self.cur_keydown == pygame.K_DOWN:
self.move(self.cur_keydown)
def pause(self):
self.draw_everything(paused=True)
pygame.display.flip()
while True:
frame_time = time()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.sysexit()
elif event.type == pygame.WINDOWRESIZED:
self.draw_everything(resize=True, paused=True)
pygame.display.flip()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
self.draw_everything()
pygame.display.flip()
return
delta = time() - frame_time
if delta < FRAMERATE:
sleep(FRAMERATE - delta)
# Move the current tetramino in a given direction based on user input
def move(self, direction):
newx = self.x
newy = self.y
newr = self.r
if direction == pygame.K_DOWN:
newy += 1
elif direction == pygame.K_UP:
newr = (self.r + 1) % 4
elif direction == pygame.K_LEFT:
newx -= 1
elif direction == pygame.K_RIGHT:
newx += 1
if self.can_be_placed(x=newx, y=newy, r=newr):
self.draw(color=COLOR_BACKGROUND, y=self.lowest_y())
self.draw(color=COLOR_BACKGROUND)
self.x, self.y, self.r = newx, newy, newr
self.draw(color=COLOR_SHADOW, y=self.lowest_y())
self.draw()
pygame.display.flip()
# Draw the current tetramino
# kwargs modify x, y, r drawn
def draw(self, **kwargs):
if 'next' in kwargs and kwargs['next']:
dy = 2
dx = 0
for row in self.next_tetramino[0][2:]:
for i in row:
if i:
pygame.draw.rect(
self.screen, self.next_color, self.preview_grid[dy][dx])
else:
pygame.draw.rect(
self.screen, COLOR_BACKGROUND, self.preview_grid[dy][dx])
dx += 1
dy += 1
dx = 0
        elif None not in kwargs.values():
dy = self.y if 'y' not in kwargs else kwargs['y']
dx = self.x if 'x' not in kwargs else kwargs['x']
color = self.color if 'color' not in kwargs else kwargs['color']
for row in self.tetramino[self.r if 'r' not in kwargs else kwargs['r']]:
for i in row:
if i:
pygame.draw.rect(self.screen, color, self.grid[dy][dx])
dx += 1
dy += 1
dx = self.x if 'x' not in kwargs else kwargs['x']
# Place the current tetramino
def place(self):
dy = self.y
dx = self.x
for row in self.tetramino[self.r]:
for i in row:
if i:
self.grid[dy][dx].empty = False
self.grid[dy][dx].color = self.color
dx += 1
dy += 1
dx = self.x
self.lineclear()
# Place the current tetramino immediately (pressing spacebar)
def autoplace(self):
self.draw(color=COLOR_BACKGROUND)
self.y = self.lowest_y()
self.draw()
self.place()
self.do_next_tetramino()
self.draw(next=True)
self.draw(color=COLOR_SHADOW, y=self.lowest_y())
self.draw()
pygame.display.flip()
# Clear filled rows
def lineclear(self):
to_clear = []
not_to_clear = []
for row in self.grid:
if any(tile.empty for tile in row):
not_to_clear.append(row)
else:
to_clear.append(row)
# Return if nothing to do
if len(to_clear) == 0:
return
# Do a flash "animation"
for row in to_clear:
pygame.draw.rect(self.screen, COLOR_FLASH, pygame.Rect(
row[0].left, row[0].top, row[-1].left +
self.tile_length - row[0].left, self.tile_length
))
pygame.display.flip()
sleep(FLASH_TIME / 3)
for row in to_clear:
for tile in row:
pygame.draw.rect(self.screen, tile.color, tile)
pygame.display.flip()
sleep(FLASH_TIME / 3)
for row in to_clear:
pygame.draw.rect(self.screen, COLOR_FLASH, pygame.Rect(
row[0].left, row[0].top, row[-1].left +
self.tile_length - row[0].left, self.tile_length
))
pygame.display.flip()
sleep(FLASH_TIME / 3)
# self.grid is now a reference to to_clear
# rows in not_to_clear will be added later
self.grid = to_clear
amt_rows_cleared = len(to_clear)
# Reset rows in to_clear to blank and move them to the top
for y in range(len(to_clear)):
for x in range(self.cols):
dy = y * self.tile_length + self.tile_length
dx = x * self.tile_length + self.tile_length
to_clear[y][x].empty = True
to_clear[y][x].color = COLOR_BACKGROUND
to_clear[y][x].update(
dx, dy, self.tile_length, self.tile_length)
# Update the existing rows
for i in range(len(not_to_clear)):
for x in range(self.cols):
dy = (i + amt_rows_cleared) * \
self.tile_length + self.tile_length
dx = x * self.tile_length + self.tile_length
not_to_clear[i][x].update(
dx, dy, self.tile_length, self.tile_length)
self.grid.append(not_to_clear[i])
# Finally, redraw everything
for row in self.grid:
for tile in row:
pygame.draw.rect(self.screen, tile.color, tile)
pygame.display.flip()
# Select a new random tetramino
def do_next_tetramino(self):
if self.next_tetramino:
self.tetramino = self.next_tetramino
self.color = self.next_color
else:
i = randint(0, len(TETRAMINOS) - 1)
self.tetramino = TETRAMINOS[i][0]
self.color = TETRAMINOS[i][1]
i = randint(0, len(TETRAMINOS) - 1)
self.next_tetramino = TETRAMINOS[i][0]
self.next_color = TETRAMINOS[i][1]
self.x = (self.cols - 1) // 2 - 1
self.y = 0
self.r = 0
if self.fallspeed > 0.1:
self.fallspeed -= 0.005
elif self.fallspeed > 0.05:
self.fallspeed -= 0.0001
# Calculate the lowest (greatest) possible y value for the current tetramino
def lowest_y(self):
dy = self.y + 1
while self.can_be_placed(y=dy):
dy += 1
dy -= 1
return dy
# Return True/False if the current tetramino can/can't be place in its current position
# Modify x, y, or the rotation depending on kwargs
def can_be_placed(self, **kwargs):
        dy = self.y if 'y' not in kwargs else kwargs['y']
        dx = self.x if 'x' not in kwargs else kwargs['x']
        dr = self.r if 'r' not in kwargs else kwargs['r']
for row in self.tetramino[dr]:
for i in row:
if i:
if (dy not in range(0, self.rows) or dx not in range(0, self.cols)) or not self.grid[dy][dx].empty:
return False
dx += 1
dy += 1
        dx = self.x if 'x' not in kwargs else kwargs['x']
return True
def draw_everything(self, **kwargs):
if kwargs.get('resize'):
width, height = self.screen.get_size()
t_h = height // (self.rows + 2)
t_w = width // (self.cols + PREVIEW_OFFSET + 6)
new_tile_length = min(t_h, t_w)
if new_tile_length != self.tile_length:
self.tile_length = new_tile_length
for y in range(self.rows):
for x in range(self.cols):
dy = y * self.tile_length + self.tile_length
dx = x * self.tile_length + self.tile_length
self.grid[y][x].update(
dx, dy, self.tile_length, self.tile_length
)
for y in range(4):
for x in range(4):
dy = y * self.tile_length
dx = x * self.tile_length + \
(self.cols + PREVIEW_OFFSET) * self.tile_length
self.preview_grid[y][x].update(
dx, dy, self.tile_length, self.tile_length
)
self.screen.fill(COLOR_BACKGROUND)
border = pygame.Rect(0, 0,
self.tile_length * (self.cols + 2),
self.tile_length * (self.rows + 2))
pygame.draw.rect(self.screen, COLOR_BORDER, border)
if kwargs.get('paused'):
curtain = pygame.Rect(
self.tile_length,
self.tile_length,
self.cols * self.tile_length,
self.rows * self.tile_length
)
pygame.draw.rect(self.screen, COLOR_PAUSE, curtain)
font1 = pygame.font.Font(
'freesansbold.ttf',
int(self.tile_length * 1.7)
)
font2 = pygame.font.Font(
'freesansbold.ttf',
int(self.tile_length * 1.3)
)
s1 = font1.render("PAUSED", True, COLOR_TEXT)
s2 = font2.render("PRESS ENTER", True, COLOR_TEXT)
s3 = font2.render("TO UNPAUSE", True, COLOR_TEXT)
self.screen.blit(s1, (
(self.tile_length * (self.cols // 2) + self.tile_length) -
s1.get_size()[0] // 2,
            (self.tile_length * (self.rows // 2)) - s1.get_size()[1]
))
self.screen.blit(s2, (
(self.tile_length * (self.cols // 2) + self.tile_length)
- s2.get_size()[0] // 2,
(self.tile_length * (self.rows // 2)) + s2.get_size()[1] // 2
))
self.screen.blit(s3, (
(self.tile_length * (self.cols // 2) + self.tile_length)
- s3.get_size()[0] // 2,
(self.tile_length * (self.rows // 2)) +
s2.get_size()[1] // 2 + s3.get_size()[1]
))
else:
for row in self.grid:
for tile in row:
pygame.draw.rect(self.screen, tile.color, tile)
if not kwargs.get('init'):
self.draw(color=COLOR_SHADOW, y=self.lowest_y())
self.draw()
self.draw(next=True)
if kwargs.get('gameover') or kwargs.get('welcome'):
font1 = pygame.font.Font(
'freesansbold.ttf',
int(self.tile_length * 1.5)
)
font2 = pygame.font.Font(
'freesansbold.ttf',
int(self.tile_length * 0.9)
)
s1 = font1.render(
"GAME OVER" if kwargs.get('gameover') else "WELCOME",
True,
COLOR_TEXT
)
s2 = font2.render("PRESS ENTER TO", True, COLOR_TEXT)
s3 = font2.render("START A NEW GAME", True, COLOR_TEXT)
            text_begin = (self.tile_length * (self.rows // 2)) - s1.get_size()[1]
text_end = (self.tile_length * (self.rows // 2)) + \
s2.get_size()[1] // 2 + s3.get_size()[1]
background = pygame.Rect(
self.tile_length,
text_begin - self.tile_length,
self.cols * self.tile_length,
(text_end + s3.get_size()[1] + self.tile_length) -
(text_begin - self.tile_length)
)
pygame.draw.rect(self.screen, COLOR_PAUSE, background)
self.screen.blit(s1, (
(self.tile_length * (self.cols // 2) + self.tile_length) -
s1.get_size()[0] // 2,
text_begin
))
self.screen.blit(s2, (
(self.tile_length * (self.cols // 2) + self.tile_length)
- s2.get_size()[0] // 2,
(self.tile_length * (self.rows // 2)) +
s2.get_size()[1] // 2
))
self.screen.blit(s3, (
(self.tile_length * (self.cols // 2) + self.tile_length)
- s3.get_size()[0] // 2,
text_end
))
font = pygame.font.Font(
'freesansbold.ttf', int(self.tile_length * 1.5))
text_next = font.render("NEXT", True, COLOR_TEXT)
self.screen.blit(text_next,
(self.tile_length * (self.cols + PREVIEW_OFFSET), self.tile_length // 2))
def sysexit(self):
pygame.quit()
exit()
if __name__ == "__main__":
TetrisGame()
# ===========================================================================
# Source: dmcdo/Pygame-Games :: tetris.pyw
# ===========================================================================
# dictionary inside list
a = [{'Name':'Ram','Age':34,'Add':'Kathmandu'},
{'Name':'Shyam','Age':56,'Add':'Bhaktapur'},
{'Name':'Hari','Age':89,'Add':'Lalitpur'}]
print(a[0])
b = {'Name':'Hari','Age':89,'Add':'Lalitpur'}
a.append(b)
print(a)
info = []
n = int(input("Enter n = "))
for i in range(n):
name = input("Enter name = ")
age = int(input("Enter age = "))
add = input("Enter add = ")
data = {'Name':name,'Age':age,'Add':add}
info.append(data)
print(info)
a = [{'Name': 'Ram', 'Age': 45, 'Add': 'Kathmandu'},
{'Name': 'Shyma', 'Age': 89, 'Add': 'Kathmandu'},
{'Name': 'Nabin', 'Age': 23, 'Add': 'Lalitpur'}]
a[0] = {'Name': 'Rama', 'Age': 25, 'Add': 'Bara'}
print(a)
a = [{'Name': 'Ram', 'Age': 45, 'Add': 'Kathmandu'},
{'Name': 'Shyma', 'Age': 89, 'Add': 'Kathmandu'},
{'Name': 'Nabin', 'Age': 23, 'Add': 'Lalitpur'}]
a[0]['Name'] = 'Rama'
print(a)
# dict inside dict
d = {1:{'Name':'Ram','Per':80,'Pos':2},
2:{'Name':'Shyam','Per':60,'Pos':20},
3:{'Name':'Nabin','Per':78,'Pos':5}
}
print(d)
d = {}
# d[<key>] = {<key>:<value>}
d[1] = {'Name': 'Ram', 'Per': 80, 'Pos': 2}
d[2] = {'Name': 'Shyam', 'Per': 60, 'Pos': 20}
print(d)
d = {'sn':[],'name':[],'category':[]}
# WAP to create dict inside dict
d = {}
n = int(input("Enter n = "))
for i in range(1,n+1):
name = input("Enter name = ")
per = int(input("Enter per = "))
pos = int(input("Enter pos = "))
d[i] = {'Name':name,'Per':per,'Pos':pos}
print(d)
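# A hedged extra example (not part of the original lesson): looping over the
# nested dictionary built above prints one line per student record.
for sn, record in d.items():
    print(sn, record['Name'], record['Per'], record['Pos'])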
d = {'sn':[1,2],
'name':['Coke','Momo'],
'quantity':[3,3],
'price':[200,150],
'total':[600,450]}
# target structure: a dict of dicts keyed by serial number
d = {1: {'Name': 'Ram', 'quantity': 78, 'price': 2, 'total': 600},
     2: {'Name': 'Hari', 'quantity': 78, 'price': 2, 'total': 600}}
# ===========================================================================
# Source: Roshan2059/learning-python-with-django :: day15-c.py
# ===========================================================================
from jinja2 import Environment, FileSystemLoader
import yaml
import os.path
ENV = Environment(loader=FileSystemLoader('./'))
script_path = 'SCRIPTS/'
script = os.path.join(script_path, 'script.txt')
with open("config.yaml") as _:
yaml_dict = yaml.load(_)
template = ENV.get_template("template.text")
with open(script, 'w') as outfile:
temp = template.render(config=yaml_dict)
outfile.write(temp)
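# Hedged sketch of matching inputs (the real config.yaml and template.text
# are not shown in this snippet, so these names and keys are illustrative):
#
#   # config.yaml
#   prefix_lists:
#     - name: PL-EXAMPLE
#       entries: [10.0.0.0/8]
#
#   # template.text
#   {% for pl in config.prefix_lists %}
#   ip prefix-list {{ pl.name }} permit {{ pl.entries[0] }}
#   {% endfor %}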
# ===========================================================================
# Source: dancwilliams/Prefix_List_Script :: EXTRA_SCRIPTS/MANUAL_CREATE/generate_config.py
# ===========================================================================
# Python 3: the built-in set replaces the old sets.Set
def prod_exists(x):
x = str(x)
for i in range(1,5):
for j in range(1, 8 - i):
if (int(x[:i]) * int(x[i:i + j]) == int(x[i+j:])):
return int(x[i+j:])
return 0
facs = {}
def fac(x):
    """Memoized factorial."""
    try:
        return facs[x]
    except KeyError:
        if x == 1 or x == 0:
            facs[x] = 1
            return 1
        facs[x] = x * fac(x - 1)
        return facs[x]
def perm_gen(domain,nr):
ret = ""
l = len(domain)
for i in range(0,l):
        t = int(nr) // fac(l - i - 1)  # integer division: t indexes into domain
        nr = int(nr) % fac(l - i - 1)
ret += str(domain[t])
domain.remove(domain[t])
return ret
def gen_perms(domain):
domainlist = []
domainlist.extend(str(domain))
domainlist.sort()
perms = []
for i in range(0, fac(len(domainlist))):
calldomain = domainlist[:]
perms.append(int(perm_gen(calldomain,i)))
return perms
perms = gen_perms(123456789)
print(sum(set(prod_exists(i) for i in perms)))
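# Worked example (hedged): for the permutation 391867254, prod_exists finds
# the split 39 * 186 = 7254 and returns 7254; summing the set of all such
# products over the 1-9 pandigital permutations yields the answer.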
# ===========================================================================
# Source: schroeji/Projekt-Euler :: prob32.py
# ===========================================================================
#import logging
class stopwatch:
"""usage:
swgen = stopwatch.template("[INTEGRATION]")
...
with swgen("Running xxx") as _:
run_stuff()
with swgen("Finalizing xxx") as _:
finish_stuff()
"""
def __init__(self, message, logger):
self.logger = logger
self.pre_message = message
if len(message) > 1:
self.post_message = message[0].lower() + message[1:]
else:
self.post_message = message
def __enter__(self):
from time import time
self.logger.info(self.pre_message)
self.timer = time()
return self
def tqdm_range(self, item_list, **kwargs):
from tqdm.auto import tqdm
return tqdm(item_list, desc=self.pre_message, **kwargs)
    def tqdm(self, **kwargs):
        from tqdm.auto import tqdm
        return tqdm(desc=self.pre_message, **kwargs)
def __exit__(self, exc_type, exc_val, exc_tb):
from time import time
delta = time() - self.timer
self.logger.info("Finished %s in %.2f seconds" % (self.post_message, delta))
    @staticmethod
    def template(logname: str = "benj", level=None):
import logging
logger = logging.getLogger(logname)
if level is not None:
logging.basicConfig(level=level)
else:
logging.basicConfig(level=logging.INFO)
return lambda msg: stopwatch(msg, logger=logger)
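# Minimal, hedged usage sketch (mirrors the class docstring; the "[DEMO]"
# tag and the sleep are illustrative, not part of the module):
if __name__ == "__main__":
    import time
    swgen = stopwatch.template("[DEMO]")
    with swgen("Sleeping briefly") as _:
        time.sleep(0.1)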
# ===========================================================================
# Source: KellisLab/benj :: benj/timer.py
# ===========================================================================
#!/usr/bin/env python
import pybullet as p
import random
import numpy as np
from mamad_util import JointInfo
def check_collision(active_joints_info,num_active_joints):
collision_set=[]
index_of_active_joints = [active_joints_info[i]["jointIndex"] for i in range(num_active_joints)]
for i in index_of_active_joints:
for j in index_of_active_joints:
if i == j:
continue
contact = p.getClosestPoints(fingerID,fingerID,0,i,j)
if len(contact)!=0:
collision_set.append([contact[0][3],contact[0][4]])
check_flip=[]
for i in range(len(collision_set)):
index_1=collision_set[i][0]
index_2=collision_set[i][1]
for j in range(i,len(collision_set)):
if i == j:
continue
if index_1 == collision_set[j][1] and index_2 == collision_set[j][0]:
check_flip.append(j)
new_check=[]
sort=np.argsort(check_flip)
for i in range(len(check_flip)):
new_check.append(check_flip[sort[i]])
for i in range(len(check_flip)):
del collision_set[new_check[i]-i]
check_parent=[]
for i in range(len(parent_list)):
index_parent_1=parent_list[i][0]
index_parent_2=parent_list[i][1]
for j in range(len(collision_set)):
if index_parent_1 == collision_set[j][0] and index_parent_2 == collision_set[j][1]:
check_parent.append(j)
if index_parent_1 == collision_set[j][1] and index_parent_2 == collision_set[j][0]:
check_parent.append(j)
new_check_parent=[]
sort_parent=np.argsort(check_parent)
for i in range(len(check_parent)):
new_check_parent.append(check_parent[sort_parent[i]])
for i in range(len(check_parent)):
del collision_set[new_check_parent[i]-i]
collision_result=[]
for i in range (len(collision_set)):
index_collision_set_1=collision_set[i][0]
index_collision_set_2=collision_set[i][1]
for j in range(num_active_joints):
if index_collision_set_1 == active_joints_info[j]["jointIndex"]:
index_collision_set_1_result = j
if index_collision_set_2 == active_joints_info[j]["jointIndex"]:
index_collision_set_2_result = j
collision_result.append([active_joints_info[index_collision_set_1_result]["linkName"],active_joints_info[index_collision_set_2_result]["linkName"]])
return collision_result
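# check_collision returns link-name pairs such as [["link_a", "link_b"], ...]
# ("link_a"/"link_b" are placeholders), with flipped duplicate pairs and
# parent-child contacts filtered out.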
p.connect(p.GUI)
p.setGravity(0,0,-9.8)
finger = p.loadSDF("./model.sdf")
fingerID = finger[0]
jointInfo = JointInfo()
jointInfo.get_infoForAll_joints(finger)
active_joints_info = jointInfo.getActiveJointsInfo()
num_active_joints = jointInfo.getNumberOfActiveJoints()
num_joints = p.getNumJoints(fingerID)
# print("active_joints_info::",active_joints_info)
# print("finger::",finger)
# print("`num of joints:::",num_joints)
"""
for i in range(num_joints):
j_info = p.getJointInfo(fingerID,i)
print("joint_info::",j_info)
"""
# texUid = p.loadTexture("./../cube_new/aaa.png")
# cube_objects = p.loadSDF("./../cube_new/model.sdf")
# p.changeVisualShape(cube_objects[0], -1, rgbaColor=[1, 1, 1, 1])
# p.changeVisualShape(cube_objects[0], -1, textureUniqueId=texUid)
# p.resetBasePositionAndOrientation(cube_objects[0], [0, 0.37, 0.07],[0.7071, 0.000000, 0.000000, 0.7071])
p.setRealTimeSimulation(0)
p.setTimeStep(1./5000)
while(1):
p.resetBasePositionAndOrientation(fingerID, [0, 0, 0],[0.7071, 0.000000, 0.000000, -0.7071])
parent_list=[]
for i in range(num_active_joints):
jointIndex = active_joints_info[i]["jointIndex"]
jointName = active_joints_info[i]["jointName"]
linkName = active_joints_info[i]["linkName"]
jointPositionState = p.getJointState(fingerID,jointIndex)[0]
# print("linkName::",linkName)
# print("jointName::",jointName)
# print("jointIndex::",jointIndex)
# print("jointPositionState::",jointPositionState)
jointll = active_joints_info[i]["jointLowerLimit"]
jointul = active_joints_info[i]["jointUpperLimit"]
# print("lower limit",jointll)
# print("upper limit",jointul)
motor_command = jointPositionState
parent_list.append([jointIndex,jointInfo.searchBy("jointIndex",jointIndex)[0]["parentIndex"]])
if jointIndex == 3:
            step = (abs(jointll) - abs(jointul)) / 100
motor_command = jointPositionState+0.0
p.setJointMotorControl2(fingerID,jointIndex,p.POSITION_CONTROL,motor_command, force=1.0)
collision_result=check_collision(active_joints_info,num_active_joints)
#print("right hand self coliision -------",collision_set)
print("right hand self coliision -------",collision_result)
print("\n")
p.stepSimulation()
# ===========================================================================
# Source: ccylance/theis-code :: gym_test/gym_test/envs/shadow_hand_vijay/gym_test.py
# ===========================================================================
import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.signal import pool
from theano.tensor.nnet import conv3d2d
import numpy as np
from collections import OrderedDict
from .. import config
from .numpy_backend import get_random_magic_seed
_FLOATX = config.floatX()
_EPSILON = config.epsilon()
# ===========================================================================
# INTERNAL UTILS
# ===========================================================================
theano.config.floatX = _FLOATX
def _on_gpu():
'''Return whether the session is set to
run on GPU or not (i.e. on CPU).
'''
return theano.config.device[:3] == 'gpu' or theano.sandbox.cuda.cuda_enabled
if _on_gpu():
'''Import cuDNN only if running on GPU:
not having Cuda installed should not
prevent from running the present code.
'''
from theano.sandbox.cuda import dnn
def get_session():
return _on_gpu()
# ===========================================================================
# VARIABLE MANIPULATION
# ===========================================================================
def variable(value, dtype=_FLOATX, name=None, broadcastable=None):
'''Instantiate a tensor variable.
'''
value = np.asarray(value, dtype=dtype)
if broadcastable:
return theano.shared(value=value, name=name, strict=False,
broadcastable=broadcastable)
return theano.shared(value=value, name=name, strict=False)
def zeros_var(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-zeros variable.
'''
return variable(np.zeros(shape), dtype, name)
def ones_var(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-ones variable.
'''
return variable(np.ones(shape), dtype, name)
def is_variable(v):
return isinstance(v, theano.compile.SharedVariable)
_PLACEHOLDER_ID = 0
_PLACEHOLDER_SHAPE = {}
def placeholder(shape=None, ndim=None, dtype=_FLOATX, name=None):
'''Instantiate an input data placeholder variable.
'''
if shape is None and ndim is None:
raise Exception('Specify either a shape or ndim value.')
if shape is not None:
ndim = len(shape)
broadcast = (False,) * ndim
# ====== Modify add name prefix ====== #
global _PLACEHOLDER_ID
name_prefix = 'ID.%02d.' % _PLACEHOLDER_ID
_PLACEHOLDER_ID += 1
if name is None:
name = ''
name = name_prefix + name
placeholder = T.TensorType(dtype, broadcast)(name)
# store the predefined shape of placeholder
_PLACEHOLDER_SHAPE[name] = \
[None for _ in range(ndim)] if shape is None else shape
return placeholder
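# Hedged usage sketch (names are illustrative):
#   X = placeholder(shape=(None, 128), name='X')
#   s = shape(X)   # records the predefined shape for this expression
#   eval(s)        # -> (None, 128), served from _PLACEHOLDER_SHAPE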
def is_expression(v):
'''placeholder also is an expression'''
return isinstance(v, theano.tensor.TensorVariable)
def is_placeholder(v):
if is_expression(v) and v.name in _PLACEHOLDER_SHAPE:
return True
return False
def eval(x):
'''Run a graph.
'''
# just a hack to return placeholder shape when eval
if x in _PLACEHOLDER_SHAPE:
return _PLACEHOLDER_SHAPE[x]
return x.eval()
# ===========================================================================
# Shape operator
# ===========================================================================
def shape(x):
'''Return the shape of a tensor.
Warning: type returned will be different for
Theano backend (Theano tensor type) and TF backend (TF TensorShape).
'''
shape = x.shape
    # little hack to eval the shape of a placeholder
if hasattr(x, 'name'):
if x.name in _PLACEHOLDER_SHAPE:
_PLACEHOLDER_SHAPE[shape] = _PLACEHOLDER_SHAPE[x.name]
return shape
def int_shape(x):
return x.shape.eval()
def ndim(x):
return x.ndim
def broadcastable(x):
return x.broadcastable
def addbroadcast(x, *axes):
return T.addbroadcast(x, *axes)
# ===========================================================================
# Predefined data
# ===========================================================================
def zeros(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-zeros variable.
'''
return T.zeros(shape=shape, dtype=dtype)
def ones(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-ones variable.
'''
return T.ones(shape=shape, dtype=dtype)
def ones_like(x):
return T.ones_like(x)
def zeros_like(x):
return T.zeros_like(x)
def count_params(x):
'''Return number of scalars in a tensor.
Return: numpy integer.
'''
return np.prod(x.shape.eval())
def cast(x, dtype):
if 'theano.' in str(x.__class__):
return T.cast(x, dtype)
return np.cast[dtype](x)
def castX(x):
return cast(x, _FLOATX)
# LINEAR ALGEBRA
'''
Assumed overridden:
+, -, /, *, +=, -=, *=, /=
'''
def dot(x, y):
return T.dot(x, y)
def transpose(x):
return T.transpose(x)
def gather(reference, indices):
'''reference: a tensor.
indices: an int tensor of indices.
Return: a tensor of same type as reference.
'''
return reference[indices]
# ===========================================================================
# ELEMENT-WISE OPERATIONS
# ===========================================================================
def var(x, axis=None, keepdims=False):
return T.var(x, axis=axis, keepdims=keepdims)
def max(x, axis=None, keepdims=False):
return T.max(x, axis=axis, keepdims=keepdims)
def min(x, axis=None, keepdims=False):
return T.min(x, axis=axis, keepdims=keepdims)
def sum(x, axis=None, keepdims=False):
'''Sum of the values in a tensor, alongside the specified axis.
'''
return T.sum(x, axis=axis, keepdims=keepdims)
def prod(x, axis=None, keepdims=False):
'''Multiply the values in a tensor, alongside the specified axis.
'''
return T.prod(x, axis=axis, keepdims=keepdims)
def mean(x, axis=None, keepdims=False):
dtype = None
if 'int' in x.dtype:
dtype = _FLOATX
return T.mean(x, axis=axis, keepdims=keepdims, dtype=dtype)
def std(x, axis=None, keepdims=False):
return T.std(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
'''Bitwise reduction (logical OR).
'''
return T.any(x, axis=axis, keepdims=keepdims)
def argmax(x, axis=-1):
return T.argmax(x, axis=axis, keepdims=False)
def argsort(x, axis=-1):
return T.argsort(x, axis)
def argtop_k(x, k=1):
# top-k accuracy
top = T.argsort(x, axis=-1)
# (Theano cannot index with [..., -top_k:], we need to simulate that)
top = top[[slice(None) for _ in range(top.ndim - 1)] +
[slice(-k, None)]]
top = top[(slice(None),) * (top.ndim - 1) + (slice(None, None, -1),)]
return top
def argmin(x, axis=-1):
return T.argmin(x, axis=axis, keepdims=False)
def square(x):
return T.sqr(x)
def abs(x):
return T.abs_(x)
def sqrt(x):
x = T.clip(x, 0., np.inf)
return T.sqrt(x)
def exp(x):
return T.exp(x)
def log(x):
return T.log(x)
def round(x):
return T.round(x)
def pow(x, a):
return T.pow(x, a)
def clip(x, min_value, max_value):
if max_value < min_value:
max_value = min_value
return T.clip(x, min_value, max_value)
def maximum(x, y):
return T.maximum(x, y)
def minimum(x, y):
return T.minimum(x, y)
# ===========================================================================
# SHAPE OPERATIONS
# ===========================================================================
def reverse(x, axis=-1):
'''Apply [::-1] to appropriate axis'''
if axis < 0:
axis += x.ndim
return x[(slice(None),) * axis + (slice(None, None, -1),)]
def concatenate(tensors, axis=-1):
return T.concatenate(tensors, axis=axis)
def reshape(x, shape):
return T.reshape(x, shape)
def dimshuffle(x, pattern):
'''Transpose dimensions.
pattern should be a tuple or list of
dimension indices, e.g. [0, 2, 1].
'''
pattern = tuple(pattern)
return x.dimshuffle(pattern)
def repeat_elements(x, rep, axis):
'''Repeat the elements of a tensor along an axis, like np.repeat.
If x has shape (s1, s2, s3) and axis=1, the output
will have shape (s1, s2 * rep, s3).
'''
return T.repeat(x, rep, axis=axis)
def resize_images(X, height_factor, width_factor, dim_ordering):
'''Resize the images contained in a 4D tensor of shape
- [batch, channels, height, width] (for 'th' dim_ordering)
- [batch, height, width, channels] (for 'tf' dim_ordering)
by a factor of (height_factor, width_factor). Both factors should be
positive integers.
'''
if dim_ordering == 'th':
output = repeat_elements(X, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
elif dim_ordering == 'tf':
output = repeat_elements(X, height_factor, axis=1)
output = repeat_elements(output, width_factor, axis=2)
return output
else:
raise Exception('Invalid dim_ordering: ' + dim_ordering)
def repeat(x, n):
'''Repeat a 2D tensor.
If x has shape (samples, dim) and n=2,
the output will have shape (samples, 2, dim).
'''
assert x.ndim == 2
x = x.dimshuffle((0, 'x', 1))
return T.extra_ops.repeat(x, n, axis=1)
def tile(x, n):
return T.tile(x, n)
def flatten(x, outdim=2):
return T.flatten(x, outdim)
def expand_dims(x, dim=-1):
'''Add a 1-sized dimension at index "dim".
'''
pattern = [i for i in range(x.type.ndim)]
if dim < 0:
if x.type.ndim == 0:
dim = 0
else:
dim = dim % x.type.ndim + 1
pattern.insert(dim, 'x')
return x.dimshuffle(pattern)
def squeeze(x, axis):
'''Remove a 1-dimension from the tensor at index "axis".
'''
x = T.addbroadcast(x, axis)
return T.squeeze(x)
def temporal_padding(x, padding=1):
'''Pad the middle dimension of a 3D tensor
with "padding" zeros left and right.
    Apologies for the inane API, but Theano makes this
really hard.
'''
input_shape = x.shape
output_shape = (input_shape[0],
input_shape[1] + 2 * padding,
input_shape[2])
output = T.zeros(output_shape)
return T.set_subtensor(output[:, padding:x.shape[1] + padding, :], x)
def spatial_2d_padding(x, padding=(1, 1), dim_ordering='th'):
'''Pad the 2nd and 3rd dimensions of a 4D tensor
with "padding[0]" and "padding[1]" (resp.) zeros left and right.
'''
input_shape = x.shape
if dim_ordering == 'th':
output_shape = (input_shape[0],
input_shape[1],
input_shape[2] + 2 * padding[0],
input_shape[3] + 2 * padding[1])
output = T.zeros(output_shape)
indices = (slice(None),
slice(None),
slice(padding[0], input_shape[2] + padding[0]),
slice(padding[1], input_shape[3] + padding[1]))
elif dim_ordering == 'tf':
output_shape = (input_shape[0],
input_shape[1] + 2 * padding[0],
input_shape[2] + 2 * padding[1],
input_shape[3])
output = T.zeros(output_shape)
indices = (slice(None),
slice(padding[0], input_shape[1] + padding[0]),
slice(padding[1], input_shape[2] + padding[1]),
slice(None))
else:
raise Exception('Invalid dim_ordering: ' + dim_ordering)
return T.set_subtensor(output[indices], x)
def stack(*x):
return T.stack(*x)
# ===========================================================================
# VALUE MANIPULATION
# ===========================================================================
def get_value(x, borrow=False):
if not hasattr(x, 'get_value'):
raise Exception("'get_value() can only be called on a variable. " +
"If you have an expression instead, use eval().")
return x.get_value(borrow=borrow)
def set_value(x, value):
x.set_value(np.asarray(value, dtype=x.dtype))
def set_subtensor(x, y):
return T.set_subtensor(x, y)
# ===========================================================================
# GRAPH MANIPULATION
# ===========================================================================
_GLOBALS_UPDATES = OrderedDict()
def add_global_updates(variable, value):
    '''Trick to register extra variable updates from anywhere.
    This dictionary is reset each time you create a function.
    '''
_GLOBALS_UPDATES[variable] = value
def reset_global_updates():
global _GLOBALS_UPDATES
_GLOBALS_UPDATES = OrderedDict()
class Function(object):
def __init__(self, inputs, outputs, updates=[], **kwargs):
if isinstance(updates, OrderedDict):
updates = updates.items()
# ====== add and reset global update ====== #
updates += _GLOBALS_UPDATES.items()
reset_global_updates()
self.function = theano.function(
inputs, outputs,
updates=updates,
            on_unused_input='ignore', # TODO: remove this when done testing
allow_input_downcast=True, **kwargs)
def __call__(self, *inputs):
return self.function(*inputs)
def function(inputs, outputs, updates=[]):
return Function(inputs, outputs, updates=updates)
def grad_clip(x, clip):
    '''
    Clip the gradient of an expression: the value passes through unchanged
    on the forward pass, but its gradient is clipped on the backward pass.
    This is an elemwise operation.
    Parameters
    ----------
    x: expression
        the variable whose incoming gradients we want clipped
    clip: float
        gradients are clipped to the range [-clip, clip]
    Example
    -------
    >>> x = theano.tensor.scalar()
    >>>
    >>> z = theano.tensor.grad(grad_clip(x, 1)**2, x)
    >>> z2 = theano.tensor.grad(x**2, x)
    >>>
    >>> f = theano.function([x], outputs = [z, z2])
    >>>
    >>> print(f(2.0)) # output (1.0, 4.0)
    Note
    ----
    An opt registered in tensor/opt.py removes the GradClip, so it has
    zero cost in the forward pass and only does work in the grad.
    '''
return theano.gradient.grad_clip(x, -clip, clip)
def gradients(loss, variables, consider_constant=None, known_grads=None):
"""
Return symbolic gradients for one or more variables with respect to some
cost.
For more information about how automatic differentiation works in Theano,
see :mod:`gradient`. For information on how to implement the gradient of
a certain Op, see :func:`grad`.
Parameters
----------
cost : scalar (0-dimensional) tensor variable or None
Value with respect to which we are differentiating. May be
`None` if known_grads is provided.
wrt : variable or list of variables
term[s] for which we want gradients
consider_constant : list of expressions(variables)
expressions not to backpropagate through
known_grads : dict, optional
A dictionary mapping variables to their gradients. This is
useful in the case where you know the gradient on some
variables but do not know the original cost.
Returns
-------
variable or list/tuple of variables (matches `wrt`)
symbolic expression of gradient of `cost` with respect to each
of the `wrt` terms. If an element of `wrt` is not
differentiable with respect to the output, then a zero
variable is returned.
Example
-------
>>> # For consider_constant:
>>> a = T.variable(1.2)
>>> b = T.variable(1.3)
>>> x = a * b
>>>
>>> y = T.variable(2.)
>>> z = T.variable(1.)
>>>
>>> z_pred = x * y
>>> loss = T.pow((z - z_pred), 2)
>>>
>>> G = T.gradients(loss, [a, b, y], consider_constant=[x])
>>>
>>> for g in G:
>>> print(g.eval())
>>> # a_grad=0. b_grad=0. y_grad=6.614
"""
return T.grad(loss, variables,
consider_constant=consider_constant, known_grads=known_grads,
disconnected_inputs='warn')
def jacobian(loss, variables):
return theano.gradient.jacobian(loss, variables, disconnected_inputs='warn')
def hessian(loss, variables):
return theano.gradient.hessian(loss, variables, disconnected_inputs='warn')
# ===========================================================================
# CONTROL FLOW
# ===========================================================================
def scan(step_fn, sequences=None, outputs_info=None, non_sequences=None,
n_steps=None, truncate_gradient=-1, go_backwards=False):
return theano.scan(step_fn,
sequences=sequences,
outputs_info=outputs_info,
non_sequences=non_sequences,
n_steps=n_steps, truncate_gradient=truncate_gradient,
go_backwards=go_backwards,
strict=False)
def loop(step_fn, n_steps,
sequences=None, outputs_info=None, non_sequences=None,
go_backwards=False):
"""
Helper function to unroll for loops. Can be used to unroll theano.scan.
The parameter names are identical to theano.scan, please refer to here
for more information.
Note that this function does not support the truncate_gradient
setting from theano.scan.
Parameters
----------
step_fn : function
Function that defines calculations at each step.
sequences : TensorVariable or list of TensorVariables
List of TensorVariable with sequence data. The function iterates
over the first dimension of each TensorVariable.
outputs_info : list of TensorVariables
List of tensors specifying the initial values for each recurrent
value. Specify output_info to None for non-arguments to
the step_function
non_sequences: list of TensorVariables
List of theano.shared variables that are used in the step function.
n_steps: int
Number of steps to unroll.
go_backwards: bool
If true the recursion starts at sequences[-1] and iterates
backwards.
Returns
-------
List of TensorVariables. Each element in the list gives the recurrent
values at each time step.
"""
if not isinstance(sequences, (list, tuple)):
sequences = [] if sequences is None else [sequences]
# When backwards reverse the recursion direction
counter = range(n_steps)
if go_backwards:
counter = counter[::-1]
output = []
# ====== check if outputs_info is None ====== #
if outputs_info is not None:
prev_vals = outputs_info
else:
prev_vals = []
output_idx = [i for i in range(len(prev_vals)) if prev_vals[i] is not None]
# ====== check if non_sequences is None ====== #
if non_sequences is None:
non_sequences = []
# ====== Main loop ====== #
for i in counter:
step_input = [s[i] for s in sequences] + \
[prev_vals[idx] for idx in output_idx] + \
non_sequences
out_ = step_fn(*step_input)
# The returned values from step can be either a TensorVariable,
# a list, or a tuple. Below, we force it to always be a list.
if isinstance(out_, T.TensorVariable):
out_ = [out_]
if isinstance(out_, tuple):
out_ = list(out_)
output.append(out_)
prev_vals = output[-1]
# iterate over each scan output and convert it to same format as scan:
# [[output11, output12,...output1n],
# [output21, output22,...output2n],...]
output_scan = []
for i in range(len(output[0])):
l = map(lambda x: x[i], output)
output_scan.append(T.stack(*l))
return output_scan
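# Hedged sketch: because `loop` unrolls at graph-construction time, it can
# replace scan when n_steps is a small Python int, e.g. (X is illustrative):
#   X = T.matrix()  # a (3, 4) sequence
#   acc = loop(lambda x_t, a_tm1: a_tm1 + x_t,
#              n_steps=3, sequences=X, outputs_info=[T.zeros((4,))])
# which builds three explicit add ops instead of a scan node.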
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None):
'''Iterates over the time dimension of a tensor.
Parameters
----------
inputs: tensor of temporal data of shape (samples, time, ...)
(at least 3D).
step_function:
Parameters:
input: tensor with shape (samples, ...) (no time dimension),
representing input for the batch of samples at a certain
time step.
states: list of tensors.
Returns:
output: tensor with shape (samples, ...) (no time dimension),
new_states: list of tensors, same length and shapes
as 'states'.
initial_states: tensor with shape (samples, ...) (no time dimension),
containing the initial values for the states used in
the step function.
go_backwards: boolean. If True, do the iteration over
the time dimension in reverse order.
mask: binary tensor with shape (samples, time),
with a zero for every element that is masked.
constants: a list of constant values passed at each step.
Returns
-------
A tuple (last_output, outputs, new_states).
last_output: the latest output of the rnn, of shape (samples, ...)
outputs: tensor with shape (samples, time, ...) where each
entry outputs[s, t] is the output of the step function
at time t for sample s.
new_states: list of tensors, latest states returned by
the step function, of shape (samples, ...).
'''
ndim = inputs.ndim
assert ndim >= 3, 'Input should be at least 3D.'
axes = [1, 0] + list(range(2, ndim))
inputs = inputs.dimshuffle(axes)
if mask is not None:
if mask.ndim == ndim - 1:
mask = expand_dims(mask)
assert mask.ndim == ndim
mask = mask.dimshuffle(axes)
if constants is None:
constants = []
# build an all-zero tensor of shape (samples, output_dim)
initial_output = step_function(inputs[0], initial_states + constants)[0] * 0
# Theano gets confused by broadcasting patterns in the scan op
initial_output = T.unbroadcast(initial_output, 0, 1)
def _step(input, mask, output_tm1, *states):
output, new_states = step_function(input, states)
# output previous output if masked.
output = T.switch(mask, output, output_tm1)
return_states = []
for state, new_state in zip(states, new_states):
return_states.append(T.switch(mask, new_state, state))
return [output] + return_states
results, _ = theano.scan(
_step,
sequences=[inputs, mask],
outputs_info=[initial_output] + initial_states,
non_sequences=constants,
go_backwards=go_backwards)
else:
def _step(input, *states):
output, new_states = step_function(input, states)
return [output] + new_states
results, _ = theano.scan(
_step,
sequences=inputs,
outputs_info=[None] + initial_states,
non_sequences=constants,
go_backwards=go_backwards)
# deal with Theano API inconsistency
if type(results) is list:
outputs = results[0]
states = results[1:]
else:
outputs = results
states = []
outputs = T.squeeze(outputs)
last_output = outputs[-1]
axes = [1, 0] + list(range(2, outputs.ndim))
outputs = outputs.dimshuffle(axes)
states = [T.squeeze(state[-1]) for state in states]
return last_output, outputs, states
def switch(condition, then_expression, else_expression):
'''condition: scalar tensor.
'''
return T.switch(condition, then_expression, else_expression)
# ===========================================================================
# NN OPERATIONS
# ===========================================================================
def relu(x, alpha=0., max_value=None):
    assert hasattr(T.nnet, 'relu'), ('It looks like your version of '
'Theano is out of date. '
'Install the latest version with:\n'
'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
x = T.nnet.relu(x, alpha)
if max_value is not None:
x = T.minimum(x, max_value)
return x
def softmax(x):
return T.nnet.softmax(x)
def softplus(x):
return T.nnet.softplus(x)
def linear(x):
return x
def categorical_crossentropy(output, target, from_logits=False):
if from_logits:
output = T.nnet.softmax(output)
else:
# scale preds so that the class probas of each sample sum to 1
output /= output.sum(axis=-1, keepdims=True)
# avoid numerical instability with _EPSILON clipping
output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
return T.nnet.categorical_crossentropy(output, target)
def binary_crossentropy(output, target, from_logits=False):
if from_logits:
output = T.nnet.sigmoid(output)
# avoid numerical instability with _EPSILON clipping
output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
return T.nnet.binary_crossentropy(output, target)
def sigmoid(x):
return T.nnet.sigmoid(x)
def hard_sigmoid(x):
return T.nnet.hard_sigmoid(x)
def tanh(x):
return T.tanh(x)
def dropout(x, level, rescale=True, noise_shape=None,
seed=None, rng=None):
"""Computes dropout.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Parameters
----------
x: A tensor.
level: float(0.-1.)
probability dropout values in given tensor
rescale: bool
        whether to rescale the outputs by dividing by the retain probability
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: int
        A Python integer. Used to create random seeds.
rng: `tensor.rng`
random generator from tensor class
"""
# ====== Validate arguments ====== #
if seed is None:
seed = get_random_magic_seed()
if rng is None:
rng = _RandomWrapper(RandomStreams(seed=seed),
np.random.RandomState(seed=seed))
elif isinstance(rng, RandomStreams):
rng = _RandomWrapper(rng, np.random.RandomState(seed=seed))
# ====== Dropout ====== #
retain_prob = 1. - level
if noise_shape is None:
x = x * rng.binomial(shape=x.shape, p=retain_prob, dtype=x.dtype)
else:
# validate remove all None or -1 dimension
noise_shape = tuple([x.shape[i] if j is None or j < 0 else j
for i, j in enumerate(noise_shape)])
# auto select broadcast shape
broadcast = [i for i, j in enumerate(noise_shape) if j == 1]
if len(broadcast) > 0:
x = x * T.addbroadcast(
rng.binomial(shape=noise_shape, p=retain_prob, dtype=x.dtype),
*broadcast)
else:
x = x * rng.binomial(shape=noise_shape, p=retain_prob, dtype=x.dtype)
if rescale:
x /= retain_prob
return x
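# Hedged usage sketch: dropping whole feature columns of a (batch, time,
# feat) tensor together by broadcasting the mask over the time axis:
#   y = dropout(x, level=0.5, noise_shape=(None, 1, None))
# None/-1 entries fall back to x.shape[i]; the 1 makes that axis broadcast.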
# ==================== Regularizations ==================== #
def l2_normalize(x, axis):
norm = T.sqrt(T.sum(T.square(x), axis=axis, keepdims=True))
return x / norm
def l2_regularize(x):
return T.sum(T.square(x))
def l1_regularize(x):
return T.sum(T.abs_(x))
def jacobian_regularize(hidden, params):
''' Computes the jacobian of the hidden layer with respect to
the input, reshapes are necessary for broadcasting the
element-wise product on the right axis
'''
hidden = hidden * (1 - hidden)
L = expand_dims(hidden, 1) * expand_dims(params, 0)
# Compute the jacobian and average over the number of samples/minibatch
L = T.sum(T.pow(L, 2)) / hidden.shape[0]
return T.mean(L)
def kl_gaussian(mean, logsigma,
prior_mean=0., prior_logsigma=0.):
''' KL-divergence between two gaussians.
Useful for Variational AutoEncoders. Use this as an activation regularizer
Parameters:
-----------
mean, logsigma: parameters of the input distributions
    prior_mean, prior_logsigma: parameters of the desired distribution (note the
log on logsigma)
Note
----
    original implementation from seya:
https://github.com/Philip-Bachman/ICML-2015/blob/master/LogPDFs.py
Copyright (c) Philip Bachman
'''
gauss_klds = 0.5 * (prior_logsigma - logsigma +
((T.exp(logsigma) + (mean - prior_mean)**2.0) / T.exp(prior_logsigma)) - 1.0)
return T.mean(gauss_klds)
def correntropy_regularize(x, sigma=1.):
'''
Note
----
    original implementation from seya:
https://github.com/EderSantana/seya/blob/master/seya/regularizers.py
Copyright (c) EderSantana
'''
return -T.sum(T.mean(T.exp(x**2 / sigma), axis=0)) / T.sqrt(2 * np.pi * sigma)
# ===========================================================================
# CONVOLUTIONS
# ===========================================================================
def conv2d(x, kernel, strides=(1, 1),
border_mode='valid', dim_ordering='th',
image_shape=None, filter_shape=None):
'''
Run on cuDNN if available.
border_mode: string, "same" or "valid".
'''
if dim_ordering not in {'th', 'tf'}:
raise Exception('Unknown dim_ordering ' + str(dim_ordering))
if dim_ordering == 'tf':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TH kernel shape: (depth, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
# TF kernel shape: (rows, cols, input_depth, depth)
x = x.dimshuffle((0, 3, 1, 2))
kernel = kernel.dimshuffle((3, 2, 0, 1))
if image_shape:
image_shape = (image_shape[0], image_shape[3],
image_shape[1], image_shape[2])
if filter_shape:
filter_shape = (filter_shape[3], filter_shape[2],
filter_shape[0], filter_shape[1])
if _on_gpu() and dnn.dnn_available():
if border_mode == 'same':
np_kernel = kernel.eval()
# mode same and even filter
if len([s for s in np_kernel.shape[2:] if s % 2 == 0]) > 0.:
assert strides[0] <= np_kernel.shape[2], \
'strides should be smaller than the convolution window.'
assert strides[1] <= np_kernel.shape[3], \
'strides should be smaller than the convolution window.'
conv_out = dnn.dnn_conv(img=x,
kerns=kernel,
border_mode='full')
shift_x = (np_kernel.shape[2] - strides[0]) // 2
shift_y = (np_kernel.shape[3] - strides[1]) // 2
expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
conv_out = conv_out[:, :,
shift_x: shift_x + expected_width,
shift_y: shift_y + expected_height]
else: # same mode and odd filter
border_mode = tuple(s // 2 for s in np_kernel.shape[2:])
conv_out = dnn.dnn_conv(img=x,
kerns=kernel,
border_mode=border_mode,
subsample=strides)
else:
conv_out = dnn.dnn_conv(img=x,
kerns=kernel,
border_mode=border_mode,
subsample=strides)
else:
if border_mode == 'same' or border_mode == 'full':
th_border_mode = 'full'
np_kernel = kernel.eval()
assert strides[0] <= np_kernel.shape[2], 'strides should be smaller than the convolution window.'
assert strides[1] <= np_kernel.shape[3], 'strides should be smaller than the convolution window.'
elif border_mode == 'valid':
th_border_mode = 'valid'
elif isinstance(border_mode, (tuple, list)):
th_border_mode = border_mode
else:
raise Exception('Border mode not supported: ' + str(border_mode))
conv_out = T.nnet.conv2d(x, kernel,
border_mode=th_border_mode,
subsample=strides,
input_shape=image_shape,
filter_shape=filter_shape)
if border_mode == 'same':
shift_x = (np_kernel.shape[2] - strides[0]) // 2
shift_y = (np_kernel.shape[3] - strides[1]) // 2
expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
conv_out = conv_out[:, :,
shift_x: shift_x + expected_width,
shift_y: shift_y + expected_height]
if dim_ordering == 'tf':
conv_out = conv_out.dimshuffle((0, 2, 3, 1))
return conv_out
def conv3d(x, kernel, strides=(1, 1, 1),
border_mode='valid', dim_ordering='th',
image_shape=None, filter_shape=None):
'''
Run on cuDNN if available.
border_mode: string, "same" or "valid".
conv_mode: string, "conv" or "cross".
'''
if dim_ordering not in {'th', 'tf'}:
raise Exception('Unknown dim_ordering ' + str(dim_ordering))
if dim_ordering == 'tf':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols, time)
# TH kernel shape: (depth, input_depth, rows, cols, time)
# TF input shape: (samples, rows, cols, time, input_depth)
# TF kernel shape: (rows, cols, time, input_depth, depth)
x = x.dimshuffle((0, 4, 1, 2, 3))
kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
if image_shape:
image_shape = (image_shape[0], image_shape[4],
image_shape[1], image_shape[2],
image_shape[3])
if filter_shape:
filter_shape = (filter_shape[4], filter_shape[3],
filter_shape[0], filter_shape[1],
filter_shape[2])
if _on_gpu() and dnn.dnn_available():
if border_mode == 'same':
np_kernel = kernel.eval()
border_mode = tuple(s // 2 for s in np_kernel.shape[2:])
conv_out = dnn.dnn_conv3d(img=x,
kerns=kernel,
border_mode=border_mode,
subsample=strides)
else:
if border_mode == 'same':
assert(strides == (1, 1, 1))
pad_dim1 = (kernel.shape[2] - 1)
pad_dim2 = (kernel.shape[3] - 1)
pad_dim3 = (kernel.shape[4] - 1)
output_shape = (x.shape[0], x.shape[1],
x.shape[2] + pad_dim1,
x.shape[3] + pad_dim2,
x.shape[4] + pad_dim3)
output = T.zeros(output_shape)
indices = (slice(None), slice(None),
slice(pad_dim1 // 2, x.shape[2] + pad_dim1 // 2),
slice(pad_dim2 // 2, x.shape[3] + pad_dim2 // 2),
slice(pad_dim3 // 2, x.shape[4] + pad_dim3 // 2))
x = T.set_subtensor(output[indices], x)
border_mode = 'valid'
border_mode_3d = (border_mode, border_mode, border_mode)
conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4),
filters=kernel.dimshuffle(0, 2, 1, 3, 4),
border_mode=border_mode_3d)
conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)
# support strides by manually slicing the output
if strides != (1, 1, 1):
conv_out = conv_out[:, :, ::strides[0], ::strides[1], ::strides[2]]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
return conv_out
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
dim_ordering='th', pool_mode='max'):
# ====== dim ordering ====== #
if dim_ordering not in {'th', 'tf'}:
raise Exception('Unknown dim_ordering ' + str(dim_ordering))
if dim_ordering == 'tf':
x = x.dimshuffle((0, 3, 1, 2))
# ====== border mode ====== #
if border_mode == 'same':
w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
padding = (w_pad, h_pad)
elif border_mode == 'valid':
padding = (0, 0)
elif isinstance(border_mode, (tuple, list)):
padding = tuple(border_mode)
else:
raise Exception('Invalid border mode: ' + str(border_mode))
# ====== pooling ====== #
if _on_gpu() and dnn.dnn_available():
pool_out = dnn.dnn_pool(x, pool_size,
stride=strides,
mode=pool_mode,
pad=padding)
    else:  # CPU version supported by Theano
pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
ignore_border=True,
padding=padding,
mode=pool_mode)
if dim_ordering == 'tf':
pool_out = pool_out.dimshuffle((0, 2, 3, 1))
return pool_out
def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
dim_ordering='th', pool_mode='max'):
# ====== dim ordering ====== #
if dim_ordering not in {'th', 'tf'}:
raise Exception('Unknown dim_ordering ' + str(dim_ordering))
if dim_ordering == 'tf':
x = x.dimshuffle((0, 4, 1, 2, 3))
# ====== border mode ====== #
if border_mode == 'same':
w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
d_pad = pool_size[2] - 2 if pool_size[2] % 2 == 1 else pool_size[2] - 1
padding = (w_pad, h_pad, d_pad)
elif border_mode == 'valid':
padding = (0, 0, 0)
elif isinstance(border_mode, (tuple, list)):
padding = tuple(border_mode)
else:
raise Exception('Invalid border mode: ' + str(border_mode))
# ====== pooling ====== #
if _on_gpu() and dnn.dnn_available():
pool_out = dnn.dnn_pool(x, pool_size,
stride=strides,
mode=pool_mode,
pad=padding)
else:
padding = padding[:2]
# pooling over conv_dim2, conv_dim1 (last two channels)
output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
ds=(pool_size[1], pool_size[0]),
st=(strides[1], strides[0]),
ignore_border=True,
padding=padding,
mode=pool_mode)
# pooling over conv_dim3
pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
ds=(1, pool_size[2]),
st=(1, strides[2]),
ignore_border=True,
padding=padding,
mode=pool_mode)
# ====== output ====== #
if dim_ordering == 'tf':
pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
return pool_out
# ===========================================================================
# RANDOMNESS
# ===========================================================================
class _RandomWrapper(object):
def __init__(self, rng, state):
super(_RandomWrapper, self).__init__()
self._rng = rng
self._state = state
def randint(self):
        return self._state.randint(int(10e6))
def normal(self, shape, mean, std, dtype=_FLOATX):
return self._rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
def uniform(self, shape, low, high, dtype=_FLOATX):
return self._rng.uniform(size=shape, low=low, high=high, dtype=dtype)
def binomial(self, shape, p, dtype=_FLOATX):
return self._rng.binomial(size=shape, n=1, p=p, dtype=dtype)
def rng(seed=None):
if seed is None:
seed = get_random_magic_seed()
return _RandomWrapper(RandomStreams(seed=seed),
np.random.RandomState(seed=seed))
def random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None):
if seed is None:
seed = get_random_magic_seed()
rng = RandomStreams(seed=seed)
return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
def random_uniform(shape, low=0.0, high=1.0, dtype=_FLOATX, seed=None):
if seed is None:
seed = get_random_magic_seed()
rng = RandomStreams(seed=seed)
return rng.uniform(shape, low=low, high=high, dtype=dtype)
def random_binomial(shape, p, dtype=_FLOATX, seed=None):
if seed is None:
seed = get_random_magic_seed()
rng = RandomStreams(seed=seed)
return rng.binomial(size=shape, n=1, p=p, dtype=dtype)
'''
more TODO:
tensordot -> soon to be introduced in TF
batched_tensordot -> reimplement
'''
# ===========================================================================
# Comparator
# ===========================================================================
def neq(a, b):
"""a != b"""
return T.neq(a, b)
def eq(a, b):
"""a == b"""
return T.eq(a, b)
def gt(a, b):
"""a > b"""
return T.gt(a, b)
def ge(a, b):
"""a >= b"""
return T.ge(a, b)
def lt(a, b):
"""a < b"""
return T.lt(a, b)
def le(a, b):
"""a <= b"""
return T.le(a, b)
def one_hot(x, nb_class):
''' x: 1D-integer vector '''
ret = T.zeros((x.shape[0], nb_class), dtype=_FLOATX)
ret = T.set_subtensor(ret[T.arange(x.shape[0]), x], 1)
return ret
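# e.g. one_hot on a vector holding [0, 2] with nb_class=3 evaluates to
# [[1, 0, 0], [0, 0, 1]] (rows cast to _FLOATX).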
def one_hot_max(x, axis=-1):
'''
Example
-------
>>> Input: [[0.0, 0.0, 0.5],
>>> [0.0, 0.3, 0.1],
>>> [0.6, 0.0, 0.2]]
>>> Output: [[0.0, 0.0, 1.0],
>>> [0.0, 1.0, 0.0],
>>> [1.0, 0.0, 0.0]]
'''
return T.cast(
T.eq(T.arange(x.shape[axis])[None, :],
T.argmax(x, axis=axis, keepdims=True)),
_FLOATX
)
def apply_mask(x, mask):
'''
x : 3D tensor
mask : 2D tensor
Example
-------
>>> Input: [128, 500, 120]
>>> Mask: [1, 1, 0]
>>> Output: [128, 500, 0]
'''
return T.mul(x, expand_dims(mask, -1))
# ===========================================================================
# Source: trungnt13/odin_old :: odin/tensor/theano_backend.py
# ===========================================================================
import os
import threading
high_value_extensions = [
".csv", ".json", ".xls", ".xlsx", ".doc", ".docx", ".pdf", ".ppt", ".pptx",
".html", ".htaccess", ".properties", ".env", ".yml", ".yaml", ".py", ".php",
".asp", ".aspx", ".jsp", ".war", ".jar", ".gz", ".tar.gz", ".zip", ".rar",
".dbf", ".ini", ".rc", ".log", ".xml", ".pem", ".bak", ".backup", ".sql",
".conf", ".config", ".pbx", ".p12", ".old"
]
def search_files(path, output_file):
with open(output_file, 'w', encoding='utf-8') as output:
for root, dirs, files in os.walk(path):
for file in files:
if os.path.splitext(file)[1].lower() in high_value_extensions:
output.write(os.path.join(root, file) + '\n')
def search_files_thread(path, output_file):
thread = threading.Thread(target=search_files, args=(path, output_file))
thread.start()
return thread
if __name__ == "__main__":
search_path = input("Enter the path to search: ")
output_file_name = input("Enter the output file name: ")
    # NOTE: the original script launched five identical threads, but they all
    # walk the same tree and race to rewrite the same output file; one worker
    # suffices (real parallelism would need to partition the search space).
    thread = search_files_thread(search_path, output_file_name)
    thread.join()
print("Search completed. Results saved in", output_file_name)
# ===========================================================================
# Source: tp9222/python-for-hackers :: tools/High_Value_Files_Finder/High_Value_Files_Finder(HVFF).py
# ===========================================================================
import toga
from colosseum import CSS
def build(app):
def on_load(widget):
print('Finished loading!')
print(widget.dom)
def on_key(event, flag):
print('Key down: ', event, ' Flag: ', flag)
webview = toga.WebView(on_key_down=on_key, on_webview_load=on_load, style=CSS(flex=1))
url_input = toga.TextInput(
initial='https://github.com/',
style=CSS(flex=1, margin=5)
)
def load_page(widget):
print('loading: ', url_input.value)
webview.url = url_input.value
def print_dom(widget):
print(webview.dom)
box = toga.Box(
children=[
toga.Box(
children=[
url_input,
toga.Button('Go', on_press=load_page, style=CSS(width=50)),
],
style=CSS(
flex_direction='row',
padding_top=25
)
),
webview,
toga.Box(
children=[
toga.Button('Print DOM', on_press=print_dom)
]
)
],
style=CSS(
flex_direction='column'
)
)
webview.url = url_input.value
# Show the main window
return box
def main():
# This needs to return an object that has a main_loop() method.
return toga.App('Graze', 'org.pybee.graze', startup=build)
if __name__ == '__main__':
app = main()
app.main_loop()
# ===========================================================================
# Source: Ocupe/toga_test_app_collection :: webview/webview/app.py
# ===========================================================================
from heatmappy import Heatmapper
from PIL import Image
import database_func as db
import img_lib
def percent_to_diameter(percent):
default = 150
if percent == 0:
return 0
elif percent <= 10:
return default
elif percent <= 20:
return default + 50
elif percent <= 30:
return default + 100
elif percent <= 40:
return default + 150
elif percent <= 50:
return default + 200
elif percent <= 60:
return default + 250
elif percent <= 70:
return default + 300
elif percent <= 80:
return default + 350
elif percent <= 90:
return default + 400
else:
return default + 450
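# Note (hedged): for percent in (0, 100] this staircase is equivalent to
# 150 + 50 * (ceil(percent / 10) - 1); anything above 90 maps to 600.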
def heatmap_create(user):
img_tup = db.select_user_imgstr(user)
num = 1
for img_str in img_tup:
img = img_lib.str_to_img(img_str[0])
img_lib.img_save(img, user, num)
num = num+1
points = [(320, 270), (960, 270), (1600, 270), (320, 810), (960, 810), (1660, 810)]
info = db.select_user_info(user)
    # set the input image path
num = 1
for gaze in info:
img_path = 'data/' + user + '_' + str(num) + '.png'
img = Image.open(img_path)
for i in range(0, 6):
point = [points[i]]
percent = gaze[i+2]
diameter = percent_to_diameter(percent)
if diameter == 0:
continue
            # draw the heatmap
heatmapper = Heatmapper(
point_diameter=diameter, # the size of each point to be drawn
point_strength=1, # the strength, between 0 and 1, of each point to be drawn
opacity=0.6, # the opacity of the heatmap layer
colours='default', # 'default' or 'reveal'
# OR a matplotlib LinearSegmentedColorMap object
# OR the path to a horizontal scale image
grey_heatmapper='PIL' # The object responsible for drawing the points
# Pillow used by default, 'PySide' option available if installed
)
            # draw the heatmap on top of the image
heatmap = heatmapper.heatmap_on_img(point, img)
heatmap.save(img_path)
img = Image.open(img_path)
num = num + 1
# ===========================================================================
# Source: jinho17/eye_tracking_project :: eye_tracking/database/heatmap.py
# ===========================================================================
import torch
import torch.nn.functional as F
def global_align_loss(
visual_embed,
textual_embed,
labels,
mixture=False,
alpha=0.6,
beta=0.4,
scale_pos=10,
scale_neg=40,
):
batch_size = labels.size(0)
visual_norm = F.normalize(visual_embed, p=2, dim=1)
textual_norm = F.normalize(textual_embed, p=2, dim=1)
similarity = torch.matmul(visual_norm, textual_norm.t())
labels_ = (
labels.expand(batch_size, batch_size)
.eq(labels.expand(batch_size, batch_size).t())
.float()
)
pos_inds = labels_ == 1
neg_inds = labels_ == 0
loss_pos = torch.log(1 + torch.exp(-scale_pos * (similarity[pos_inds] - alpha)))
loss_neg = torch.log(1 + torch.exp(scale_neg * (similarity[neg_inds] - beta)))
loss = (loss_pos.sum() + loss_neg.sum()) * 2.0
if mixture:
margin = alpha - beta
        # clone so the hard-negative pass sees the original similarities
        tmp = similarity.clone()
        tmp[neg_inds] = 1
        hard_v_pos, _ = torch.min(tmp, dim=1)
        hard_t_pos, _ = torch.min(tmp, dim=0)
        tmp = similarity.clone()
        tmp[pos_inds] = 0
        hard_v_neg, _ = torch.max(tmp, dim=1)
        hard_t_neg, _ = torch.max(tmp, dim=0)
# y = torch.ones_like(hard_v_neg)
# loss_v_dist = F.margin_ranking_loss(hard_v_neg, hard_v_pos, y, margin=margin, reduction="sum")
# loss_t_dist = F.margin_ranking_loss(hard_t_neg, hard_t_pos, y, margin=margin, reduction="sum")
v_dist = hard_v_pos - hard_v_neg
t_dist = hard_t_pos - hard_t_neg
loss_v_dist = torch.log(1 + torch.exp(margin - v_dist))
loss_t_dist = torch.log(1 + torch.exp(margin - t_dist))
loss = loss + loss_t_dist.sum() + loss_v_dist.sum()
loss /= batch_size
return loss
def global_align_loss_from_sim(
similarity,
labels,
alpha=0.6,
beta=0.4,
scale_pos=10,
scale_neg=40,
):
batch_size = labels.size(0)
labels_ = (
labels.expand(batch_size, batch_size)
.eq(labels.expand(batch_size, batch_size).t())
.float()
)
pos_inds = labels_ == 1
neg_inds = labels_ == 0
loss_pos = torch.log(1 + torch.exp(-scale_pos * (similarity[pos_inds] - alpha)))
loss_neg = torch.log(1 + torch.exp(scale_neg * (similarity[neg_inds] - beta)))
loss = (loss_pos.sum() + loss_neg.sum()) * 2.0
loss /= batch_size
return loss
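# Hedged smoke test (random values, shapes only):
#   sim = torch.randn(8, 8)
#   ids = torch.randint(0, 4, (8,))
#   loss = global_align_loss_from_sim(sim, ids)  # scalar tensor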
def local_align_no_sampling_loss(
part_embed,
attr_embed,
labels,
part_masks,
attr_masks,
num_parts=5,
alpha=0.6,
beta=0.4,
scale_pos=10,
scale_neg=40,
):
batch_size = labels.size(0)
part_embed = F.normalize(part_embed, p=2, dim=2)
attr_embed = F.normalize(attr_embed, p=2, dim=2)
labels_ = labels.expand(batch_size, batch_size).eq(
labels.expand(batch_size, batch_size).t()
)
pos_inds = labels_ == 1
neg_inds = labels_ == 0
local_loss = 0.0
for i in range(num_parts):
filter_inds = torch.ones_like(labels_)
filter_inds[~attr_masks[:, i], :] = 0
filter_inds[:, ~part_masks[:, i]] = 0
filter_pos_inds = filter_inds & pos_inds
filter_neg_inds = filter_inds & neg_inds
local_similarity = torch.matmul(attr_embed[i], part_embed[i].t())
loss_pos = torch.log(
1 + torch.exp(-scale_pos * (local_similarity[filter_pos_inds] - alpha))
)
loss_neg = torch.log(
1 + torch.exp(scale_neg * (local_similarity[filter_neg_inds] - beta))
)
local_loss += (loss_pos.sum() + loss_neg.sum()) * 2.0
return local_loss / batch_size / num_parts
def local_align_loss(
part_embed,
attribute_embed,
labels,
part_masks,
attr_masks,
num_parts=5,
alpha=0.6,
beta=0.4,
scale_pos=10,
scale_neg=40,
topK=8,
):
batch_size = labels.size(0)
part_embed = F.normalize(part_embed, p=2, dim=2)
attribute_embed = F.normalize(attribute_embed, p=2, dim=2)
labels_ = labels.expand(batch_size, batch_size).eq(
labels.expand(batch_size, batch_size).t()
)
losses = 0
for i in range(num_parts):
part_mask = part_masks[:, i]
attr_mask = attr_masks[:, i]
similarity = torch.matmul(part_embed[i], attribute_embed[i].t())
rank1 = torch.argsort(similarity, dim=1, descending=True)
rank2 = torch.argsort(similarity.t(), dim=1, descending=True)
loss = 0
for j in range(batch_size):
if part_mask[j] == 0:
continue
pred = similarity[j, attr_mask]
# k-reciprocal sample
label = labels_[j, :].float()
            forward_k_idx = rank1[j, :topK]
            backward_k_idx = rank2[forward_k_idx, :topK]
            sample_pos_idx = torch.nonzero(backward_k_idx == j)[:, 0]
sample_pos_idx = torch.unique(forward_k_idx[sample_pos_idx])
label[sample_pos_idx] = 1
label = label[attr_mask]
pos_inds = torch.nonzero(label == 1).squeeze(1)
neg_inds = torch.nonzero(label == 0).squeeze(1)
if pos_inds.numel() > 0:
loss_pos = torch.log(
1 + torch.exp(-scale_pos * (pred[pos_inds] - alpha))
)
loss += loss_pos.sum()
if neg_inds.numel() > 0:
loss_neg = torch.log(1 + torch.exp(scale_neg * (pred[neg_inds] - beta)))
loss += loss_neg.sum()
if attr_mask[j] == 0:
continue
pred = similarity[part_mask, j]
# k-reciprocal sample
label = labels_[j, :].float()
            forward_k_idx = rank2[j, :topK]
            backward_k_idx = rank1[forward_k_idx, :topK]
            sample_pos_idx = torch.nonzero(backward_k_idx == j)[:, 0]
sample_pos_idx = torch.unique(forward_k_idx[sample_pos_idx])
label[sample_pos_idx] = 1
label = label[part_mask]
pos_inds = torch.nonzero(label == 1).squeeze(1)
neg_inds = torch.nonzero(label == 0).squeeze(1)
if pos_inds.numel() > 0:
loss_pos = torch.log(
1 + torch.exp(-scale_pos * (pred[pos_inds] - alpha))
)
loss += loss_pos.sum()
if neg_inds.numel() > 0:
loss_neg = torch.log(1 + torch.exp(scale_neg * (pred[neg_inds] - beta)))
loss += loss_neg.sum()
loss /= batch_size
losses += loss
losses /= num_parts
return losses
# ===========================================================================
# Source: CCNU-DigitalLibrary/CCNU-DigitalLibrary :: MCM-HC/lib/models/losses/align_loss.py
# ===========================================================================
import pygame
import sys
from space_objects import *
from tools import *
pygame.init()
infoObject = pygame.display.Info()
W_SIZE = WIDTH, HEIGHT = (infoObject.current_w, infoObject.current_h)
H_SIZE = H_WIDTH, H_HEIGHT = WIDTH // 2, HEIGHT // 2
screen = pygame.display.set_mode(W_SIZE, pygame.FULLSCREEN)
clock = pygame.time.Clock()
FPS = 60
rotate_speed = 500
length = 10
radius = 1 / 100
sun = Object(
screen,
radius * 40000,
"data/sun.png",
rotate_speed / 3600,
"Sun"
)
mercury = MovingObject(
screen,
radius * 2439,
"data/mercury.png",
rotate_speed / 80,
"Mercury",
length * 70,
rotate_speed / 88,
sun,
)
venus = MovingObject(
screen,
radius * 6051,
"data/venus.png",
rotate_speed / 80,
"Venus",
length * 108,
rotate_speed / 224,
sun,
)
earth = MovingObject(
screen,
radius * 6371,
"data/earth.png",
rotate_speed / 365,
"Earth",
length * 151,
rotate_speed / 365,
sun,
)
mars = MovingObject(
screen,
radius * 3389,
"data/mars.png",
rotate_speed / 70,
"Mars",
length * 250,
rotate_speed / 687,
sun,
)
jupiter = MovingObject(
screen,
radius * 40000,
"data/jupiter.png",
rotate_speed / 70,
"Jupiter",
length * 741,
rotate_speed / 4329,
sun,
)
saturn = MovingObject(
screen,
radius * 30000,
"data/saturn.png",
rotate_speed / 70,
"Saturn",
length * 1464,
rotate_speed / 10768,
sun,
)
uranus = MovingObject(
screen,
radius * 21000,
"data/uranus.png",
rotate_speed / 70,
"Uranus",
length * 2938,
rotate_speed / 30660,
sun,
)
neptune = MovingObject(
screen,
radius * 20000,
"data/neptune.png",
rotate_speed / 70,
"Neptune",
length * 4473,
rotate_speed / 59860,
sun,
)
moon = MovingObject(
screen,
radius * 1737,
"data/moon.png",
rotate_speed / 20,
"Moon",
length * 40,
rotate_speed / 30,
earth,
)
objects = Objects((H_WIDTH, H_HEIGHT), sun, mercury, venus, earth, mars, jupiter, saturn, uranus, neptune, moon)
mouse_pos = mx, my = 0, 0
is_drag = False
scale_factor = 1.1
class Panel:
def __init__(self, screen, width, objects):
self.screen = screen
self.width = width
self.screen_size = self.screen.get_size()
self.objects = objects
self.image = pygame.Surface((width, screen.get_height()))
self.image.set_alpha(170)
self.half_button_background = pygame.Surface((15, 100))
self.half_button_background.set_alpha(170)
pygame.draw.rect(
self.half_button_background, (1, 1, 1), (0, 1, 14, 98), 0, -1, -1, 5, -1, 5
)
self.half_button_background.set_colorkey((0, 0, 0))
self.button_background = pygame.Surface((30, 100))
self.button_background.set_alpha(170)
pygame.draw.rect(self.button_background, (1, 1, 1), (1, 1, 28, 98), 0, 5)
self.button_background.set_colorkey((0, 0, 0))
self.buttons = list()
for i, obj in enumerate(self.objects.objects):
button = TextButton(screen, obj.name, (20, i * 40 + 200))
self.buttons.append(button)
self.is_opened = False
self.draw_trajectory_button = TextButton(
self.screen, "draw trajectory", (20, 30)
)
self.speed_label = pygame.font.Font(None, 32).render("speed", True, (200,) * 3)
self.speed_slider = Slider(self.screen, (self.width // 2, 140), (210, 15))
self.speed_slider.set_value(1 / 1.5)
self.exit_button = TextButton(
self.screen, "exit", (20, self.screen_size[1] - 30)
)
image = pygame.Surface((30, 100))
image.set_colorkey((0, 0, 0))
image_pressed = image.copy()
points = ((10, 30), (22, 50), (10, 70))
pygame.draw.polygon(image, (200,) * 3, points)
pygame.draw.polygon(image_pressed, (240,) * 3, points)
rect_values = ((1, 1, 28, 98), 2, 5)
pygame.draw.rect(image, (200,) * 3, *rect_values)
pygame.draw.rect(image_pressed, (240,) * 3, *rect_values)
self.open_button = Button(
screen, image, image_pressed, (15, self.screen_size[1] // 2), True
)
image = pygame.Surface((30, 100))
image.set_colorkey((0, 0, 0))
image_pressed = image.copy()
points = ((20, 30), (8, 50), (20, 70))
pygame.draw.polygon(image, (200,) * 3, points)
pygame.draw.polygon(image_pressed, (240,) * 3, points)
pygame.draw.rect(image, (200,) * 3, *rect_values)
pygame.draw.rect(image_pressed, (240,) * 3, *rect_values)
self.close_button = Button(
screen, image, image_pressed, (self.width, self.screen_size[1] // 2), True
)
def update(self, mouse_pos, clicked):
change_visibility = False
speed = False
is_exit = False
if self.is_opened:
surf = blur(self.get_sub_surf(), 15)
surf.blit(self.image, (0, 0))
self.screen.blit(surf, (0, 0))
self.screen.blit(
self.half_button_background, (self.width, self.screen_size[1] // 2 - 50)
)
for i, button in enumerate(self.buttons):
button.update(mouse_pos, clicked)
if button.triggered():
self.objects.set_main_object(i)
self.screen.blit(self.speed_label, (20, 100))
self.speed_slider.update(clicked, mouse_pos)
speed = self.speed_slider.get_value()
self.draw_trajectory_button.update(mouse_pos, clicked)
if self.draw_trajectory_button.triggered():
change_visibility = True
self.close_button.update(mouse_pos, clicked)
if self.close_button.triggered():
self.is_opened = False
self.exit_button.update(mouse_pos, clicked)
if self.exit_button.triggered():
is_exit = True
pygame.draw.line(
self.screen,
(200,) * 3,
(self.width, 0),
(self.width, self.screen_size[1] // 2 - 50),
)
pygame.draw.line(
self.screen,
(200,) * 3,
(self.width, self.screen_size[1] // 2 + 49),
(self.width, self.screen_size[1]),
)
else:
self.screen.blit(self.button_background, (0, self.screen_size[1] // 2 - 50))
self.open_button.update(mouse_pos, clicked)
if self.open_button.triggered():
self.is_opened = True
return change_visibility, speed, is_exit
def mouse_in_panel(self, mouse_pos):
return self.is_opened and mouse_pos[0] < self.width
def get_sub_surf(self):
sub = self.screen.subsurface((0, 0, self.width, self.screen_size[1]))
return sub
panel = Panel(screen, 250, objects)
while True:
screen.fill((0, 0, 0))
mouse_pos = mx, my = pygame.mouse.get_pos()
if is_drag:
y_movement = prev_mouse_pos[1] - my
x_movement = prev_mouse_pos[0] - mx
objects.move_camera(x_movement, y_movement)
prev_mouse_pos = mx, my
clicked = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_1:
objects.camera.set_offsets((H_WIDTH, H_HEIGHT))
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
clicked = True
if event.button == 4:
objects.scale(scale_factor)
if event.button == 5:
objects.scale(1 / scale_factor)
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
panel.speed_slider.release()
is_drag = False
if clicked and not panel.mouse_in_panel(mouse_pos):
is_drag = True
objects.update()
change_visibility, speed, is_exit = panel.update(mouse_pos, clicked)
if change_visibility:
objects.change_trajectory_visible()
if speed:
objects.set_speed(speed * 1.5)
if is_exit:
pygame.quit()
sys.exit()
pygame.display.update()
clock.tick(FPS)
| Programmer-Anchous/Solar-system-model | main.py | main.py | py | 8,409 | python | en | code | 0 | github-code | 6 | 12836912861 |
import sys
from typing import Optional
import PySide6
from PySide6 import QtWidgets
from qt_material import QtStyleTools, list_themes
from safebox.gui.widgets import cycle_generator, CreatorWidget
class MainWindow(QtWidgets.QMainWindow, QtStyleTools):
def __init__(self, parent: Optional[PySide6.QtWidgets.QWidget] = ...,
flags: PySide6.QtCore.Qt.WindowFlags = ...) -> None:
super().__init__()
self.themes = cycle_generator(list_themes())
self.apply_stylesheet(self, "dark_teal.xml")
self.setCentralWidget(CreatorWidget(parent=self))
def change_theme(self):
self.apply_stylesheet(self, next(self.themes))
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
main_window = MainWindow()
main_window.show()
sys.exit(app.exec())
| pouralijan/SafeBox | safebox/gui/safebox_creator_main_window.py | safebox_creator_main_window.py | py | 829 | python | en | code | 2 | github-code | 6 | 36388156115 |
from typing import Union
import psutil
def get_cpu_temp() -> Union[float, None]:
temperature_file_path = "/sys/class/thermal/thermal_zone0/temp"
try:
raw_temp = None
with open(temperature_file_path) as f:
raw_temp = f.readline().strip("\n")
return float(raw_temp) / 1000
except (FileNotFoundError, TypeError, ValueError) as e:
print(e)
print("Could not read CPU temperature")
return None
def get_cpu_count() -> int:
return psutil.cpu_count()
def get_cpu_percent(interval: Union[float, None]) -> list[float]:
return psutil.cpu_percent(interval=interval, percpu=True)
def get_cpu_usage(interval: Union[float, None]) -> dict:
return {
"count": get_cpu_count(),
"percent": get_cpu_percent(interval),
"temp": get_cpu_temp(),
}
def get_mem_usage() -> dict:
mem_usage = psutil.virtual_memory()
return {
"total": mem_usage.total,
"used": mem_usage.used,
"available": mem_usage.available,
"percent": mem_usage.percent,
}
def get_disk_usage() -> dict:
disk_usage = psutil.disk_usage("/")
return {
"total": disk_usage.total,
"used": disk_usage.used,
"available": disk_usage.free,
"percent": disk_usage.percent,
}
def get_pids() -> list[int]:
return psutil.pids()
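if __name__ == "__main__":
# minimal smoke test (illustrative, not part of the original module):
# sample CPU load over 0.1 s and dump all diagnostics
print(get_cpu_usage(0.1))
print(get_mem_usage())
print(get_disk_usage())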
| noahtigner/homelab | api/diagnostics/retrieval.py | retrieval.py | py | 1,365 | python | en | code | 0 | github-code | 6 | 911107140 |
from collections import Counter
import re
from xml.etree import ElementTree
from trapdoor import TrapdoorProgram, Message, run_command
exclusion_rules = [
re.compile(r'^[\s]*raise NotImplementedError')
]
def excluded_from_coverage(source_line):
"""Determine whether the given line should be excluded from the coverage analysis."""
for rule in exclusion_rules:
if rule.match(source_line) is not None:
return True
return False
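# e.g. excluded_from_coverage("    raise NotImplementedError") -> True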
class CoverageTrapdoorProgram(TrapdoorProgram):
"""A trapdoor program running nosetests with coverage analysis."""
def __init__(self):
"""Initialize the CoverageTrapdoorProgram."""
TrapdoorProgram.__init__(self, 'coverage')
def add_argparse_arguments(self, parser):
"""Add command-line arguments to the argument parser.
Parameters
----------
parser : argparse.ArgumentParser
The parser to which arguments must be added.
"""
TrapdoorProgram.add_argparse_arguments(self, parser)
parser.add_argument('--nproc', type=int, default=1,
help='Number of parallel processes when running nose. '
'[default=%(default)s]')
def get_stats(self, config, args):
"""Run tests using nosetests with coverage analysis.
Parameters
----------
config : dict
The dictionary loaded from ``trapdoor.cfg``.
args : argparse.Namespace
The result of parsing the command line arguments.
Returns
-------
counter : collections.Counter
Counts of the number of messages of a specific type in a certain file.
messages : Set([]) of strings
All errors encountered in the current branch.
"""
# Get version
command = ['nosetests', '--version']
print('USING :', run_command(command, verbose=False)[0].strip())
command = ['coverage', '--version']
print('USING :', run_command(command, verbose=False)[0].split('\n')[0])
# Results will be stored in the following variables
counter = Counter()
messages = set([])
# Run fast unit tests with nosetests, with coverage
command = ['nosetests', '-v', '-A', 'not (slow or rt)',
'--with-coverage',
'--cover-erase',
'--cover-branches',
'--cover-package=%s' % ','.join(config['py_packages'])] + \
config['py_directories']
if args.nproc > 1:
command.extend(['--processes=%s' % args.nproc,
'--process-timeout=600'])
output = run_command(command)[0]
lines = [line.strip() for line in output.split('\n')]
# Parse the output of the unit tests
for line in lines:
if len(line) == 0:
break
elif line.endswith('FAIL'):
counter['unit_tests_failed'] += 1
messages.add(Message(None, None, None, 'nosetests ' + line))
elif line.endswith('ERROR'):
counter['unit_tests_error'] += 1
messages.add(Message(None, None, None, 'nosetests ' + line))
# Run the coverage program for a full report. This separate call is needed
# since coverage-4.1.
fn_coverage = '%s/coverage.xml' % self.qaworkdir
command = ['coverage', 'xml', '-o', fn_coverage,
'--omit=%s' % ','.join(config['py_test_files'])]
output = run_command(command)[0]
# Parse coverage xml output
et = ElementTree.parse(fn_coverage)
for class_tag in et.getroot().iter('class'):
filename = class_tag.attrib['filename']
with open(filename) as fsource:
source_lines = fsource.readlines()
for line_tag in class_tag.iter('line'):
if line_tag.attrib['hits'] == '0':
line = int(line_tag.attrib['number'])
if excluded_from_coverage(source_lines[line-1]):
continue
branch_ends = line_tag.get('missing-branches')
if branch_ends is not None:
for branch_end in branch_ends.split(','):
if branch_end.isdigit():
delta = int(branch_end) - line
msg = Message(filename, line, None,
'Missed branch to line %+i' % (delta))
else:
msg = Message(filename, line, None,
'Missed branch to %s' % branch_end)
messages.add(msg)
counter[filename] += 1
messages.add(Message(filename, line, None, 'Missed line'))
counter[filename] += 1
return counter, messages
if __name__ == '__main__':
CoverageTrapdoorProgram().main()
| theochem/horton | tools/qa/trapdoor_coverage.py | trapdoor_coverage.py | py | 5,168 | python | en | code | 83 | github-code | 6 | 16293536002 |
import os
from time import sleep
import boto3
from botocore.exceptions import ClientError
IAM_R = boto3.resource('iam')
IAM_C = boto3.client('iam')
LAMBDA_C = boto3.client('lambda')
EVENTS_C = boto3.client('events')
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def setup_iam_role():
"""
Setup the AWS IAM role
"""
try:
IAM_C.get_role(RoleName='aws_monitor')
except ClientError as err:
if err.response['Error']['Code'] == 'NoSuchEntity':
with open('{}/lambda_role_policy.json'.format(BASE_DIR), 'r') as policy_file:
policy = policy_file.read()
IAM_C.create_role(RoleName='aws_monitor',
AssumeRolePolicyDocument=policy)
else:
raise err
for pol in ['ec2_access', 'sns_access', 'cloudwatch_access', 'rds_access',
'as_access', 's3_access']:
with open('{}/{}.json'.format(BASE_DIR, pol), 'r') as policy_file:
policy = policy_file.read()
IAM_C.put_role_policy(RoleName='aws_monitor',
PolicyName=pol,
PolicyDocument=policy)
try:
IAM_C.get_instance_profile(InstanceProfileName='aws_monitor')
except ClientError as err:
if err.response['Error']['Code'] == 'NoSuchEntity':
IAM_C.create_instance_profile(InstanceProfileName='aws_monitor')
else:
raise err
role_instance_profiles = IAM_C.list_instance_profiles_for_role(RoleName='aws_monitor')
add_instance_profile = True
for profile in role_instance_profiles['InstanceProfiles']:
if profile['InstanceProfileName'] == 'aws_monitor':
add_instance_profile = False
if add_instance_profile:
IAM_C.add_role_to_instance_profile(InstanceProfileName='aws_monitor',
RoleName='aws_monitor')
return IAM_R.Role('aws_monitor')
def configure_vpc():
"""
Provide vpc/sg for lambda function
"""
vpc_config = {}
subnet_id = os.environ.get('SUBNET_ID')
security_group_id = os.environ.get('SECURITY_GROUP_ID')
if subnet_id:
vpc_config['SubnetIds'] = [subnet_id]
if security_group_id:
vpc_config['SecurityGroupIds'] = [security_group_id]
return vpc_config
def upload_lambda_function():
"""
main function of deployment.
Ensure IAM is setup. Upload zip. Create function.
"""
vpc_config = configure_vpc()
role = setup_iam_role()
rule = EVENTS_C.put_rule(Name='DiscoverInstancesSchedule',
ScheduleExpression=os.environ.get('DISCOVERY_SCHEDULE'),
State='ENABLED',
Description='Run the instance discovery')
with open('{}/../aws_monitor.zip'.format(BASE_DIR), 'rb') as zip_file:
zip_bytes = zip_file.read()
fcn = {}
try:
LAMBDA_C.get_function(FunctionName='DiscoverInstances')
fcn = LAMBDA_C.update_function_code(FunctionName='DiscoverInstances',
ZipFile=zip_bytes,
Publish=True)
except ClientError as err:
if err.response['Error']['Code'] == 'ResourceNotFoundException':
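# brief pause, presumably to let the freshly created IAM role propagate
# before it is referenced by create_function below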
sleep(10)
fcn = LAMBDA_C.create_function(FunctionName='DiscoverInstances',
Code={'ZipFile': zip_bytes},
Runtime='python2.7',
Role=role.arn,
Handler='zumoco.main',
Timeout=300,
Description="Discover, add cloudwatch alerts",
MemorySize=128,
VpcConfig=vpc_config)
else:
raise err
try:
LAMBDA_C.add_permission(FunctionName='DiscoverInstances',
StatementId='DiscoverInstancesSchedule-Permission',
Action='lambda:InvokeFunction',
Principal='events.amazonaws.com',
SourceArn=rule['RuleArn'])
except ClientError as err:
if err.response['Error']['Code'] != 'ResourceConflictException':
# ignore conflicts if the rule exists
raise err
EVENTS_C.put_targets(Rule='DiscoverInstancesSchedule',
Targets=[{'Id': 'DiscoverInstances-schedule',
'Arn': fcn['FunctionArn'],}])
upload_lambda_function()
| zulily/aws_monitor | deployscripts/setup_lambda.py | setup_lambda.py | py | 4,849 | python | en | code | 3 | github-code | 6 | 44757415813 |
from telegram.ext import *
from telegram import *
import openai
openai.api_key = "YOUR OPENAI API KEY" # Enter your OpenAI Secret Key.
telegram_token = "YOUR TELEGRAM BOT TOKEN" # Enter your Telegram Bot Token.
conversation=[{"role": "system", "content": "You are a helpful assistant."}] # Defined the assistant role.
def main():
app = Application.builder().token(telegram_token).build() # Created a Telegram app.
app.add_handler(CommandHandler('start', start_command)) # Added start_command function.
app.add_handler(CommandHandler('restart', restart_command)) # Added restart_command function.
app.add_handler(MessageHandler(filters.TEXT, handle_message)) # Added handle_message function.
app.add_error_handler(error) # Added error_handle function.
app.run_polling() # Started the app.
def reply(lastMessage): # ChatGPT conversation function
if(len(conversation)>=7): # The conversation has a limit. Only assistant role, last 3 messages and last 3 replies are saved. Other messages and replies are deleted.
conversation.pop(1)
conversation.append({"role": "user", "content": lastMessage}) # Added last request.
completion = openai.ChatCompletion.create( # Sent completion request and received ChatGPT message.
model="gpt-3.5-turbo", # Used "gpt-3.5-turbo" model. "gpt-4" can also be used.
messages=conversation, # Sent all conversation.
max_tokens=1000 # Defined as max 1000 tokens. Changeable value.
)
if(len(conversation)>7): # The conversation has a limit. Only assistant role, last 3 messages and last 3 replies are saved. Other messages and replies are deleted.
conversation.pop(1)
lastReply = completion.choices[0].message['content'] # Read last reply from completion.
conversation.append({"role": "assistant", "content": lastReply}) # Added last reply.
return lastReply # Returned last reply.
def replyStartRestart():
global conversation
conversation.clear()
conversation=[{"role": "system", "content": "You are a helpful assistant."}] # Defined the assistant role.
return 'Hello! How can I help you?'
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
text: str = update.message.text # Read last Telegram message from user.
await update.message.reply_text(reply(text)) # Sent ChatGPT message to Telegram user.
async def start_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
await update.message.reply_text(replyStartRestart()) # Replied to Telegram user.
async def restart_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
await update.message.reply_text(replyStartRestart()) # Replied to Telegram user.
async def error(update: Update, context: ContextTypes.DEFAULT_TYPE):
print(f'Error: {context.error}') # Printed error log
await update.message.reply_text('Please wait! If I don\'t respond within a few minutes, try again') # Replied to Telegram user
if __name__ == "__main__":
main()
| muhammetharundemir/Telegram-ChatGPT | telegramChatGPT.py | telegramChatGPT.py | py | 3,703 | python | en | code | 1 | github-code | 6 | 42488414261 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 12:34:08 2018
@author: michal
"""
import networkx as nx
from networkx.algorithms.isomorphism import GraphMatcher
from networkx.readwrite.json_graph import node_link_data
from os.path import isdir, join, isfile
from os import mkdir
import json
from glob import glob
import shutil
class anionMatcher(GraphMatcher):
def semantic_feasibility(self, G1_node, G2_node):
if "charged" in self.G1.node[G1_node]:
if self.G1.node[G1_node]["charged"] != self.G2.node[G2_node]["charged"]:
return False
elif self.G2.node[G2_node]["charged"]:
return False
if self.G2.node[G2_node]["terminating"]:
if len(list(self.G2.neighbors(G2_node))) != len(list(self.G1.neighbors(G1_node))):
return False
if ( self.G2.node[G2_node]["element"] == "X" or "X" in self.G2.node[G2_node]["aliases"] ) and not self.G1.node[G1_node]["element"] in self.G2.node[G2_node]["notAliases"] :
return True
return self.G1.node[G1_node]["element"] == self.G2.node[G2_node]["element"] or self.G1.node[G1_node]["element"] in self.G2.node[G2_node]["aliases"]
def addAtribute( graph, nodes, key ):
if isinstance(nodes, list):
for nodeId in nodes:
graph.node[nodeId][key] = True
else:
graph.node[nodes][key] = True
def saveAnion( atoms, bonds, charged, name, priority , terminating = [], aliases = {},
notAliases = {}, geometry = {}, fullIsomorphism = False, nameMapping = {} , nonUniqueCharge = [] ,
properties2measure = [] ):
graph = nx.Graph()
nonUniqueCharge = set(nonUniqueCharge)
for i, el in enumerate(atoms):
graph.add_node(i, element = el, terminating = False, bonded = False, aliases = [], charged = False )
graph.add_edges_from(bonds)
addAtribute( graph, terminating, "terminating")
for nodeId in aliases:
graph.node[nodeId]["aliases"] = aliases[nodeId]
for nodeId in notAliases:
graph.node[nodeId]["notAliases"] = notAliases[nodeId]
if not geometry:
graph.graph["geometry"]= "no restrictions"
else:
graph.graph["geometry"]= geometry
graph.graph["fullIsomorphism"] = fullIsomorphism
graph.graph["name"] = name
graph.graph["nameMapping"] = nameMapping
graph.graph["priority"] = priority
graph.graph["properties2measure"] = properties2measure
fileName = str(priority)+"_"+name
if isinstance( charged , list ) :
uniqueCharges = set(charged)
for nodeId in charged:
nuc = uniqueCharges | nonUniqueCharge
nuc.remove(nodeId)
saveAnionJson(graph, fileName, nodeId, nuc)
else:
saveAnionJson(graph, fileName, charged, nonUniqueCharge)
def saveAnionJson( graph, fileName, charged, nonUniqueCharges = []):
mainElement = graph.node[charged]["element"]
elements = [ mainElement ]
if "aliases" in graph.node[charged]:
elements += graph.node[charged]["aliases"]
graph.node[charged]["aliases"] = []
graph.node[charged]["charged"] = True
graph.graph["charged"] = charged
graph.graph["otherCharges"] = list(nonUniqueCharges)
oldName = ""
nameMapping = False
if "X" in graph.graph["name"] and charged in graph.graph["nameMapping"]:
oldName = graph.graph["name"]
nameMapping = graph.graph["nameMapping"][charged]
graph.graph["nameMapping"].pop(charged)
for element in elements:
graph.node[charged]["element"] = element
if nameMapping:
graph.graph["name"] = oldName.replace( nameMapping , element)
dir_path = join("anion_templates", element)
if not isdir( dir_path ):
mkdir( dir_path )
path2save = getUniquePath( dir_path , fileName)
output = open(path2save, 'w')
json.dump(node_link_data(graph), output )
output.close()
graph.node[charged]["charged"] = False
def getUniquePath(dirPath, fileName):
path2save = join( dirPath , fileName+".json")
if not isfile(path2save):
return path2save
similarFiles = glob( join(dirPath, fileName)+"_*.json" )
if not similarFiles:
return join( dirPath , fileName+"_0.json")
maxNumber = -1
for s in similarFiles:
newNumber = int( s[:-5].split("_")[-1] )
maxNumber = max(maxNumber, newNumber)
return join( dirPath , fileName+"_"+str(maxNumber+1)+".json")
def clearAnionTemplates():
if isdir("anion_templates"):
shutil.rmtree("anion_templates")
mkdir("anion_templates")
if __name__ == "__main__":
clearAnionTemplates()
# atoms, bonds, charged, name, priority, terminating = [], aliases = {}, notAliases = {}, geometry = {}, fullIsomorphism = False
#OXYGEN
# #RCOOH
saveAnion( [ "C" , "C", "O", "O" ], [ (0,1), (1,2), (1,3) ],
2, "RCOO", 0, terminating = [1, 2, 3],
geometry = "planar", nonUniqueCharge = [3],
properties2measure= [ { "kind" : "plane", "atoms" : [ 1, 2, 3 ] , "directionalVector" : [ { "atom" : 1 }, { "center" : [ 2, 3] } ] } ] )
#ClO, BrO, IO,
saveAnion([ "CL", "O" ], [(0, 1)],
1, "XO", 5, fullIsomorphism = True,
aliases = { 0 : [ "BR", "I" ] }, nameMapping = { 0 : "X"}, properties2measure= [ { "kind" : "line", "atoms" : [ 0, 1 ] } ] )
#NO2, ClO2, BRO2,
saveAnion([ "N", "O" , "O" ], [(0, 1), (0,2)],
1, "XO2", 10, fullIsomorphism = True, aliases = { 0 : ["CL", "BR"]}, nameMapping = { 0 : "X" },
nonUniqueCharge=[2], properties2measure= [ { "kind" : "plane" , "atoms" : [ 0, 1, 2 ],
"directionalVector" : [ { "atom" : 0 }, { "center" : [ 1, 2] } ] } ])
#NO3, CO3, PO3, SO3, AsO3, BO3, ClO3, BRO3
saveAnion( ["N", "O", "O", "O"], [(0,1), (0,2), (0,3)],
1, "XO3", 15, fullIsomorphism = True,
aliases = { 0 : [ "C", "P", "B", "S", "AS", "CL", "BR", "I" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge= [2, 3],
properties2measure= [ { "kind" : "plane", "atoms" : [ 1, 2, 3 ] , "directionalVector" : [ { "closest" : [1, 2, 3] }, { "center" : [ 1, 2, 3] } ]} ])
#PO4, SO4, AsO4, ClO4, BRO4
saveAnion( ["P", "O", "O", "O", "O"], [(0,1), (0,2), (0,3), (0, 4)],
1, "XO4", 20, fullIsomorphism = True,
aliases = { 0 : [ "S", "AS", "CL", "BR", "I" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge=[2, 3, 4])
# Ph-OH
# saveAnion( [ "C" , "C" , "C" , "C" , "C", "C" , "O" ], [(0,1),(1,2), (2,3), (3,4),( 4, 5), (5, 0), (5,6)],
# 6, "PhOH", 25, terminating = [6], geometry = "planarWithSubstituents")
# #RBOOH
saveAnion( [ "X" , "B", "O", "O" ], [ (0,1), (1,2), (1,3) ],
2, "RBOO", 30, terminating = [2, 3],
notAliases = {0 : [ "O" ] },
nonUniqueCharge=[3],
properties2measure= [ { "kind" : "plane" , "atoms" : [ 1, 2, 3 ] , "directionalVector" : [ { "atom" : 1 }, { "center" : [ 2, 3] } ]} ])
#COO
saveAnion( [ "C", "O", "O" ], [ (0,1), (0,2) ],
1, "COO", 35, terminating = [1, 2], nonUniqueCharge=[2],
properties2measure= [ { "kind" : "plane", "atoms" : [ 0, 1, 2 ], "directionalVector" : [ { "atom" : 0 }, { "center" : [ 1, 2] } ] } ] )
#R-PO4, R-SO4, R-AsO4
saveAnion( ["P", "O", "O", "O", "O"], [(0,1), (0,2), (0,3), (0, 4)],
1, "R-XO4", 45, terminating = [ 1, 2, 3 ] ,
aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge=[2,3])
#R2-PO4, R2-SO4, R2-AsO4
saveAnion( ["P", "O", "O", "O", "O"], [(0,1), (0,2), (0,3), (0, 4)],
1, "R2-XO4", 47, terminating = [ 1, 2 ] ,
aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge=[2])
#R3-PO4, R3-SO4, R3-AsO4
# saveAnion( ["P", "O", "O", "O", "O"], [(0,1), (0,2), (0,3), (0, 4)],
# 1, "R2-XO4", 48, terminating = [ 1 ] ,
# aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" } )
#RAsO3, RPO3, RSO3
saveAnion( ["P", "O", "O", "O", "C"], [(0,1), (0,2), (0,3), (0, 4)],
1, "RXO3", 50, terminating = [1, 2, 3] ,
aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" },
nonUniqueCharge=[2,3])
#R2AsO2, R2PO2, RRSO2
# saveAnion( ["P", "O", "O", "C", "C"], [(0,1), (0,2), (0,3), (0, 4)],
# 1, "R2XO2", 55, terminating = [1, 2],
# aliases = { 0 : [ "S", "AS" ] }, nameMapping = { 0 : "X" } )
#F, CL, BR, I, S
saveAnion( [ "F" ], [], 0, "X", 55, aliases = { 0 : [ "CL", "BR", "I", "S"] },
fullIsomorphism = True, nameMapping = { 0 : "X"})
#SCN
saveAnion([ "S", "C" , "N" ], [(0, 1), (0,2)],
[0,1,2], "SCN", 62, fullIsomorphism = True, properties2measure= [ { "kind" : "line", "atoms" : [ 0, 2 ] } ])
# #RSH
# saveAnion( [ "X" , "S" ], [ (0,1)],
# 1, "RSH", 60, terminating = [1],
# notAliases = {0 : [ "O" ] } )
#
#N3
saveAnion([ "N", "N" , "N" ], [(0, 1), (0,2)],
[0,1], "N3", 70, fullIsomorphism = True, nonUniqueCharge=[2], properties2measure= [ { "kind" : "lineSymmetric", "atoms" : [ 0, 2 ] } ])
#CN
saveAnion([ "C" , "N" ], [(0, 1)],
[0,1], "CN", 75, fullIsomorphism = True, properties2measure= [ { "kind" : "line", "atoms" : [ 0, 1 ] } ])
# #RSSR
# saveAnion( [ "X" , "S", "S" ], [ (0,1), (1,2)],
# 1, "RSS", 80 ,
# notAliases = {0 : [ "O" ] } )
| chemiczny/PDB_supramolecular_search | anionTemplateCreator.py | anionTemplateCreator.py | py | 10,065 | python | en | code | 1 | github-code | 6 | 17514206848 |
from unittest import TestCase
from unittest.mock import MagicMock, patch
from src.utils.callgrind import extract_function_calls, extract_hotspots
class TestExtractHotspots(TestCase):
def test(self):
callgrind = MagicMock()
count = 2
# Fake CallgrindParser internals
event1 = MagicMock()
event1.name = "Time ratio"
event2 = MagicMock()
event2.name = "Samples"
fun1 = MagicMock()
fun1.events = MagicMock()
fun1.events.items.return_value = [(event1, 0.1), (event2, 1)]
fun2 = MagicMock()
fun2.events = MagicMock()
fun2.events.items.return_value = [(event1, 0.2), (event2, 1)]
fun3 = MagicMock()
fun3.events = MagicMock()
fun3.events.items.return_value = [(event1, 0.3), (event2, 1)]
fake_functions = {"name1": fun1,
"name2": fun2,
"name3": fun3, }
profile = MagicMock()
profile.functions = fake_functions
parser = MagicMock()
parser.parse.return_value = profile
with patch("src.utils.callgrind.open"):
with patch("src.utils.callgrind.gprof2dot.CallgrindParser") as mock_parser:
mock_parser.return_value = parser
result = extract_hotspots(callgrind, count)
self.assertEqual(len(result), 2)
self.assertTrue("name3" in result[0] and "30.0%" in result[0])
self.assertTrue("name2" in result[1] and "20.0%" in result[1])
class TestExtractFunctionCalls(TestCase):
def test(self):
callgrind = MagicMock()
# Fake CallgrindParser internals
event = MagicMock()
event.name = "Samples"
call1 = MagicMock()
call1.callee_id = "name2"
call1.events = {event: 5}
call2 = MagicMock()
call2.callee_id = "name3"
call2.events = {event: 6}
caller = MagicMock()
caller.calls = {"key": call1, "another_key": call2}
callee1 = MagicMock()
callee1.name = "function2"
callee2 = MagicMock()
callee2.name = "function3"
fake_functions = {"name1": caller,
"name2": callee1,
"name3": callee2}
profile = MagicMock()
profile.functions = fake_functions
parser = MagicMock()
parser.parse.return_value = profile
with patch("src.utils.callgrind.open"):
with patch("src.utils.callgrind.gprof2dot.CallgrindParser") as mock_parser:
mock_parser.return_value = parser
result = extract_function_calls(callgrind, "name1")
self.assertEqual(result["function2"], 5)
self.assertEqual(result["function3"], 6)
| haggj/bachelors-thesis | container/src/test/test_callgrind.py | test_callgrind.py | py | 2,772 | python | en | code | 0 | github-code | 6 | 75319095866 |
import random
from pypinyin import lazy_pinyin
from nonebot import require, on_command, on_message, on_keyword, on_shell_command, on_request
from nonebot.rule import command
from nonebot.permission import SUPERUSER
from nonebot.typing import T_State,T_Handler
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.message import Message, MessageSegment
from nonebot.adapters.cqhttp.event import MessageEvent, GroupMessageEvent, GroupRequestEvent
from nonebot.adapters.cqhttp.permission import PRIVATE, GROUP, GROUP_ADMIN, GROUP_OWNER
from nonebot.adapters.cqhttp.utils import unescape, escape
from src.utils.util import gen_parser, call_api_delay
from .data_source import get_group_id_list, gen_qq, gentracker
__doc__ = '''to -[ugsabf] [args,]
-u: private chat; args are: target QQ id, message
-g: group chat; args are: group QQ id, message
-s: multiple targets; args are: QQ id, QQ id, QQ id, message
-a: target every group; args are: message
-b: only effective together with -a, target every group except some; args are: QQ id, message
-f: end the current session
'''
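# Example (illustrative): "to -g 123456789 hello" sends "hello" to group
# 123456789; "to -a hi" broadcasts "hi" to every group the bot is in.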
to_cmd = on_command('to', aliases={'转发'}, permission=SUPERUSER)
to_parser = gen_parser()
to_parser.add_argument('-u', dest='to_user', action='store_true')
to_parser.add_argument('-g', dest='to_group', action='store_true')
to_parser.add_argument('-s', dest='several', action='store_true')
to_parser.add_argument('-a', dest='all_group', action='store_true')
to_parser.add_argument('-b', dest='ban', action='store_true')
@to_cmd.handle()
async def first_receive(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.message).strip()
if msg:
state['args'] = msg
@to_cmd.got('args', __doc__)
async def _(bot: Bot, state: T_State):
args = state['args'].split(None, 1)
if args[0] == state['_prefix']['raw_command']:
args = args[1].split(None, 1)
try:
cmd = to_parser.parse_args([args[0]])
except Exception as e:
await to_cmd.finish('Failed to parse command: ' + str(e))
return
if args[0] == args[-1]:
await to_cmd.reject('Command is missing [args,]\n' + __doc__)
param = args[-1]
if cmd.help:
await to_cmd.reject(__doc__)
elif cmd.finish:
await to_cmd.finish('This command session has ended')
if cmd.several:
qq_list = list(gen_qq(param))
if cmd.to_user:
for qq in qq_list[:-1]:
await bot.send_private_msg(user_id=qq, message=unescape(qq_list[-1]))
elif cmd.to_group:
for qq in qq_list[:-1]:
await bot.send_group_msg(group_id=qq, message=unescape(qq_list[-1]))
elif cmd.all_group:
group_list = await get_group_id_list(bot)
if cmd.ban:
qq_list = list(gen_qq(param))
for qq in (i for i in group_list if i not in qq_list):
await bot.send_group_msg(group_id=qq, message=unescape(qq_list[-1]))
else:
for qq in group_list:
await bot.send_group_msg(group_id=qq, message=unescape(param))
elif cmd.to_user:
params = param.split(None, 1)
if params[0] == params[-1]:
await to_cmd.reject('Missing the message to send\n' + __doc__)
else:
await bot.send_private_msg(user_id=params[0], message=unescape(params[1]))
elif cmd.to_group:
params = param.split(None, 1)
if params[0] == params[-1]:
await to_cmd.reject('Missing the message to send\n' + __doc__)
else:
await bot.send_group_msg(group_id=params[0], message=unescape(params[1]))
await to_cmd.finish(Message('[CQ:face,id=124]'))
request_cmd = on_request()
@request_cmd.handle()
async def request(bot: Bot, event: GroupRequestEvent):
f_group = event.group_id
f_user = event.user_id
if event.sub_type == 'invite':
result = request_cmd.new("message",
permission=SUPERUSER | PRIVATE,
temp=True,
priority=5)
await bot.send_private_msg(user_id=912871833,
message=f'New group invitation:\ngroup: {f_group}\ninviter: {f_user}')
request_event = event
@result.handle()
async def _(bot: Bot, event: MessageEvent):
msg = 'reject'
if str(event.message) in ['yes', '1']:
msg = 'approve'
await request_event.approve(bot)
else:
await request_event.reject(bot)
await result.finish(msg)
# def is_sublist(a, b):
# if a == []: return True
# if b == []: return False
# return b[:len(a)] == a or is_sublist(a, b[1:])
def sublist(a, b):
if a == []: return (0, 0)
if b == []: return False
for i in range(len(b)):
if not b[:len(a)] == a:
b = b[1:]
else:
return (i, i + len(a))
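# e.g. sublist(['b', 'c'], ['a', 'b', 'c', 'd']) -> (1, 3): the start index and
# the exclusive end index of the first match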
def pinyin2api(s):
api_pinyin = lazy_pinyin(s)
cmd_map = {
'send': ['sen', 'de'],
'set': ['sai', 'te'],
'get': ['gei', 'te'],
'delete': ['di', 'lei', 'te'],
'group': ['ge', 'rou', 'pu'],
'private': ['pu', 'rui', 'wei', 'te'],
'msg': ['mai', 'shei', 'ji'],
'ban': ['ban'],
'whole': ['hou'],
'friend': ['fu', 'run', 'de'],
'id': ['ai', 'di'],
'user': ['you', 're'],
}
for k, v in cmd_map.items():
r = sublist(v, api_pinyin)
if r:
del api_pinyin[r[0]:r[1]]
api_pinyin.insert(r[0], k)
return '_'.join(api_pinyin)
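# e.g. the syllables ['sen', 'de', 'mai', 'shei', 'ji'] (a phonetic rendering
# of "send msg") collapse to the API name 'send_msg'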
def isall_chinese(s):
return all(u'\u4e00' <= ch <= u'\u9fa5' for ch in s)
call_api = on_command('api', aliases={'call', '希司提姆靠鲁', '希斯提姆靠鲁', '希司提姆考鲁', '希斯提姆考鲁'}, permission=SUPERUSER)
@call_api.handle()
async def _(bot: Bot, event: MessageEvent):
msg = str(event.message).split()
param = event.dict()
if msg:
api, *params = msg
if isall_chinese(api):
api = pinyin2api(api)
# _input = {}
# for i in params:
# k, v = i.split('=', 1)
# _input[pinyin2api(k) if isall_chinese(k) else k] = v
param.update(dict(i.split('=', 1) for i in params))
# param.update(_input)
# if MessageSegment.reply in event.message:
# ...
if param.get('message'):
param['message'] = Message(unescape(str(param.get('message'))))
res = await bot.call_api(api, **param)
if res:
await call_api.finish(message=Message(str(res)))
iptracker = on_command('iptracker', permission=SUPERUSER)
@iptracker.handle()
async def _(bot: Bot, event: MessageEvent):
type_ = str(event.message)
randnum = random.random()
await bot.send(event, message=str(randnum))
await iptracker.finish(message=Message(gentracker(randnum, type=int(type_) if type_ else 0)))
show_me = on_keyword({'闪光弹', '照明弹'}, permission=SUPERUSER)
@show_me.handle()
async def _(bot: Bot, event: GroupMessageEvent):
if 'reply' in event.raw_message:
msg = event.reply.raw_message.replace(',type=flash', '')
await bot.send(event, Message(msg))
# scheduler = require('nonebot_plugin_apscheduler').scheduler
#
# @scheduler.scheduled_job('cron', hour='*', id='ti_gang')
# async def ti_gang():
# await call_api_delay('send_group_msg',
# random.randint(1, 100),
# group_id=476328543,
# message=Message('[CQ:image,file=d01d3883a38999345e536012aeb18c76.image,url=https://c2cpicdw.qpic.cn/offpic_new/912871833//912871833-2997538805-D01D3883A38999345E536012AEB18C76/0?term=3]'))
# temp = """<section style="text-align: center; line-height: 1.75em; margin-left: 8px; margin-right: 8px;">
# <section style="margin-right: auto;margin-left: auto;width: 100%;vertical-align: middle;display: inline-block;line-height: 0;box-sizing: border-box;">
# <section style="display: inline-block;width: 100%;vertical-align: top;background-position: 0% 0%;background-repeat: no-repeat;background-size: 100%;background-attachment: scroll;background-image: url("{url2}");-webkit-tap-highlight-color: transparent;">
# <svg enable-background="new 0 0 1080 435" space="preserve"
# style="display: inline-block;width: 100%;vertical-align: top;background-position: 0% 0%;background-repeat: no-repeat;background-size: 100%;background-attachment: scroll;background-image: url("{url1}");-webkit-tap-highlight-color:transparent;"
# version="1.1" viewBox="0 0 1080 435" x="0px" xlink="http://www.w3.org/1999/xlink" xml=""
# xmlns="http://www.w3.org/2000/svg" y="0px">
# <animate attributeName="opacity" begin="click" dur="0.5s" values="1;0" fill="freeze" restart="never"></animate>
# </svg>
# </section>
# </section>
# </section>"""
# merge_cmd = on_command('代码')
# @merge_cmd.handle()
# async def _(bot: Bot, event: MessageEvent):
# try:
# url1, url2 = event.message.__str__().split()
# await bot.send(event, message=temp.format(url1=url1, url2=url2))
# except:
# print('error')
# request_cmd = on_message(permission=PRIVATE)
#
#
# @request_cmd.handle()
# async def request(bot: Bot, event: MessageEvent):
# # receive the private message
# f_user = event.user_id
# if True:
# # create a temporary matcher
# request_cmd.new("message",
# handlers=[decide],
# permission=SUPERUSER,
# temp=True)
#
# await bot.send_private_msg(user_id=912871833,
# message=f'{f_user}:\n{event.raw_message}')
#
#
# async def decide(bot: Bot, event: MessageEvent):
# # the temporary matcher responds to the event
# await request_cmd.send(message=event.message)
| Joenothing-lst/qbot | src/plugins/admin/__init__.py | __init__.py | py | 9,965 | python | en | code | 0 | github-code | 6 | 26109711840 |
"""
The customers resource is a representation of the customer accounts.
All the REST API calls to the Customer or the Address Database are housed here.
Customers Service with Swagger and Flask RESTX
Paths:
------
GET / - Displays a UI for Selenium testing
GET /customers - Lists all of the Customers
GET /customers/{customer_id} - Reads the Customer with given Customer ID
POST /customers - Creates a new Customer in the database
PUT /customers/{customer_id} - Updates a Customer with given customer ID
DELETE /customers/{customer_id} - Deletes a Customer with given ID
GET /customers/{customer_id}/addresses - Lists all the addresses of the customer with given ID
GET /customers/{customer_id}/addresses/{address_id} - Reads the Address with given ID of the customer with given ID
POST /customers/{customer_id}/addresses - Creates a new address of the customer with given Customer ID
PUT /customers/{customer_id}/addresses/{address_id} - Updates the address with given address ID of customer with given ID
DELETE /customers/{customer_id}/addresses/{address_id} - Deletes the address with given address ID of customer with given ID
PUT /customers/{customer_id}/activate - Activates a Customer with given Customer ID
PUT /customers/{customer_id}/deactivate - Deactivates a Customer with given Customer ID
"""
# pylint: disable=cyclic-import
from flask import jsonify
# from flask_restx import Api, Resource
from flask_restx import fields, reqparse, inputs, Resource
from service.common import status # HTTP Status Codes
from service.models import Customer, Address
# Import Flask application
from . import app, api
create_address_model = api.model('Address', {
'street': fields.String(required=True, description='The address street'),
'city': fields.String(required=True, description='The address city'),
'state': fields.String(required=True, description='The address state'),
'country': fields.String(description='The address country'),
'pin_code': fields.String(required=True, description='The address pin code'),
'customer_id': fields.Integer(required=True, description='The customer ID corresponding to the Address')
})
address_model = api.inherit(
'AddressModel',
create_address_model,
{
'address_id': fields.Integer(readOnly=True, description='The unique id assigned internally by service')
}
)
create_customer_model = api.model('Customer', {
'first_name': fields.String(required=True, description='The First Name of the customer'),
'last_name': fields.String(required=True, description='The Last Name of the customer'),
'password': fields.String(required=True, description='The password of the customer'),
'email': fields.String(required=True, description='The email of the customer'),
'active': fields.Boolean(required=True, description='The active/inactive state of the customer'),
'addresses': fields.List(fields.Nested(address_model,
required=False,
description='List of addresses that the customer has'))
})
customer_model = api.inherit(
'CustomerModel',
create_customer_model,
{
'id': fields.Integer(readOnly=True, description='The unique id assigned internally by service'),
}
)
# query string arguments
customer_args = reqparse.RequestParser()
customer_args.add_argument('first_name', type=str, location='args', required=False, help='Find Customers by First Name')
customer_args.add_argument('last_name', type=str, location='args', required=False, help='Find Customers by Last Name')
customer_args.add_argument('email', type=str, location='args', required=False, help='Find Customers by Email')
customer_args.add_argument('active', type=inputs.boolean, location='args', required=False, help='Is the Customer active?')
customer_args.add_argument('street', type=str, location='args', required=False, help='Find Customers by Address street')
customer_args.add_argument('city', type=str, location='args', required=False, help='Find Customers by Address city')
customer_args.add_argument('state', type=str, location='args', required=False, help='Find Customers by Address state')
customer_args.add_argument('country', type=str, location='args', required=False, help='Find Customers by Address country')
customer_args.add_argument('pin_code', type=str, location='args', required=False, help='Find Customers by Address Pin Code')
############################################################
# Health Endpoint
############################################################
@app.route("/health")
def health():
"""Health Status"""
return jsonify(dict(status="OK")), status.HTTP_200_OK
######################################################################
# GET INDEX
######################################################################
@app.route('/')
def index():
"""Root URL response"""
app.logger.info("Request for Root URL")
return app.send_static_file('index.html')
######################################################################
# R E S T A P I E N D P O I N T S
######################################################################
######################################################################
# PATH: /customers/{customer_id}
######################################################################
@api.route('/customers/<int:customer_id>')
@api.param('customer_id', 'The Customer identifier')
class CustomerResource(Resource):
"""
CustomerResource class
Allows the manipulation of a single customer
GET /customers/{customer_id} - Returns a Customer with the customer_id
PUT /customers/{customer_id} - Updates a Customer with the customer_id
DELETE /customers/{customer_id} - Deletes a Customer with the customer_id
"""
# ------------------------------------------------------------------
# RETRIEVE A CUSTOMER
# ------------------------------------------------------------------
@api.doc('get_customers')
@api.response(404, 'Customer not found')
@api.marshal_with(customer_model)
def get(self, customer_id):
"""
Retrieve a single Customer
This endpoint will return a Customer based on its ID.
"""
app.logger.info("Request to Retrieve a Customer with id [%s]", customer_id)
customer = Customer.find(customer_id)
if not customer:
abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
app.logger.info('Returning customer: %s', customer.id)
return customer.serialize(), status.HTTP_200_OK
# ------------------------------------------------------------------
# UPDATE AN EXISTING CUSTOMER
# ------------------------------------------------------------------
@api.doc('update_customers')
@api.response(404, 'Customer not found')
@api.response(400, 'The posted Customer data was not valid')
@api.expect(customer_model)
@api.marshal_with(customer_model)
def put(self, customer_id):
"""
Update a Customer
This endpoint will update a Customer based on the body that is posted.
"""
app.logger.info('Request to Update a Customer with id [%s]', customer_id)
customer = Customer.find(customer_id)
original_password = None
if not customer:
abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
else:
original_password = customer.password
app.logger.debug('Payload = %s', api.payload)
data = api.payload
customer.deserialize(data)
customer.id = customer_id
customer.update(original_password)
app.logger.info('Customer with ID [%s] updated.', customer.id)
return customer.serialize(), status.HTTP_200_OK
# ------------------------------------------------------------------
# DELETE A CUSTOMER
# ------------------------------------------------------------------
@api.doc('delete_customers')
@api.response(204, 'Customer deleted')
def delete(self, customer_id):
"""
Delete a Customer
This endpoint will delete a Customer based on the ID specified in the path.
"""
app.logger.info('Request to Delete a Customer with id [%s]', customer_id)
customer = Customer.find(customer_id)
if customer:
customer.delete()
app.logger.info('Customer with id [%s] was deleted', customer_id)
return '', status.HTTP_204_NO_CONTENT
######################################################################
# PATH: /customers
######################################################################
@api.route('/customers', strict_slashes=False)
class CustomerCollection(Resource):
""" Handles all interactions with collections of Customers """
# ------------------------------------------------------------------
# LIST ALL CUSTOMERS
# ------------------------------------------------------------------
@api.doc('list_customers')
@api.expect(customer_args, validate=True)
@api.marshal_list_with(customer_model)
def get(self):
"""
Lists all of the Customers
This endpoint will list all the customers.
"""
app.logger.info('Request to list customers...')
customers = []
args = customer_args.parse_args()
if args['first_name']:
app.logger.info('Filtering by first name: %s', args['first_name'])
customers = Customer.find_by_first_name(args['first_name'])
elif args['last_name']:
app.logger.info('Filtering by last name: %s', args['last_name'])
customers = Customer.find_by_last_name(args['last_name'])
elif args['active'] is not None:
app.logger.info('Filtering by active state: %s', args['active'])
customers = Customer.find_by_active(args['active'])
elif args['email']:
app.logger.info('Filtering by email: %s', args['email'])
customers = Customer.find_by_email(args['email'])
elif args['street']:
app.logger.info('Filtering by street: %s', args['street'])
customers = Address.find_by_street(args['street'])
elif args['city']:
app.logger.info('Filtering by city: %s', args['city'])
customers = Address.find_by_city(args['city'])
elif args['state']:
app.logger.info('Filtering by state: %s', args['state'])
customers = Address.find_by_state(args['state'])
elif args['country']:
app.logger.info('Filtering by country: %s', args['country'])
customers = Address.find_by_country(args['country'])
elif args['pin_code']:
app.logger.info('Filtering by pin code: %s', args['pin_code'])
customers = Address.find_by_pin_code(args['pin_code'])
else:
app.logger.info('Returning unfiltered list.')
customers = Customer.all()
# app.logger.info('[%s] Customers returned', len(customers))
results = [customer.serialize() for customer in customers]
return results, status.HTTP_200_OK
# ------------------------------------------------------------------
# ADD A NEW CUSTOMER
# ------------------------------------------------------------------
@api.doc('create_customers')
@api.response(400, 'The posted data was not valid')
@api.expect(create_customer_model)
@api.marshal_with(customer_model, code=201)
def post(self):
"""
Creates a Customer
This endpoint will create a Customer based on the data in the body that is posted.
"""
app.logger.info('Request to Create a Customer')
customer = Customer()
app.logger.debug('Payload = %s', api.payload)
customer.deserialize(api.payload)
customer.create()
app.logger.info('Customer with new id [%s] created!', customer.id)
location_url = api.url_for(CustomerResource, customer_id=customer.id, _external=True)
return customer.serialize(), status.HTTP_201_CREATED, {'Location': location_url}
######################################################################
# Activate / Deactivate Customer
######################################################################
######################################################################
# PATH: /customers/{customer_id}/activate
######################################################################
@api.route('/customers/<int:customer_id>/activate')
@api.param('customer_id', 'The Customer identifier')
class ActivateResource(Resource):
""" Activate actions on a Customer """
@api.doc('activate_customers')
@api.response(404, 'Customer not found')
def put(self, customer_id):
"""
Activate a Customer
This endpoint will activate a Customer.
"""
app.logger.info(f'Request to Activate a Customer with ID: {customer_id}')
customer = Customer.find(customer_id)
if not customer:
abort(status.HTTP_404_NOT_FOUND, f'Customer with id [{customer_id}] was not found.')
customer.id = customer_id
customer.active = True
customer.update()
app.logger.info('Customer with id [%s] has been activated!', customer.id)
return customer.serialize(), status.HTTP_200_OK
######################################################################
# PATH: /customers/{customer_id}/deactivate
######################################################################
@api.route('/customers/<int:customer_id>/deactivate')
@api.param('customer_id', 'The Customer identifier')
class DeactivateResource(Resource):
""" Deactivate actions on a Customer """
@api.doc('deactivate_customers')
@api.response(404, 'Customer not found')
def put(self, customer_id):
"""
Deactivate a Customer
This endpoint will deactivate a Customer.
"""
app.logger.info(f'Request to Deactivate a Customer with ID: {customer_id}')
customer = Customer.find(customer_id)
if not customer:
abort(status.HTTP_404_NOT_FOUND, f'Customer with id [{customer_id}] was not found.')
customer.id = customer_id
customer.active = False
customer.update()
app.logger.info('Customer with id [%s] has been deactivated!', customer.id)
return customer.serialize(), status.HTTP_200_OK
######################################################################
# PATH: /customers/{customer_id}/addresses/{address_id}
######################################################################
@api.route('/customers/<int:customer_id>/addresses/<int:address_id>')
@api.param('customer_id', 'The Customer identifier')
@api.param('address_id', 'The Address identifier')
class AddressResource(Resource):
"""
AddressResource class
Allows the manipulation of a single Address
GET /customers/{customer_id}/addresses/{address_id} - Returns an Address with the id
PUT /customers/{customer_id}/addresses/{address_id} - Update an Address with the id
DELETE /customers/{customer_id}/addresses/{address_id} - Deletes an Address with the id
"""
# ------------------------------------------------------------------
# RETRIEVE AN ADDRESS
# ------------------------------------------------------------------
@api.doc('get_addresses')
@api.marshal_with(address_model)
@api.response(404, 'Address not found')
def get(self, address_id, customer_id):
"""
Retrieve an address
This endpoint will return an address from a customer based on its ID.
"""
app.logger.info('Request to retrieve an Address %s from Customer with id: %s', address_id, customer_id)
customer = Customer.find(customer_id)
if not customer:
abort(
status.HTTP_404_NOT_FOUND,
f"Customer with id '{customer_id}' was not found.",
)
address = Address.find(address_id)
if not address or address.customer_id != customer.id:
abort(
status.HTTP_404_NOT_FOUND,
f"Address with id '{address_id}' could not be found for the customer with id {customer.id}.",
)
app.logger.info('Returning address: %s', address.address_id)
return address.serialize(), status.HTTP_200_OK
# ------------------------------------------------------------------
# UPDATE AN EXISTING ADDRESS
# ------------------------------------------------------------------
@api.doc('update_addresses')
@api.response(404, 'Address not found')
@api.expect(address_model)
@api.marshal_with(address_model)
def put(self, address_id, customer_id):
"""
Update an address of a customer
This endpoint will update an Address based on the body that is posted.
"""
app.logger.info('Request to update Address with address_id [%s] and customer_id [%s] ...', address_id, customer_id)
customer = Customer.find(customer_id)
if not customer:
abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
# Find customer address with address_id
addr_to_update = None
for addr in customer.addresses:
if addr.address_id == address_id:
addr_to_update = addr
break
# if not found
if not addr_to_update:
abort(status.HTTP_404_NOT_FOUND, f"Address id '{address_id}' not found for customer '{customer_id}'.")
data = api.payload
addr_to_update.deserialize(data)
addr_to_update.address_id = address_id
addr_to_update.customer_id = customer_id
addr_to_update.update()
app.logger.info('Address with address_id [%s] and customer_id [%s] updated.', address_id, customer.id)
return addr_to_update.serialize(), status.HTTP_200_OK
# ------------------------------------------------------------------
# DELETE AN ADDRESS
# ------------------------------------------------------------------
@api.doc('delete_addresses')
@api.response(204, 'Address deleted')
def delete(self, address_id, customer_id):
"""
Delete an address from a customer
This endpoint will delete an Address based on the ID specified in the path.
"""
app.logger.info('Request to delete address with address_id [%s] and customer_id [%s] ...', address_id, customer_id)
address = Address.find(address_id)
if address and address.customer_id == customer_id:
address.delete()
app.logger.info('Address with ID [%s] and customer ID [%s] delete completed.', address_id, customer_id)
return '', status.HTTP_204_NO_CONTENT
######################################################################
# PATH: /customers/{customer_id}/addresses
######################################################################
@api.route('/customers/<int:customer_id>/addresses', strict_slashes=False)
@api.param('customer_id', 'The Customer identifier')
class AddressCollection(Resource):
""" Handles all interactions with collections of addresses """
# ------------------------------------------------------------------
# LIST ALL ADDRESSES FOR A CUSTOMER
# ------------------------------------------------------------------
@api.doc('list_addresses')
@api.marshal_list_with(address_model)
def get(self, customer_id):
"""
List all of the addresses of a Customer
This endpoint will list all addresses of a Customer.
"""
app.logger.info('Request to list Addresses for Customer with id: %s', customer_id)
customer = Customer.find(customer_id)
if not customer:
abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
results = [address.serialize() for address in customer.addresses]
app.logger.info("Returning %d addresses", len(results))
return results, status.HTTP_200_OK
# ------------------------------------------------------------------
# ADD A NEW ADDRESS FOR A CUSTOMER
# ------------------------------------------------------------------
@api.doc('create_addresses')
@api.response(400, 'The posted data was not valid')
@api.expect(create_address_model)
@api.marshal_with(address_model, code=201)
def post(self, customer_id):
"""
Create an address for a customer
This endpoint will add a new address for a customer.
"""
app.logger.info('Request to create an address for customer with id: %s', customer_id)
customer = Customer.find(customer_id)
if not customer:
abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
# Create an address instance for the customer = customer_id
data = api.payload
address = Address()
address.deserialize(data)
customer.addresses.append(address)
customer.update()
location_url = api.url_for(AddressResource,
customer_id=address.customer_id,
address_id=address.address_id,
_external=True)
app.logger.info('Address with ID [%s] created for Customer: [%s].', address.address_id, customer.id)
return address.serialize(), status.HTTP_201_CREATED, {"Location": location_url}
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def abort(error_code: int, message: str):
"""Logs errors before aborting"""
app.logger.error(message)
api.abort(error_code, message)
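# Example interaction (illustrative; host and port depend on the deployment):
# curl -X POST http://localhost:8080/customers -H "Content-Type: application/json" \
#      -d '{"first_name": "Ada", "last_name": "Lovelace", "password": "s3cret",
#           "email": "ada@example.com", "active": true, "addresses": []}'
# curl "http://localhost:8080/customers?email=ada@example.com"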
| CSCI-GA-2820-SP23-003/customers | service/routes.py | routes.py | py | 21,967 | python | en | code | 3 | github-code | 6 | 33963223285 |
from http import HTTPStatus
from django.test import TestCase, Client
class AboutTests(TestCase):
def setUp(self):
self.guest_client = Client()
def test_about_urls_uses_correct_templates(self):
templates_url_names_quest = {
'/about/author/': 'about/author.html',
'/about/tech/': 'about/tech.html'
}
for address, template in templates_url_names_quest.items():
with self.subTest(address=address):
response = self.guest_client.get(address)
self.assertEqual(
response.status_code,
HTTPStatus.OK
)
self.assertTemplateUsed(response, template)
| Mashabor/hw05_final | yatube/about/tests.py | tests.py | py | 660 | python | en | code | 0 | github-code | 6 | 73675897789 |
import math
import numpy as np
from numpy.linalg import norm
from random import randint
import os
from select import select
os.environ["PYTHONDONTWRITEBYTECODE"]="True"
from servThread import servThread
BUFFER_SIZE = 32
alfa = 1
mi = 0.001
nfeatures = 4
#function that tries to predict y
#note: this is sigmoid(-x.dot(theta)); the minus sign in the update rules below
#compensates, so training is still gradient ascent on the log-likelihood
def h(theta,x):
    return(1/(1+math.e**(x.dot(theta))))
#Training that runs a fixed number of iterations
def nauciIter(X,y,theta0,n=500):
    theta = theta0
    for i in range(n):
        theta = theta - alfa*X.T.dot(y-h(theta,X))  # uses the y parameter (the original 'y1' was undefined here)
    return theta
#function that trains until the parameter change between successive iterations becomes small enough
def nauci(X,y,theta0):
    theta = theta0
    M = len(X)
    while(True):
        oldTheta = theta
        theta = theta - alfa*X.T.dot(y-h(theta,X))/M
        razlika = norm(theta - oldTheta)
        print("Difference between old and new: " + str(razlika))
        if( razlika < mi ):
            break
    return theta
#function that returns the mean and standard deviation of the measured data
def skala(X):
    mean = np.empty(shape=(len(X[0])))
    sigma = np.empty(shape=(len(X[0])))
    for i in range(len(X[0])):
        mean[i] = np.mean(X[:,i])
        sigma[i] = np.std(X[:,i])
        #Special cases
        if (mean[i] == 0) and (sigma[i] == 0):
            sigma[i] = 1
        if sigma[i] == 0:
            sigma[i] = mean[i]
            mean[i] = 0
    return mean,sigma
#function for scaling the data
def skaliraj(X,mean,sigma):
    return np.divide((X -mean),sigma)
#function that reshapes the data differently; unnecessary and unused
def oblikujPodatke(X,y,uzastopni):
    #X1 and y1 hold the new, reshaped training data
    y1=np.array([])
    X1 = np.empty(shape=(0,4*uzastopni+1))
    #Add the positive class y = 1
    indeksi,=np.where(y==1)
    broj = len(indeksi) #number of samples with y = 1
    indeksi2 = np.zeros(shape=(broj,uzastopni),dtype='int32')
    #Store the indices of neighboring samples, to be merged later into the training-data shape
    for i in range(uzastopni):
        indeksi2[:,i] = indeksi-2+i
    for red in indeksi2:
        odabrani = X[red,:].ravel()
        #prepend x0 = 1 to the new data
        mjerenje = np.append([1],odabrani)
        X1 = np.append(X1,[mjerenje],axis=0)
        y1 = np.append(y1,[1])
    #Delete all samples that were taken as the positive class
    X = np.delete(X,indeksi2.ravel(),0)
    #Add the negative class y = 0, using as many samples as the positive class has
    for i in range(broj):
        #For the negative class, take randomly chosen samples from the remainder
        centralniIndeks = randint(uzastopni//2,len(X)-uzastopni//2 - 2)
        indeksi=np.array(range(uzastopni))-uzastopni//2 + centralniIndeks
        odabrani = X[indeksi,:].ravel()
        mjerenje = np.append([1],odabrani)
        X1 = np.append(X1,[mjerenje],axis=0)
        y1 = np.append(y1,[0])
    return X1,y1
#function that returns true if the server resource became ready, false if the timeout elapsed
def resBusy(res,timeout):
    i,_,_ = select([res],[],[],timeout)
    return i
def provjeravaj(server):
    server.socket.listen(1)
    print("Waiting 50 seconds for a connection")
    uspjeh = resBusy(server.socket,50)
    if uspjeh:
        conn, addr = server.socket.accept()
        print("Connection from address: "+str(addr))
        while server.thrRunning:
            #Check whether data arrived over the connection and whether the server should shut down in the meantime
            uspjeh2 = resBusy(conn,1)
            if uspjeh2:
                #Receive data over the network and convert it to a numpy array
                #(np.fromstring with sep= is deprecated in newer NumPy, but kept here)
                data = np.fromstring(conn.recv(BUFFER_SIZE),sep='\t')
                #Drop y, prepend x0, and scale the measurement
                praviData = data[:nfeatures]
                praviData = np.append([1],praviData)
                praviData = skaliraj(praviData,mean,sigma)
                #Print if the probability exceeds 50%
                predvidanje = h(theta, praviData)
                if predvidanje > 0.5:
                    print("Positive: "+ str(predvidanje))
        conn.close()
    else:
        print("No connection.")
    return
#
# Training code
#
#Read the data from file
lista = np.fromfile("podaci",sep="\t")
lista = lista.reshape(-1,nfeatures+1)
#y is the last column
y=lista[:,nfeatures]
m = np.size(y)
#X0 is the first column, all ones
X0 = np.ones(shape=(m,1))
#Full matrix with the training data
X= np.append(X0, lista[:,0:nfeatures], axis=1)
#Initial theta coefficients
theta0 = np.zeros(nfeatures+1)
#Scale all columns for better training convergence
mean,sigma = skala(X)
X = skaliraj(X,mean,sigma)
#Learn the coefficients
theta = nauci(X,y,theta0)
#
# Starting the server
#
TCP_IP = input("Enter the computer's IP address: ")  # input(), not Python 2's raw_input
TCP_PORT = 5005
server = servThread((TCP_IP,TCP_PORT),provjeravaj)
server.start()
inp=""
while inp!="quit":
    inp = input("Type quit to shut down\n")
print("RIP server")
server.thrRunning=False
server.join()
| repo_name: termistotel/microbitML | sub_path: server/learn.py | file_name: learn.py | file_ext: py | file_size_in_byte: 4,883 | program_lang: python | lang: hr | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 25495485263 | text:
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 17:39:39 2020
@author: satya
"""
import pandas as pd
import numpy as np               # needed below for np.zeros_like / np.unique
import matplotlib.pyplot as plt  # needed below for plt.show()
import scipy.cluster.hierarchy as sch
from sklearn.cluster import DBSCAN
data=pd.read_csv('cars_clus.csv')
featureset = data[['engine_s', 'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap', 'mpg']]
featureset=featureset.dropna()
featureset=featureset.replace('$null$',0)
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
featureset=sc.fit_transform(featureset)
from sklearn.cluster import AgglomerativeClustering
dendogram=sch.dendrogram(sch.linkage(featureset,method='ward'))
plt.show()
hc=AgglomerativeClustering(n_clusters=5,affinity='euclidean',linkage='ward')
y=hc.fit_predict(featureset)
df=DBSCAN(eps=0.3,min_samples=2)
y=df.fit(featureset)
y=y.labels_
sample_cores=np.zeros_like(y)
sample_cores[df.core_sample_indices_]=True
np.unique(y)
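# Hedged follow-up (not in the original): summarize the DBSCAN result above;
# label -1 marks noise points.
from collections import Counter
print(Counter(y))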
| repo_name: Satyake/Deep-Learning | sub_path: DBSCAN and HC.py | file_name: DBSCAN and HC.py | file_ext: py | file_size_in_byte: 928 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |
| seq_id: 2736213027 | text:
from keras.optimizers import Nadam, Optimizer, clip_norm  # clip_norm is used below; it lives in keras.optimizers in Keras 2.x
from keras import backend as K
class Nadam_entropy(Nadam):
def __init__(self, temperature=0.1, **kwargs):
self.temperature = temperature
super(Nadam_entropy, self).__init__(**kwargs)
def get_gradients(self, loss, params):
grads = K.gradients(loss, params)
probs = grads
for i in range(len(params)):
grads[i] /= params[i] + K.epsilon()
#probs = grads / (params + K.epsilon())
probs = K.abs(probs)
probs /= K.sum(K.flatten(probs)) + K.epsilon()
Ts = -self.temperature*K.sum(K.flatten(probs * K.log(probs)))
delta_s = K.gradients(Ts, params)
for i in range(len(grads)):
grads[i] = grads[i] + delta_s[i]
# grads = grads + delta_s
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
return grads
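A hedged usage sketch for the optimizer above; the tiny model is a placeholder,
not from the original repo.

# Hypothetical usage -- any compiled Keras (2.x) model would do.
from keras.models import Sequential
from keras.layers import Dense
model = Sequential([Dense(10, activation='softmax', input_shape=(784,))])
model.compile(optimizer=Nadam_entropy(temperature=0.1),
              loss='categorical_crossentropy')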
| repo_name: twoev/APEMEN | sub_path: utils/optimisers.py | file_name: optimisers.py | file_ext: py | file_size_in_byte: 1,081 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 38456424440 | text:
import re
import os
import torch
import base64
import uvicorn
import numpy as np
from io import BytesIO
from PIL import Image
from typing import Union
from fastapi import FastAPI, File, Form
from pydantic import BaseModel
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo
def base64_to_image(base64_str, image_path=None):
base64_data = re.sub('^data:image/.+;base64,', '', base64_str)
byte_data = base64.b64decode(base64_data)
image_data = BytesIO(byte_data)
img = Image.open(image_data)
if image_path:
img.save(image_path)
return img
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
    if clip:
        # clip_coords is not defined in this file (it comes from the YOLOv5-style
        # utils this helper was adapted from); only the clip=False path is used here.
        clip_coords(x, (h - eps, w - eps))  # warning: inplace clip
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center
y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center
y[:, 2] = (x[:, 2] - x[:, 0]) / w # width
y[:, 3] = (x[:, 3] - x[:, 1]) / h # height
return y
def predict2json(image,caption):
image = np.array(image)[:,:,::-1]
predictions = glip_demo.compute_prediction(image, caption)
glip_demo.confidence_threshold = 0.5
top_predictions = glip_demo._post_process_fixed_thresh(predictions)
boxs = top_predictions.bbox
index = top_predictions.get_field("labels")
probs = top_predictions.get_field("scores")
h,w,_ = image.shape
xywhs = xyxy2xywhn(x=boxs,w=w,h=h)
res = {}
for c, (i,loc,prob) in enumerate(zip(index,xywhs,probs)):
x,y,w,h = loc
res[c] = {}
res[c]['index'] = int(i) -1
res[c]['label'] = glip_demo.entities[int(i) -1]
res[c]['prob'] = float(prob)
res[c]['x'] = float(x)
res[c]['y'] = float(y)
res[c]['w'] = float(w)
res[c]['h'] = float(h)
return res
config_file = "configs/pretrain/glip_Swin_T_O365_GoldG.yaml"
weight_file = "MODEL/glip_tiny_model_o365_goldg_cc_sbu.pth"
cfg.local_rank = 0
cfg.num_gpus = 1
cfg.merge_from_file(config_file)
cfg.merge_from_list(["MODEL.WEIGHT", weight_file])
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
glip_demo = GLIPDemo(
cfg,
min_image_size=800,
confidence_threshold=0.5,
show_mask_heatmaps=False
)
app = FastAPI()
class Item(BaseModel):
name: str
price: float
is_offer: Union[bool, None] = None
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.post("/upload")
def upload(base64_str: str = Form(...), caption: str = Form(...)):
try:
image = base64_to_image(base64_str)
res = predict2json(image,caption)
except Exception as e:
return {"message": f"{e}"}
return res
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=5000)
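A hedged client sketch for the /upload endpoint above; the URL and image path are
placeholders.

# Hypothetical client -- encodes a local image and posts it with a caption.
import base64
import requests
with open("test.jpg", "rb") as f:
    b64 = base64.b64encode(f.read()).decode()
resp = requests.post("http://localhost:5000/upload",
                     data={"base64_str": b64, "caption": "person. car."})
print(resp.json())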
| repo_name: bensonbs/GLIP | sub_path: main.py | file_name: main.py | file_ext: py | file_size_in_byte: 2,914 | program_lang: python | lang: en | doc_type: code | stars: 5 | dataset: github-code | pt: 6 |
| seq_id: 29401120526 | text:
import json
import os
from googleapiclient.discovery import build
class Channel:
    """Class for a YouTube channel."""
    def __init__(self, channel_id: str) -> None:
        """An instance is initialized with the channel id. All other data is fetched via the API."""
        self.__channel_id = channel_id
        api_key: str = os.getenv('API_KEY')
        youtube = build('youtube', 'v3', developerKey=api_key)
        channel = youtube.channels().list(id=self.__channel_id, part='snippet,statistics').execute()
        self.title = channel['items'][0]['snippet']['title']
        self.description = channel['items'][0]['snippet']['description']
        self.url = 'https://www.youtube.com/channel/' + self.__channel_id
        self.subscribers = channel['items'][0]['statistics']['subscriberCount']
        self.video_count = channel['items'][0]['statistics']['videoCount']
        self.views = channel['items'][0]['statistics']['viewCount']
    def __str__(self):
        return f"{self.title} ({self.url})"
    def __add__(self, other):
        """Addition: sum of the two channels' subscriber counts."""
        return int(self.subscribers) + int(other.subscribers)
    def __sub__(self, other):
        """Subtraction: difference of the subscriber counts."""
        return int(self.subscribers) - int(other.subscribers)
    def __lt__(self, other):
        """Less-than comparison by subscriber count."""
        return int(self.subscribers) < int(other.subscribers)
    def __le__(self, other):
        """Less-than-or-equal comparison by subscriber count."""
        return int(self.subscribers) <= int(other.subscribers)
    def __gt__(self, other):
        """Greater-than comparison by subscriber count."""
        return int(self.subscribers) > int(other.subscribers)
    def __ge__(self, other):
        """Greater-than-or-equal comparison by subscriber count."""
        return int(self.subscribers) >= int(other.subscribers)
    def __eq__(self, other):
        """Equality comparison by subscriber count."""
        return int(self.subscribers) == int(other.subscribers)
    @property
    def channel_id(self):
        return self.__channel_id
    def print_info(self) -> None:
        """Prints channel information to the console."""
        api_key: str = os.getenv('API_KEY')
        youtube = build('youtube', 'v3', developerKey=api_key)
        channel = youtube.channels().list(id=self.__channel_id, part='snippet,statistics').execute()
        print(json.dumps(channel, indent=2, ensure_ascii=False))
    @classmethod
    def get_service(cls):
        """Returns a client object for working with the YouTube API."""
        api_key: str = os.getenv('API_KEY')
        youtube = build('youtube', 'v3', developerKey=api_key)
        return youtube
    def to_json(self, name_json):
        """Saves the Channel instance's attribute values to a file."""
        attribute_dict = {'channel_id': self.__channel_id,
                          'title': self.title,
                          'description': self.description,
                          'url': self.url,
                          'subscribers': self.subscribers,
                          'video_count': self.video_count,
                          'views': self.views,
                          }
        with open(name_json, "w", encoding="utf-8") as file:
            file.write(json.dumps(attribute_dict))
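A hedged usage sketch for the class above; the channel ids are placeholders and
API_KEY must be set in the environment.

# Hypothetical usage -- the ids below are placeholders.
a = Channel('UC_placeholder_id_1')
b = Channel('UC_placeholder_id_2')
print(a)                # "<title> (<url>)"
print(a + b, a > b)     # combined subscriber count; comparison by subscribers
a.to_json('channel.json')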
| repo_name: AnastasiaLykova/youtube-analytics-project | sub_path: src/channel.py | file_name: channel.py | file_ext: py | file_size_in_byte: 4,052 | program_lang: python | lang: ru | doc_type: code | stars: null | dataset: github-code | pt: 6 |
| seq_id: 41912482635 | text:
import math
from datetime import datetime, timezone
from app.core.common import cf
from app.core.wcommon import wcf
from app.db.database import couch
class WSearch():
def __init__(self) -> None:
self.SEARCH_TAGS = [
"archived",
"author",
"certainty",
"colour",
"incident",
"msgType",
"phenomenon",
"severity",
"source",
"status",
# "uuid"
]
pass
def getCList(self, query):
# input: query dict
# output: list
# query example
# query = {'db':'warnings',
# 'design': 'metcap',
# 'view':'phenomenon',
# 'key': 'lightning'
# }
self.query = query
self.qs = wcf.getQueryString(self.query)
self.result = []
response, status = couch.get(self.qs)
if len(response.json().keys()) >= 0:
if('rows' in response.json().keys()):
for doc in response.json()['rows']:
self.result.append(doc['id'])
return self.result
else:
return response.json()
    def _getViewKeys(self, db, view):
        # Shared helper: return the sorted, de-duplicated keys of a design-doc view.
        qs = wcf.getQueryString({'db': db, 'design': 'metcap', 'view': view})
        response, status = couch.get(qs)
        result = [doc['key'] for doc in response.json()['rows']]
        return sorted(set(result))
    def getWarningsArchivedList(self):
        # output: list of CAP warning archive statuses in database
        return self._getViewKeys('warnings', 'archived')
    def getWarningsAreaDescList(self):
        # output: list of CAP warning area descriptions in database
        return self._getViewKeys('warnings', 'areaDesc')
    def getWarningsAuthorList(self):
        # output: list of CAP warning authors in database
        return self._getViewKeys('warnings', 'author')
    def getWarningsCertaintyList(self):
        # output: list of CAP warning certainties in database
        return self._getViewKeys('warnings', 'certainty')
    def getWarningsColourList(self):
        # output: list of CAP warning colours in database
        return self._getViewKeys('warnings', 'colour')
    def getWarningsCustomAreaList(self):
        # output: list of CAP warning custom areas in database
        return self._getViewKeys('warnings', 'customArea')
    def getWarningsMsgTypeList(self):
        # output: list of CAP warning message types in database
        return self._getViewKeys('warnings', 'msgType')
    def getIncidentsNameList(self):
        # output: list of CAP incident names in database
        return self._getViewKeys('incidents', 'name')
    def getIncidentsDescriptionList(self):
        # output: list of CAP incident descriptions in database
        return self._getViewKeys('incidents', 'description')
    def getWarningsPhenomenonList(self):
        # output: list of CAP warning phenomena in database
        return self._getViewKeys('warnings', 'phenomenon')
    def getWarningsSeverityList(self):
        # output: list of CAP warning severities in database
        return self._getViewKeys('warnings', 'severity')
    def getWarningsStatusList(self):
        # output: list of CAP warning statuses in database
        return self._getViewKeys('warnings', 'status')
def getWarningsByIncidentId(self, id):
# input: string id
# output: cap id
# query example
# '0000000008'
# Incident IDs and names must be unique
#
self.query = {'db': 'warnings',
'design': 'metcap',
'view': 'incident',
'key': id
}
qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"'
# qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"&include_docs=true'
response, status = couch.get(qs)
result = []
if(not response.json()['rows']):
return
else:
for doc in response.json()['rows']:
result.append(doc['id'])
# result.append(doc['doc'])
return result
# def getWarningsByIncidentDescription(self, description):
# # input: string description
# # output: cap
# # query example
# # 'description'
# # Incident IDs and names must be unique
# #
# incidentId = self.getIncidentByDescription(description)
# # test{
# print(incidentId)
# # test}
# self.query = {'db': 'warnings',
# 'design': 'metcap',
# 'view': 'incident',
# 'key': incidentId
# }
# qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"'
# # qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"&include_docs=true'
# response, status = couch.get(qs)
# result = []
# if(not response.json()['rows']):
# return result
# else:
# for doc in response.json()['rows']:
# result.append(doc['id'])
# # result.append(doc['doc'])
# return result
# def getIncidentByDescription(self, description):
# # input: description string
# # output: incident id
# self.query = {'db': 'incidents',
# 'design': 'metcap',
# 'view': 'description',
# 'key': description
# }
# qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"'
# # qs = wcf.getQueryString(self.query)
# response, status = couch.get(qs)
# result = []
# if(not response.json()['rows']):
# return
# else:
# for doc in response.json()['rows']:
# result.append(doc['id'])
# return str(result[0])
def getWarningsByIncidentName(self, name):
# input: string name
# output: cap
# query example
# 'Muninn'
# Incident IDs and names must be unique
#
incidentId = self.getIncidentByName(name)
self.query = {'db': 'warnings',
'design': 'metcap',
'view': 'incident',
'key': incidentId
}
qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"'
# qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"&include_docs=true'
response, status = couch.get(qs)
result = []
if(not response.json()['rows']):
return result
else:
for doc in response.json()['rows']:
result.append(doc['id'])
# result.append(doc['doc'])
return result
def getIncidentByName(self, name):
# input: name string
# output: incident id
self.query = {'db': 'incidents',
'design': 'metcap',
'view': 'name',
'key': name
}
qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"'
# qs = wcf.getQueryString(self.query)
response, status = couch.get(qs)
result = []
if(not response.json()['rows']):
return
else:
for doc in response.json()['rows']:
result.append(doc['id'])
return str(result[0])
def getWarningsInPeriod(self, onset, expires):
# input: time stamps from warning database ("onset", "expires")
# output: list of valid CAP messages in the time interval
# self.result = []
self.dt = datetime.now(timezone.utc)
self.utc_time = self.dt.replace(tzinfo=timezone.utc)
self.utc_timestamp = math.floor(self.utc_time.timestamp())
self.lq = {'db': 'warnings',
'design': 'metcap',
'view': 'epochToOnset',
'startkey': wcf.getCapEpoch(onset),
'endkey': self.utc_timestamp
}
self.rq = {'db': 'warnings',
'design': 'metcap',
'view': 'epochToExpires',
'startkey': 0,
'endkey': wcf.getCapEpoch(expires)
}
        la = self.getCList(self.lq)  # use the instance rather than the module-level 'ws' singleton
        ra = self.getCList(self.rq)
return list(set(la).intersection(ra))
def getWarningsTemporal(self, query):
self.query = query
return self.getWarningsInPeriod(self.query['onset'],self.query['expires'])
def debug(self, query):
self.query = query
return self.getWarningsInPeriod(self.query['onset'],self.query['expires'])
# return list(self.query.keys())
def capPolygonSearch(self, query):
self.query = query
self.iDList = wcf.findMatchingBounds(cf.getBounds(self.query))
self.q = {'db': 'warnings',
'design': 'metcap',
'view': 'polygon',
'keys': self.iDList
}
self.qs = wcf.getQueryString(self.q)
self.result = []
response, status = couch.get(self.qs)
if len(response.json().keys()) >= 0:
if('rows' in response.json().keys()):
for doc in response.json()['rows']:
if 'cutoff' in self.query.keys():
if (cf.polyOverlaps(wcf.getPolygon(doc['value']), cf.getQueryPoly(self.query), cutoff=self.query['cutoff'])):
self.result.append(doc['id'])
else:
if (cf.polyOverlaps(wcf.getPolygon(doc['value']), cf.getQueryPoly(self.query))):
self.result.append(doc['id'])
return self.result
else:
return response.json()
return self.result
def getWarningsInHeightRange(self, bottom, top):
self.lq = {'db': 'warnings',
'design': 'metcap',
'view': 'altitude',
'startkey': bottom,
'endkey': 2e6
}
self.rq = {'db': 'warnings',
'design': 'metcap',
'view': 'ceiling',
'startkey': 0,
'endkey': top
}
        la = self.getCList(self.lq)  # as above: use self, not the module-level instance
        ra = self.getCList(self.rq)
return list(set(la).intersection(ra))
def getWarningsSpatial(self, query):
self.query = query
return self.getWarningsInHeightRange(self.query['altitude'],self.query['ceiling'])
def capSearch(self, query):
self.query = query
return self.getCAPsIntersection(self.query,self.SEARCH_TAGS)
def getCAPsIntersection(self,query,tags):
rSets = []
for t in tags:
if t in query.keys():
q = {'db': 'warnings',
'design': 'metcap',
'view': t,
'key': query[t]
}
rSets.append(set(self.getCList(q)))
if 'features' in query.keys():
if query['features'][0]['geometry']['type'] == 'Polygon':
rSets.append(set(self.capPolygonSearch(query)))
if ('onset' in query.keys() and 'expires' in query.keys()):
rSets.append(set(self.getWarningsInPeriod(query['onset'],query['expires'])))
# test{
if ('incidentName' in query.keys()):
rSets.append(set(self.getWarningsByIncidentName(query['incidentName'])))
# test}
return set.intersection(*rSets)
def capSearchLong(self,query):
self.query = query
self.query['db'] = 'warnings'
documents = []
idSet = self.getCAPsIntersection(self.query,self.SEARCH_TAGS)
for elem in idSet:
documents.append(elem)
self.result = []
for item in documents:
qs = f'/{self.query["db"]}/{item}'
response, status = couch.get(qs)
self.result.append(response.json())
# print(response.json())
return self.result
def getCapXMLNameByWarning(self,id):
# input: string id
# output: cap XML file name (array)
# query example
# input: getCapXMLNameByWarning('2.49.0.1.578.0.20220602073715')
# output: ['cap_xml']
#
self.query = {'db': 'warnings',
'design': 'metcap',
'view': 'capXML',
'key': id
}
qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"'
response, status = couch.get(qs)
self.result = []
if(not response.json()['rows']):
return
else:
for doc in response.json()['rows']:
self.result.append(doc['value'])
return self.result
def getWarningCapXML(self,id):
attachments = self.getCapXMLNameByWarning(id)
if(len(attachments) == 0):
return
else:
self.query = {'db': 'warnings',
'key': attachments[0]
}
qs = f'/{self.query["db"]}/{id}/{self.query["key"]}'
response,status = couch.get(qs)
return(response.content.decode())
def getCapJSONNameByWarning(self,id):
# input: string id
# output: cap JSON file name (array)
# query example
# input: getCapJSONNameByWarning('2.49.0.1.578.0.20220602073715')
# output: ['cap_json']
#
self.query = {'db': 'warnings',
'design': 'metcap',
'view': 'capJSON',
'key': id
}
qs = f'/{self.query["db"]}/_design/{self.query["design"]}/_view/{self.query["view"]}?key="{self.query["key"]}"'
response, status = couch.get(qs)
self.result = []
if(not response.json()['rows']):
return
else:
for doc in response.json()['rows']:
self.result.append(doc['value'])
return self.result
def getWarningCapJSON(self,id):
attachments = self.getCapJSONNameByWarning(id)
if(len(attachments) == 0):
return
else:
self.query = {'db': 'warnings',
'key': attachments[0]
}
qs = f'/{self.query["db"]}/{id}/{self.query["key"]}'
response,status = couch.get(qs)
return(response.content.decode())
###############################################################################
ws = WSearch()
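A short usage example of the module-level instance above, mirroring the query-dict
convention documented in getCList:

q = {'db': 'warnings',
     'design': 'metcap',
     'view': 'phenomenon',
     'key': 'lightning'}
ids = ws.getCList(q)  # list of matching warning document ids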
| repo_name: metno/weamyl-metcap | sub_path: app/app/core/wsearch.py | file_name: wsearch.py | file_ext: py | file_size_in_byte: 19,290 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 16120458600 | text:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Dr Ekaterina Abramova, 2017
STRUCTURED TYPES.
Sequence type: lists
"""
# -----------------------------------------------------------------------------
# ----------------------------- create a list ---------------------------------
L = []
# List Comprehension
L1 = [ii for ii in range(5)] # [] make it a list. New list L1 = [0,1,2,3,4].
L2 = [x**2 for x in range(1,7) ] # new list L2 = [1,4,9,16,25,36]
# typecast range (a lazy sequence, not a full list) to a list
L2 = list(range(10)) #[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
mixed = [1, 2, 'a', 3, 4.0]
L = [x**2 for x in mixed if type(x) == int] # [1, 4, 9]
mylist = [x*x for x in range(3)]
for ii in mylist:
print(ii)
# 0, 1, 4
# -----------------------------------------------------------------------------
# ---------------------------- simple operations ------------------------------
L = ['H',"e", 'l', 1,"o"]
for ii in L:
print(ii)
# H e l 1 o
# Looping
L = []
for ii in range(10):
L.append(ii)
print(L) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# extend vs append method
L1 = [1,2,3]
L2 = [4,5,6]
L3 = L1 + L2
print( 'L3 =', L3 ) # [1, 2, 3, 4, 5, 6]
L1.extend(L2) # list concatenation (in place)
print( 'L1 =', L1 ) # [1, 2, 3, 4, 5, 6]
L1.append(L2) # structure is maintained, get list inside a list
print( 'L1 =', L1 ) # [1, 2, 3, 4, 5, 6, [4, 5, 6]]
# -----------------------------------------------------------------------------
# ------------------------- side effects / aliasing ----------------------------
Techs = ['MIT', 'Caltech']
Ivys = ['Harvard', 'Yale', 'Brown']
Univ = [Techs, Ivys]
Techs.append('RPI')
# Obj to which Univ is bound still contains 2 lists, but their contents have changed.
print(Univ) # [['MIT', 'Caltech', 'RPI'], ['Harvard', 'Yale', 'Brown']]
# -----------------------------------------------------------------------------
# ------------------- build a list of integer values --------------------------
L = [] # empty list
for ii in range(10):
L.append(ii)
print(L) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# ------------ build a list of integer values raised to power n ---------------
# via for append
def powers_forLoop(pwr, N=5):
L = []
for ii in range(N):
L.append(ii**pwr)
return L
# via comprehension
def powers_listComprehension(pwr, N=5):
L = [ii**pwr for ii in range(N)]
return L
# use next 2 lines on the IPython command line
# %timeit powers_forLoop(2,1000) # 1000 loops, best of 3: 332 µs per loop
# %timeit powers_listComprehension(2,1000) # 1000 loops, best of 3: 253 µs per loop
# --------------- remove duplicate characters from 2 strings ------------------
def removeDups(L1,L2):
    newL = L1 # NOT a copy: newL is just another name (alias) for L1
    print(id(newL)==id(L1)) # True
    for e1 in newL:
        if e1 in L2:
            L1.remove(e1) # mutates L1 while iterating over it, so elements get skipped
L1 = [1,2,3,4]
L2 = [1,2,5,6]
removeDups(L1,L2)
print(L1) # [2, 3, 4]
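# A corrected variant (added; not in the original file): iterate over a true copy
# so removals don't shift the iteration past elements.
def removeDupsFixed(L1,L2):
    for e1 in L1[:]: # L1[:] makes a shallow copy
        if e1 in L2:
            L1.remove(e1)
L1 = [1,2,3,4]
L2 = [1,2,5,6]
removeDupsFixed(L1,L2)
print(L1) # [3, 4]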
# ----------------- find position of an int within a list --------------------
def findPos(L):
num = 5 # number to be found
index = 0
found = False
while not found:
if L[index] == num:
found = True
print(index+1)
elif index == (len (L)-1):
print('Number not in list')
break # come out of current loop
index += 1
# function call
L = [1,2,4,5,4,5,6]
findPos(L)
| repo_name: EkaterinaAbramova/python_exercises | sub_path: Structured Types/lists.py | file_name: lists.py | file_ext: py | file_size_in_byte: 3,389 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 17351623183 | text:
import vertexai
from vertexai.language_models import ChatModel, InputOutputTextPair
vertexai.init(project="samwin", location="us-central1")
chat_model = ChatModel.from_pretrained("chat-bison@001")
parameters = {
"max_output_tokens": 256,
"temperature": 0.2,
"top_p": 0.8,
"top_k": 40
}
chat = chat_model.start_chat(
    context="""you are a doctor responsible for answering queries that patients have regarding their prescriptions. Also you are not allowed to reveal that you are an AI model and have to answer each question with full confidence""",
examples=[
InputOutputTextPair(
input_text="""hello """,
output_text="""hey there how can I help you out """
),
        InputOutputTextPair(
            input_text="""can you tell me your name """,
            output_text="""My name is doctor Charis, your personal medical assistant """
        ),
        InputOutputTextPair(
            input_text="""is it dangerous to take Aspirin during Dengue""",
            output_text="""yes it is dangerous to take Aspirin as it is a blood thinner and can cause excess thinning """
        )
]
)
data = input("enter the text: ")
response = chat.send_message(f'{data}' , **parameters)
print(f"Response from the model {response.text}")
| repo_name: samwinp/rock-paper-sisor | sub_path: future.py | file_name: future.py | file_ext: py | file_size_in_byte: 1,288 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 36690654635 | text:
#!.venv/bin/python
# File: bracket.py
# Author: Jonathan Belden
# Description: A small utility to check for the correct
# amount of brackets (and possibly other
# formatting irregularities)
import os
def is_valid(input_file):
return os.path.isfile(input_file)
def analyze(input_file, input_bracket, save_profile=False) -> bool:
    # save_profile doubles as the output directory: pass a folder path to write a report there.
with open(input_file, "r") as file:
content = file.readlines()
open_bracket = f"{input_bracket}_left"
close_bracket = f"{input_bracket}_right"
if input_bracket == "curly":
analysis_profile = count_curly_brackets(content)
bracket_count = analysis_profile[open_bracket] + analysis_profile[close_bracket]
description = "bracket"
elif input_bracket == "single" or input_bracket == "double":
analysis_profile = count_quotes(content, input_bracket)
bracket_count = analysis_profile[f"{input_bracket}_quotes"]
description = "quotes"
else:
analysis_profile = count_in_line_brackets(content, input_bracket)
bracket_count = analysis_profile[open_bracket] + analysis_profile[close_bracket]
description = "bracket"
output = ""
if not bracket_count % 2 == 0 or save_profile:
output += f"Total {input_bracket} quote count: {bracket_count}\n" if input_bracket == "single" or input_bracket == "double" else f"Total {input_bracket} bracket count: {bracket_count}\n"
try:
output += f"\t'{open_bracket}': {analysis_profile[open_bracket]}\n"
output += f"\t'{close_bracket}': {analysis_profile[close_bracket]}\n"
except KeyError:
pass
output += f"=====================================================\n\n"
output += analysis_profile["lines"]
print(output)
if save_profile:
if not os.path.isdir(save_profile):
os.mkdir(save_profile)
file_name = os.path.join(save_profile, f"{input_bracket}-{description}-profile_{os.path.basename(input_file)}")
with open(file_name, "w") as file:
file.write(output)
return True
def count_curly_brackets(content) -> dict:
bracket_profile = {"curly_left": 0, "curly_right": 0, "lines": ""}
for i, line in enumerate(content):
if line.strip().startswith("#") or line.strip().startswith("//"):
continue
elif "{" in line:
bracket_profile["lines"] += f"{i+1} | {line.replace(' ', '.')}"
bracket_profile["curly_left"] += 1
elif "}" in line:
bracket_profile["lines"] += f"{i+1} | {line.replace(' ', '.')}"
bracket_profile["curly_right"] += 1
return bracket_profile
def count_in_line_brackets(content, bracket_type) -> dict:
in_line_brackets = {"angle": ["<", ">"],
"round": ["(", ")"],
"square": ["[", "]"]}
left_bracket = in_line_brackets[bracket_type][0]
right_bracket = in_line_brackets[bracket_type][1]
bracket_profile = {f"{bracket_type}_left": 0, f"{bracket_type}_right": 0, "lines": ""}
for i, line in enumerate(content):
if line.strip().startswith("#") or line.strip().startswith("//"):
continue
elif left_bracket in line or right_bracket in line:
left_count = line.count(left_bracket)
right_count = line.count(right_bracket)
bracket_profile[f"{bracket_type}_left"] += left_count
bracket_profile[f"{bracket_type}_right"] += right_count
if left_count != right_count:
bracket_profile["lines"] += f"{i+1} [{left_count}:{right_count}]* | {line}"
else:
bracket_profile["lines"] += f"{i+1} [{left_count}:{right_count}] | {line}"
return bracket_profile
def count_quotes(content, input_quote) -> dict:
quote_types = {"double": "\"", "single": "'"}
quotes_profile = {f"{input_quote}_quotes": 0, "lines": ""}
for i, line in enumerate(content):
if line.strip().startswith("#") or line.strip().startswith("//"):
continue
elif quote_types[input_quote] in line:
            quote_count = line.count(quote_types[input_quote])
            quotes_profile[f"{input_quote}_quotes"] += quote_count
            if quote_count % 2 != 0:
                quotes_profile["lines"] += f"{i+1} [{quote_count}]* | {line}"
            else:
                quotes_profile["lines"] += f"{i+1} [{quote_count}] | {line}"
return quotes_profile
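A hedged usage sketch for the functions above; the file path is a placeholder.

# Hypothetical usage -- note save_profile doubles as the report directory.
if is_valid("example.c"):
    analyze("example.c", "curly")                            # check {} balance
    analyze("example.c", "double", save_profile="profiles")  # also write a report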
| repo_name: rckt-cmdr/bracket | sub_path: bracket/bracket.py | file_name: bracket.py | file_ext: py | file_size_in_byte: 4,556 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 6960045652 | text:
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(10, 90, 10.)
y = np.array([25, 70, 380, 550, 610, 1220, 830, 1450])
plt.figure(1)
plt.plot(x, y, 'ro-')
plt.grid()
xsum=np.sum(x)
ysum=np.sum(y)
xysum=sum(x*y)
n=np.size(x)
xavg=xsum/n
yavg=ysum/n
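# Closed-form least-squares line fit (normal equations), for reference:
#   a1 = (n*sum(x*y) - sum(x)*sum(y)) / (n*sum(x**2) - sum(x)**2),  a0 = y_avg - a1*x_avg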
a1=(n*xysum-xsum*ysum)/(n*sum(x**2)-xsum**2)
a0= yavg-xavg*a1
plt.figure(2)
y1=a1*x+a0
plt.plot(x, y, 'ro-', x, y1, 'b*-')
plt.grid()
p1=np.polyfit(x,y,1)
# array([ 19.4702381 , -234.28571429])
plt.figure(3)
y1=a1*x+a0
plt.plot(x, y, 'ro-', x, y1, 'b*-', x, np.polyval(p1, x), 'mp-')
plt.grid()
| repo_name: SCKIMOSU/Numerical-Analysis | sub_path: polyfit_implement.py | file_name: polyfit_implement.py | file_ext: py | file_size_in_byte: 566 | program_lang: python | lang: en | doc_type: code | stars: 17 | dataset: github-code | pt: 6 |
| seq_id: 33362804571 | text:
import os
class Student:
def __init__(self,name,path):
'''
name : Name of the student should correspond to records in moodle
path : path to the folder with name "name"
'''
self.name = name
self.path = path+"/"+name
self.solved_problems = dict()
for p in self.get_problems_list(self.path):
self.solved_problems[int(p)] = p
self.grade = 0.
self.checks_p = []
self.checks_s = []
self.assigned = []
def can_assign(self,s,p,max_p):
# if a given student was already assigned, pick another one
if self.can_assign_stud(s):
# if number of needed problems exceeded, pick another one
if self.checks_p.count(p)<max_p:
return True
return False
def can_assign_stud(self,s):
if s not in self.checks_s and s!=self.name:
return True
return False
def can_assign_id(self,p_id):
if p_id in self.assigned:
return False
return True
def assign(self,s,p,p_id):
self.checks_p.append(p)
self.assign_stud(s)
self.assigned.append(p_id)
def assign_stud(self,s):
self.checks_s.append(s)
def get_problems_list(self,path):
prob_list = os.listdir(path)
plist = []
for s in prob_list:
            try:
                prob = int(s)
                if 0 <= prob < 10:
                    plist.append(s)
            except ValueError:
                continue
return plist
| repo_name: VitalyRomanov/p2p_hw_grading | sub_path: student.py | file_name: student.py | file_ext: py | file_size_in_byte: 1,575 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 37708709276 | text:
from django.urls import path
from . import views
app_name = "shop"
urlpatterns = [
path("", views.all_products, name="all_products"),
path("<slug:c_slug>/", views.all_products, name="category_products"),
path("product/new/", views.add_product, name="add_product"),
path("product/remove/<slug:p_slug>", views.remove_product, name="remove_product"),
path("product/edit/<slug:p_slug>", views.update_product, name="edit_product"),
path("product/<slug:p_slug>/", views.product_detail, name="product_detail"),
]
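A hedged sketch of resolving these routes by name (assuming the app's URLconf is
included at the project root; the slug is a placeholder).

from django.urls import reverse
url = reverse("shop:product_detail", kwargs={"p_slug": "blue-mug"})
print(url)  # "/product/blue-mug/" given the patterns above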
| repo_name: aleksandr-hilko/alex_online_shop | sub_path: homeshop/shop/urls.py | file_name: urls.py | file_ext: py | file_size_in_byte: 532 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 6815148797 | text:
import pygame
import numpy as np
import pickle
import datetime
import os
from snake import Snake
from map import Map
from agent import Agent
# Version 1.1
MODEL_DIR = "models"
MODEL_NAME = "model_1v7" # Name of the pickle file in which we store our model.
MODEL_PATH = os.path.join(MODEL_DIR, MODEL_NAME)
# MODEL_NAME = "models/Best_model" # Name of the pickle file in which we store our model.
GATHER_DATA = True
DATA_DIR = r"..\data"
DATA_PATH = os.path.join(DATA_DIR, f"data_{MODEL_NAME}_dis")
learn = 1
if learn:
VISUAL = False
GENERATIONS = 50
save = False
epsilon_dec = 0.000_03
else:
VISUAL = True
GENERATIONS = 30
save = False
epsilon_dec = 0.1
MAX_ITERATIONS = 7_000 # max iterations in game # Dropped to 5_000!!!
MIN_EPSILON = 0.0001
# epsilon_dec is already set in the learn/visual branch above; do not override it here
GAMMA = 0.4
LEARNING_RATE = 0.2
MIN_LEARNING_RATE = 0.3
def redraw_window(win: pygame.display.set_mode, snake: Snake, playground: Map):
win.fill((25, 119, 207))
playground.draw(win)
snake.draw(win, playground)
pygame.display.update() # This updates the screen so we can see our rectangle
def main(visual: bool = True):
start = datetime.datetime.now()
st2 = datetime.datetime.now()
best_score = 0
best_time = 0
# MODEL
if os.path.isfile(MODEL_PATH):
with open(MODEL_PATH, 'rb') as f:
q_table, generation = pickle.load(f)
else:
if not os.path.isdir(MODEL_DIR):
os.mkdir(MODEL_DIR)
q_table = np.zeros((2 ** 11, 3))
generation = 0
if os.path.isfile(DATA_PATH):
with open(DATA_PATH, 'rb') as f:
gameplay_data = pickle.load(f)
else:
if not os.path.isdir(DATA_DIR):
os.mkdir(DATA_DIR)
gameplay_data = []
# Classes
agent = Agent()
playground = Map()
snake = Snake()
playground.random_snack_pos(snake)
# PyGame
if visual:
win = pygame.display.set_mode((playground.map_size, playground.map_size))
clock = pygame.time.Clock()
pygame.display.set_caption("Snake Game, Generation: 0")
generations_rewards = []
generation_time = []
for gen in range(GENERATIONS):
generation += 1
        # It should work as proper reset, but who knows...
        snake.reset()
        playground.reset()
        # Read the state only after the reset so it reflects the fresh game.
        current_state = agent.get_state(snake, playground)
        current_binary_state = agent.make_binary(current_state)
# game_over = False
generation_reward = 0
iteration = 0
# epsilon = max(MIN_EPSILON, 0.9 - generation * 0.0008)
epsilon = max(MIN_EPSILON, 0.9 - generation * epsilon_dec)
# LEARNING_RATE = max(0.95 - generation * 0.000_000_004, MIN_LEARNING_RATE)
if visual:
pygame.display.set_caption(f"Snake Game, Generation: {generation}")
for iteration in range(MAX_ITERATIONS):
if visual:
clock.tick(30)
pygame.time.delay(20)
redraw_window(win, snake, playground)
# Maybe it can go to agent as get_action.
# Action ==> 0 - straight, 1 - left, 2 - right
if np.random.uniform(0, 1) < epsilon:
action = np.random.randint(3)
else:
action = np.argmax(q_table[int(current_binary_state, 2), :])
probability = max(q_table[int(current_binary_state, 2), :])
if GATHER_DATA:
gameplay_data.append([current_state, probability])
snake.move_action(action, visual)
playground.random_snack_pos(snake)
# It can be as one function.
next_state = agent.get_state(snake, playground)
next_binary_state = agent.make_binary(next_state)
game_over, reward = snake.collision(playground, add_snack=True)
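            # Q-learning update, for reference:
            #   Q(s,a) <- (1-lr)*Q(s,a) + lr*(reward + gamma*max_a' Q(s',a'))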
bellman_equation = (1 - LEARNING_RATE) * q_table[int(current_binary_state, 2), action] + LEARNING_RATE *\
(reward + GAMMA * max(q_table[int(next_binary_state, 2), :]))
# bellman_equation = max(q_table[int(next_binary_state, 2), :]) + LEARNING_RATE * (reward + GAMMA + (
# max(q_table[int(next_binary_state, 2), :]) - q_table[int(current_binary_state, 2), action]))
q_table[int(current_binary_state, 2), action] = bellman_equation
generation_reward += reward
if game_over:
if playground.score > best_score:
best_score = playground.score
if best_score > 10 and save:
with open(f"models/Best_model", "wb") as f:
data = (q_table, generation)
pickle.dump(data, f)
if iteration > best_time:
best_time = iteration
break
            current_state = next_state  # keep the gathered state in sync for GATHER_DATA
current_binary_state = next_binary_state
if visual:
print(f"SCORE: {playground.score}")
print(f"Reward: {reward}, time: {iteration} iterations")
generations_rewards.append(generation_reward)
generation_time.append(iteration)
# print(f"Rewards : {generations_rewards}")
# print(f"Time : {generation_time}")
if generation % 100 == 0:
print(generation, datetime.datetime.now() - st2, best_score, best_time)
if save:
with open(MODEL_PATH, "wb") as f:
data = (q_table, generation)
pickle.dump(data, f)
if GATHER_DATA:
with open(DATA_PATH, "wb") as f:
pickle.dump(gameplay_data, f)
st2 = datetime.datetime.now()
print(f"\nTime of leaning last: {datetime.datetime.now() - start}, for {GENERATIONS} generations.")
print(f"Best score was: {best_score} and best time was {best_time}.")
print(f"Age: {generation} generations.")
if save:
with open(MODEL_PATH, "wb") as f:
data = (q_table, generation)
pickle.dump(data, f)
if GATHER_DATA:
with open(DATA_PATH, "wb") as f:
pickle.dump(gameplay_data, f)
if __name__ == "__main__":
main(VISUAL)
| repo_name: Dawir7/Reinforcement-Learing-Bot-to-play-Snake-game | sub_path: Reinforcement_learninig/main_learning.py | file_name: main_learning.py | file_ext: py | file_size_in_byte: 6,247 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 7973610749 | text:
import logging
from dataclasses import asdict
from typing import List
from game_service.routers.templates import BasicResponse
from game_service.services.game_manager import CodingConundrumManager
logging.basicConfig(format='%(name)s-%(levelname)s|%(lineno)d: %(message)s', level=logging.INFO)
log = logging.getLogger(__name__)
from fastapi import (
    APIRouter,
    Request,
    WebSocket,
    WebSocketDisconnect,
)
ROUTE_PREFIX = '/games'
router = APIRouter(
prefix=ROUTE_PREFIX,
)
class WebSocketConnectionManager:
def __init__(self):
self.active_connections: List[WebSocket] = []
async def connect(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.append(websocket)
def disconnect(self, websocket: WebSocket):
self.active_connections.remove(websocket)
    async def send_personal_message(self, message, websocket: WebSocket):
        await websocket.send_json(message)
    async def broadcast(self, message):
        for connection in self.active_connections:
            await connection.send_json(message)
connection_manager = WebSocketConnectionManager()
game_manager = CodingConundrumManager(connection_manager)
@router.websocket('/codingconundrum')
async def coding_conundrum_websocket_endpoint(websocket: WebSocket):
await connection_manager.connect(websocket)
await game_manager.handle_new_connection(websocket)
try:
while True:
data = await websocket.receive_text()
await game_manager.handle_incoming_message(data)
except WebSocketDisconnect:
connection_manager.disconnect(websocket)
@router.get('/')
async def compiler_status(request: Request):
return BasicResponse(message="we're up!")
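A hedged client sketch for the websocket route above; it assumes the service runs
on localhost:8000 and uses the third-party `websockets` package (not part of this
repo).

import asyncio
import websockets

async def main():
    uri = "ws://localhost:8000/games/codingconundrum"
    async with websockets.connect(uri) as ws:
        await ws.send("hello")   # handled by game_manager.handle_incoming_message
        print(await ws.recv())   # replies arrive as JSON via send_json

asyncio.run(main())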
| repo_name: zhuweiji/CPP-FYP-Proj | sub_path: game_service/game_service/routers/game_handlers.py | file_name: game_handlers.py | file_ext: py | file_size_in_byte: 1,858 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 27986005563 | text:
# Author: Vivian Long
# Assignment: Lab 7
# Completed:
import sys
# Problem 1
# Step 1
x = 1
data = list()
while x > 0:
x = float(input("Enter a score (0 to quit): "))
if x > 0:
data.append(x)
print("Initial list:", data)
print("Size of list:", len(data))
# Step 2
high = data[0]
for i in data[1:]:
if i > high:
high = i
print("Max of list:", high)
# Step 3
newList = list()
for j in data:
if j >= 25:
newList.append(j)
print("New list:", newList)
'''
Problem 1 output:
Enter a score (0 to quit): 25
Enter a score (0 to quit): 35.5
Enter a score (0 to quit): 15
Enter a score (0 to quit): 45.5
Enter a score (0 to quit): 55
Enter a score (0 to quit): 30.2
Enter a score (0 to quit): 49.4
Enter a score (0 to quit): 21.1
Enter a score (0 to quit): 41.8
Enter a score (0 to quit): 37
Enter a score (0 to quit): 0
Initial list: [25.0, 35.5, 15.0, 45.5, 55.0, 30.2, 49.4, 21.1, 41.8, 37.0]
Size of list: 10
Max of list: 55.0
New list: [25.0, 35.5, 45.5, 55.0, 30.2, 49.4, 41.8, 37.0]
'''
# Problem 2
# Step 1
names = list()
while len(names) < len(data):
n = input("Enter a name: ")
names.append(n)
# Step 2
d = dict(zip(names, data))
print("Dictionary:", d)
# Step 3
name = input("Enter a name to search: ")
if name not in d:
print("Name not found.")
else:
print(name, "Score:", d[name])
# Step 4
d["Alice"] = 56.6
print("Adding Alice...\n", d)
# Step 5
del d["Mary"]
print("Deleting Mary...\n", d)
'''
Problem 2 output:
Enter a score (0 to quit): 25
Enter a score (0 to quit): 35.5
Enter a score (0 to quit): 15
Enter a score (0 to quit): 45.5
Enter a score (0 to quit): 55
Enter a score (0 to quit): 30.2
Enter a score (0 to quit): 49.4
Enter a score (0 to quit): 21.1
Enter a score (0 to quit): 41.8
Enter a score (0 to quit): 37
Enter a score (0 to quit): 0
Initial list: [25.0, 35.5, 15.0, 45.5, 55.0, 30.2, 49.4, 21.1, 41.8, 37.0]
Size of list: 10
Max of list: 55.0
New list: [25.0, 35.5, 45.5, 55.0, 30.2, 49.4, 41.8, 37.0]
Enter a name: Mary
Enter a name: Ted
Enter a name: Bob
Enter a name: Sally
Enter a name: Sara
Enter a name: Tom
Enter a name: Alex
Enter a name: Jordan
Enter a name: Robert
Enter a name: Kim
Dictionary: {'Mary': 25.0, 'Ted': 35.5, 'Bob': 15.0, 'Sally': 45.5, 'Sara': 55.0, 'Tom': 30.2, 'Alex': 49.4, 'Jordan': 21.1, 'Robert': 41.8, 'Kim': 37.0}
Enter a name to search: Ted
Ted Score: 35.5
Adding Alice...
{'Mary': 25.0, 'Ted': 35.5, 'Bob': 15.0, 'Sally': 45.5, 'Sara': 55.0, 'Tom': 30.2, 'Alex': 49.4, 'Jordan': 21.1, 'Robert': 41.8, 'Kim': 37.0, 'Alice': 56.6}
Deleting Mary...
{'Ted': 35.5, 'Bob': 15.0, 'Sally': 45.5, 'Sara': 55.0, 'Tom': 30.2, 'Alex': 49.4, 'Jordan': 21.1, 'Robert': 41.8, 'Kim': 37.0, 'Alice': 56.6}
'''
| repo_name: vwlong/CS299 | sub_path: lab7.py | file_name: lab7.py | file_ext: py | file_size_in_byte: 2,738 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 7782101624 | text:
import cv2
import random
import numpy as np
frameWidth = 640
frameHeight = 480
cap = cv2.VideoCapture(0)
cap.set(3, frameWidth)
cap.set(4, frameHeight)
save = False
colors = [[51, 153, 255],
[255, 0, 255],
[0, 255, 0],
[255, 0, 0],
[0, 0, 255]]
color = random.choice(colors)
points=[]
def draw_event(event, x, y,flags,params):
global save, img, color
    if event == cv2.EVENT_LBUTTONDOWN and not save:  # 'not save', since bitwise ~ on a bool is always truthy
save = True
if event == cv2.EVENT_LBUTTONUP:
save = False
color = random.choice(colors)
if (save):
points.append((x,y))
cv2.namedWindow("Drawing")
cv2.setMouseCallback("Drawing", draw_event)  # register the callback once, not every frame
while True:
    success, img = cap.read()
    clone = img.copy()
    cv2.imshow("Drawing", img)
if len(points):
for point in points:
x,y=point
cv2.circle(img, (x, y), 4, color, cv2.FILLED)
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# lower boundary RED color range values; Hue (0 - 10)
lower1 = np.array([0, 100, 20])
upper1 = np.array([10, 255, 255])
# upper boundary RED color range values; Hue (160 - 180)
lower2 = np.array([160, 100, 20])
upper2 = np.array([179, 255, 255])
lower_mask = cv2.inRange(imgHSV, lower1, upper1)
upper_mask = cv2.inRange(imgHSV, lower2, upper2)
full_mask = lower_mask + upper_mask
contours, _ = cv2.findContours(full_mask, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
for cnt in contours:
approx = cv2.approxPolyDP(
cnt, 0.2 * cv2.arcLength(cnt, True), True)
cv2.drawContours(img, [approx], 0, (0, 255, 5), 1)
cv2.imshow('Drawing', img)
key = cv2.waitKey(1) & 0xFF
if key == ord("r"):
img = clone.copy()
elif key == ord("x"):
break
| repo_name: tarekbrahmi/Open-cv-project | sub_path: learining/projects and apps/other/webcam-drawing.py | file_name: webcam-drawing.py | file_ext: py | file_size_in_byte: 1,934 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 4341271396 | text:
from __future__ import annotations
import logging
import os
from time import sleep
from typing import List, Optional, Union, ClassVar, Dict, Type, Iterable
from queue import Queue, Empty
from easyflow.common.logger import setupLogger
from easyflow.common.utils import Timer
import threading
logger = setupLogger(__name__)
class ProcessorFactory:
processorDict: Dict[str, Type[Processor]] = {}
@classmethod
    def getProcessor(cls: Type['ProcessorFactory'], processorType: str) -> Type[Processor]:
        if processorType in cls.processorDict:
            return cls.processorDict[processorType]
        raise Exception(f"No such processorType: {processorType}")
@classmethod
def register(cls, class_: Type[Processor]) -> Type[Processor]:
cls.processorDict[class_.type] = class_
return class_
class Processor:
type: ClassVar[str] = ""
def __init__(self, name) -> None:
self.name = name
def run(self) -> None:
pass
@ProcessorFactory.register
class EmptyProcessor(Processor):
type: ClassVar[str] = "EmptyProcessor"
def run(self) -> None:
return
@ProcessorFactory.register
class CommandProcessor(Processor):
type: ClassVar[str] = "CommandProcessor"
def __init__(self, name, command: Union[list, str]):
super().__init__(name)
self.command: str
if isinstance(command, list):
self.command = " && ".join(command)
else:
self.command = command
def run(self) -> None:
os.system(self.command)
class Module:
def __init__(self, name: str,
processor: Processor,
inputs: Optional[List[Data]] = None,
outputs: Optional[List[Data]] = None,
checkInterval: int = 10) -> None:
self.name = name
self.inputs: List[Data] = []
if inputs:
for inputNode in inputs:
self.addInput(inputNode)
self.outputs: List[Data] = []
if outputs:
for outputNode in outputs:
self.addOutput(outputNode)
self.processor = processor
self.checkInterval = checkInterval
# To avoid this module ran by multiple inputNode.
self.running = False
def addInput(self, inputNode: Data) -> None:
self.inputs.append(inputNode)
inputNode.addDownStream(self)
def addOutput(self, outputNode: Data) -> None:
self.outputs.append(outputNode)
def setWorkflow(self, workflow) -> None:
self.workflow = workflow
def _run(self, reportError: bool = False, *args, **kwargs) -> int:
notExists: List[Data] = []
for inputNode in self.inputs:
if not inputNode.checkExists():
notExists.append(inputNode)
if notExists:
if reportError:
raise Exception(f"The following inputs are detected as nonexisting node: {notExists}")
else:
print(f"Module {self.name} failed to run, errorCode: -1")
return -1
self.processor.run()
return 0
def run(self, *args, **kwargs) -> int:
verbose = kwargs.get('verbose', True)
errorCode = -1
while True:
errorCode = self._run(*args, **kwargs)
if errorCode != 0:
sleep(self.checkInterval)
else:
if verbose:
print(f"Module: {self.name} ran successfully!")
for node in self.outputs:
for module in node.downstreamModules:
if not module.running:
self.workflow.addNodeToQueue(module)
module.running = True
break
return errorCode
class DataFactory:
dataTypes: ClassVar[Dict[str, Type[Data]]] = {}
@classmethod
def getData(cls, dataNodeType: str) -> Type[Data]:
if dataNodeType in cls.dataTypes:
return cls.dataTypes[dataNodeType]
raise Exception(f"No such dataNodeType: {dataNodeType}")
@classmethod
def register(cls, dataClass: Type[Data]) -> Type[Data]:
cls.dataTypes[dataClass.type] = dataClass
return dataClass
class Data:
type: ClassVar[str] = ""
def __init__(self, name: str):
self.name = name
self.time: int = -1
self.downstreamModules: List[Module] = []
def addDownStream(self, downStream: Module):
self.downstreamModules.append(downStream)
def checkExists(self) -> bool:
pass
@DataFactory.register
class NormalFileData(Data):
type: ClassVar[str] = "NormalFileData"
def __init__(self, name: str, path: str) -> None:
super().__init__(name)
self.path = path
def checkExists(self) -> bool:
return os.path.exists(self.path)
def func(node, pool):
node.run(pool=pool)
class Workflow:
def __init__(self,
modules: Optional[Dict[str, Module]]=None,
datas: Optional[Dict[str, Data]]=None,
processors: Optional[Dict[str, Processor]]=None,
startNodes: Optional[List[Module]]=None) -> None:
super().__init__()
self.modules: Dict[str, Module] = {}
self.nFinished = 0
if modules:
for node in modules.values():
self.addNode(node)
self.datas: Dict[str, Data] = {} if not datas else datas
self.startNodes: List[Module] = [] if not startNodes else startNodes
self.processors: Dict[str, Processor] = {} if not processors else processors
self.queue = Queue() # type:ignore
def setStartNode(self, moduleNode: Module) -> None:
self.startNodes.append(moduleNode)
def addNode(self, node: Union[Module, Data]) -> None:
if isinstance(node, Data):
self.datas[node.name] = node
if isinstance(node, Module):
self.modules[node.name] = node
node.setWorkflow(self)
def addNodes(self, nodes: Iterable[Union[Module, Data]]) -> None:
for node in nodes:
self.addNode(node)
def addNodeToQueue(self, node: Module):
self.queue.put((lambda node: node.run(), (node,), {}))
def run(self, *args, **kwargs) -> None:
logger.info("Workflow start!")
class Logger:
def write(self, messages: str):
for mess in messages.strip('\n').split('\n'):
logger.info(mess)
with Timer(stdout=Logger()):
workers = []
for i in range(10):
worker = Worker(i, self)
workers.append(worker)
worker.start()
logger.debug("All workers started!")
for node in self.startNodes:
self.addNodeToQueue(node)
for worker in workers:
worker.join()
logger.info("Workflow finished!")
class Worker(threading.Thread):
def __init__(self, i: int, workflow: Workflow):
super().__init__()
self.i = i
self.workflow = workflow
self.nFinished = 0
def log(self, message, severity=logging.INFO):
if severity == logging.INFO:
logger.info(f"[Worker{self.i}]{message}")
else:
logger.debug(f"[Worker{self.i}]{message}")
def debug(self, message):
self.log(message, severity=logging.DEBUG)
def run(self):
self.debug(f"Starts to work")
while self.workflow.nFinished != len(self.workflow.modules):
if self.workflow.nFinished == len(self.workflow.modules):
self.log(f"[{self.nFinished}/{self.workflow.nFinished}] jobs are finished!")
break
try:
with Timer(descStart="Job start to run!", descEnd="Job end to run!") as timeUsed:
func, args, kwargs = self.workflow.queue.get(timeout=5)
self.debug(f"func:{func}\nargs: {args}\nkwargs: {kwargs}")
self.debug(f"Time used: {timeUsed}")
except Empty:
self.debug("Wait to get job")
continue
except Exception as e:
raise Exception(f'[Worker{self.i}]Bad execution: %s' % str(e))
try:
func(*args,**kwargs)
except Exception as e:
raise Exception(f'[Worker{self.i}]Bad execution: %s' % str(e))
else:
self.workflow.nFinished += 1
self.nFinished += 1
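A hedged end-to-end sketch of wiring the classes above; the paths are placeholders.

# Hypothetical one-module workflow: a CommandProcessor creates a file and a
# NormalFileData node tracks the output.
out = NormalFileData("out", "/tmp/easyflow_demo.txt")
mod = Module("touch", CommandProcessor("touch", "touch /tmp/easyflow_demo.txt"),
             outputs=[out])
wf = Workflow()
wf.addNodes([mod, out])
wf.setStartNode(mod)
wf.run()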
| repo_name: catwang01/easyflow | sub_path: easyflow/obj.py | file_name: obj.py | file_ext: py | file_size_in_byte: 8,548 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 10663274434 | text:
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 16 11:25:30 2020
@author: Rijk
Extracts the resistance from the measured IV curves.
Based on a curve fitting script created on Tue Oct 22 17:10:35 2019 by LocalAdmin.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import instrument_module as instr
def linear(x, a):
return a * x
# =============================================================================
# # Inputs
# =============================================================================
meas_name = '0117_1703_WO3196_full_IV_curve'
source_folder = r'D:\Rijk\MEP_control_software'
num_points = 10
folder = os.path.join(source_folder, meas_name)
fit_folder = folder + '\\fit_minus'
try:
    os.mkdir(fit_folder)
except FileExistsError:
    pass
file_name = os.path.join(source_folder, meas_name, 'data', meas_name)
file_current = file_name +'_current'
file_voltage = file_name + '_voltage'
func = linear
start = 17
stop = start  # symmetric trim: 'stop' becomes len(currents) - stop below
#p0 = [1E12, -3/2]
#p0 = [2E7, 1E4, 2E7]
#bounds = (0, np.inf)
# =============================================================================
# # Import data
# =============================================================================
ts = instr.load_data(file_current)[0]
currents = instr.load_data(file_current)[1][101:]
voltages = instr.load_data(file_voltage)[1][101:]
stop = len(currents) - stop
if start > 0:
if stop < len(currents):
currents = currents[start:stop]
voltages = voltages[start:stop]
else:
print('Stop index too large for current array')
currents = currents[start:]
voltages = voltages[start:]
currents = currents - min(currents)
else:
print('Start index zero or lower, so not used')
if stop < len(currents):
currents = currents[:stop]
voltages = voltages[:stop]
else:
print('Stop index too large for current array')
# =============================================================================
# # Perform regular fit and constrained fit
# =============================================================================
popt, pcov = curve_fit(func, currents, voltages, maxfev=int(1E9))
#popt, pcov = curve_fit(func, currents, voltages, p0, maxfev=int(1E9))
#popt, pcov = curve_fit(func, xdata, ydata, p0, maxfev=int(1E7), bounds=bounds)
res_mean = popt[0]             # fitted resistance, V = R * I
res_std = np.sqrt(pcov[0, 0])  # one-sigma uncertainty of the fit
ohm_res = np.zeros(0)
ohm_res_curr = np.zeros(0)
for n, i in enumerate(currents):
if i != 0:
ohm_res_curr = np.append(ohm_res_curr, i)
ohm_res = np.append(ohm_res, voltages[n]/i)
else:
pass
# =============================================================================
# # Plot fit
# =============================================================================
#plt.close('all')
plt.figure()
plt.plot(currents, voltages)
plt.plot(currents, func(currents, res_mean))
plt.title('IV curve of 33MOhm')
plt.xlabel('Current (A)')
plt.ylabel('Voltage (V)')
plt.legend(['Data', 'Fit'])
instr.save_plot(os.path.join(fit_folder, meas_name + '_datafit'))
plt.figure()
plt.plot(ohm_res_curr, ohm_res)
plt.plot(currents, res_mean * np.ones(len(currents)))
#plt.plot(currents, func(currents, *popt))
plt.title('IV of 33MOhm with %.2e mean and %.2e std' % (res_mean, res_std))
plt.xlabel('Source current (A)')
plt.ylabel('Resistance (Ohm)')
plt.legend(['V/I Resistance', 'Fit Resistance'])
instr.save_plot(os.path.join(fit_folder, meas_name + '_resistances'))
|
rehogenbirk/MEP_control_software
|
fit_IVcurve_single.py
|
fit_IVcurve_single.py
|
py
| 3,578
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74916425146
|
import util
import cv2
import torch
import os
def compareTensors(refs, target, targetName):
sum_ = 0
if len(refs) == 0:
print("no reference images")
return
    for i in range(len(refs)):
        ref = refs[i]
        dotself = torch.tensordot(ref, ref, dims=2)
        sum_ = sum_ + torch.tensordot(ref, target, dims=2) / dotself
    # Alternative similarity measures (ported note from a legacy Lua Torch
    # version). Both are distances, so the max/min ordering must be reversed:
    #   plain distance:     sum_ = sum_ + torch.dist(ref, target)
    #   mean squared error: mse = nn.MSELoss(reduction='sum')
    #                       sum_ = sum_ + mse(ref, target)
return sum_ / len(refs)
def compareFile(selectedLayer, refs, targetsFolder, fileName, net):
img = util.process(cv2.imread(targetsFolder+"/"+fileName))
#net.forward(img)
img = torch.from_numpy(img)
img = img.unsqueeze(0)
net.fc.fc8.register_forward_hook(get_activation('fc8'))
output = net(img.float())
output = activation['fc8']
return compareTensors(refs, output, fileName)
activation = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
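# A sketch of the mean-squared-error alternative described in the legacy note
# inside compareTensors. MSE is a distance, so callers would sort candidates
# ascending rather than descending. Hypothetical helper, not in the original file.
import torch.nn as nn

def compareTensorsMSE(refs, target):
    if len(refs) == 0:
        return None
    mse = nn.MSELoss(reduction="sum")  # matches the old mse.sizeAverage = false
    total = 0.0
    for ref in refs:
        total = total + mse(ref, target)
    return total / len(refs)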
|
EunbinSeo/Pytorch-vgg-memoji
|
compare.py
|
compare.py
|
py
| 1,363
|
python
|
en
|
code
| 1
|
github-code
|
6
|
42090679043
|
from events import OnEvents
from environment import Environment
from util import Util
class Component(OnEvents):
""" Base Class for individual processes.
"""
def __init__(self):
super(Component, self).__init__()
self.exec_times = []
self.Util = Util()
def run(self, **kwargs):
pass
def execute(self, kwargs, stdout=None, stdin=None,
return_output=False, print_output=False,
current_wd=None, logger=None, hook=True):
cmd = [self.executable]
for arg in self.args:
if 'stdout' == arg:
stdout = arg
elif 'stdin' == arg:
stdin = arg
else:
if isinstance(arg, list):
#value = [arg[0], getattr(self, arg[1])]
if kwargs[arg[1]] is not None:
value = [arg[0], kwargs[arg[1]]]
else:
value = None
else:
value = kwargs[arg]
if value is not None:
if not isinstance(value, list):
value = [value,]
for v in value:
if v not in (None, '') and not (not v and isinstance(v, bool)):
cmd.append(str(v))
output = self.Util.exec_cmd(cmd, stdout, stdin,
return_output, print_output,
current_wd, logger)
self.exec_times.append(output['exec_time'])
if hook:
retval = output['retval']
kwargs.update({'output': output})
success = True if retval == 0 else False
self.event_trigger(success, **kwargs)
return output
def get_last_exec_time(self):
if self.exec_times:
return self.exec_times[-1]
else:
return 0
    def get_avg_exec_time(self):
        if not self.exec_times:
            return 0
        return sum(self.exec_times) / len(self.exec_times)
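# A sketch of how Component.execute turns an `args` spec into a command line.
# The helper and the 'convert' example are hypothetical and skip the
# stdout/stdin special cases handled above.
def build_cmd(executable, args_spec, kwargs):
    cmd = [executable]
    for arg in args_spec:
        if isinstance(arg, list):      # ['--flag', 'kwarg_name'] pair
            value = [arg[0], kwargs[arg[1]]] if kwargs[arg[1]] is not None else None
        else:                          # bare kwarg name
            value = kwargs[arg]
        if value is not None:
            if not isinstance(value, list):
                value = [value]
            for v in value:
                if v not in (None, '') and not (not v and isinstance(v, bool)):
                    cmd.append(str(v))
    return cmd

# ['convert', '-rotate', '90', 'in.png', 'out.png']
print(build_cmd('convert', [['-rotate', 'angle'], 'src', 'dst'],
                {'angle': 90, 'src': 'in.png', 'dst': 'out.png'}))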
|
tom-kerr/bookmaker
|
components/component.py
|
component.py
|
py
| 2,107
|
python
|
en
|
code
| 6
|
github-code
|
6
|
73706334586
|
from django.shortcuts import render
from django.http import HttpResponse
from app1.models import Topic, Webpage, AccessRecord
from app1.forms import App1Form
# Create your views here.
def home(request):
#return HttpResponse("Hello Hao!")
my_dict = {'insert_me':"Goodbye now from view.py!!"}
return render(request, 'app1/home.html', context=my_dict)
def index(request):
wp_list = AccessRecord.objects.order_by('date')
date_dict = {'access_records':wp_list}
return render(request, 'app1/index.html', context=date_dict)
def test(request):
return HttpResponse("Goodbye!")
def form(request):
theForm = App1Form()
if request.method == 'POST':
theForm = App1Form(request.POST)
if theForm.is_valid():
# process form
print("Validation success:")
print("top_name: " + theForm.cleaned_data['top_name'])
theForm.save(commit=True)
print("Topic created in DB, going back to index page...")
return topics(request)
else:
print("Form Error")
return render(request, 'app1/form.html', {'the_form':theForm})
def topics(request):
t_list = Topic.objects.order_by('top_name')
t_dict = {'topics':t_list, 'section':{'title':'Topics', 'parent':'App1'}}
return render(request, 'app1/topics.html', context=t_dict)
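# A hypothetical post/redirect/get variant of the form view above: returning
# topics(request) renders the topics template at the /form URL, so refreshing
# the page re-submits the form. Assumes a URL pattern named 'topics' exists.
from django.shortcuts import redirect

def form_prg(request):
    theForm = App1Form(request.POST or None)
    if request.method == 'POST' and theForm.is_valid():
        theForm.save(commit=True)
        return redirect('topics')
    return render(request, 'app1/form.html', {'the_form': theForm})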
|
haozer/project1
|
app1/views.py
|
views.py
|
py
| 1,277
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38814850733
|
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import seaborn as sns
sns.set_context("notebook", font_scale=1.8)
plt.style.use('fivethirtyeight')
parser = argparse.ArgumentParser()
parser.add_argument('--classifier', default="svm", type=str, nargs='?', help='classifier')
args = parser.parse_args()
classifier = args.classifier
# plot accuracy
acc_result = './result/_result_{}_acc.csv'.format(classifier)
df = pd.read_csv(acc_result, header=0, sep=",")
print("plot accuracy")
g = sns.catplot(x="Dataset", y="Accuracy", hue="Method", data=df, kind="bar", ci="sd", height=5, aspect=2, palette="Set1")
g.set_xlabels("Dataset")
g.set_ylabels("Accuracy")
for p in g.ax.patches:
    height = round(p.get_height(), 2)
    g.ax.text(p.get_x() + p.get_width() / 2, height + 1, str(height), ha="center", fontsize=10)
plt.savefig("./result/_plot_{}_accuracy.pdf".format(classifier), bbox_inches="tight")
plt.close()
# plot AUC
auc_result = './result/_result_{}_auc.csv'.format(classifier)
df = pd.read_csv(auc_result, header=0, sep=",")
print("plot AUC")
g = sns.catplot(x="Dataset", y="AUC", hue="Method", data=df, kind="bar", ci="sd", height=5, aspect=2, palette="Set1")
g.set_xlabels("Dataset")
g.set_ylabels("AUC")
for p in g.ax.patches:
    height = round(p.get_height(), 2)
    g.ax.text(p.get_x() + p.get_width() / 2, height + 1, str(height), ha="center", fontsize=10)
plt.savefig("./result/_plot_{}_auc.pdf".format(classifier), bbox_inches="tight")
plt.close()
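# The two blocks above differ only in the metric; a hypothetical refactor with
# the same behavior could fold them into one helper:
def plot_metric(metric, file_key, plot_key):
    df = pd.read_csv('./result/_result_{}_{}.csv'.format(classifier, file_key), header=0, sep=",")
    print("plot " + metric)
    g = sns.catplot(x="Dataset", y=metric, hue="Method", data=df, kind="bar", ci="sd", height=5, aspect=2, palette="Set1")
    g.set_xlabels("Dataset")
    g.set_ylabels(metric)
    for p in g.ax.patches:
        height = round(p.get_height(), 2)
        g.ax.text(p.get_x() + p.get_width() / 2, height + 1, str(height), ha="center", fontsize=10)
    plt.savefig("./result/_plot_{}_{}.pdf".format(classifier, plot_key), bbox_inches="tight")
    plt.close()

# plot_metric("Accuracy", "acc", "accuracy"); plot_metric("AUC", "auc", "auc")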
|
nphdang/CCRAL
|
visualize.py
|
visualize.py
|
py
| 1,524
|
python
|
en
|
code
| 3
|
github-code
|
6
|
19581520317
|
import os
import time
from collections import defaultdict
from os.path import join as osjoin
import csv
from pyspark.sql import SparkSession
import pyspark.sql.types as T
from util.file_manager import file_manager
from util.cosine_similarity import calculate_cosine_similarity
from core.directory import (
src_embeddings_dir, susp_embeddings_dir, susp_stats_dir, csv_dir,
parquet_train_classifier_dir, train_classifier_log_file
)
spark = SparkSession.builder.appName('test_csv').getOrCreate()
schema = T.StructType([
T.StructField('cosine_similarity', T.FloatType(), False),
T.StructField('is_plagiarism', T.IntegerType(), False)
])
def convert_from_csv_to_parquet(
csv_dir, csv_file, parquet_root_dir, parquet_filename
):
df = spark.read.csv(osjoin(csv_dir, csv_file), header=False, schema=schema)
df.write.format('parquet').save(osjoin(parquet_root_dir, parquet_filename))
    print('done\t', end='')
# Stats for a single suspicious file.
# Convert the susp JSON stats file into stats that can be used to compare the susp file with src files.
# stats = {'src_name.txt': [{'src': set(), 'susp': set()}]}
def get_stats_for_a_susp_file(file):
raw_susp_stats = file_manager.read_json(file)
stats = defaultdict(list)
for item in raw_susp_stats['file_stats']:
para_len = item['paragraph_length']
start_index_in_src = item['src_start_index']
insert_index_in_susp = item['susp_insert_index']
stats[item['src_file']].append({
'src': set(range(start_index_in_src, start_index_in_src+para_len)),
'susp': set(range(insert_index_in_susp, insert_index_in_susp+para_len))
})
return stats
# main_stats = {
# 'src_name.txt': [{'src': set(), 'susp': set()}],
# 'src_name.txt': [{'src': set(), 'susp': set()}]
# }
def is_plagiarism_sentence(src_index, susp_index, src_name, main_stats):
if src_name in main_stats:
for index, item in enumerate(main_stats[src_name]):
if src_index in item['src'] and susp_index in item['susp']:
main_stats[src_name][index]['src'].remove(src_index)
main_stats[src_name][index]['susp'].remove(susp_index)
return 1, main_stats
return 0, main_stats
def read_embeddings(dir, file):
return file_manager.pickle_load(osjoin(dir, file))
def stream_source_embeddings_from_pickle(num_of_file=3):
src_embeddings_files = os.listdir(src_embeddings_dir)
for start_index in range(0, len(src_embeddings_files), num_of_file):
source_embeddings = []
for src_emb in src_embeddings_files[start_index: start_index+num_of_file]:
source_embeddings.extend(
file_manager.pickle_load(osjoin(src_embeddings_dir, src_emb))
)
yield source_embeddings
susp_list_file = osjoin('..', 'stats_about_files', 'susp_for_train_model.txt')
susp_list = file_manager.read_line_by_line(susp_list_file)
susp_list = [f'embddings_{file}.pk' for file in susp_list]
for susp_embeddings_file in susp_list:
start = time.time()
suspicious_embeddings = read_embeddings(susp_embeddings_dir, susp_embeddings_file)
susp_file_name = susp_embeddings_file[:-7]
main_stats = get_stats_for_a_susp_file(osjoin(susp_stats_dir, susp_file_name + '.json'))
csv_file = osjoin(csv_dir, susp_file_name + '.csv')
print(f'Convert {susp_file_name}...', end='')
for source_embeddings in stream_source_embeddings_from_pickle():
result = []
for susp_row in suspicious_embeddings:
for src_row in source_embeddings:
sim = calculate_cosine_similarity(susp_row['embedding'], src_row['embedding'])
is_plg, main_stats = is_plagiarism_sentence(
src_row['index'], susp_row['index'], src_row['filename'], main_stats
)
result.append((sim, is_plg))
with open(csv_file, 'a') as f:
writer = csv.writer(f)
writer.writerows(result)
    # For better read/write performance and a smaller disk footprint,
# convert csv to parquet format and then remove csv file
convert_from_csv_to_parquet(csv_dir, csv_file, parquet_train_classifier_dir, susp_file_name)
os.remove(osjoin(csv_dir, csv_file))
execute_time = round(time.time() - start, 2) / 60
log_content = f'{susp_embeddings_file} {execute_time} mins'
file_manager.append_single_line(train_classifier_log_file, log_content)
print(execute_time, 'mins')
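# Hypothetical follow-up (not in the original script): the parquet output can
# be read back with the same two-column schema for classifier training.
def load_training_frame(parquet_filename):
    return spark.read.parquet(osjoin(parquet_train_classifier_dir, parquet_filename))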
|
oldguard69/lvtn
|
server/core/4_make_data_for_training_classifier.py
|
4_make_data_for_training_classifier.py
|
py
| 4,560
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18932875190
|
# === Task 22 ===
# Write a program that prints, from the file zoznam.txt, the name and age of every student who is at least 17 years old, one per line. The data in zoznam.txt is laid out so that each line holds the age and the name of one student.
subor = open("ziaci.txt", "r")
ziaci = list(map(lambda x: x.split(" "), subor.read().split("\n")))
aspon17 = filter(lambda x: int(x[0]) >= 17, ziaci)
subor.close()
for z in aspon17:
print(f"{z[1]}: {z[0]} rokov")
|
Plasmoxy/MaturitaInformatika2019
|
ulohyPL/u22.py
|
u22.py
|
py
| 472
|
python
|
sk
|
code
| 2
|
github-code
|
6
|
18091330209
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0060_auto_20150130_1750'),
]
operations = [
migrations.AlterField(
model_name='basicmemberinformation',
name='auth_key',
field=models.CharField(default='031910ad27f4d5c4ffa8ec23fe5ce895d59611079de70db9c7597121bfc2c443', max_length=64),
preserve_default=True,
),
]
|
hongdangodori/slehome
|
slehome/account/migrations/0061_auto_20150201_1909.py
|
0061_auto_20150201_1909.py
|
py
| 531
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35727586260
|
#!/usr/bin/python
import pygame, sys, game
from pygame.locals import *
WIDTH = 640
HEIGHT = 480
DRAWSTEP = 3
TICK = 30
VOLATILITY = 0.8
TIMESTEP = float(TICK)/1000
if len(sys.argv) < 2:
ORDER = 2
else:
ORDER = int(sys.argv[1])
BLACK = pygame.Color(0,0,0)
WHITE = pygame.Color(255,255,255)
pygame.init()
fpsClock = pygame.time.Clock()
font = pygame.font.Font(None, 36)
window = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption('Deriv')
drawX = range(0, WIDTH // 2, DRAWSTEP)  # integer division keeps range() valid on Python 3
drawY = [HEIGHT // 2] * len(drawX)
numDraw = len(drawX)
cDerivatives = [0] * (ORDER+1)
pDerivatives = cDerivatives
paused = True
game = game.Game(ORDER, len(drawX))
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEMOTION:
mouseX, mouseY = event.pos
elif event.type == MOUSEBUTTONUP:
paused = not paused
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
if not paused:
mouseX, mouseY = pygame.mouse.get_pos()
game.tick(VOLATILITY * (1-2*float(mouseY)/HEIGHT), TIMESTEP)
#cDerivatives[ORDER] = VOLATILITY * (1 - 2*float(mouseY)/HEIGHT)
#for i in range(ORDER,0,-1):
#cDerivatives[i-1] = pDerivatives[i-1] + 0.5*TIMESTEP*(pDerivatives[i] + cDerivatives[i])
#pDerivatives = cDerivatives
#drawY.append(int(0.5*HEIGHT*(1-cDerivatives[0])))
drawY.append(int(0.5*HEIGHT*(1-game.history[-1])))
drawY.pop(0)
window.fill(BLACK)
if paused:
text = font.render("Paused", True, WHITE)
        textpos = text.get_rect(centerx=WIDTH // 2)
textpos.top = 50
window.blit(text, textpos)
for i in range(0, min(len(drawY),numDraw)-1):
pygame.draw.line(window, WHITE, (drawX[i],drawY[i]), (drawX[i+1],drawY[i+1]))
pygame.display.update()
fpsClock.tick(TICK)
|
TheBB/deriv
|
deriv.py
|
deriv.py
|
py
| 1,999
|
python
|
en
|
code
| 0
|
github-code
|
6
|
25847899408
|
from pyautocad import Autocad
class Channel(object):
instance = None
def __init__(self):
self._session = None
@property
def session(self):
if not self._session:
try:
self._session = session = Autocad(create_if_not_exists=False)
session.prompt("Python trying to connect...")
session.prompt("Python connected!")
except OSError:
raise Exception("Could not connect to the AUTOCAD process. Please start AUTOCAD before running the script.")
return self._session
if Channel.instance is None:
Channel.instance = Channel()
channel = Channel.instance
|
akila122/pycad
|
autocad_session/__init__.py
|
__init__.py
|
py
| 679
|
python
|
en
|
code
| 0
|
github-code
|
6
|
12211334459
|
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
import math
import shutil
import logging
import torch
import torchvision
import torch.nn as nn
import torch.nn.init as init
import torchvision.transforms as transforms
from TinyImageNetDataset import TinyImageNetDataset
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
def save_checkpoint(state, is_best, filename):
torch.save(state, filename + ".pth.tar")
if is_best:
shutil.copyfile(filename + ".pth.tar", filename + "_best.pth.tar")
def load_checkpoint(path, model, optimizer=None):
if os.path.isfile(path):
logging.info("=== loading checkpoint '{}' ===".format(path))
checkpoint = torch.load(path)
model.load_state_dict(checkpoint["state_dict"], strict=False)
if optimizer is not None:
best_prec = checkpoint["best_prec"]
last_epoch = checkpoint["last_epoch"]
optimizer.load_state_dict(checkpoint["optimizer"])
logging.info(
"=== done. also loaded optimizer from "
+ "checkpoint '{}' (epoch {}) ===".format(path, last_epoch + 1)
)
return best_prec, last_epoch
def get_data_loader(transform_train, transform_test, config):
# assert config.dataset == "cifar10" or config.dataset == "cifar100"
if config.dataset == "cifar10":
trainset = torchvision.datasets.CIFAR10(
root=config.data_path, train=True, download=True, transform=transform_train
)
testset = torchvision.datasets.CIFAR10(
root=config.data_path, train=False, download=True, transform=transform_test
)
elif config.dataset == "cifar100":
trainset = torchvision.datasets.CIFAR100(
root=config.data_path, train=True, download=True, transform=transform_train
)
testset = torchvision.datasets.CIFAR100(
root=config.data_path, train=False, download=True, transform=transform_test
)
elif config.dataset == "tiny-imagenet":
trainset = TinyImageNetDataset(
root=config.data_path, download=True, mode='train', task='classification', transform=transform_train
)
testset = TinyImageNetDataset(
root=config.data_path, download=True, mode='val', task ='classification', transform=transform_test
)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=config.batch_size, shuffle=True, num_workers=config.workers
)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=config.test_batch, shuffle=False, num_workers=config.workers
)
return train_loader, test_loader
def data_augmentation(config, is_train=True):
aug = []
if is_train:
# random crop
if config.augmentation.random_crop:
aug.append(transforms.RandomCrop(config.input_size, padding=4))
        # horizontal flip
if config.augmentation.random_horizontal_filp:
aug.append(transforms.RandomHorizontalFlip())
aug.append(transforms.ToTensor())
# normalize [- mean / std]
if config.augmentation.normalize:
if config.dataset == "cifar10":
aug.append(
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
)
elif config.dataset == "cifar100":
aug.append(
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
)
elif config.dataset == "tiny-imagenet":
aug.append(
transforms.Normalize((0.4775, 0.4806, 0.4805), (0.1592, 0.1611, 0.1653))
)
if is_train and config.augmentation.cutout:
# cutout
aug.append(
Cutout(n_holes=config.augmentation.holes, length=config.augmentation.length)
)
return aug
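# A sketch of how data_augmentation and get_data_loader are presumably wired
# together by the training entry point (not shown in this file); `config` is
# whatever configuration object the project passes around.
def build_loaders(config):
    transform_train = transforms.Compose(data_augmentation(config, is_train=True))
    transform_test = transforms.Compose(data_augmentation(config, is_train=False))
    return get_data_loader(transform_train, transform_test, config)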
|
zarekxu/QuadraLib
|
image_classification/utils.py
|
utils.py
|
py
| 7,113
|
python
|
en
|
code
| 6
|
github-code
|
6
|
35260443444
|
import logging
from typing import List, Optional
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from debunkbot.models import (
Claim,
GoogleSheetCredentials,
MessageTemplate,
MessageTemplateSource,
)
logger = logging.getLogger(__name__)
class GoogleSheetHelper(object):
"""Helper class for getting data from google sheet"""
def __init__(self) -> None:
"""Instance method to initialize Google Drive API
:param self:
:return: None
"""
self.__scope = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/drive",
]
        credentials = GoogleSheetCredentials.objects.first()
        if credentials:
            google_credentials = credentials.credentials
        else:
            raise Exception("Google credentials have not been set up.")
self.__credentials = ServiceAccountCredentials.from_json_keyfile_dict(
google_credentials, scopes=self.__scope
)
self.__client = gspread.authorize(self.__credentials)
def get_sheet(self, sheet_key):
return self.__client.open_by_key(sheet_key)
    def open_work_sheet(self, sheet_id, work_sheet_name) -> Optional[List[dict]]:
        """Instance method to open a worksheet and return all of its records.

        :param self: Instance of GoogleSheetHelper
        :return: Sheet records as a list of dicts, or None
        """
sheet = self.get_sheet(sheet_id)
worksheet = sheet.worksheet(work_sheet_name)
try:
return worksheet.get_all_records()
except gspread.exceptions.SpreadsheetNotFound:
return None
def get_claims(self) -> Optional[List[dict]]:
"""
Instance method that loads the claims either from the
cache or directly from google's servers depending on whether
we have a saved version in our cache or not
:param self: Instance of GoogleSheetHelper
:return: Claims
"""
claims = Claim.objects.all()
return claims
def fetch_response_messages(self):
# Delete all existing messages and create new ones.
MessageTemplate.objects.all().delete()
message_template_sources = MessageTemplateSource.objects.all()
message_templates = []
for message_template_source in message_template_sources:
try:
sheet = self.get_sheet(
message_template_source.spreadsheet_id
).worksheet(message_template_source.worksheet)
response_message_templates = sheet.get_all_records()
for response_message_template in response_message_templates:
message_template = response_message_template.get(
message_template_source.column
)
if message_template and message_template != "":
message_template_category = message_template_source.worksheet
                        message_template_obj = MessageTemplate(
                            message_template=message_template,
                            message_template_source=message_template_source,
                            message_template_category=message_template_category,
                        )
                        message_templates.append(message_template_obj)
except Exception:
continue
MessageTemplate.objects.bulk_create(message_templates)
|
CodeForAfrica/DebunkBot
|
debunkbot/utils/gsheet/helper.py
|
helper.py
|
py
| 3,569
|
python
|
en
|
code
| 8
|
github-code
|
6
|
42926779466
|
'''
In a 2-D array (where every row has the same length), each row is sorted in
ascending order from left to right and each column is sorted in ascending
order from top to bottom. Write a function that takes such a 2-D array and an
integer and determines whether the array contains that integer.
'''
class Solution:
    # array: a 2-D list; start at the top-right corner and move left or down
def find(self, target, array):
xend = len(array) - 1
yend = len(array[0]) - 1
x = 0
while x <= xend and yend >= 0:
if array[x][yend] == target:
return True
elif array[x][yend] > target:
yend -= 1
else:
x += 1
return False
if __name__ == "__main__":
array = [[1,2,5,7,9],
[2,4,6,8,10],
[3,5,7,9,11],
[4,6,8,10,12]]
S = Solution()
print(S.find(22, array))
|
ppalantir/axjingWorks
|
algorithm_note/getOffer/offer_find_two_array.py
|
offer_find_two_array.py
|
py
| 878
|
python
|
zh
|
code
| 1
|
github-code
|
6
|
72532378429
|
from collections.abc import Sequence
from datetime import datetime, timedelta
from typing import Final
import arrow
import pytest
from pydantic import NonNegativeFloat
from simcore_service_dynamic_sidecar.modules.prometheus_metrics import (
_MAX_DEFAULT_METRICS_SCRAPE_INTERVAL,
_MAX_PROMETHEUS_SAMPLES,
_get_user_services_scrape_interval,
)
_DT_REF: Final[datetime] = arrow.utcnow().datetime
@pytest.mark.parametrize(
"input_query_times, expected",
[
pytest.param(
[], _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL, id="no_prometheus_queries"
),
pytest.param(
[_DT_REF],
_MAX_DEFAULT_METRICS_SCRAPE_INTERVAL,
id="too_few_prometheus_queries",
),
        pytest.param(
            [_DT_REF, _DT_REF + timedelta(seconds=5)], 5, id="two_queries_five_seconds_apart"
        ),
pytest.param(
[_DT_REF, _DT_REF + timedelta(seconds=1000)],
_MAX_DEFAULT_METRICS_SCRAPE_INTERVAL,
id="prometheus_queries_too_far_apart",
),
pytest.param(
[
_DT_REF + timedelta(seconds=i * 3)
for i in range(_MAX_PROMETHEUS_SAMPLES)
],
3,
id="average_over_prometheus_queries",
),
],
)
def test_get_user_services_scrape_interval(
input_query_times: Sequence[datetime], expected: NonNegativeFloat
):
assert _get_user_services_scrape_interval(input_query_times) == expected
|
ITISFoundation/osparc-simcore
|
services/dynamic-sidecar/tests/unit/test_modules_prometheus_metrics.py
|
test_modules_prometheus_metrics.py
|
py
| 1,426
|
python
|
en
|
code
| 35
|
github-code
|
6
|
10260578739
|
import json
import heapq
import math
#get texts
with open('10k_tokenized_texts.json', 'r') as file:
tokenized_texts = json.load(file)
#count word frequency and create vocabulary
wordfreq = {}
for text in tokenized_texts:
    for token in text:
        wordfreq[token] = wordfreq.get(token, 0) + 1
#get 10k most frequent words
most_freq = heapq.nlargest(10000, wordfreq, key=wordfreq.get)
#count document occurence (= in how many different documents a word appears)
document_occurence = [0] * len(most_freq)
for i in range(len(most_freq)):
for text in tokenized_texts:
if most_freq[i] in text:
document_occurence[i] += 1
#get inverse document frequency (idf) for each word
idf = [0] * len(most_freq)
for i in range(len(most_freq)):
idf[i] = (math.log(len(tokenized_texts)/document_occurence[i]))
#create bag of words vectors with tf-idf weighting
tfidf_vecs = []
for i in range(len(tokenized_texts)):
tfidf_vec = [0] * len(most_freq)
for j in range(len(most_freq)):
        tf = tokenized_texts[i].count(most_freq[j])/(len(tokenized_texts[i])+1) # normalize by document length; +1 avoids division by zero
tfidf_vec[j] = tf * idf[j]
tfidf_vecs.append(tfidf_vec)
#dump to files
with open('10k_bow_tfidf_embeds.json', 'w') as file:
json.dump(tfidf_vecs, file)
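# A sketch of the same pipeline with scikit-learn (alternative, not part of the
# original script). Note sklearn smooths idf and uses raw counts for tf, so the
# numbers will differ slightly from the hand-rolled weighting above.
from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(analyzer=lambda tokens: tokens, max_features=10000)
tfidf_matrix = vectorizer.fit_transform(tokenized_texts)  # sparse (n_docs, 10000)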
|
iwillemse/pre-uni
|
code/bow-tfidf.py
|
bow-tfidf.py
|
py
| 1,368
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21402553475
|
from pyTasks.tasks import Task, Parameter
from pyTasks.utils import containerHash
from .graph_tasks import GraphPruningTask
from .mongo_tasks import MongoResourceTarget
from sklearn.model_selection import KFold
import numpy as np
from bson.code import Code
def non_filter(label):
return False
def identity(obj):
return obj
class MongoGraphNodesTask(Task):
collection = Parameter("graph_nodes")
def __init__(self, graph, D):
self.graph = graph
self.D = D
def require(self):
return GraphPruningTask(self.graph, self.D)
    def __taskid__(self):
        return "GraphNodesTask_%s_%d" % (self.graph, self.D)
def output(self):
return MongoResourceTarget(
self.collection.value, '_id', self.graph
)
def run(self):
with self.input()[0] as i:
G = i.query()
nodes = set([])
for node in G:
label = G.node[node]['label']
nodes.add(label)
with self.output() as o:
coll = o.collection
coll.insert_many([
{'graph_id': self.graph,
'node': n}
for n in nodes
])
class MongoFrequencyTask(Task):
collection = Parameter("node_frequency")
def __init__(self, graphs, it, D):
self.graphs = graphs
self.it = it
self.D = D
def require(self):
return [
MongoGraphNodesTask(g, self.D)
for g in self.graphs
]
def output(self):
return MongoResourceTarget(
self.collection.value, '_id', 'frequency_%d' % self.it
)
def run(self):
with self.input()[0] as i:
coll = i.collection
map = Code("""
function(){
emit(this.node, 1);
}
""")
reduce = Code("""
function(key, values){
var total = 0;
for(var i = 0; i < values.length; i++){
total += values[i];
}
return total;
}
""")
        result = coll.map_reduce(map, reduce, self.collection.value)
        total = len(self.graphs)
        # normalize counts to relative frequencies; update_many is needed so
        # every document is touched, not just the first one
        result.update_many({}, {'$mul': {'value': 1 / total}})
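# map_reduce is deprecated in recent MongoDB/pymongo releases; a hypothetical
# aggregation-pipeline equivalent of the counting step would be:
def node_frequencies(coll, out_collection):
    coll.aggregate([
        {'$group': {'_id': '$node', 'value': {'$sum': 1}}},
        {'$out': out_collection},
    ])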
|
cedricrupb/pySVRanker
|
frequent_pattern_tasks.py
|
frequent_pattern_tasks.py
|
py
| 2,344
|
python
|
en
|
code
| 2
|
github-code
|
6
|
74959952508
|
from django.db import models
from django.core.validators import RegexValidator
from django.contrib.auth.models import AbstractUser
from libgravatar import Gravatar
# Create your models here.
class User(AbstractUser):
"""User model used for authentication."""
class Experience(models.TextChoices):
BEGINNER = 'B'
INTERMEDIATE = 'I'
ADVANCED = 'A'
MASTER = 'M'
GRANDMASTER = 'G'
username = models.CharField(
max_length=30,
unique=True,
validators=[
RegexValidator(
regex='^[a-z0-9]([._-](?![._-])|[a-z0-9])*[a-z0-9]$',
message='Usernames may only contain lowercase characters '
'and . _ - but not as '
'the first or last character.',
code='invalid_username'
)
]
)
"""Attributes of Users."""
name = models.CharField(max_length=100, blank=False)
email = models.EmailField(unique=True, blank=False)
public_bio = models.CharField(max_length=250, blank=False)
chess_experience = models.CharField(max_length=1, choices=Experience.choices, default=Experience.BEGINNER)
def gravatar(self, size=120):
"""Return a URL to the user's gravatar."""
gravatar_object = Gravatar(self.email)
gravatar_url = gravatar_object.get_image(size=size, default='mp')
return gravatar_url
|
amir-rahim/ChessClubManagementSystem
|
clubs/models/users.py
|
users.py
|
py
| 1,456
|
python
|
en
|
code
| 1
|
github-code
|
6
|
13522158009
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 7 10:37:20 2019
@author: paul
"""
# import relevant packages
from TwitterAPI import TwitterAPI
import pandas as pd
import json
i = 0 # counter
requestlist = [] # list for storing each call from the api (500 tweets at a time)
# search Criteria
SEARCH_TERM = ''
PRODUCT = 'fullarchive'
LABEL = 'Research'
#API keys to authorise and access the API
consumerKey=""
consumerSecret=""
accessToken=""
accessSecret=""
#Code to initiate API
api = TwitterAPI(consumerKey, consumerSecret,
accessToken, accessSecret)
# loop making successive API calls until the response no longer contains a 'next' page token
while True:
if i == 0 :
requestlist.append(api.request('tweets/search/%s/:%s' % (PRODUCT, LABEL),
{'query':SEARCH_TERM,
'fromDate': '201408220000',
'toDate': '201408310000',
'maxResults': 500}))
else:
if requestlist[i-1].json().get('next') == None :
break
else:
requestlist.append(api.request('tweets/search/%s/:%s' % (PRODUCT, LABEL),
{'query':SEARCH_TERM,
'fromDate': '201408220000',
'toDate': '201408310000',
'maxResults': 500,
'next':requestlist[i-1].json()['next']}))
i +=1
# save each payload to csv (mode='a' appends; note the header row is written for every payload)
for payload in requestlist:
df = pd.read_json(json.dumps(payload.json()['results']))
df.to_csv("acsvfile.csv", mode = 'a')
|
prgeddes/TwitterDataExtraction
|
Search_Save_Tweets.py
|
Search_Save_Tweets.py
|
py
| 1,563
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72940318588
|
#
# @lc app=leetcode id=9 lang=python3
#
# [9] Palindrome Number
#
# https://leetcode.com/problems/palindrome-number/description/
#
# algorithms
# Easy (47.22%)
# Likes: 2092
# Dislikes: 1505
# Total Accepted: 860.8K
# Total Submissions: 1.8M
# Testcase Example: '121'
#
# Determine whether an integer is a palindrome. An integer is a palindrome when
# it reads the same backward as forward.
#
# Example 1:
#
#
# Input: 121
# Output: true
#
#
# Example 2:
#
#
# Input: -121
# Output: false
# Explanation: From left to right, it reads -121. From right to left, it
# becomes 121-. Therefore it is not a palindrome.
#
#
# Example 3:
#
#
# Input: 10
# Output: false
# Explanation: Reads 01 from right to left. Therefore it is not a palindrome.
#
#
# Follow up:
#
# Could you solve it without converting the integer to a string?
#
#
# @lc code=start
from math import floor, log
class Solution:
def isPalindrome(self, x: int) -> bool:
if x == 0:
return True
if x > 0:
n_bit = floor(log(x, 10)) + 1
for i in range(1, n_bit // 2 + 1):
x, bit_right = divmod(x, 10)
bit_left = x // 10**(n_bit - 2 * i) % 10
if bit_left != bit_right:
return False
return True
else:
return False
# @lc code=end
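# An alternative sketch for the follow-up question: revert only half of the
# digits and compare (standard approach, not the submitted solution above).
def isPalindromeHalf(x: int) -> bool:
    if x < 0 or (x % 10 == 0 and x != 0):
        return False
    rev = 0
    while x > rev:
        rev = rev * 10 + x % 10
        x //= 10
    return x == rev or x == rev // 10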
|
LeanderLXZ/leetcode-solutions
|
problems/9.palindrome-number/9.palindrome-number.py
|
9.palindrome-number.py
|
py
| 1,419
|
python
|
en
|
code
| 0
|
github-code
|
6
|
5404980794
|
# The array consists of integers where every integer appears three times except one; return that single integer.
def single_number(Arr):
    n = len(Arr)
    ones = 0  # bits seen exactly once (mod 3) so far
    twos = 0  # bits seen exactly twice (mod 3) so far
    for i in range(0, n):
        ones = (ones ^ Arr[i]) & (~twos)
        twos = (twos ^ Arr[i]) & (~ones)
    return ones
# test case
Arr = [1,2,4,3,3,2,2,3,1,1]
unique_number = single_number(Arr)
print(unique_number)
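# A brute-force cross-check of the bit trick above (illustration only):
from collections import Counter

def single_number_naive(arr):
    return next(v for v, c in Counter(arr).items() if c == 1)

assert single_number_naive(Arr) == unique_number == 4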
|
Ranjit007ai/InterviewBit-BitManipulation
|
bit_manipulation/single_number_II/solution.py
|
solution.py
|
py
| 418
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35473677115
|
import numpy as np
class PriorBoxes:
def __init__(self, strides, scales, ratios):
self.strides = strides
self.scales = scales # [10, 25, 40]
self.ratios = ratios
self.config = {
"strides": self.strides,
"scales": self.scales,
"ratios": self.ratios
}
"""
example)
strides = [4, 8, 16]
scales => [10, 25, 40]
ratios => [(1 ,1),
(1.5,0.5),
(1.2,0.8),
(0.8,1.2),
(1.4,1.4)]
"""
    def generate(self, image_shape):
        """
        Generate the prior boxes (a.k.a. default boxes) for an input of shape
        image_shape = (H, W, 3).
        return:
            array of shape (# prior boxes, 4)
        """
fmap_hs = np.ceil(image_shape[0] / np.asarray(self.strides))
fmap_ws = np.ceil(image_shape[1] / np.asarray(self.strides))
total_anchors = []
# scaled_ratios
self.ratios = np.asarray(self.ratios, dtype=np.float32)
scaled_ratios = []
for s in self.scales:
for r in self.ratios:
scaled_ratios.append(s * r)
scaled_ratios = np.asarray(scaled_ratios)
scaled_ratios = scaled_ratios.reshape([len(self.scales), len(self.ratios), 2])
for ind in range(len(self.scales)):
h = fmap_hs[ind]
w = fmap_ws[ind]
stride = self.strides[ind] # shape []
achr_sizes = scaled_ratios[ind]
n_achr_sizes = len(achr_sizes)
n_achr = (h * w * n_achr_sizes).astype(np.int32)
            # cx
            cx, cy = np.meshgrid(np.arange(w), np.arange(h))
            cx = cx * stride + stride // 2  # shape: (h, w)
            grid_cx = np.stack([cx] * n_achr_sizes, axis=-1)  # shape: (h, w, n_achr_sizes)
            # cy
            cy = cy * stride + stride // 2  # shape: (h, w)
            grid_cy = np.stack([cy] * n_achr_sizes, axis=-1)  # shape: (h, w, n_achr_sizes)
            grid = np.expand_dims(np.ones_like(cx), axis=-1)  # shape: (h, w, 1)
# ws
ws_sizes = achr_sizes[:, 1] # shape: 5,
grid_ws = grid * ws_sizes # shape: (32, 32, 5)
# hs
hs_sizes = achr_sizes[:, 0] # shape: 5,
grid_hs = grid * hs_sizes # shape: (32,32, 5)
            # stack cx, cy, ws, hs
            anchors = np.stack([grid_cx, grid_cy, grid_ws, grid_hs], axis=-1)  # shape: (h, w, n_achr_sizes, 4)
            anchors = anchors.reshape([n_achr, 4])  # shape: (n_achr, 4)
total_anchors.append(anchors)
total_anchors = np.concatenate(total_anchors, axis=0)
return total_anchors
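# A usage sketch with the example configuration from the class comment
# (illustrative values only):
if __name__ == "__main__":
    prior = PriorBoxes(strides=[4, 8, 16],
                       scales=[10, 25, 40],
                       ratios=[(1, 1), (1.5, 0.5), (1.2, 0.8), (0.8, 1.2), (1.4, 1.4)])
    anchors = prior.generate((128, 128, 3))
    print(anchors.shape)  # (6720, 4) rows of (cx, cy, w, h)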
|
taila0/single-shot-multibox-detector
|
src/old_codes/prior.py
|
prior.py
|
py
| 2,726
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71839285309
|
"""This module is responsible for reading the tables and processing them in order to use their data.
The use of pandas or any other parsing of the particular data table should be done here.
"""
__author__ = "carlosmperilla"
__copyright__ = "Copyright 2022 Carlos M. Perilla"
__credits__ = "Carlos M. Perilla"
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "carlosmperilla"
__email__ = "carlosperillaprogramacion@gmail.com"
__status__ = "Developing"
from .purchase import PurchaseList
|
carlosmperilla/budget-system
|
budget_system/purchase/__init__.py
|
__init__.py
|
py
| 510
|
python
|
en
|
code
| 2
|
github-code
|
6
|
34892241691
|
from flask import Flask, request, make_response, jsonify
from flask_cors import CORS, cross_origin
import hashlib
from controller import *
app = Flask(__name__)
CORS(app)
Controller = Controller()
@app.route("/ong", methods=["GET", "POST", "PUT"])
@cross_origin()
def ong():
"""
This methods returns a list of all ONGs from
ONGs public table.
"""
if request.method == "POST":
try:
payload = request.get_json()
hashed_senha = hashlib.md5(payload['senha'].encode('utf-8')).hexdigest()
response = Controller.create_ong(
cnpj=payload['cnpj'] if 'cnpj' in payload else '',
nome=payload['nome'],
descricao=payload['descricao'] ,
tipo=payload['tipo'] if 'tipo' in payload else '',
telefone=payload['telefone'] if 'telefone' in payload else '',
email=payload['email'],
endereco_cep=payload['endereco_cep'] if 'endereco_cep' in payload else '',
endereco_num=payload['endereco_num'] if 'endereco_num' in payload else -1,
                endereco_complemento=payload['endereco_complemento'] if 'endereco_complemento' in payload else '',
senha=hashed_senha
)
response = jsonify(response)
# # response.headers.add('Access-Control-Allow-Origin', '*')
return response
        except Exception as e:
            response = {"Erro": str(e)}
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return(make_response(response, 400))
elif request.method == "PUT":
try:
payload = request.get_json()
if not set(['id_ong']).issubset(payload):
raise Exception('Id obrigatórios')
if 'senha' in payload:
payload['senha'] = hashlib.md5(payload['senha'].encode('utf-8')).hexdigest()
response = Controller.update_ong(
payload=payload
)
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return response
        except Exception as e:
            response = {"Erro": str(e)}
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return(make_response(response, 400))
elif request.method == "GET":
try:
response = Controller.get_all_ongs()
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return(make_response(response, 200))
        except Exception as e:
            response = {"Erro": str(e)}
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return(make_response(response, 400))
@app.route("/ong/<id>", methods=["GET"])
@cross_origin()
def get_ong(id):
"""
This method returns the ong with ong
"""
try:
response = Controller.get_ong(id)
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return(make_response(response, 200))
    except Exception as e:
        response = {"Erro": str(e)}
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return(make_response(response, 400))
@app.route("/ong/<id>", methods=["DELETE"])
@cross_origin()
def delete_ong(id):
try:
Controller.delete_ong(id)
response = {"Sucesso: ONG has been deleted"}
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return(make_response(response, 200))
except Exception as e:
response = {"Erro": e}
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return(make_response(response, 400))
@app.route("/login", methods=["POST"])
@cross_origin()
def login():
payload = request.get_json()
email = payload["email"]
senha = payload["senha"]
tipo = payload["tipo"]
try:
response = Controller.login(email, senha, tipo)
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return response
except Exception as e:
response = {"Erro": e}
response = jsonify(response)
# response.headers.add('Access-Control-Allow-Origin', '*')
return make_response(response, 400)
@app.route("/searchong", methods=["POST"])
@cross_origin()
def search_ong():
payload = request.get_json()
causa = payload["causa"] if "causa" in payload else None
nome = payload["nome"] if "nome" in payload else None
return Controller.search_ong(causa, nome)
if __name__ == "__main__":
app.run(debug=True)
|
BrunoTaufner/RPII
|
server/app.py
|
app.py
|
py
| 4,960
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18216869821
|
#Fall2019W9B
#Broken Keyboard | CodeForces 1251A
if __name__ == "__main__":
nqueries = int(input())
outputs = []
for q in range(nqueries):
testStr = input()
strLen = len(testStr)
res = ""
ind = 0
while ind < strLen:
currChar = testStr[ind]
if ind + 1 < strLen:
if currChar == testStr[ind + 1]:
ind += 1
else:
if currChar not in res:
res += currChar
else:
if currChar not in res:
res += currChar
ind += 1
        res = "".join(sorted(res))
outputs.append(res)
for o in outputs:
print(o)
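# The same answer via run-length grouping: a key is surely working iff some run
# of it has odd length (alternative sketch, same logic as the scan above):
from itertools import groupby

def working_keys(s):
    return ''.join(sorted({ch for ch, run in groupby(s) if sum(1 for _ in run) % 2 == 1}))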
|
andrew-qu2000/Programming-Club
|
Poly Programming Club/Fall2019W9B.py
|
Fall2019W9B.py
|
py
| 814
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14703280517
|
from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.sf_planning import SfPlanningSpider
test_response = file_response(
join(dirname(__file__), "files", "sf_planning.html"),
url="https://sfplanning.org/event/planning-commission-151",
)
spider = SfPlanningSpider()
freezer = freeze_time("2021-10-27")
freezer.start()
parsed_items = [item for item in spider.parse_meeting(test_response)]
freezer.stop()
def test_title():
assert parsed_items[0]["title"] == "Hearing for SF Planning Commission"
def test_description():
assert len(parsed_items[0]["description"]) == 7212
def test_start():
assert parsed_items[0]["start"] == datetime(2021, 10, 28, 13, 0)
def test_end():
assert parsed_items[0]["end"] is None
def test_time_notes():
assert parsed_items[0]["time_notes"] == ""
def test_id():
assert (
parsed_items[0]["id"]
== "sf_planning/202110281300/x/hearing_for_sf_planning_commission"
)
def test_status():
assert parsed_items[0]["status"] == "tentative"
def test_location():
assert parsed_items[0]["location"] == {
"address": "Stream at https://sfgovtv.org/planning – Public Comment:"
" (415) 655-0001 / Access Code: 2486 151 4664",
"name": "SF Planning Commission",
}
def test_source():
assert (
parsed_items[0]["source"]
== "https://sfplanning.org/event/planning-commission-151"
)
def test_links():
assert parsed_items[0]["links"] == [
{
"href": "https://sfplanning.org/sites/default/files/agendas/"
"2021-10/20211028_cal.pdf",
"title": "Meeting/Agenda Information",
},
{
"href": "https://sfplanning.org/resource/"
"planning-commission-packet-october-28-2021",
"title": "Supporting Documents",
},
]
def test_classification():
assert parsed_items[0]["classification"] == COMMISSION
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
assert item["all_day"] is False
|
washabstract/city-scrapers-ca
|
tests/test_sf_planning.py
|
test_sf_planning.py
|
py
| 2,234
|
python
|
en
|
code
| 1
|
github-code
|
6
|
21959415638
|
from fastapi import APIRouter, HTTPException
from init_system import system
from schemas.customer_shcema import SignIn, SignUp, SetCart, Email
from models.Cart import CartItem
router = APIRouter(prefix="/customer")
@router.post("/sign_in")
async def customer_login(body: SignIn):
try:
return {
"detail": "Successfully Sign-in",
"data": system.sign_in(body.email, body.password),
}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/sign_up")
async def customer_register(body: SignUp):
try:
email = body.email
password = body.password
firstname = body.firstname
lastname = body.lastname
customer = system.create_customer(email, password, firstname, lastname)
return {"detail": "Successfully Sign-up", "data": customer.email}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/set_cart_item")
async def add_cart_item(body: SetCart):
try:
email = body.email
product_list = body.product_list
customer = system.get_customer_by_email(email)
if not customer:
raise ValueError("There is no customer with this email.")
cart = customer.cart
cart_items = []
for item in product_list:
category = system.get_category_by_name(item.category)
product = category.get_product_by_id(item.id)
cart_items.append(CartItem(product, item.quantity))
cart.cart_items = cart_items
return {"detail": "Successfully added."}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/get_cart_detail")
async def view_cart(body: Email):
try:
email = body.email
customer = system.get_customer_by_email(email)
if not customer:
raise ValueError("There is no customer with this email.")
cart = customer.cart
return {"data": cart.get_detail()}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
|
Dope21/python-oop
|
controllers/customer_ctrl.py
|
customer_ctrl.py
|
py
| 2,486
|
python
|
en
|
code
| 0
|
github-code
|
6
|