| blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 2-616) | content_id (string, 40-40) | detected_licenses (list, 0-69) | license_type (string, 2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M) | authors (list, 1-1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f8e92de14db9bad2e1cad5ac7d8a09da0fb4d12
|
8bd1ae9c1681ee8c1214a4e9cda29a503676c36d
|
/v1/gps.py
|
418181c1a61dd0bdc1470bc0d6bc9a7e10955a77
|
[] |
no_license
|
zleffke/balloon
|
c15ef7e5614018022ca1f19ed2e4c82009352165
|
2943d303692b299e6d0866299d83a94d0839bf5b
|
refs/heads/master
| 2021-07-06T02:28:17.304024
| 2021-05-22T20:25:18
| 2021-05-22T20:25:18
| 57,078,951
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,637
|
py
|
#!/usr/bin/env python
##################################################
# GPS Interface
# Author: Zach Leffke
# Description: Initial GPS testing
##################################################
from optparse import OptionParser
import threading
from datetime import datetime as date
import os
import serial
import math
import sys
import string
import time
def utc_ts():
    # timestamp prefix for log lines; module-level helper, so no self parameter
    return str(date.utcnow()) + " UTC | "
class gpgga_object(object):
def __init__(self):
self.latitude = 0.0 #degrees
self.longitude = 0.0 #degrees
        self.altitude = 0.0     #feet (converted from the NMEA meters field)
self.utc_time = '' #string
self.fix_quality = 0 #0=invalid, 1=gps fix, 2 = dgps fix
self.num_sats = 0 #number of locked satellites
        self.hdop = 0.0         #Horizontal Dilution of Precision (dimensionless)
self.wgs84 = 0.0 #Height of Geoid above WGS84 Ellipsoid, meters
class gprmc_object(object):
def __init__(self):
self.utc_time = ''
self.nav_warn = ''
self.latitude = 0.0
self.longitude = 0.0
self.speed = 0.0
self.track = 0.0
self.utc_date = ''
class GPS_Thread(threading.Thread):
def __init__ (self, port, baud, log_flag):
threading.Thread.__init__(self)
self._stop = threading.Event()
self.gps_ser = serial.Serial(port, baud)
self.log_flag = log_flag
self.gpgga = gpgga_object()
self.gprmc = gprmc_object()
self.raw_log = None
self.csv_log = None
if self.log_flag==1:
self.raw_log = 'gps_raw.txt'
elif self.log_flag==2:
self.csv_log = 'gps_csv.txt'
elif self.log_flag==3:
self.raw_log = 'gps_raw.txt'
self.csv_log = 'gps_csv.txt'
def run(self):
while (not self._stop.isSet()):
data = self.gps_ser.readline()
            if self.raw_log is not None:
rl = open(self.raw_log,'a')
rl.write(data)
rl.close()
line = ((data).strip()).split(',')
#print line
if line[0] == '$GPGGA': self.GPGGA_Parse(line)
elif line[0] == '$GPRMC': self.GPRMC_Parse(line)
sys.exit()
def get_lat_lon_alt(self):
return self.gpgga.latitude, self.gpgga.longitude, self.gpgga.altitude
def get_spd_cse(self):
return self.gprmc.speed, self.gprmc.track
def get_date_time(self):
return self.gprmc.utc_date, self.gpgga.utc_time
def GPGGA_Parse(self, line):
self.gpgga.utc_time = line[1]
lat_str = line[2]
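        # NMEA coordinates are ddmm.mmmm (degrees and decimal minutes): e.g. a
        # field of '3723.2475' parses as 37 + 23.2475/60 ~= 37.3875 degrees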
self.gpgga.latitude = float(line[2][:2]) + float(line[2][2:]) / 60
if line[3] == 'S':
self.gpgga.latitude = -1 * self.gpgga.latitude
self.gpgga.longitude = float(line[4][:3]) + float(line[4][3:]) / 60
if line[5] == 'W':
self.gpgga.longitude = -1 * self.gpgga.longitude
self.gpgga.fix_quality = int(line[6])
self.gpgga.num_sats = int(line[7])
self.gpgga.hdop = float(line[8])
        self.gpgga.altitude = float(line[9])*3.28084   #NMEA altitude is meters; stored here in feet
self.gpgga.wgs84 = float(line[11]) #Height of geoid above WGS84 ellipsoid
#print self.gpgga.utc_time, self.gpgga.latitude, self.gpgga.longitude
def GPRMC_Parse(self, line):
self.gprmc.utc_time = line[1]
self.gprmc.nav_warn = line[2]
self.gprmc.latitude = float(line[3][:2]) + float(line[3][2:]) / 60
if line[4] == 'S':
self.gprmc.latitude = -1 * self.gprmc.latitude
self.gprmc.longitude = float(line[5][:3]) + float(line[5][3:]) / 60
if line[6] == 'W':
self.gprmc.longitude = -1 * self.gprmc.longitude
#speed in knots, need to convert to m/s or mph
#1 knot = 1.15078 mph
#1 knot = 0.514444 meters/second
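        # e.g. a 10.0 knot reading is stored as 10.0 * 1.15078 = 11.5078 mph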
if line[7] != '': self.gprmc.speed = float(line[7])*1.15078
else: self.gprmc.speed = 0.0
if line[8] != '': self.gprmc.track = float(line[8])
else: self.gprmc.track = 0.0
        self.gprmc.utc_date = line[9]   #ddmmyy string; float() would mangle dates with leading zeros
    def stop(self):
        self.gps_ser.close()
        self._stop.set()
def stopped(self):
return self._stop.isSet()
if __name__ == '__main__':
#--------START Command Line option parser------------------------------------------------------
usage = "usage: %prog "
parser = OptionParser(usage = usage)
p_help = "GPS Serial Port, default = /dev/ttyACM0"
b_help = "GPS Serial Port Baud, default = 4800"
f_help = "GPS logfile, 0-none, 1-nmea only, 2-parsed, 3-parsed+nmea, default = none"
parser.add_option("-p", dest = "port" , action = "store", type = "string", default="/dev/ttyACM0", help = p_help)
parser.add_option("-b", dest = "baud" , action = "store", type = "int" , default="4800" , help = b_help)
parser.add_option("-f", dest = "log_file", action = "store", type = "string", default=None , help = f_help)
(options, args) = parser.parse_args()
#--------END Command Line option parser------------------------------------------------------
gps_serial = GPS_Thread(options.port, options.baud, options.log_file)
try:
gps_serial.start()
        while 1:
            lat, lon, alt = gps_serial.get_lat_lon_alt()
            spd, cse = gps_serial.get_spd_cse()
            print lat, lon, alt, spd, cse
            time.sleep(0.250)
sys.exit()
except Exception as e:
gps_serial.stop()
print "Exception Thrown, Terminating..."
print e
sys.exit()
|
[
"zleffke@vt.edu"
] |
zleffke@vt.edu
|
48c20b5c2aa2b919066addf3f2316a64f8c38b3c
|
de2c61ae61ce873a9b622d410c8a2b2c2579ef3a
|
/google_health/tests/test_map_values.py
|
fbd90e753446de9ffe7b3927acfa88a1d5f6120f
|
[
"MIT"
] |
permissive
|
JedGrabman/covidcast-indicators
|
cbd00e1530dc43f802b5ffdf199e8f59acf98c5c
|
d2a5a232431c8392c54bfc301dcb9beecc541b97
|
refs/heads/main
| 2023-01-20T23:29:17.577825
| 2020-10-28T14:18:02
| 2020-10-28T14:18:02
| 308,045,275
| 0
| 0
|
MIT
| 2020-10-28T14:38:51
| 2020-10-28T14:38:50
| null |
UTF-8
|
Python
| false
| false
| 3,868
|
py
|
import pytest
from os.path import join
import pandas as pd
import numpy as np
from delphi_google_health.map_values import derived_counts_from_dma, _dma_df_to_matrix
class TestMapValues:
def test_dummy_hrr(self):
# Create a dummy dataset
static_dir = join("..", "static")
dma_list = np.loadtxt(join(static_dir, "Canonical_DMA.txt"), dtype=int)
val = np.zeros(len(dma_list) * 2)
val[0] = 2
val[2] = 10
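        # nonzero entries: DMA index 0 (val 2) and index 2 (val 10), both on 2020-02-03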
df_dma = pd.DataFrame(
{
"geo_id": np.tile(dma_list, 2),
"timestamp": np.repeat(["2020-02-03", "2020-02-04"], len(dma_list)),
"val": val,
}
)
df_hrr, _ = derived_counts_from_dma(df_dma, static_dir)
hrr_list = np.loadtxt(join(static_dir, "Canonical_HRR.txt"), dtype=int)
assert set(np.argwhere(df_hrr["val"].values > 0).flatten()) == set(
[254, 256, 348, 350, 368, 370, 382, 384, 470]
)
assert set(df_hrr["geo_id"].unique()) == set(hrr_list)
assert (df_hrr["timestamp"].unique() == ["2020-02-03", "2020-02-04"]).all()
def test_dummy_msa(self):
# Create a dummy dataset
static_dir = join("..", "static")
dma_list = np.loadtxt(join(static_dir, "Canonical_DMA.txt"), dtype=int)
val = np.zeros(len(dma_list) * 2)
val[0] = 2
val[2] = 10
df_dma = pd.DataFrame(
{
"geo_id": np.tile(dma_list, 2),
"timestamp": np.repeat(["2020-02-03", "2020-02-04"], len(dma_list)),
"val": val,
}
)
_, df_msa = derived_counts_from_dma(df_dma, static_dir)
msa_list = np.loadtxt(join(static_dir, "Canonical_MSA.txt"), dtype=int)
assert set(np.argwhere(df_msa["val"].values > 0).flatten()) == set(
[68, 400, 546, 674]
)
assert set(df_msa["geo_id"].unique()) == set(msa_list)
assert (df_msa["timestamp"].unique() == ["2020-02-03", "2020-02-04"]).all()
class TestDataToMatrix:
def test_matrix_format(self):
# Create a dummy dataset
static_dir = join("..", "static")
dma_list = np.loadtxt(join(static_dir, "Canonical_DMA.txt"), dtype=int)
val = np.zeros(len(dma_list) * 2)
val[0] = 2
val[2] = 10
df_dma = pd.DataFrame(
{
"geo_id": np.tile(dma_list, 2),
"timestamp": np.repeat(["2020-02-03", "2020-02-04"], len(dma_list)),
"val": val,
}
)
# create matrix
mat, day_list = _dma_df_to_matrix(df_dma, static_dir)
# check out
assert mat.shape == (len(dma_list), 2)
assert (day_list == ["2020-02-03", "2020-02-04"]).all()
assert mat[0, 0] == 2
assert mat[2, 0] == 10
assert mat.sum() == 12
assert mat.min() == 0
def test_multiple_values(self):
# Create a dummy dataset
static_dir = join("..", "static")
dma_list = np.loadtxt(join(static_dir, "Canonical_DMA.txt"), dtype=int)
val = np.zeros(len(dma_list) * 2)
val[0] = 2
val[2] = 10
df_dma = pd.DataFrame(
{
"geo_id": np.tile(dma_list, 2),
"timestamp": np.repeat(["2020-02-03", "2020-02-03"], len(dma_list)),
"val": val,
}
)
with pytest.raises(ValueError) as e_info:
mat, day_list = _dma_df_to_matrix(df_dma, static_dir)
def test_missing_values(self):
# Create a dummy dataset
static_dir = join("..", "static")
df_dma = pd.DataFrame(
{"geo_id": [500], "timestamp": ["2020-02-03"], "val": [0]}
)
with pytest.raises(ValueError) as e_info:
mat, day_list = _dma_df_to_matrix(df_dma, static_dir)
|
[
"tbarnold@protonmail.ch"
] |
tbarnold@protonmail.ch
|
2efdc583ed742bce6daede58f08e6d65b7d392eb
|
8e26bbe97e96212a8bebcbed415018c23ba4fd1a
|
/ds/binary-trees/CheckBalanced.py
|
4ebde8a96acb7f86e063fd98bc7158b6b628b80d
|
[] |
no_license
|
yashjaiswal1/CTCI-DSA
|
ba04d00f33d14de128bddafc29a823f5c7be1fdb
|
f392c4f86dea8bf29a42325c4d5004355a3b82f5
|
refs/heads/master
| 2023-05-29T00:47:49.387875
| 2021-06-13T07:31:23
| 2021-06-13T07:31:23
| 351,172,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
class Node:
def __init__(self, key=None):
self.key = key
self.leftChild = None
self.rightChild = None
def isBalanced(localRoot):
    if localRoot is None:
        return True
    difference = abs(getHeight(localRoot.leftChild) -
                     getHeight(localRoot.rightChild))
    if difference > 1:
        return False
    # a node is balanced only if both subtrees are themselves balanced
    return isBalanced(localRoot.leftChild) and isBalanced(localRoot.rightChild)
def getHeight(node):
    if node is None:
        return -1
    else:
        return max(getHeight(node.leftChild), getHeight(node.rightChild)) + 1
tree_root = Node(20)
tree_root.leftChild = Node(10)
tree_root.rightChild = Node(30)
tree_root.leftChild.leftChild = Node(5)
tree_root.leftChild.rightChild = Node(15)
tree_root.leftChild.rightChild.rightChild = Node(16)
tree_root.leftChild.rightChild.rightChild.rightChild = Node(17)
tree_root.rightChild.leftChild = Node(25)
tree_root.rightChild.rightChild = Node(35)
print(isBalanced(tree_root))
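# A one-pass variant (a sketch, not part of the original file): computing height
# and balance together visits each node once, O(n), instead of the O(n^2) worst
# case of calling getHeight() from every isBalanced() call.
def checkHeight(node):
    if node is None:
        return -1
    left = checkHeight(node.leftChild)
    if left is False:
        return False
    right = checkHeight(node.rightChild)
    if right is False:
        return False
    if abs(left - right) > 1:
        return False
    return max(left, right) + 1

def isBalancedFast(root):
    return checkHeight(root) is not False

print(isBalancedFast(tree_root))  # False, same answer as above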
|
[
"yashjaiswal1@outlook.com"
] |
yashjaiswal1@outlook.com
|
cafefe10c03e39adab28f4ab55ac8cd1d46afa94
|
afb2d6491ac801778500bb5424488355828c3f08
|
/construction_execution.py
|
5b9f8f1ae4dc90d5997119db94b48235ed476605
|
[
"MIT"
] |
permissive
|
dsnaveen/meucci-python
|
31bac8cd1a3c76ee082c40e7d47b1815fbc3096b
|
1519f5076b3fb3e0e575200dd3a41d7a36547300
|
refs/heads/master
| 2021-06-01T04:23:40.584878
| 2016-07-05T07:22:04
| 2016-07-05T07:22:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,322
|
py
|
"""
Python code for blog post "mini-Meucci : Applying The Checklist - Steps 8-9"
http://www.returnandrisk.com/2016/06/mini-meucci-applying-checklist-steps-8-9.html
Copyright (c) 2016 Peter Chan (peter-at-return-and-risk-dot-com)
"""
#%matplotlib inline
from pandas_datareader import data
import numpy as np
import pandas as pd
import datetime
import math
import matplotlib.pyplot as plt
import seaborn
# Get Yahoo data on 30 DJIA stocks and a few ETFs
tickers = ['MMM','AXP','AAPL','BA','CAT','CVX','CSCO','KO','DD','XOM','GE','GS',
'HD','INTC','IBM','JNJ','JPM','MCD','MRK','MSFT','NKE','PFE','PG',
'TRV','UNH','UTX','VZ','V','WMT','DIS','SPY','DIA','TLT','SHY']
start = datetime.datetime(2008, 4, 1)
end = datetime.datetime(2016, 5, 31)
rawdata = data.DataReader(tickers, 'yahoo', start, end)  # note: the 'yahoo' reader has since been deprecated in pandas-datareader
prices = rawdata.to_frame().unstack(level=1)['Adj Close']
# Setup
tau = 21 # investment horizon in days
n_scenarios = len(prices) - tau
n_asset = 30
asset_tickers = tickers[0:30]
###############################################################################
# Construction - 2 step mean-variance optimization
###############################################################################
# Take a shortcut and bypass some of the checklist steps in this toy example:
# since returns are invariants and the estimation interval equals the horizon,
# the linear return distribution can be fed directly into the mean-variance optimizer
# Projected linear returns to the horizon - historical simulation
asset_rets = np.array(prices.pct_change(tau).iloc[tau:][asset_tickers])  # .ix was removed in newer pandas
# Mean-variance inputs
# Distribution of asset returns at horizon with flexible probabilities
# Time-conditioned flexible probs with exponential decay
half_life = 252 * 2 # half life of 2 years
es_lambda = math.log(2) / half_life
exp_probs = np.exp(-es_lambda * (np.arange(0, n_scenarios)[::-1]))
exp_probs = exp_probs / sum(exp_probs)
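# e.g. a scenario one half-life (504 trading days) old gets half the weight of
# the most recent scenario; the division above normalizes the weights to sum to 1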
# Apply flexible probabilities to asset return scenarios
import rnr_meucci_functions as rnr
mu_pc, sigma2_pc = rnr.fp_mean_cov(asset_rets.T, exp_probs)
# Perform shrinkage to mitigate estimation risk
mu_shrk, cov_shrk = rnr.simple_shrinkage(mu_pc, sigma2_pc)
# Step 1: m-v quadratic optimization for efficient frontier
n_portfolio = 40
weights_pc, rets_pc, vols_pc = rnr.efficient_frontier_qp_rets(n_portfolio,
cov_shrk, mu_shrk)
# Step 2: evaluate satisfaction for all allocations on the frontier
satisfaction_pc = -vols_pc
# Choose the allocation that maximises satisfaction
max_sat_idx = int(np.argmax(satisfaction_pc))  # np.asscalar was removed in NumPy 1.23
max_sat = satisfaction_pc[max_sat_idx]
max_sat_weights = weights_pc[max_sat_idx, :]
print('Optimal portfolio is minimum volatility portfolio with satisfaction\
index = {:.2}'.format(max_sat))
# Plot charts
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(9, 8))  # fig.hold() was removed in Matplotlib 3.0 and is not needed
gs = gridspec.GridSpec(2, 1)
ax = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[1, 0])
ax.plot(vols_pc, rets_pc)
ax.set_xlim(vols_pc[0]*0.95, vols_pc[-1]*1.02)
ax.set_ylim(min(rets_pc)*0.9, max(rets_pc)*1.05)
ax.set_xlabel('Standard Deviation')
ax.set_ylabel('Expected Return')
ax.set_title("Efficient Frontier")
ax.plot(vols_pc[0], rets_pc[0], 'g.', markersize=10.0)
ax.text(vols_pc[0]*1.02, rets_pc[0], 'minimum volatility portfolio',
fontsize=10)
ax2.plot(vols_pc, satisfaction_pc)
ax2.set_xlim(vols_pc[0]*0.95, vols_pc[-1]*1.02)
ax2.set_ylim(min(satisfaction_pc)*1.05, max(satisfaction_pc)*0.9)
ax2.set_xlabel('Standard Deviation')
ax2.set_ylabel('Satisfaction')
ax2.set_title("Satisfaction")
ax2.plot(vols_pc[max_sat_idx], max(satisfaction_pc), 'g.', markersize=10.0)
ax2.text(vols_pc[max_sat_idx]*1.02, max(satisfaction_pc), 'maximum satisfaction',
fontsize=10)
plt.tight_layout()
plt.show()
# Plot minimum volatility portfolio weights
pd.DataFrame(weights_pc[0,:], index=asset_tickers, columns=['w']).sort_values('w', \
ascending=False).plot(kind='bar', title='Minimum Volatility Portfolio Weights', \
legend=None, figsize=(10, 8))
plt.show()
###############################################################################
# Execution
###############################################################################
# See zipline simulation in dynamic allocation code file
|
[
"returnandrisk@users.noreply.github.com"
] |
returnandrisk@users.noreply.github.com
|
35c2c078c3d6ff1d63ce2b376b5387b4eb97f1e7
|
b5aa43c8db450c3bcacc8f28897eab684a8032a1
|
/data/games/missiles/ui.py
|
431fd970d43ec82007a75566bf14a29e0ec60f3b
|
[] |
no_license
|
iminurnamez/Python_Arcade_Collab
|
29a74cf2a6264969de9bae3c4a6ed23d6282e793
|
67702414ed30addd1bf46339bb458df34ed88f2a
|
refs/heads/master
| 2021-04-15T07:32:18.573004
| 2018-05-13T14:29:19
| 2018-05-13T14:29:19
| 126,644,972
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
import pygame as pg
from data.core import constants as prog_constants
from data.components.labels import Label
from . import constants
class UI(object):
def __init__(self, player, level):
font = prog_constants.FONTS["Fixedsys500c"]
self.level_label = Label(font, 16, "Level {}".format(level.level_num),
constants.LOW_LIGHT_GREEN, {"topleft": (0, 0)})
self.cash_label = Label(font, 16, "${}".format(player.cash),
constants.LOW_LIGHT_GREEN, {"topleft": (0, 20)})
self.ammo_label = Label(font, 16, "Ammo: {}".format(player.ammo),
constants.LOW_LIGHT_GREEN, {"topleft": (0, 40)})
def update(self, player, level):
self.cash_label.set_text("${}".format(player.cash))
self.ammo_label.set_text("Ammo: {}".format(player.ammo))
def draw(self, surface):
self.level_label.draw(surface)
self.cash_label.draw(surface)
self.ammo_label.draw(surface)
class CityIcon(object):
def __init__(self, midbottom, points, image, current_points=0):
self.current_points = current_points
self.points = points
self.image = image
self.rect = self.image.get_rect(midbottom=midbottom)
self.points_label = Label(prog_constants.FONTS["Fixedsys500c"], 24,
"{}".format(self.current_points), constants.LOW_LIGHT_GREEN,
{"midtop": (midbottom[0], midbottom[1] + 4)})
def update(self):
text = "{}".format(self.current_points)
if self.points_label.text != text:
self.points_label.set_text(text)
def draw(self, surface):
surface.blit(self.image, self.rect)
self.points_label.draw(surface)
|
[
"easinerf@gmail.com"
] |
easinerf@gmail.com
|
69c34a6ee0995ab5c5ac58ef95ce650d85b3cfb3
|
b7098b8a2c673cec6d4bfb9eab47bd9df2944292
|
/7_문자열/5622.py
|
379f4de148c422f45392fc7fadf89d21275a487a
|
[] |
no_license
|
limgeonho/BOJ_stages
|
0bc9e53aec129a34c6114d4b39c9a09de4e93bbb
|
c579124823df510b5ad84f95fb9ebdff5c9b762c
|
refs/heads/master
| 2023-07-17T03:48:33.161536
| 2021-08-29T14:48:55
| 2021-08-29T14:48:55
| 389,390,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
# Dial
s = input()
phone = {2: 'ABC', 3: 'DEF', 4: 'GHI', 5: 'JKL',
6: 'MNO', 7: 'PQRS', 8: 'TUV', 9: 'WXYZ'}
cnt = 0
for char in s:
for k, v in phone.items():
if char in v:
cnt += k+1
print(cnt)
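# e.g. 'UNUCIC' -> 9 + 7 + 9 + 3 + 5 + 3 = 36 (dialing a letter on key k takes k+1 seconds)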
# li = ['ABC', 'DEF', 'GHI', 'JKL', 'MNO', 'PQRS', 'TUV', 'WXYZ']
# word = input()
# cnt = 0
# for char in word:
# for letter in li:
# if char in letter:
# cnt += li.index(letter) + 3
# print(cnt)
|
[
"ghlim909@gmail.com"
] |
ghlim909@gmail.com
|
abeb393fe7e77610063af908b694f73c67e31c8f
|
776fb2aecea006e14e39fb8a71ff9a9f447b6105
|
/metarho/localsettings-dist.py
|
deae4e87021eece9b42c2a156011da7419128e3e
|
[] |
no_license
|
TheProjecter/metarho
|
1615c1e2f838e0676f9a3d6e4243906aa8bb9eb9
|
36654f34fbdaffb7146f2f8f4dc87b48ad39cc75
|
refs/heads/master
| 2021-01-10T15:13:31.244164
| 2010-05-28T21:50:25
| 2010-05-28T21:50:25
| 43,166,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
# file localsettings-dist.py
#
# Copyright 2010 Scott Turnbull
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copy to localsettings.py and set vars as needed.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEV_ENV = False
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
|
[
"streamweaver@mindspring.com"
] |
streamweaver@mindspring.com
|
d15e3bb52395ec126ac48230b4f647c49dd77d27
|
d6beff28c32310a8835c3d34a882ff6d6dee89ab
|
/setup.py
|
926353d559c620954512666519f611dddb494ccc
|
[] |
no_license
|
mattvonrocketstein/goulash
|
ae0dfc87a8d8c44f924800bc7a0257bb756ac810
|
cff9cb1e279a31394af7357f3f515a0b51cf5f62
|
refs/heads/master
| 2020-05-17T17:25:55.403433
| 2016-01-15T20:35:01
| 2016-01-15T20:35:01
| 7,263,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
#!/usr/bin/env python
""" setup.py for goulash
"""
import os, sys
from setuptools import setup, find_packages
# make sure that finding packages works, even
# when setup.py is invoked from outside this dir
this_dir = os.path.dirname(os.path.abspath(__file__))
if not os.getcwd()==this_dir:
os.chdir(this_dir)
# make sure we can import the version number so that it doesn't have
# to be changed in two places. goulash/__init__.py is also free
# to import various requirements that haven't been installed yet
sys.path.append(os.path.join(this_dir, 'goulash'))
from version import __version__
sys.path.pop()
base_url = 'https://github.com/mattvonrocketstein/goulash/'
packages = [x for x in find_packages() if x not in ['tests']]
setup(
name = 'goulash',
version = __version__,
description = 'toolbox, random shared stuff from my other projects',
author = 'mattvonrocketstein',
author_email = '$author@gmail',
url = base_url,
download_url = base_url + '/tarball/master',
packages = packages,
keywords = ['goulash'],
install_requires = [
'addict', # dictionary utility
'ansi2html', # required for goulash.ansi
'werkzeug', # used for caching helpers
'fabric', # misc. automation
'argparse', # command line option-parsing
'configparser', # .ini configurations
'mkdocs', # static docs generation
'epydoc', # static docs generation
'Importing' # lazy module
],
entry_points = dict(
console_scripts=[
'goulash = goulash.bin._goulash:entry',
]),
package_data={'': ['*.*', 'goulash/data/*.*']},
include_package_data=True,
)
|
[
"matthewvonrocketstein@gmail-dot-com"
] |
matthewvonrocketstein@gmail-dot-com
|
106a0d9c89e4949c8b9f5af082852301f80aad4f
|
72d7c721df1bfe8274ea17194498e869c4247d95
|
/espn_api/baseball/__init__.py
|
898b3e849003e8bfa622941b824501074687a039
|
[
"MIT"
] |
permissive
|
AdrianForsythe/ff-espn-api
|
f344287e6f74a057910ab0b140393ca3e4d5580f
|
ac82048e3f0397361c54dc5497df72bf9f29586f
|
refs/heads/master
| 2022-09-21T03:47:18.813600
| 2022-07-31T21:49:06
| 2022-07-31T21:49:06
| 206,588,624
| 1
| 0
|
MIT
| 2019-09-05T14:49:49
| 2019-09-05T14:49:49
| null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
__all__ = ['League',
'Team',
'Player',
'Matchup',
]
from .league import League
from .team import Team
from .player import Player
from .matchup import Matchup
|
[
"cwendt94@vt.edu"
] |
cwendt94@vt.edu
|
395ed5ce63deb82814dea454d7b5964dd380116c
|
f5b46d7a9f5583b5156ef03797ca2651b8c3e145
|
/tubers/tubers/settings.py
|
1f8101ef697b7a3bec9fbff550d2b25061de0cfc
|
[] |
no_license
|
Ipshita30/lco-tubers
|
96283240925a4ce9076ee29c2691698c2e83871d
|
8a6f467ca7026222af7b56853614a000c723e609
|
refs/heads/main
| 2023-03-19T08:13:46.875529
| 2021-03-19T10:03:11
| 2021-03-19T10:03:11
| 349,349,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
"""
Django settings for tubers project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$q2=spekw$kg!im(ti(&^-)*ge-7(uukmq#s1b*f6g@cdd%%2$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'webpages.apps.WebpagesConfig',
'djangocms_admin_style',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tubers.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tubers.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'lcotubers',
'USER': 'postgres',
'PASSWORD': 'ipshita30',
'HOST': 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'tubers/static')
]
|
[
"Ipshitam.soni@gmail.com"
] |
Ipshitam.soni@gmail.com
|
92001a2b21623b944fec416bba71f5a2a3afe83f
|
e0029bbfe3158b1d590a102965ed0b11bdfa7ab8
|
/pydicom/tests/test_pylibjpeg.py
|
e025cd854390112b259c3962b9f5839b98671ed1
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ARteapartedelarte/pydicom
|
1302eb969661c27cda908b56ebc3d6c572027a95
|
389b1ad97c1b3616851b37749e4bd2520755f0d1
|
refs/heads/master
| 2023-04-24T08:43:04.902001
| 2021-04-29T23:33:51
| 2021-04-29T23:33:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,695
|
py
|
# Copyright 2020 pydicom authors. See LICENSE file for details.
"""Tests for the pixel_data_handlers.pylibjpeg_handler module."""
import pytest
import pydicom
from pydicom.data import get_testdata_file
from pydicom.encaps import defragment_data
from pydicom.filereader import dcmread
from pydicom.pixel_data_handlers.util import (
convert_color_space, get_j2k_parameters
)
from pydicom.uid import (
ImplicitVRLittleEndian,
JPEGBaseline8Bit,
JPEGExtended12Bit,
JPEGLosslessP14,
JPEGLosslessSV1,
JPEGLSLossless,
JPEGLSNearLossless,
JPEG2000Lossless,
JPEG2000,
RLELossless,
AllTransferSyntaxes
)
try:
import numpy as np
from pydicom.pixel_data_handlers import numpy_handler as NP_HANDLER
HAVE_NP = True
except ImportError:
NP_HANDLER = None
HAVE_NP = False
try:
from pydicom.pixel_data_handlers import pylibjpeg_handler as LJ_HANDLER
from pydicom.pixel_data_handlers.pylibjpeg_handler import (
get_pixeldata, as_array, generate_frames
)
HAVE_PYLIBJPEG = LJ_HANDLER.HAVE_PYLIBJPEG
HAVE_LJ = LJ_HANDLER.HAVE_LIBJPEG
HAVE_OJ = LJ_HANDLER.HAVE_OPENJPEG
HAVE_RLE = LJ_HANDLER.HAVE_RLE
except ImportError:
LJ_HANDLER = None
HAVE_PYLIBJPEG = False
HAVE_LJ = False
HAVE_OJ = False
HAVE_RLE = False
TEST_HANDLER = HAVE_NP and HAVE_PYLIBJPEG # Run handler tests
TEST_JPEG = TEST_HANDLER and HAVE_LJ # Run 10918 JPEG tests
TEST_JPEGLS = TEST_HANDLER and HAVE_LJ # Run 14495 JPEG-LS tests
TEST_JPEG2K = TEST_HANDLER and HAVE_OJ # Run 15444 JPEG 2000 tests
TEST_RLE = TEST_HANDLER and HAVE_RLE # Run RLE Lossless tests
SUPPORTED_SYNTAXES = [
JPEGBaseline8Bit,
JPEGExtended12Bit,
JPEGLosslessP14,
JPEGLosslessSV1,
JPEGLSLossless,
JPEGLSNearLossless,
JPEG2000Lossless,
JPEG2000,
RLELossless,
]
UNSUPPORTED_SYNTAXES = list(
set(AllTransferSyntaxes) ^ set(SUPPORTED_SYNTAXES)
)
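# (set symmetric difference: every transfer syntax not in SUPPORTED_SYNTAXES)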
# Transfer syntaxes supported by other handlers
IMPL = get_testdata_file("MR_small_implicit.dcm")
EXPL = get_testdata_file("OBXXXX1A.dcm")
EXPB = get_testdata_file("OBXXXX1A_expb.dcm")
DEFL = get_testdata_file("image_dfl.dcm")
REFERENCE_DATA_UNSUPPORTED = [
(IMPL, ('1.2.840.10008.1.2', 'CompressedSamples^MR1')),
(EXPL, ('1.2.840.10008.1.2.1', 'OB^^^^')),
(EXPB, ('1.2.840.10008.1.2.2', 'OB^^^^')),
(DEFL, ('1.2.840.10008.1.2.1.99', '^^^^')),
]
# RLE Lossless - PackBits algorithm
RLE_8_1_1F = get_testdata_file("OBXXXX1A_rle.dcm")
RLE_8_1_2F = get_testdata_file("OBXXXX1A_rle_2frame.dcm")
RLE_8_3_1F = get_testdata_file("SC_rgb_rle.dcm")
RLE_8_3_2F = get_testdata_file("SC_rgb_rle_2frame.dcm")
RLE_16_1_1F = get_testdata_file("MR_small_RLE.dcm")
RLE_16_1_10F = get_testdata_file("emri_small_RLE.dcm")
RLE_16_3_1F = get_testdata_file("SC_rgb_rle_16bit.dcm")
RLE_16_3_2F = get_testdata_file("SC_rgb_rle_16bit_2frame.dcm")
RLE_32_1_1F = get_testdata_file("rtdose_rle_1frame.dcm")
RLE_32_1_15F = get_testdata_file("rtdose_rle.dcm")
RLE_32_3_1F = get_testdata_file("SC_rgb_rle_32bit.dcm")
RLE_32_3_2F = get_testdata_file("SC_rgb_rle_32bit_2frame.dcm")
# JPEG - ISO/IEC 10918 Standard
# FMT_BA_BV_SPX_PR_FRAMESF_PI
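# e.g. JPGB_08_08_3_0_1F_YBR_FULL reads as: JPEG Baseline format, 8 bits
# allocated, 8 bit values, 3 samples/pixel, pixel representation 0, 1 frame,
# YBR_FULL photometric interpretation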
# JPGB: 1.2.840.10008.1.2.4.50 - JPEG Baseline (8-bit only)
JPGB_08_08_3_0_1F_YBR_FULL = get_testdata_file("SC_rgb_small_odd_jpeg.dcm")
JPGB_08_08_3_0_120F_YBR_FULL_422 = get_testdata_file("color3d_jpeg_baseline.dcm") # noqa
# Different subsampling 411, 422, 444
JPGB_08_08_3_0_1F_YBR_FULL_422_411 = get_testdata_file("SC_rgb_dcmtk_+eb+cy+np.dcm") # noqa
JPGB_08_08_3_0_1F_YBR_FULL_422_422 = get_testdata_file("SC_rgb_dcmtk_+eb+cy+s2.dcm") # noqa
JPGB_08_08_3_0_1F_YBR_FULL_411 = get_testdata_file("SC_rgb_dcmtk_+eb+cy+n1.dcm") # noqa
JPGB_08_08_3_0_1F_YBR_FULL_422 = get_testdata_file("SC_rgb_dcmtk_+eb+cy+n2.dcm") # noqa
JPGB_08_08_3_0_1F_YBR_FULL_444 = get_testdata_file("SC_rgb_dcmtk_+eb+cy+s4.dcm") # noqa
JPGB_08_08_3_0_1F_RGB = get_testdata_file("SC_rgb_dcmtk_+eb+cr.dcm")
# JPGE: 1.2.840.10008.1.2.4.51 - JPEG Extended
JPGE_BAD = get_testdata_file("JPEG-lossy.dcm") # Bad JPEG file
JPGE_16_12_1_0_1F_M2 = get_testdata_file("JPGExtended.dcm") # Fixed version
# JPGL: 1.2.840.10008.1.2.4.70 - JPEG Lossless, Non-hierarchical, 1st Order
JPGL_08_08_1_0_1F = get_testdata_file("JPGLosslessP14SV1_1s_1f_8b.dcm")
JPGL_16_16_1_1_1F_M2 = get_testdata_file("JPEG-LL.dcm")
JPGB = JPEGBaseline8Bit
JPGE = JPEGExtended12Bit
JPGL = JPEGLosslessSV1
JPG_REFERENCE_DATA = [
# fpath, (syntax, bits, nr samples, pixel repr, nr frames, shape, dtype)
(JPGB_08_08_3_0_120F_YBR_FULL_422, (JPGB, 8, 3, 0, 120, (120, 480, 640, 3), 'uint8')), # noqa
(JPGB_08_08_3_0_1F_YBR_FULL_422_411, (JPGB, 8, 3, 0, 1, (100, 100, 3), 'uint8')), # noqa
(JPGB_08_08_3_0_1F_YBR_FULL_422_422, (JPGB, 8, 3, 0, 1, (100, 100, 3), 'uint8')), # noqa
(JPGB_08_08_3_0_1F_YBR_FULL_411, (JPGB, 8, 3, 0, 1, (100, 100, 3), 'uint8')), # noqa
(JPGB_08_08_3_0_1F_YBR_FULL_422, (JPGB, 8, 3, 0, 1, (100, 100, 3), 'uint8')), # noqa
(JPGB_08_08_3_0_1F_YBR_FULL_444, (JPGB, 8, 3, 0, 1, (100, 100, 3), 'uint8')), # noqa
(JPGB_08_08_3_0_1F_RGB, (JPGB, 8, 3, 0, 1, (100, 100, 3), 'uint8')),
(JPGE_16_12_1_0_1F_M2, (JPGE, 16, 1, 0, 1, (1024, 256), 'uint16')),
(JPGL_08_08_1_0_1F, (JPGL, 8, 1, 0, 1, (768, 1024), 'uint8')),
(JPGL_16_16_1_1_1F_M2, (JPGL, 16, 1, 1, 1, (1024, 256), 'int16')),
]
JPG_MATCHING_DATASETS = [
# (compressed, reference, hard coded check values), px tolerance
pytest.param(
JPGB_08_08_3_0_1F_YBR_FULL_422_411,
get_testdata_file("SC_rgb_dcmtk_ebcynp_dcmd.dcm"),
[
(253, 1, 0), (253, 129, 131), (0, 255, 5), (127, 255, 129),
(0, 0, 254), (127, 128, 255), (0, 0, 0), (64, 64, 64),
(192, 192, 192), (255, 255, 255),
],
2
),
pytest.param(
JPGB_08_08_3_0_1F_YBR_FULL_422_422,
get_testdata_file("SC_rgb_dcmtk_ebcys2_dcmd.dcm"),
[
(254, 0, 0), (255, 127, 127), (0, 255, 5), (129, 255, 129),
(0, 0, 254), (128, 127, 255), (0, 0, 0), (64, 64, 64),
(192, 192, 192), (255, 255, 255),
],
0
),
pytest.param(
JPGB_08_08_3_0_1F_YBR_FULL_411,
get_testdata_file("SC_rgb_dcmtk_ebcyn1_dcmd.dcm"),
[
(253, 1, 0), (253, 129, 131), (0, 255, 5), (127, 255, 129),
(0, 0, 254), (127, 128, 255), (0, 0, 0), (64, 64, 64),
(192, 192, 192), (255, 255, 255),
],
2
),
pytest.param(
JPGB_08_08_3_0_1F_YBR_FULL_422,
get_testdata_file("SC_rgb_dcmtk_ebcyn2_dcmd.dcm"),
[
(254, 0, 0), (255, 127, 127), (0, 255, 5), (129, 255, 129),
(0, 0, 254), (128, 127, 255), (0, 0, 0), (64, 64, 64),
(192, 192, 192), (255, 255, 255),
],
0
),
pytest.param(
JPGB_08_08_3_0_1F_YBR_FULL_444,
get_testdata_file("SC_rgb_dcmtk_ebcys4_dcmd.dcm"),
[
(254, 0, 0), (255, 127, 127), (0, 255, 5), (129, 255, 129),
(0, 0, 254), (128, 127, 255), (0, 0, 0), (64, 64, 64),
(192, 192, 192), (255, 255, 255),
],
0
),
pytest.param(
JPGB_08_08_3_0_1F_RGB,
get_testdata_file("SC_rgb_dcmtk_ebcr_dcmd.dcm"),
[
(255, 0, 0), (255, 128, 128), (0, 255, 0), (128, 255, 128),
(0, 0, 255), (128, 128, 255), (0, 0, 0), (64, 64, 64),
(192, 192, 192), (255, 255, 255),
],
1
),
]
# JPEG-LS - ISO/IEC 14495 Standard
JLSL = JPEGLSNearLossless
JLSN = JPEGLSLossless
JPEG_LS_LOSSLESS = get_testdata_file("MR_small_jpeg_ls_lossless.dcm")
JLS_REFERENCE_DATA = [
# fpath, (syntax, bits, nr samples, pixel repr, nr frames, shape, dtype)
(JPEG_LS_LOSSLESS, (JLSN, 16, 1, 1, 1, (64, 64), 'int16')),
]
# JPEG 2000 - ISO/IEC 15444 Standard
J2KR = JPEG2000Lossless
J2KI = JPEG2000
# J2KR: 1.2.840.10008.1.2.4.90 - JPEG 2000 Lossless
J2KR_08_08_3_0_1F_YBR_ICT = get_testdata_file("US1_J2KR.dcm")
J2KR_16_10_1_0_1F_M1 = get_testdata_file("RG3_J2KR.dcm")
J2KR_16_12_1_0_1F_M2 = get_testdata_file("MR2_J2KR.dcm")
J2KR_16_15_1_0_1F_M1 = get_testdata_file("RG1_J2KR.dcm")
J2KR_16_16_1_0_10F_M2 = get_testdata_file("emri_small_jpeg_2k_lossless.dcm")
J2KR_16_14_1_1_1F_M2 = get_testdata_file("693_J2KR.dcm")
J2KR_16_16_1_1_1F_M2 = get_testdata_file("MR_small_jp2klossless.dcm")
J2KR_16_13_1_1_1F_M2_MISMATCH = get_testdata_file("J2K_pixelrep_mismatch.dcm")
# Non-conformant pixel data -> JP2 header present
J2KR_08_08_3_0_1F_YBR_RCT = get_testdata_file("GDCMJ2K_TextGBR.dcm")
# J2KI: 1.2.840.10008.1.2.4.91 - JPEG 2000
J2KI_08_08_3_0_1F_RGB = get_testdata_file("SC_rgb_gdcm_KY.dcm")
J2KI_08_08_3_0_1F_YBR_ICT = get_testdata_file("US1_J2KI.dcm")
J2KI_16_10_1_0_1F_M1 = get_testdata_file("RG3_J2KI.dcm")
J2KI_16_12_1_0_1F_M2 = get_testdata_file("MR2_J2KI.dcm")
J2KI_16_15_1_0_1F_M1 = get_testdata_file("RG1_J2KI.dcm")
J2KI_16_14_1_1_1F_M2 = get_testdata_file("693_J2KI.dcm")
J2KI_16_16_1_1_1F_M2 = get_testdata_file("JPEG2000.dcm")
J2K_REFERENCE_DATA = [
# fpath, (syntax, bits, nr samples, pixel repr, nr frames, shape, dtype)
(J2KR_08_08_3_0_1F_YBR_ICT, (J2KR, 8, 3, 0, 1, (480, 640, 3), 'uint8')),
(J2KR_16_10_1_0_1F_M1, (J2KR, 16, 1, 0, 1, (1760, 1760), 'uint16')),
(J2KR_16_12_1_0_1F_M2, (J2KR, 16, 1, 0, 1, (1024, 1024), 'uint16')),
(J2KR_16_15_1_0_1F_M1, (J2KR, 16, 1, 0, 1, (1955, 1841), 'uint16')),
# should be Bits Stored = 12
(J2KR_16_16_1_0_10F_M2, (J2KR, 16, 1, 0, 10, (10, 64, 64), 'uint16')),
# should be Bits Stored = 16
(J2KR_16_14_1_1_1F_M2, (J2KR, 16, 1, 1, 1, (512, 512), 'int16')),
(J2KR_16_16_1_1_1F_M2, (J2KR, 16, 1, 1, 1, (64, 64), 'int16')),
(J2KI_08_08_3_0_1F_RGB, (J2KI, 8, 3, 0, 1, (100, 100, 3), 'uint8')),
(J2KI_08_08_3_0_1F_YBR_ICT, (J2KI, 8, 3, 0, 1, (480, 640, 3), 'uint8')),
(J2KI_16_10_1_0_1F_M1, (J2KI, 16, 1, 0, 1, (1760, 1760), 'uint16')),
(J2KI_16_12_1_0_1F_M2, (J2KI, 16, 1, 0, 1, (1024, 1024), 'uint16')),
(J2KI_16_15_1_0_1F_M1, (J2KI, 16, 1, 0, 1, (1955, 1841), 'uint16')),
# should be Bits Stored = 16
(J2KI_16_14_1_1_1F_M2, (J2KI, 16, 1, 1, 1, (512, 512), 'int16')),
(J2KI_16_16_1_1_1F_M2, (J2KI, 16, 1, 1, 1, (1024, 256), 'int16')),
]
J2K_MATCHING_DATASETS = [
# (compressed, reference, fixes)
pytest.param(
J2KR_08_08_3_0_1F_YBR_ICT,
get_testdata_file("US1_UNCR.dcm"),
{},
),
pytest.param(
J2KR_16_10_1_0_1F_M1,
get_testdata_file("RG3_UNCR.dcm"),
{},
),
pytest.param(
J2KR_16_12_1_0_1F_M2,
get_testdata_file("MR2_UNCR.dcm"),
{},
),
pytest.param(
J2KR_16_15_1_0_1F_M1,
get_testdata_file("RG1_UNCR.dcm"),
{},
),
pytest.param(
J2KR_16_16_1_0_10F_M2,
get_testdata_file("emri_small.dcm"),
{'BitsStored': 16},
),
pytest.param(
J2KR_16_14_1_1_1F_M2,
get_testdata_file("693_UNCR.dcm"),
{'BitsStored': 14},
),
pytest.param(
J2KR_16_16_1_1_1F_M2,
get_testdata_file("MR_small.dcm"),
{},
),
pytest.param(
J2KI_08_08_3_0_1F_RGB,
get_testdata_file("SC_rgb_gdcm2k_uncompressed.dcm"),
{},
),
pytest.param(
J2KI_08_08_3_0_1F_YBR_ICT,
get_testdata_file("US1_UNCI.dcm"),
{},
),
pytest.param(
J2KI_16_10_1_0_1F_M1,
get_testdata_file("RG3_UNCI.dcm"),
{},
),
pytest.param(
J2KI_16_12_1_0_1F_M2,
get_testdata_file("MR2_UNCI.dcm"),
{},
),
pytest.param(
J2KI_16_15_1_0_1F_M1,
get_testdata_file("RG1_UNCI.dcm"),
{},
),
pytest.param(
J2KI_16_14_1_1_1F_M2,
get_testdata_file("693_UNCI.dcm"),
{'BitsStored': 16},
),
pytest.param(
J2KI_16_16_1_1_1F_M2,
get_testdata_file("JPEG2000_UNC.dcm"),
{},
),
]
def test_unsupported_syntaxes():
"""Test that UNSUPPORTED_SYNTAXES is as expected."""
for syntax in SUPPORTED_SYNTAXES:
assert syntax not in UNSUPPORTED_SYNTAXES
@pytest.mark.skipif(not HAVE_PYLIBJPEG, reason='pylibjpeg not available')
class TestHandler:
"""Tests for handling Pixel Data with the handler."""
def setup(self):
"""Setup the test datasets and the environment."""
self.original_handlers = pydicom.config.pixel_data_handlers
pydicom.config.pixel_data_handlers = [NP_HANDLER, LJ_HANDLER]
def teardown(self):
"""Restore the environment."""
pydicom.config.pixel_data_handlers = self.original_handlers
def test_environment(self):
"""Check that the testing environment is as expected."""
assert HAVE_NP
assert HAVE_PYLIBJPEG
assert LJ_HANDLER is not None
def test_unsupported_syntax_raises(self):
"""Test pixel_array raises exception for unsupported syntaxes."""
pydicom.config.pixel_data_handlers = [LJ_HANDLER]
ds = dcmread(EXPL)
for uid in UNSUPPORTED_SYNTAXES:
ds.file_meta.TransferSyntaxUID = uid
with pytest.raises((NotImplementedError, RuntimeError)):
ds.pixel_array
@pytest.mark.skipif(
HAVE_LJ and HAVE_OJ and HAVE_RLE, reason="plugins available"
)
def test_no_plugins_raises(self):
"""Test exception raised if required plugin missing."""
ds = dcmread(JPGB_08_08_3_0_1F_YBR_FULL)
msg = (
r"Unable to convert the Pixel Data as the 'pylibjpeg-libjpeg' "
r"plugin is not installed"
)
with pytest.raises(RuntimeError, match=msg):
ds.pixel_array
ds = dcmread(J2KI_08_08_3_0_1F_RGB)
msg = (
r"Unable to convert the Pixel Data as the 'pylibjpeg-openjpeg' "
r"plugin is not installed"
)
with pytest.raises(RuntimeError, match=msg):
ds.pixel_array
ds = dcmread(RLE_8_1_1F)
msg = (
r"Unable to convert the Pixel Data as the 'pylibjpeg-rle' "
r"plugin is not installed"
)
with pytest.raises(RuntimeError, match=msg):
ds.pixel_array
def test_change_photometric_interpretation(self):
"""Test returned value."""
ds = dcmread(J2KR_16_12_1_0_1F_M2)
func = LJ_HANDLER.should_change_PhotometricInterpretation_to_RGB
assert func(ds) is False
@pytest.mark.skipif(not TEST_JPEG, reason="no -libjpeg plugin")
class TestJPEG:
def setup(self):
"""Setup the test datasets and the environment."""
self.original_handlers = pydicom.config.pixel_data_handlers
pydicom.config.pixel_data_handlers = [NP_HANDLER, LJ_HANDLER]
def teardown(self):
"""Restore the environment."""
pydicom.config.pixel_data_handlers = self.original_handlers
@pytest.mark.parametrize('fpath, data', JPG_REFERENCE_DATA)
def test_properties(self, fpath, data):
"""Test dataset and pixel array properties are as expected."""
ds = dcmread(fpath)
assert ds.file_meta.TransferSyntaxUID == data[0]
assert ds.BitsAllocated == data[1]
assert ds.SamplesPerPixel == data[2]
assert ds.PixelRepresentation == data[3]
assert getattr(ds, 'NumberOfFrames', 1) == data[4]
arr = ds.pixel_array
assert arr.flags.writeable
assert data[5] == arr.shape
assert arr.dtype == data[6]
@pytest.mark.parametrize('fpath, rpath, val, tol', JPG_MATCHING_DATASETS)
def test_array(self, fpath, rpath, val, tol):
"""Test pixel_array returns correct values."""
ds = dcmread(fpath)
arr = ds.pixel_array
if 'YBR' in ds.PhotometricInterpretation:
arr = convert_color_space(arr, ds.PhotometricInterpretation, 'RGB')
ref = dcmread(rpath).pixel_array
if val:
assert tuple(arr[5, 50, :]) == val[0]
assert tuple(arr[15, 50, :]) == val[1]
assert tuple(arr[25, 50, :]) == val[2]
assert tuple(arr[35, 50, :]) == val[3]
assert tuple(arr[45, 50, :]) == val[4]
assert tuple(arr[55, 50, :]) == val[5]
assert tuple(arr[65, 50, :]) == val[6]
assert tuple(arr[75, 50, :]) == val[7]
assert tuple(arr[85, 50, :]) == val[8]
assert tuple(arr[95, 50, :]) == val[9]
# All results within `tol` intensity units of the reference
assert np.allclose(arr, ref, atol=tol)
@pytest.mark.parametrize('fpath, rpath, val, tol', JPG_MATCHING_DATASETS)
def test_generate_frames(self, fpath, rpath, val, tol):
"""Test pixel_array returns correct values."""
ds = dcmread(fpath)
frame_generator = generate_frames(ds)
ref = dcmread(rpath).pixel_array
nr_frames = getattr(ds, 'NumberOfFrames', 1)
for ii in range(nr_frames):
arr = next(frame_generator)
if 'YBR' in ds.PhotometricInterpretation:
arr = convert_color_space(
arr, ds.PhotometricInterpretation, 'RGB'
)
if nr_frames > 1:
assert np.allclose(arr, ref[ii, ...], atol=tol)
else:
assert np.allclose(arr, ref, atol=tol)
with pytest.raises(StopIteration):
next(frame_generator)
def test_bad_file_raises(self):
"""Test a bad JPEG file raises an exception."""
ds = dcmread(JPGE_BAD)
msg = (
r"libjpeg error code '-1038' returned from Decode\(\): A "
r"misplaced marker segment was found - scan start must be zero "
r"and scan stop must be 63 for the sequential operating modes"
)
with pytest.raises(RuntimeError, match=msg):
ds.pixel_array
def test_missing_element_raises(self):
"""Test that missing required element raises exception."""
ds = dcmread(JPGB_08_08_3_0_1F_YBR_FULL)
del ds.PixelData
msg = (
r"Unable to convert the pixel data as the following required "
r"elements are missing from the dataset: PixelData"
)
with pytest.raises(AttributeError, match=msg):
ds.pixel_array
@pytest.mark.skipif(not TEST_JPEGLS, reason="no -libjpeg plugin")
class TestJPEGLS:
def setup(self):
"""Setup the test datasets and the environment."""
self.original_handlers = pydicom.config.pixel_data_handlers
pydicom.config.pixel_data_handlers = [NP_HANDLER, LJ_HANDLER]
def teardown(self):
"""Restore the environment."""
pydicom.config.pixel_data_handlers = self.original_handlers
@pytest.mark.parametrize('fpath, data', JLS_REFERENCE_DATA)
def test_properties(self, fpath, data):
"""Test dataset and pixel array properties are as expected."""
ds = dcmread(fpath)
assert ds.file_meta.TransferSyntaxUID == data[0]
assert ds.BitsAllocated == data[1]
assert ds.SamplesPerPixel == data[2]
assert ds.PixelRepresentation == data[3]
assert getattr(ds, 'NumberOfFrames', 1) == data[4]
arr = ds.pixel_array
assert arr.flags.writeable
assert data[5] == arr.shape
assert arr.dtype == data[6]
    def test_array(self):
"""Test returned array values are OK."""
ds = dcmread(JPEG_LS_LOSSLESS)
arr = ds.pixel_array
# Checked against GDCM
assert (
[170, 193, 191, 373, 1293, 2053, 1879, 1683, 1711] ==
arr[55:65, 35].tolist()
)
@pytest.mark.skipif(not TEST_JPEG2K, reason="no -openjpeg plugin")
class TestJPEG2K:
def setup(self):
"""Setup the test datasets and the environment."""
self.original_handlers = pydicom.config.pixel_data_handlers
pydicom.config.pixel_data_handlers = [NP_HANDLER, LJ_HANDLER]
def teardown(self):
"""Restore the environment."""
pydicom.config.pixel_data_handlers = self.original_handlers
@pytest.mark.parametrize('fpath, data', J2K_REFERENCE_DATA)
def test_properties_as_array(self, fpath, data):
"""Test dataset, pixel_array and as_array() are as expected."""
req_fixes = [
J2KR_16_16_1_0_10F_M2,
J2KR_16_14_1_1_1F_M2,
J2KI_16_14_1_1_1F_M2
]
ds = dcmread(fpath)
assert ds.file_meta.TransferSyntaxUID == data[0]
assert ds.BitsAllocated == data[1]
assert ds.SamplesPerPixel == data[2]
assert ds.PixelRepresentation == data[3]
assert getattr(ds, 'NumberOfFrames', 1) == data[4]
# Check Dataset.pixel_array
if fpath in req_fixes:
with pytest.warns(UserWarning):
arr = ds.pixel_array
else:
arr = ds.pixel_array
assert arr.flags.writeable
assert data[5] == arr.shape
assert arr.dtype == data[6]
# Check handlers as_array() function
if fpath in req_fixes:
with pytest.warns(UserWarning):
arr = as_array(ds)
else:
arr = as_array(ds)
assert arr.flags.writeable
assert data[5] == arr.shape
assert arr.dtype == data[6]
@pytest.mark.parametrize('fpath, rpath, fixes', J2K_MATCHING_DATASETS)
def test_array(self, fpath, rpath, fixes):
"""Test pixel_array returns correct values."""
ds = dcmread(fpath)
if fixes:
with pytest.warns(UserWarning):
arr = ds.pixel_array
else:
arr = ds.pixel_array
ref = dcmread(rpath).pixel_array
assert np.array_equal(arr, ref)
@pytest.mark.parametrize('fpath, rpath, fixes', J2K_MATCHING_DATASETS)
def test_generate_frames(self, fpath, rpath, fixes):
"""Test pixel_array returns correct values."""
ds = dcmread(fpath)
frame_generator = generate_frames(ds)
ref = dcmread(rpath).pixel_array
nr_frames = getattr(ds, 'NumberOfFrames', 1)
for ii in range(nr_frames):
if fixes:
with pytest.warns(UserWarning):
arr = next(frame_generator)
else:
arr = next(frame_generator)
if nr_frames > 1:
assert np.array_equal(arr, ref[ii, ...])
else:
assert np.array_equal(arr, ref)
with pytest.raises(StopIteration):
next(frame_generator)
def test_warnings(self):
"""Test the plugin warnings work."""
# Bits Stored
ds = dcmread(J2KR_16_14_1_1_1F_M2)
msg = (
r"The \(0028,0101\) Bits Stored value '16' in the dataset does "
r"not match the component precision value '14' found in the JPEG "
r"2000 data. It's recommended that you change the Bits Stored "
r"value to produce the correct output"
)
with pytest.warns(UserWarning, match=msg):
ds.pixel_array
# Pixel Representation
ds.BitsStored = 14
ds.PixelRepresentation = 0
msg = (
r"The \(0028,0103\) Pixel Representation value '0' \(unsigned\) "
r"in the dataset does not match the format of the values found in "
r"the JPEG 2000 data 'signed'"
)
with pytest.warns(UserWarning, match=msg):
ds.pixel_array
# Samples per Pixel
ds.PixelRepresentation = 0
ds.SamplesPerPixel = 3
msg = (
r"The \(0028,0002\) Samples per Pixel value '3' in the dataset "
r"does not match the number of components '1' found in the JPEG "
r"2000 data. It's recommended that you change the Samples per "
r"Pixel value to produce the correct output"
)
with pytest.warns(UserWarning, match=msg):
with pytest.raises(ValueError):
ds.pixel_array
# JP2 header
ds = dcmread(J2KR_08_08_3_0_1F_YBR_RCT)
msg = (
r"The \(7FE0,0010\) Pixel Data contains a JPEG 2000 codestream "
r"with the optional JP2 file format header, which is "
r"non-conformant to the DICOM Standard \(Part 5, Annex A.4.4\)"
)
with pytest.warns(UserWarning, match=msg):
ds.pixel_array
def test_decompress_using_pylibjpeg(self):
"""Test decompressing JPEG2K with pylibjpeg handler succeeds."""
ds = dcmread(J2KR_16_12_1_0_1F_M2)
ds.decompress(handler_name='pylibjpeg')
arr = ds.pixel_array
ds = dcmread(get_testdata_file("MR2_J2KR.dcm"))
ref = ds.pixel_array
assert np.array_equal(arr, ref)
def test_pixel_rep_mismatch(self):
"""Test mismatched j2k sign and Pixel Representation."""
ds = dcmread(J2KR_16_13_1_1_1F_M2_MISMATCH)
assert 1 == ds.PixelRepresentation
assert 13 == ds.BitsStored
bs = defragment_data(ds.PixelData)
params = get_j2k_parameters(bs)
assert 13 == params["precision"]
assert not params["is_signed"]
msg = r"value '1' \(signed\)"
with pytest.warns(UserWarning, match=msg):
arr = ds.pixel_array
assert 'int16' == arr.dtype
assert (512, 512) == arr.shape
assert arr.flags.writeable
assert -2000 == arr[0, 0]
assert [621, 412, 138, -193, -520, -767, -907, -966, -988, -995] == (
arr[47:57, 279].tolist()
)
assert [-377, -121, 141, 383, 633, 910, 1198, 1455, 1638, 1732] == (
arr[328:338, 106].tolist()
)
RLE_REFERENCE_DATA = [
# fpath, (bits, nr samples, pixel repr, nr frames, shape, dtype)
(RLE_8_1_1F, (8, 1, 0, 1, (600, 800), 'uint8')),
(RLE_8_1_2F, (8, 1, 0, 2, (2, 600, 800), 'uint8')),
(RLE_8_3_1F, (8, 3, 0, 1, (100, 100, 3), 'uint8')),
(RLE_8_3_2F, (8, 3, 0, 2, (2, 100, 100, 3), 'uint8')),
(RLE_16_1_1F, (16, 1, 1, 1, (64, 64), 'int16')),
(RLE_16_1_10F, (16, 1, 0, 10, (10, 64, 64), 'uint16')),
(RLE_16_3_1F, (16, 3, 0, 1, (100, 100, 3), 'uint16')),
(RLE_16_3_2F, (16, 3, 0, 2, (2, 100, 100, 3), 'uint16')),
(RLE_32_1_1F, (32, 1, 0, 1, (10, 10), 'uint32')),
(RLE_32_1_15F, (32, 1, 0, 15, (15, 10, 10), 'uint32')),
(RLE_32_3_1F, (32, 3, 0, 1, (100, 100, 3), 'uint32')),
(RLE_32_3_2F, (32, 3, 0, 2, (2, 100, 100, 3), 'uint32')),
]
RLE_MATCHING_DATASETS = [
# (compressed, reference)
pytest.param(RLE_8_1_1F, get_testdata_file("OBXXXX1A.dcm")),
pytest.param(RLE_8_1_2F, get_testdata_file("OBXXXX1A_2frame.dcm")),
pytest.param(RLE_8_3_1F, get_testdata_file("SC_rgb.dcm")),
pytest.param(RLE_8_3_2F, get_testdata_file("SC_rgb_2frame.dcm")),
pytest.param(RLE_16_1_1F, get_testdata_file("MR_small.dcm")),
pytest.param(RLE_16_1_10F, get_testdata_file("emri_small.dcm")),
pytest.param(RLE_16_3_1F, get_testdata_file("SC_rgb_16bit.dcm")),
pytest.param(RLE_16_3_2F, get_testdata_file("SC_rgb_16bit_2frame.dcm")),
pytest.param(RLE_32_1_1F, get_testdata_file("rtdose_1frame.dcm")),
pytest.param(RLE_32_1_15F, get_testdata_file("rtdose.dcm")),
pytest.param(RLE_32_3_1F, get_testdata_file("SC_rgb_32bit.dcm")),
pytest.param(RLE_32_3_2F, get_testdata_file("SC_rgb_32bit_2frame.dcm")),
]
@pytest.mark.skipif(not TEST_RLE, reason="no -rle plugin")
class TestRLE:
def test_decompress_using_pylibjpeg(self):
"""Test decompressing RLE with pylibjpeg handler succeeds."""
ds = dcmread(RLE_8_3_1F)
ds.decompress(handler_name='pylibjpeg')
arr = ds.pixel_array
ds = dcmread(get_testdata_file("SC_rgb.dcm"))
ref = ds.pixel_array
assert np.array_equal(arr, ref)
@pytest.mark.parametrize('fpath, data', RLE_REFERENCE_DATA)
def test_properties_as_array(self, fpath, data):
"""Test dataset, pixel_array and as_array() are as expected."""
ds = dcmread(fpath)
assert RLELossless == ds.file_meta.TransferSyntaxUID
assert ds.BitsAllocated == data[0]
assert ds.SamplesPerPixel == data[1]
assert ds.PixelRepresentation == data[2]
assert getattr(ds, 'NumberOfFrames', 1) == data[3]
        # Note: decompress() modifies the dataset in place
ds.decompress("pylibjpeg")
# Check Dataset.pixel_array
arr = ds.pixel_array
assert arr.flags.writeable
assert data[4] == arr.shape
assert arr.dtype == data[5]
# Check handler's as_array() function
ds = dcmread(fpath)
arr = as_array(ds)
assert arr.flags.writeable
assert data[4] == arr.shape
assert arr.dtype == data[5]
@pytest.mark.parametrize('fpath, rpath', RLE_MATCHING_DATASETS)
def test_array(self, fpath, rpath):
"""Test pixel_array returns correct values."""
ds = dcmread(fpath)
ds.decompress("pylibjpeg")
arr = ds.pixel_array
ref = dcmread(rpath).pixel_array
assert np.array_equal(arr, ref)
@pytest.mark.parametrize('fpath, rpath', RLE_MATCHING_DATASETS)
def test_generate_frames(self, fpath, rpath):
"""Test pixel_array returns correct values."""
ds = dcmread(fpath)
frame_generator = generate_frames(ds)
ref = dcmread(rpath).pixel_array
nr_frames = getattr(ds, 'NumberOfFrames', 1)
for ii in range(nr_frames):
arr = next(frame_generator)
if nr_frames > 1:
assert np.array_equal(arr, ref[ii, ...])
else:
assert np.array_equal(arr, ref)
with pytest.raises(StopIteration):
next(frame_generator)
|
[
"noreply@github.com"
] |
ARteapartedelarte.noreply@github.com
|
e40704c97d19358dd5216ad35e1b047a08393b37
|
6f0b9856f3c46557af1c58d86b16f1f93d0fe95f
|
/mcdonald/Python-dsstore-master/stdin.py
|
c8eea1e732e1fd55fc6faccdd2a8399f2b689436
|
[
"MIT"
] |
permissive
|
vito-lbs/35c3ctf-junior
|
1acae908cbd16a6867ffc694de8d9d720319723f
|
39f956714fd61f571a9de07078d062298b89936e
|
refs/heads/master
| 2020-04-13T20:09:00.270495
| 2018-12-30T12:49:52
| 2018-12-30T12:49:52
| 163,422,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
import dsstore
import os
import sys
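# Parse a .DS_Store blob supplied on stdin and list the file entries found in it
# (assumed invocation: python stdin.py < .DS_Store)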
if __name__ == "__main__":
d = dsstore.DS_Store(sys.stdin.read(), debug=False)
files = d.traverse_root()
print("Count: ", len(files))
for f in files:
print(f)
|
[
"vito@legitbs.net"
] |
vito@legitbs.net
|
91ca1eae352c62b73172ecfd062b21b351e121ca
|
dbc2cd83ba5440262ce4ed20081e6d0ee995f243
|
/problem/problem/urls.py
|
1654a2a2258cdabccf0d91bfca7097b32ffd27df
|
[] |
no_license
|
templargin/datepicker
|
64ef313d0a20543c251c1b909dcb7584aaf0bbd4
|
335662bc283880a5a96023b82d0298f9ecae636e
|
refs/heads/master
| 2022-12-05T10:36:19.850909
| 2020-08-30T05:19:09
| 2020-08-30T05:19:09
| 291,404,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
"""problem URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('datepicker.urls')),
]
|
[
"templargin@gmail.com"
] |
templargin@gmail.com
|
97e0d5144b9a6ae3c4194a613b241057f387c463
|
d97b7d689c39f73d38e35ad4431262a4b988c61f
|
/Python/python_new/sec07.py
|
64a6973dd5602fcaef05eec74cd7d9609325ea07
|
[] |
no_license
|
jiye-stingray/Python
|
58fefe46b100cbb3e8212f7de36a14cf4c27ee71
|
51c81230cedc64f6d63e43b1b7888db36a37c7db
|
refs/heads/main
| 2023-07-01T05:07:35.821903
| 2021-07-30T00:29:19
| 2021-07-30T00:29:19
| 389,515,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
# Classes
# Declaration:
#   class ClassName:    (class names start with an uppercase letter, e.g. Stu, StuInfo)
#       functions...
# class UserInfo:
#     def __init__(self, name, hp, add):
#         self.name = name
#         print('Name:', self.name)
# user1 = UserInfo('Kim')
# user2 = UserInfo('Park')
# print(id(user1))
# print(id(user2))
# print('user1:', user1.__dict__)
# print('user2:', user2.__dict__)
class Student:
name = 'student'
age = 0
    def __init__(self, name, age) -> None:
        print('initializing the object')
        self.name = name
        self.age = age
    def __del__(self):
        print('deleting the object')
    def info(self):
        print('My name is', self.name)
        print('I am', self.age, 'years old')
s = Student('JaeHyun', 22)
s.info()
print(type(s))  # inspect the type before deletion; after del s this would raise NameError
del s
class Student1:
def __init__(self, name, age) -> None:
self.university = 'SNU'
self.name = name
self.age = age
self.isStudying = True
self.studyHour = 0
def study(self):
if self.isStudying:
self.studyHour += 1
    def hourofstudy(self):
        print('{} current study time: {} hours'.format(self.name, self.studyHour))
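# Usage sketch (not in the original file):
# s1 = Student1('JaeHyun', 22)
# s1.study()
# s1.hourofstudy()   # -> 'JaeHyun current study time: 1 hours'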
|
[
"noreply@github.com"
] |
jiye-stingray.noreply@github.com
|
fb92ee596bbf8cf147d7b2a163c82507d79b8efa
|
a8dd2eba427c91945a15511dfb507efc5ef204b3
|
/odin/utils/scipy_interface.py
|
d61579b99df1ec941c4a5fd22cff09e1ad09f155
|
[
"MIT"
] |
permissive
|
sdi1100041/SLEIPNIR
|
3ace6e15731b357148710fb40b02ce9a2121d0f0
|
02dd3eca8574899fd3f0e287b1a050e76e5ba0de
|
refs/heads/master
| 2021-02-10T10:12:32.481825
| 2020-03-06T15:54:00
| 2020-03-06T15:54:00
| 244,372,898
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,088
|
py
|
#Some minor adjustments to the original TensorFlow code
#for better logging
#
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow interface for third-party optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.core.protobuf.config_pb2 import RunOptions, RunMetadata
from tensorflow.python.profiler.model_analyzer import Profiler
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
logging.set_verbosity(logging.DEBUG)
__all__ = ['ExternalOptimizerInterface', 'ScipyOptimizerInterface']
class ExternalOptimizerInterface(object):
"""Base class for interfaces with external optimization algorithms.
Subclass this and implement `_minimize` in order to wrap a new optimization
algorithm.
`ExternalOptimizerInterface` should not be instantiated directly; instead use
e.g. `ScipyOptimizerInterface`.
@@__init__
@@minimize
"""
def __init__(self,
loss,
var_list=None,
equalities=None,
inequalities=None,
var_to_bounds=None,
file_writer=None,
dir_prof_name=None,
**optimizer_kwargs):
"""Initialize a new interface instance.
Args:
loss: A scalar `Tensor` to be minimized.
var_list: Optional `list` of `Variable` objects to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
equalities: Optional `list` of equality constraint scalar `Tensor`s to be
held equal to zero.
inequalities: Optional `list` of inequality constraint scalar `Tensor`s
to be held nonnegative.
var_to_bounds: Optional `dict` where each key is an optimization
`Variable` and each corresponding value is a length-2 tuple of
`(low, high)` bounds. Although enforcing this kind of simple constraint
could be accomplished with the `inequalities` arg, not all optimization
algorithms support general inequality constraints, e.g. L-BFGS-B. Both
`low` and `high` can either be numbers or anything convertible to a
NumPy array that can be broadcast to the shape of `var` (using
`np.broadcast_to`). To indicate that there is no bound, use `None` (or
`+/- np.infty`). For example, if `var` is a 2x3 matrix, then any of
the following corresponding `bounds` could be supplied:
* `(0, np.infty)`: Each element of `var` held positive.
* `(-np.infty, [1, 2])`: First column less than 1, second column less
than 2.
* `(-np.infty, [[1], [2], [3]])`: First row less than 1, second row less
than 2, etc.
* `(-np.infty, [[1, 2, 3], [4, 5, 6]])`: Entry `var[0, 0]` less than 1,
`var[0, 1]` less than 2, etc.
**optimizer_kwargs: Other subclass-specific keyword arguments.
"""
self._file_writer=file_writer
self.dir_name=dir_prof_name
self._loss = loss
self._equalities = equalities or []
self._inequalities = inequalities or []
if var_list is None:
self._vars = variables.trainable_variables()
else:
self._vars = list(var_list)
packed_bounds = None
if var_to_bounds is not None:
left_packed_bounds = []
right_packed_bounds = []
for var in self._vars:
shape = var.get_shape().as_list()
bounds = (-np.infty, np.infty)
if var in var_to_bounds:
bounds = var_to_bounds[var]
left_packed_bounds.extend(list(np.broadcast_to(bounds[0], shape).flat))
right_packed_bounds.extend(list(np.broadcast_to(bounds[1], shape).flat))
packed_bounds = list(zip(left_packed_bounds, right_packed_bounds))
self._packed_bounds = packed_bounds
self._update_placeholders = [
array_ops.placeholder(var.dtype) for var in self._vars
]
self._var_updates = [
var.assign(array_ops.reshape(placeholder, _get_shape_tuple(var)))
for var, placeholder in zip(self._vars, self._update_placeholders)
]
loss_grads = _compute_gradients(loss, self._vars)
equalities_grads = [
_compute_gradients(equality, self._vars)
for equality in self._equalities
]
inequalities_grads = [
_compute_gradients(inequality, self._vars)
for inequality in self._inequalities
]
self.optimizer_kwargs = optimizer_kwargs
self._packed_var = self._pack(self._vars)
self._packed_loss_grad = self._pack(loss_grads)
self._packed_equality_grads = [
self._pack(equality_grads) for equality_grads in equalities_grads
]
self._packed_inequality_grads = [
self._pack(inequality_grads) for inequality_grads in inequalities_grads
]
dims = [_prod(_get_shape_tuple(var)) for var in self._vars]
accumulated_dims = list(_accumulate(dims))
self._packing_slices = [
slice(start, end)
for start, end in zip(accumulated_dims[:-1], accumulated_dims[1:])
]
def minimize(self,
session=None,
feed_dict=None,
fetches=None,
step_callback=None,
loss_callback=None,
**run_kwargs):
"""Minimize a scalar `Tensor`.
Variables subject to optimization are updated in-place at the end of
optimization.
Note that this method does *not* just return a minimization `Op`, unlike
`Optimizer.minimize()`; instead it actually performs minimization by
executing commands to control a `Session`.
Args:
session: A `Session` instance.
feed_dict: A feed dict to be passed to calls to `session.run`.
fetches: A list of `Tensor`s to fetch and supply to `loss_callback`
as positional arguments.
step_callback: A function to be called at each optimization step;
arguments are the current values of all optimization variables
flattened into a single vector.
loss_callback: A function to be called every time the loss and gradients
are computed, with evaluated fetches supplied as positional arguments.
**run_kwargs: kwargs to pass to `session.run`.
"""
session = session or ops.get_default_session()
feed_dict = feed_dict or {}
fetches = fetches or []
loss_callback = loss_callback or (lambda *fetches: None)
step_callback = step_callback or (lambda xk: None)
# Construct loss function and associated gradient.
loss_grad_func = self._make_eval_func([self._loss,
self._packed_loss_grad], session,
feed_dict, fetches, loss_callback)
# Construct equality constraint functions and associated gradients.
equality_funcs = self._make_eval_funcs(self._equalities, session, feed_dict,
fetches)
equality_grad_funcs = self._make_eval_funcs(self._packed_equality_grads,
session, feed_dict, fetches)
# Construct inequality constraint functions and associated gradients.
inequality_funcs = self._make_eval_funcs(self._inequalities, session,
feed_dict, fetches)
inequality_grad_funcs = self._make_eval_funcs(self._packed_inequality_grads,
session, feed_dict, fetches)
# Get initial value from TF session.
initial_packed_var_val = session.run(self._packed_var)
# Perform minimization.
packed_var_val = self._minimize(
initial_val=initial_packed_var_val,
loss_grad_func=loss_grad_func,
equality_funcs=equality_funcs,
equality_grad_funcs=equality_grad_funcs,
inequality_funcs=inequality_funcs,
inequality_grad_funcs=inequality_grad_funcs,
packed_bounds=self._packed_bounds,
step_callback=step_callback,
optimizer_kwargs=self.optimizer_kwargs)
res=packed_var_val[1:3]
packed_var_val=packed_var_val[0]
var_vals = [
packed_var_val[packing_slice] for packing_slice in self._packing_slices
]
# Set optimization variables to their new values.
session.run(
self._var_updates,
feed_dict=dict(zip(self._update_placeholders, var_vals)),
**run_kwargs)
return res
def _minimize(self, initial_val, loss_grad_func, equality_funcs,
equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
packed_bounds, step_callback, optimizer_kwargs):
"""Wrapper for a particular optimization algorithm implementation.
It would be appropriate for a subclass implementation of this method to
raise `NotImplementedError` if unsupported arguments are passed: e.g. if an
algorithm does not support constraints but `len(equality_funcs) > 0`.
Args:
initial_val: A NumPy vector of initial values.
loss_grad_func: A function accepting a NumPy packed variable vector and
returning two outputs, a loss value and the gradient of that loss with
respect to the packed variable vector.
equality_funcs: A list of functions each of which specifies a scalar
quantity that an optimizer should hold exactly zero.
equality_grad_funcs: A list of gradients of equality_funcs.
inequality_funcs: A list of functions each of which specifies a scalar
quantity that an optimizer should hold >= 0.
inequality_grad_funcs: A list of gradients of inequality_funcs.
packed_bounds: A list of bounds for each index, or `None`.
step_callback: A callback function to execute at each optimization step,
supplied with the current value of the packed variable vector.
optimizer_kwargs: Other key-value arguments available to the optimizer.
Returns:
The optimal variable vector as a NumPy vector.
"""
raise NotImplementedError(
'To use ExternalOptimizerInterface, subclass from it and implement '
'the _minimize() method.')
@classmethod
def _pack(cls, tensors):
"""Pack a list of `Tensor`s into a single, flattened, rank-1 `Tensor`."""
if not tensors:
return None
elif len(tensors) == 1:
return array_ops.reshape(tensors[0], [-1])
else:
flattened = [array_ops.reshape(tensor, [-1]) for tensor in tensors]
return array_ops.concat(flattened, 0)
def _make_eval_func(self, tensors, session, feed_dict, fetches,
callback=None):
"""Construct a function that evaluates a `Tensor` or list of `Tensor`s."""
if not isinstance(tensors, list):
tensors = [tensors]
num_tensors = len(tensors)
run_options = RunOptions(trace_level=RunOptions.FULL_TRACE)
run_metadata = RunMetadata()
if self.dir_name:
if not gfile.Exists(self.dir_name):
gfile.MakeDirs(self.dir_name)
def eval_func(x):
"""Function to evaluate a `Tensor`."""
eval_func.step+=1
augmented_feed_dict = {
var: x[packing_slice].reshape(_get_shape_tuple(var))
for var, packing_slice in zip(self._vars, self._packing_slices)
}
augmented_feed_dict.update(feed_dict)
augmented_fetches = tensors + fetches
if (eval_func.step % 10 == 0) and (self._file_writer or self.dir_name):
augmented_fetch_vals = session.run(augmented_fetches, feed_dict=augmented_feed_dict,options=run_options, run_metadata=run_metadata)
if self.dir_name:
profiler = Profiler()
profiler.add_step(0, run_metadata)
filename = os.path.join(compat.as_bytes(self.dir_name), compat.as_bytes('profile_%d' % eval_func.step))
with gfile.Open(filename, 'wb') as f:
f.write(profiler.serialize_to_string())
if self._file_writer:
self._file_writer.add_run_metadata(run_metadata, 'step%05d' % eval_func.step)
else:
augmented_fetch_vals = session.run(augmented_fetches, feed_dict=augmented_feed_dict)
if callable(callback):
callback(*augmented_fetch_vals[num_tensors:])
return augmented_fetch_vals[:num_tensors]
eval_func.step=-1
return eval_func
def _make_eval_funcs(self,
tensors,
session,
feed_dict,
fetches,
callback=None):
return [
self._make_eval_func(tensor, session, feed_dict, fetches, callback)
for tensor in tensors
]
class ScipyOptimizerInterface(ExternalOptimizerInterface):
"""Wrapper allowing `scipy.optimize.minimize` to operate a `tf.Session`.
Example:
```python
vector = tf.Variable([7., 7.], 'vector')
# Make vector norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
optimizer = ScipyOptimizerInterface(loss, options={'maxiter': 100})
with tf.Session() as session:
optimizer.minimize(session)
# The value of vector should now be [0., 0.].
```
Example with simple bound constraints:
```python
vector = tf.Variable([7., 7.], 'vector')
# Make vector norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
optimizer = ScipyOptimizerInterface(
loss, var_to_bounds={vector: ([1, 2], np.infty)})
with tf.Session() as session:
optimizer.minimize(session)
# The value of vector should now be [1., 2.].
```
Example with more complicated constraints:
```python
vector = tf.Variable([7., 7.], 'vector')
# Make vector norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
# Ensure the vector's y component is = 1.
equalities = [vector[1] - 1.]
# Ensure the vector's x component is >= 1.
inequalities = [vector[0] - 1.]
# Our default SciPy optimization algorithm, L-BFGS-B, does not support
# general constraints. Thus we use SLSQP instead.
optimizer = ScipyOptimizerInterface(
loss, equalities=equalities, inequalities=inequalities, method='SLSQP')
with tf.Session() as session:
optimizer.minimize(session)
# The value of vector should now be [1., 1.].
```
"""
_DEFAULT_METHOD = 'L-BFGS-B'
def _minimize(self, initial_val, loss_grad_func, equality_funcs,
equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
packed_bounds, step_callback, optimizer_kwargs):
def loss_grad_func_wrapper(x):
# SciPy's L-BFGS-B Fortran implementation requires gradients as doubles.
loss, gradient = loss_grad_func(x)
return loss, gradient.astype('float64')
optimizer_kwargs = dict(optimizer_kwargs.items())
method = optimizer_kwargs.pop('method', self._DEFAULT_METHOD)
constraints = []
for func, grad_func in zip(equality_funcs, equality_grad_funcs):
constraints.append({'type': 'eq', 'fun': func, 'jac': grad_func})
for func, grad_func in zip(inequality_funcs, inequality_grad_funcs):
constraints.append({'type': 'ineq', 'fun': func, 'jac': grad_func})
minimize_args = [loss_grad_func_wrapper, initial_val]
minimize_kwargs = {
'jac': True,
'callback': step_callback,
'method': method,
'constraints': constraints,
'bounds': packed_bounds,
}
for kwarg in minimize_kwargs:
if kwarg in optimizer_kwargs:
if kwarg == 'bounds':
# Special handling for 'bounds' kwarg since ability to specify bounds
# was added after this module was already publicly released.
raise ValueError(
'Bounds must be set using the var_to_bounds argument')
raise ValueError(
'Optimizer keyword arg \'{}\' is set '
'automatically and cannot be injected manually'.format(kwarg))
minimize_kwargs.update(optimizer_kwargs)
import scipy.optimize # pylint: disable=g-import-not-at-top
result = scipy.optimize.minimize(*minimize_args, **minimize_kwargs)
message_lines = [
'Optimization terminated with:',
' Message: %s',
' Objective function value: %f',
]
message_args = [result.message, result.fun]
if hasattr(result, 'nit'):
# Some optimization methods might not provide information such as nit and
# nfev in the return. Logs only available information.
message_lines.append(' Number of iterations: %d')
message_args.append(result.nit)
if hasattr(result, 'nfev'):
message_lines.append(' Number of functions evaluations: %d')
message_args.append(result.nfev)
logging.info('\n'.join(message_lines), *message_args)
    # Some methods (e.g. COBYLA) do not report 'nit'; fall back to None.
    return [result['x'], result.success, getattr(result, 'nit', None)]
def _accumulate(list_):
total = 0
yield total
for x in list_:
total += x
yield total
def _get_shape_tuple(tensor):
return tuple(dim.value for dim in tensor.get_shape())
def _prod(array):
prod = 1
for value in array:
prod *= value
return prod
def _compute_gradients(tensor, var_list):
grads = gradients.gradients(tensor, var_list)
# tf.gradients sometimes returns `None` when it should return 0.
return [
grad if grad is not None else array_ops.zeros_like(var)
for var, grad in zip(var_list, grads)
]
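# Hedged usage sketch of the modified interface: since _minimize above
# returns [result['x'], result.success, result.nit] and minimize() keeps
# the [1:3] slice, the caller receives (success, nit). Names below are
# illustrative only.
# import tensorflow as tf
# vector = tf.Variable([7., 7.], 'vector')
# loss = tf.reduce_sum(tf.square(vector))
# optimizer = ScipyOptimizerInterface(loss, options={'maxiter': 100})
# with tf.Session() as session:
#   session.run(tf.global_variables_initializer())
#   success, nit = optimizer.minimize(session)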
|
[
"manosangelis@gmail.com"
] |
manosangelis@gmail.com
|
6ee2d4096e128491473231d9eeb153b9099cd13b
|
95b747c2ae2f95b5696c287db2d14087669fa3ca
|
/billie_pr/billie_pr/asgi.py
|
2f772846f4ef8afbdb2e43f808d1b6df9fc1d213
|
[] |
no_license
|
victorsierraram/bille_vsr
|
3bf6c43da2b260d3da382ed98bde5c9dc27d7599
|
0216ad43fbce489ae1ae72f2cd4f7ac5f0d8fd08
|
refs/heads/master
| 2022-12-28T23:56:57.486747
| 2020-09-25T15:53:41
| 2020-09-25T15:53:41
| 298,615,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for billie_pr project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'billie_pr.settings')
application = get_asgi_application()
|
[
"v.sierra@i2tic.com"
] |
v.sierra@i2tic.com
|
57dd010b386b4608b2bd455946413d12040e1b8f
|
ae4b91e269df7c80a0fa75ccc054d2241de82461
|
/adder/adder.py
|
d685505a984ce254d2335f905e4bb083879dbb1d
|
[] |
no_license
|
ydnatag/sifive-bsas-hdl-python
|
be48b32d1080f5b77fec7429b4f8ef86c8e02d41
|
3763300510eaa496b5d24fff4846494d3446a901
|
refs/heads/master
| 2023-02-24T14:51:19.310738
| 2019-11-21T17:05:16
| 2019-11-21T17:05:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,755
|
py
|
from nmigen import *
from nmigen.cli import main
from nmigen.hdl.rec import Direction
class AxiStream(Record):
def __init__(self, width, direction=None, name=None, fields=None):
self.width = width
self.DATA_FIELDS = [('TDATA', width)]
if direction == 'sink':
layout = [('TDATA', width, Direction.FANIN),
('TVALID', 1, Direction.FANIN),
('TREADY', 1, Direction.FANOUT),
('TLAST', 1, Direction.FANIN)]
elif direction == 'source':
layout = [('TDATA', width, Direction.FANOUT),
('TVALID', 1, Direction.FANOUT),
('TREADY', 1, Direction.FANIN),
('TLAST', 1, Direction.FANOUT)]
Record.__init__(self, layout, name=name, fields=fields)
self.data = self.TDATA
self.valid = self.TVALID
self.ready = self.TREADY
self.last = self.TLAST
def accepted(self):
return (self.TVALID == 1) & (self.TREADY == 1)
class Adder(Elaboratable):
def __init__(self, width, domain='comb', interface=None):
self.width = width
self.interface = interface
        if self.interface is None:
self.a = Signal(width)
self.b = Signal(width)
self.r = Signal(width + 1)
self.d = domain
else:
self.a = self.interface(width, 'sink', name='a')
self.b = self.interface(width, 'sink', name='b')
self.r = self.interface(width + 1, 'source', name='r')
def elaborate(self, platform):
m = Module()
        if self.interface is None:
m.domain[self.d] += self.r.eq(self.a + self.b)
else:
comb = m.domain.comb
sync = m.domain.sync
comb += self.a.ready.eq(0)
comb += self.b.ready.eq(0)
output_available = (self.r.valid == 0) | self.r.accepted()
input_ready = (self.a.valid == 1) & (self.b.valid == 1) & output_available
comb += self.a.ready.eq(input_ready)
comb += self.b.ready.eq(input_ready)
with m.If(self.a.accepted() | self.b.accepted()):
sync += self.r.data.eq(self.a.data + self.b.data)
sync += self.r.valid.eq(1)
sync += self.r.last.eq(self.a.last | self.b.last)
with m.Elif(self.r.accepted()):
sync += self.r.data.eq(0)
sync += self.r.valid.eq(0)
sync += self.r.last.eq(0)
return m
if __name__ == '__main__':
m = Adder(10, 'sync', AxiStream)
ports = []
for i in [m.a, m.b, m.r]:
ports += [i[f] for f in i.fields]
main(m, platform=None, ports=ports)
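# Handshake note (a sketch of the intent): a.ready and b.ready are both
# driven by input_ready, so a beat is consumed only when both operands are
# valid and the result register is free (r.valid low, or r accepted this
# cycle); back-pressure on r therefore stalls both inputs.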
|
[
"andresdemski@gmail.com"
] |
andresdemski@gmail.com
|
7e40dfecbe5369523eaad88b5eb6aa6a9cc5f77a
|
42bb1d9bcd38f30a37e3adb52da68dc0ec868535
|
/clients/python_client/scikitlearn_iris_client.py
|
d77f1c361d48e059d4b62a453fb365528246cedd
|
[
"Apache-2.0"
] |
permissive
|
zhaoyingjun/simple_tensorflow_serving
|
828a7b76dd3ab39ca1dedbf1c5a4d8d7c4de1a86
|
8e6a2ac22a323905b2d3e05b6207ce43c6806470
|
refs/heads/master
| 2020-08-21T17:29:38.702132
| 2019-09-24T08:12:59
| 2019-09-24T08:12:59
| 216,208,646
| 0
| 1
|
Apache-2.0
| 2019-10-19T13:12:23
| 2019-10-19T13:12:23
| null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
#!/usr/bin/env python
import requests
def main():
endpoint = "http://127.0.0.1:8500"
input_data = {
"model_name": "default",
"model_version": 1,
"data": [[1.0, 2.0, 3.0, 4.0]]
}
result = requests.post(endpoint, json=input_data)
print(result.text)
input_data = {
"preprocess": True,
"postprocess": True,
"data": [[1.0, 2.0, 3.0, 4.0]]
}
result = requests.post(endpoint, json=input_data)
print(result.text)
if __name__ == "__main__":
main()
|
[
"tobeg3oogle@gmail.com"
] |
tobeg3oogle@gmail.com
|
5cae62a723fc75db2acb2a368ef8e647d0ff7cda
|
8a92ed71ab5efc18c7221e7aef1b306a6d2657c8
|
/General/List_data_type.py
|
2a01ce6502e40c0d26d344bb6973cc07d6f9159e
|
[] |
no_license
|
ksreddy1980/test
|
a163cff96ec535f0da46d6cd7d751b08ccb37a07
|
0ef300ec830e0eee9f68149d178b4ce25a6766ee
|
refs/heads/master
| 2021-03-24T09:43:59.006443
| 2018-01-10T16:23:18
| 2018-01-10T16:23:18
| 116,905,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# Define a list
task= ['brush','bath','office','drinks']
print(task)
# Update a list
task[1]='Dryclean'
print(task)
#Print the length of the list
print(len(task))
#print an element of the list
print(task[2])
#print a part of the list
print(task[0:2])
#Concatenation of lists
list1=['computers','maths','Python','Hadoop']
print(task+list1)
#Print the list multiple times
print(task*3 +list1)
|
[
"kskoteru@gmail.com"
] |
kskoteru@gmail.com
|
d87a4bf8c96e7786d39a38cdb4b518ff441e65a9
|
1528e85557dff16ea148d97c40c7667e1c3ec574
|
/hackinstring.py
|
47e9e42e0d387ed1d1562d02437a8eb40fe3d149
|
[] |
no_license
|
aashishksingh/HackerRankSoln
|
6c2871e84046a900c36ab45f84ddef1a22af4c23
|
924381d02e9d62ea970d7f7e39f97f5834df600a
|
refs/heads/master
| 2021-01-21T14:28:54.473289
| 2017-09-28T13:42:01
| 2017-09-28T13:42:01
| 95,291,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
#!/bin/python3
import sys
q = int(input().strip())
for a0 in range(q):
s = input().strip()
lst=list(s)
# your code goes here
word='hackerrank'
d=[]
flag=False
for c in word:
d.append([c,-1])
# print(d)
#print(s.index('a'))
j=-1
for i in range(len(d)):
if d[i][0] in lst[j+1:]:
d[i][1]=lst[j+1:].index(d[i][0])+j+1
j=d[i][1]
    # print(d)  # debug output; keep commented so only YES/NO is printed
for i in range(len(d)-1):
if (d[i][1]>=d[i+1][1]) and (d[i][1]>0):
flag=True
if not (flag):
print("YES")
else:
print("NO")
|
[
"noreply@github.com"
] |
aashishksingh.noreply@github.com
|
2f94c0237d53466948e828695d5b8374dbf238b1
|
182979d15a51397b1e0967b6be9271f4db9b9d18
|
/Week 2/Searching and Sorting/Square Root.py
|
037ffd7cd9014c8a59315b1708bf495154097664
|
[] |
no_license
|
Harini-Pavithra/GFG-11-Week-DSA-Workshop
|
1d85115ad3985834456bcf5dc2e424ab65bc8e01
|
f305a3a23fd37080e68d0f4d4eeedbcedf399fc9
|
refs/heads/main
| 2023-06-24T01:19:32.410996
| 2021-07-22T16:36:19
| 2021-07-22T16:36:19
| 317,279,085
| 18
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
"""
Square root
Given an integer x, find the square root of x. If x is not a perfect square, then return floor(√x).
Example 1:
Input:
x = 5
Output: 2
Explanation: Since 5 is not a perfect
square, the floor of the square root of 5 is 2.
Example 2:
Input:
x = 4
Output: 2
Explanation: Since 4 is a perfect
square, its square root is 2.
Your Task:
You don't need to read input or print anything. The task is to complete the function floorSqrt() which takes x as the input parameter and returns its square root.
Expected Time Complexity: O(log N)
Expected Auxiliary Space: O(1)
Constraints:
1 <= x <= 10^7
Solution:
"""
#User function Template for python3
#Complete this function
def floorSqrt(x):
#Your code here
return int(math.sqrt(x))
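# Hedged alternative meeting the stated O(log N) bound via binary search
# (the math.sqrt version above relies on floating point):
def floorSqrtBinary(x):
    lo, hi, ans = 1, x, 0
    while lo <= hi:
        mid = (lo + hi) // 2
        if mid * mid <= x:
            ans, lo = mid, mid + 1  # mid is a valid floor candidate
        else:
            hi = mid - 1
    return ans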
#{
# Driver Code Starts
#Initial Template for Python 3
import math
def main():
T=int(input())
while(T>0):
x=int(input())
print(floorSqrt(x))
T-=1
if __name__ == "__main__":
main()
# } Driver Code Ends
|
[
"noreply@github.com"
] |
Harini-Pavithra.noreply@github.com
|
71df3b3b8eb6a2a73b6bcb40342bd58bc13029c7
|
0dcb8d16ca9a71197c5770b734699b0d6d063d25
|
/app/views/main.py
|
2f2803bd3a4ec4fb26e2d5d01c0e4e56e4fbda12
|
[] |
no_license
|
jingmeiliu/flask_restful_autodoc
|
ded6e87cc491cc483447ee0d31d913792995a6e1
|
4c6c140990697743a29a48dc7f8a881397647c14
|
refs/heads/master
| 2020-04-28T09:44:39.116124
| 2019-03-12T09:38:22
| 2019-03-12T09:38:22
| 175,178,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
# Routes for automatically generated API documentation
from flask import Blueprint, redirect, url_for, render_template
from .. import get_app
main = Blueprint('main', __name__)
@main.route('/', methods=['GET'])
def index():
"""Redirect home page to docs page."""
return redirect(url_for('api.index'))
@main.route('/docs/<endpoint>', methods=['GET'])
def docs(endpoint):
"""Document page for an endpoint."""
api = {
'endpoint': endpoint,
'methods': '',
'doc': '',
'url': '',
'name': ''
}
    try:
        func = get_app().view_functions[endpoint]
        api.update(_get_api_doc_split(func))  # update() keeps the 'endpoint' key intact
        api['name'] = _get_api_name(func)
        for rule in get_app().url_map.iter_rules():
            if rule.endpoint == endpoint:
                api['url'] = str(rule)
    except Exception:
        api['doc'] = 'Invalid api endpoint: "{}"!'.format(endpoint)
return render_template('api_docs.html', api=api)
def _get_api_name(func):
"""e.g. Convert 'do_work' to 'Do Work'"""
words = func.__name__.split('_')
words = [w.capitalize() for w in words]
return ' '.join(words)
def _get_api_doc(func):
if func.__doc__:
return func.__doc__
else:
return 'No doc found for this API!'
def _get_api_doc_split(func):
api_docs = {'description': '', 'methods':'','parameter': '', 'response': ''}
description,methods, parameter, response = _get_api_doc(func).split(';')
api_docs['description'] = description.split('===')[1]
api_docs['methods'] = methods.split('===')[1]
api_docs['parameter'] = parameter.split('===')[1]
api_docs['response'] = response.split('===')[1]
return api_docs
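# Hedged sketch of the docstring layout _get_api_doc_split expects: four
# ';'-separated segments, each carrying its value after a '===' marker.
def _example_view():
    """description===Returns a greeting;
    methods===GET;
    parameter===none;
    response===a JSON greeting"""
# _get_api_doc_split(_example_view) ->
#   {'description': 'Returns a greeting', 'methods': 'GET',
#    'parameter': 'none', 'response': 'a JSON greeting'}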
|
[
"m17610062085@163.com"
] |
m17610062085@163.com
|
aa48328ecabf25584e364dac5f0684067c919c3f
|
4de615cb622b3f3b344aec0ad2f562e37534bda3
|
/reading_code/01_if_else/02_if_else.py
|
03dbb8a5b68974f44dd7512189c779b684249525
|
[] |
no_license
|
igin/academy_exercises
|
9679c561f5ddec2b9a04ecfa5ffaff1f3d3ff7b6
|
6697f94f0a3a8525b15829e4354af8ddc3374f73
|
refs/heads/main
| 2023-03-03T21:42:26.049593
| 2021-02-21T10:04:54
| 2021-02-21T10:04:54
| 340,292,286
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
x = 0.3
y = -10
if x > 5:
print("A1")
elif y > -30:
print("A2")
elif x < 10:
print("A3")
else:
print("A4")
if x * 4 < 1:
print("B1")
elif y * x > 0:
print("B2")
elif x == 0.3:
print("B3")
elif y < 0:
print("B4")
else:
print("B5")
|
[
"n.pleschko@gmail.com"
] |
n.pleschko@gmail.com
|
123953571e2bab1ee932089a36b99f7da38c9c69
|
4db571a07884c56ad46fbb69a76018231d2a9d3b
|
/cogs/admin.py
|
cbd2a925d60c6762e8190725565be8913f58a32e
|
[] |
no_license
|
noahgarrett/ThiccBot
|
fe89f4146df799d9fe347e909ac2c2c06f09e557
|
753a2e671e0ec674b2b09e3ae0118ac9d021b597
|
refs/heads/main
| 2023-08-10T15:57:56.086336
| 2021-09-21T15:33:18
| 2021-09-21T15:33:18
| 319,788,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
import discord
from discord.ext import commands, tasks
from discord.utils import get
import asyncio
import youtube_dl
from random import choice
import os, json, random
import main
class Admin(commands.Cog):
def __init__(self, client):
self.client = client
def setup(client):
client.add_cog(Admin(client))
|
[
"67662284+xsychgames@users.noreply.github.com"
] |
67662284+xsychgames@users.noreply.github.com
|
726a7f16647c3a127386899bc01e7866d9b1e643
|
2740f4dfc5bde90663cb5c022751ff0b0620609d
|
/migrations/lifeline/add_column_issuetype.py
|
9d040a8b7830fd60179d1f5001511363a2eeb22d
|
[
"MIT"
] |
permissive
|
danielseetoh/twilio185
|
951a5883d99f1b40b09d4c18c9160840d75e0e80
|
b9b60e230e7a10f00fe8a1fc0bc67ad7ad8db0d8
|
refs/heads/master
| 2021-01-10T12:51:41.648818
| 2016-05-01T20:48:09
| 2016-05-01T20:48:09
| 54,152,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
import psycopg2 as db
import sys
# try:
# con = db.connect(database="lifeline", user="postgres", password="seetoh", host="localhost")
con = db.connect(database="lifeline", user="postgres", password="seetoh", host="localhost")
print('Success!')
cur = con.cursor()
cur.execute("ALTER TABLE requests ADD COLUMN issuetype varchar")
con.commit()
con.close()
# except:
# print 'Failed to connect to database.'
|
[
"danielseetoh92@gmail.com"
] |
danielseetoh92@gmail.com
|
6523dca574371c5e6210bf67e62f3d1285e7cfb2
|
c32d3849f6273a08c646ea54fb1e717743620389
|
/example-1.py
|
7faefbd992be07984b1a75c586e102b591a4940e
|
[] |
no_license
|
aron-castle/python_learing_test
|
838c60648b4291b8aef4ca2167a52a4b9a23f26e
|
72c54b4be1a9d41e2f9970cc6bb76c7678c6d1aa
|
refs/heads/master
| 2020-07-11T03:42:26.832168
| 2019-08-27T02:47:01
| 2019-08-27T02:47:01
| 204,437,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
print("Type integers,each followed by Enter;or just Enter to finish")
total = 0
count = 0
while True:
line = input("integer:")
if line:
try:
number = int(line)
except ValueError as err:
print(err)
continue
total += number
count += 1
else:
break
if count:
print("count =",count,"total =",total,"mean =",total / count)
|
[
"noreply@github.com"
] |
aron-castle.noreply@github.com
|
3ebcd7bdfa9db76a19a1a1858d267eb560ba1e32
|
4adecd7aafb73c3b01a3b6eb488d1232443dac35
|
/manage.py
|
17d3a8e8c04dadbc9d180f23c9d99df785f59f1b
|
[] |
no_license
|
koleror/model-history
|
17736c49e442ed35d941f6f91001f937e70d79d4
|
44cfe386486ba1e84bc142c12e962f3bbf6f43a7
|
refs/heads/master
| 2021-01-02T23:07:26.674177
| 2015-08-20T19:54:26
| 2015-08-20T19:54:26
| 41,084,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "model_history.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"defrance.hugo@gmail.com"
] |
defrance.hugo@gmail.com
|
26ca04971196860ed518734f0badf8c96387e75f
|
64a919d43541b0114c3158afdb7b2f7fc98bd766
|
/demo/countries/models.py
|
8b8b46312e7c0e242f425f8239f7218f42c3cedf
|
[] |
no_license
|
finebrush/takeatripsDA
|
377c02330f0c29497d51e1e59c81a99606537e4f
|
1b0a92ccbab25427d0274f1f812c91dfb3cc1dfb
|
refs/heads/master
| 2022-12-25T07:22:24.645429
| 2019-12-02T06:44:56
| 2019-12-02T06:44:56
| 220,358,879
| 0
| 0
| null | 2022-12-08T06:50:42
| 2019-11-08T01:08:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,980
|
py
|
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from demo.countries.choices import COUNTRY_TYPES
import uuid
class Country(models.Model):
name = models.CharField(_('Name'), max_length=64)
picture = models.ImageField(_('Picture'), null=True, blank=True)
population = models.IntegerField(_('Population'), null=True)
type = models.CharField(_('Type'), choices=COUNTRY_TYPES, max_length=2)
is_safe = models.BooleanField(_('Is safe'), default=True)
created = models.DateField(_('Created'))
modified = models.DateTimeField(_('Modified'))
time = models.TimeField(_('Time'))
class Meta:
verbose_name = _('Country')
verbose_name_plural = _('Countries')
db_table = 'country'
ordering = ('name',)
def __str__(self):
return self.name
class Person(models.Model):
uuid = models.UUIDField(verbose_name=_('UUID number'), default=uuid.uuid4, editable=False)
nationality = models.ForeignKey(
'countries.Country', verbose_name=_('Nationality'), on_delete=models.CASCADE, null=True, blank=True
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, verbose_name=_('User'), on_delete=models.CASCADE, null=True, blank=True
)
date = models.DateField(_('Birth Date'))
description = models.TextField(_('description'), null=True, blank=True)
google_play = models.URLField(_('Google Play Link'), blank=True, null=True)
spotify = models.URLField(_('Spotify Link'), blank=True, null=True)
itunes = models.URLField(_('Itunes Link'), blank=True, null=True)
video = models.FileField(_('Video'), null=True, blank=True)
class Meta:
verbose_name = _('Person')
verbose_name_plural = _('Persons')
db_table = 'persons'
class ProxyPerson(Person):
class Meta:
proxy = True
verbose_name = _('Proxy Person')
verbose_name_plural = _('Proxy Persons')
|
[
"finebrush.mlab@gmail.com"
] |
finebrush.mlab@gmail.com
|
15f50699320bda493f34379b9861414c24e815ce
|
5f983115d507b2d6dc453e66bcb6d9f36c2b67f3
|
/lambdaScripts/jobIssuer/lambda_function_v2.py
|
20358b11647084966909af45153cb623e3b22fd8
|
[] |
no_license
|
TheMatrix97/CCBDA-Project
|
d51df1e17ad9cd5c72e5310f30dc35df227d5f39
|
3214ce9f149a70cd3f05f1a961dea6fb3f709452
|
refs/heads/main
| 2023-05-25T08:33:28.927858
| 2021-05-29T15:13:31
| 2021-05-29T15:13:31
| 364,298,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,961
|
py
|
import json
import boto3
client = boto3.client('ecs')
def lambda_handler(event, context):
input_data = json.loads(event["body"])
res = {
'statusCode': 404,
'body': json.dumps("Method not implemented")
}
if input_data['command'] == "run":
if not exists_running_task():
id = run_job_issuer_task()
res['statusCode'] = 200
res['body'] = json.dumps({'id': id})
else:
res['statusCode'] = 500
res['body'] = json.dumps("Job issuer is already running")
elif input_data['command'] == "stop":
id = input_data['id']
stop_job_issuer_task(id)
res['statusCode'] = 200
res['body'] = json.dumps("Job issuer stop command issued")
return res
def exists_running_task():
response = client.list_tasks(
desiredStatus='RUNNING',
launchType='FARGATE'
)
response2 = client.list_tasks(
desiredStatus='PENDING',
launchType='FARGATE'
)
    return len(response['taskArns']) != 0 or len(response2['taskArns']) != 0
def run_job_issuer_task():
response = client.run_task(taskDefinition='first-run-task-definition',
networkConfiguration={
'awsvpcConfiguration': {
'subnets': [
'subnet-0c8da3a737e8c2f57',
],
'securityGroups': [
'sg-0474b93c000f411f7',
],
'assignPublicIp': 'ENABLED'
}
}, launchType='FARGATE')
    # NOTE: this returns the ENI attachment id; stop_task() below expects a
    # task id/ARN, so response['tasks'][0]['taskArn'] is likely what is wanted.
    return response['tasks'][0]['attachments'][0]['id']
def stop_job_issuer_task(id):
print(id)
response = client.stop_task(task=id)
print(response)
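# Hedged local smoke-test sketch (hypothetical payload; this issues real
# ECS calls, so valid AWS credentials and region configuration are needed):
if __name__ == '__main__':
    event = {'body': json.dumps({'command': 'run'})}
    print(lambda_handler(event, None))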
|
[
"marc.catrisse@upc.edu"
] |
marc.catrisse@upc.edu
|
c688e3c586687d77aeb0923ad881816739ef16ac
|
d8dfb0a9c6bc69aa814a39339aebe774376f61ba
|
/dyndnsc/updater/noip.py
|
d5cca6a3c6a7bf8e00eda70923dec2bb1ec1269e
|
[
"MIT"
] |
permissive
|
uservidya/python-dyndnsc
|
c2508202ad622ea2d77b3e485392d3a99f8ec2db
|
e73b45a7c3e4b7fa6a755ab7f3b9f2c3797978f8
|
refs/heads/master
| 2020-12-27T02:00:48.301037
| 2013-12-17T08:22:20
| 2013-12-17T08:22:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
# -*- coding: utf-8 -*-
from .base import UpdateProtocol
class UpdateProtocolNoip(UpdateProtocol):
"""Protocol handler for www.noip.com"""
_updateurl = "https://dynupdate.no-ip.com/nic/update"
def __init__(self, options):
self.theip = None
self.hostname = options['hostname']
self.userid = options['userid']
self.password = options['password']
super(UpdateProtocolNoip, self).__init__()
@staticmethod
def configuration_key():
return "noip"
def update(self, ip):
self.theip = ip
return self.protocol()
|
[
"pkremer@spurious.biz"
] |
pkremer@spurious.biz
|
11e8bfaa30fade9e2e8cbdb08801527898eb909f
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2467/60707/313825.py
|
9e7d486b1e47bb4e4c41e21933bd802f525f3d33
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
if __name__ == "__main__":
n = int(input())
for i in range(n):
inp1 = input().split()
idx = int(inp1[2])
list1 = input().split(" ")
for j in range(len(list1)):
list1[j] = int(list1[j])
list2 = input().split(" ")
for k in range(len(list2)):
list2[k] = int(list2[k])
list1.extend(list2)
list1.sort()
print(list1[idx-1])
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
46fea0501fd3da99228b02967ae565548670757e
|
fc81cbe8e184205b4c38c6f945927cb9b5a763a1
|
/20210715_Nadocording.py
|
35fd236cc6879f650a4cd0868785f4db9ac0d9d2
|
[] |
no_license
|
leeyw9804/1day_1commit
|
9107d7ac19b5e565008c810148996209bce75968
|
b3a092767924eebfb878a77b077aaff13bf691cb
|
refs/heads/master
| 2023-06-20T07:48:42.017758
| 2021-07-22T09:15:52
| 2021-07-22T09:15:52
| 385,814,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
import requests
from bs4 import BeautifulSoup
url = "https://comic.naver.com/webtoon/list?titleId=675554"
res = requests.get(url)
soup = BeautifulSoup(res.text,"lxml")
cartoons = soup.find_all("div", attrs={"class":"rating_type"})
# title = cartoons[0].a.get_text()
# link = cartoons[0].a["href"]
# print(title, link)
# for cartoon in cartoons:
# title = cartoon.a.get_text()
# link = cartoon.a["href"]
# print(title, link)
# for cartoon in cartoons:
# print(cartoon.get_text())
total = 0
for cartoon in cartoons:
rate = cartoon.find("strong").get_text()
total += float(rate)
total_rate = total/ len(cartoons)
print(total_rate)
|
[
"leeyw9804@naver.com"
] |
leeyw9804@naver.com
|
a4d1968b323bced963b0652a503afe708cd1172a
|
bf5850321813743c28e30e2c57cd172c7db2b549
|
/point_grouper.py
|
033109e2691255e785fb43eefde5029eb5406e70
|
[] |
no_license
|
rasake/CLiFFpy
|
17bbc18bb5ac62843b514824bb9bea1248a3da11
|
f763b6ec93428e269250a3e77422be0b14af1197
|
refs/heads/master
| 2022-07-09T03:17:34.851304
| 2020-05-14T09:48:30
| 2020-05-14T09:48:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
import sys
import numpy as np
import cl_arithmetic as cla
GROUP_DISTANCE_TOLERANCE = .1
class PointGrouper(object):
def __init__(self, distance=cla.distance_wrap_2d_vec):
self.distance = distance
def group_points(self, points):
group_assignment = []
groups = []
group_index = 0
for point in points:
nearest_group_index = self._determine_nearest_group(point, groups)
if nearest_group_index is None:
# create new group
groups.append([point])
group_assignment.append(group_index)
group_index += 1
else:
group_assignment.append(nearest_group_index)
groups[nearest_group_index].append(point)
return np.array(group_assignment)
def _determine_nearest_group(self, point, groups):
nearest_group_index = None
index = 0
for group in groups:
distance_to_group = self._distance_to_group(point, group)
if distance_to_group < GROUP_DISTANCE_TOLERANCE:
nearest_group_index = index
index += 1
return nearest_group_index
def _distance_to_group(self, point, group):
min_distance = sys.float_info.max
for pt in group:
dist = self.distance(point, pt)
if dist < min_distance:
min_distance = dist
return min_distance
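# Hedged usage sketch (a plain Euclidean metric stands in for the
# cl_arithmetic wrapped distance; the points are illustrative):
if __name__ == '__main__':
    euclid = lambda a, b: np.linalg.norm(np.asarray(a) - np.asarray(b))
    pg = PointGrouper(distance=euclid)
    print(pg.group_points([[0.0, 0.0], [0.02, 0.01], [1.0, 1.0]]))  # [0 0 1]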
|
[
"tomasz.kucner@oru.se"
] |
tomasz.kucner@oru.se
|
35fdf253b0ebed1d4eb6a119aec0214ec76c5669
|
21701849de6a4284f05712e1a16fbaf731b317fb
|
/Eurosat data set creation.py
|
3c6e49ccf0b362b5f0e1eea223a3ab13a42020ee
|
[] |
no_license
|
leslie-toone/EuroSat
|
69a914c8de3810af3251d77ddcc0184b08f96d20
|
d6893978f71a52745aaa09b5383195c193be926f
|
refs/heads/main
| 2023-05-18T21:16:43.574929
| 2021-06-08T13:04:45
| 2021-06-08T13:04:45
| 375,010,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,472
|
py
|
# needed to download a subset of Eurosat data to run on Coursera Project
#found this code at
# https://colab.research.google.com/github/e-chong/Remote-Sensing/blob/master/EuroSAT%20Land%20Cover%20Classification/EuroSAT%20Land%20Use%20and%20Land%20Cover%20Classification%20using%20Deep%20Learning.ipynb
# processing and reading images
import zipfile
import requests
import io
from PIL import Image
from numpy import asarray
from numpy import save
# tensor processing
import numpy as np
from sklearn.utils import shuffle
# plotting
import matplotlib.pyplot as plt
# modeling
from sklearn.model_selection import train_test_split
import keras
# RGB file URL
url = "http://madm.dfki.de/files/sentinel/EuroSAT.zip"
# download zip
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
# get file names
txtfiles = []
for file in z.namelist():
txtfiles.append(file)
# keep only those containing ".jpg"
txtfiles = [x for x in txtfiles if ".jpg" in x]
# read images to numpy array
XImages = np.zeros([len(txtfiles), 64, 64, 3])
i = 0
for pic in txtfiles:
XImages[i] = np.asarray(Image.open(z.open(pic))).astype('uint8')/255
i += 1
del r # clear memory
del z
# Get labels in numpy array as strings
labs = np.empty(len(txtfiles), dtype = 'S20')
i = 0
for label in txtfiles:
labs[i] = label.split('/')[1]
i += 1
# change them to integers in alphabetical order
label_names, yLabels = np.unique(labs, return_inverse=True)
label_Dict = dict(zip(np.unique(yLabels), label_names))
print(label_Dict)
np.array(np.unique(yLabels, return_counts=True)).T
# test that the labels and images read in properly
tmp = 18000
img = XImages[tmp]
print(yLabels[tmp])
print(label_names[yLabels[tmp]])
plt.imshow(img)
plt.show()
# find the smallest class
smallest_class = np.argmin(np.bincount(yLabels))
smallest_class
# number of classes
num_classes = len(np.array(np.unique(yLabels)))
# observations in smallest class
smallest_class_obs = np.where(yLabels == smallest_class)[0]
# Get 2000 observations from each class
indBal = np.empty(0, dtype=int)
for i in range(num_classes):
indTemp = shuffle(np.where(yLabels == i)[0], random_state=42)[0:smallest_class_obs.shape[0]]
indBal = np.concatenate([indBal, indTemp])
# shuffle the balanced index
indBal = shuffle(indBal, random_state = 42)
yBal = yLabels[indBal]
XBal = XImages[indBal]
print(yBal.shape)
print(XBal.shape)
# first line uses balanced labels
# second line uses original imbalanced labels
x_train, x_test, y_train, y_test = train_test_split(XBal, yBal, stratify = yBal, test_size = 0.2, random_state=42)
#x_train, x_test, y_train, y_test = train_test_split(XImages, yLabels, stratify = yLabels, test_size = 0.2, random_state=42)
# test that the labels and images are still matched up properly
tmp = 7000
img = x_train[tmp]
print(label_names[y_train[tmp]])
plt.imshow(img)
plt.show()
# class distribution for yTrain
print(np.array(np.unique(y_train, return_counts=True)).T)
# class distribution for yTest
print(np.array(np.unique(y_test, return_counts=True)).T)
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# save to npy file
save('data/y_train.npy', y_train)
save('data/y_test.npy', y_test)
save('data/x_train.npy', x_train)
save('data/x_test.npy', x_test)
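# Hedged reload sketch for the arrays saved above (same 'data/' paths;
# commented out so it does not shadow the live variables):
# from numpy import load
# x_train = load('data/x_train.npy'); y_train = load('data/y_train.npy')
# print(x_train.shape, y_train.shape)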
|
[
"noreply@github.com"
] |
leslie-toone.noreply@github.com
|
18c82fe5ded0fea0644f6620c0943a1364176281
|
5087b67363fb86005ffbdd6153baa257c15e6c4f
|
/src/ui/templates/home_map_template.py
|
ea5ead0b7e46c80ed445431d1c2290d0c895b98c
|
[] |
no_license
|
juanchitot/domo
|
8d015243da88269bd6d1e81896788e800f5e0c5c
|
82dc543f342a8c50cd59680f3b570c7fa72037ff
|
refs/heads/master
| 2021-01-22T08:32:54.036451
| 2014-06-22T01:05:59
| 2014-06-22T01:05:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,874
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './templates/home_map_template.ui'
#
# Created: Sun Jul 17 16:46:45 2011
# by: PyQt4 UI code generator 4.7.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1138, 835)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setAutoFillBackground(False)
Form.setStyleSheet("background-color:transparent")
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setObjectName("gridLayout")
self.lcdNumber = QtGui.QLCDNumber(Form)
self.lcdNumber.setStyleSheet("border-width:thin;\n"
"border-color:grey;\n"
"border-style:dotted;")
self.lcdNumber.setFrameShape(QtGui.QFrame.StyledPanel)
self.lcdNumber.setFrameShadow(QtGui.QFrame.Raised)
self.lcdNumber.setLineWidth(0)
self.lcdNumber.setSegmentStyle(QtGui.QLCDNumber.Flat)
self.lcdNumber.setObjectName("lcdNumber")
self.gridLayout.addWidget(self.lcdNumber, 2, 9, 1, 1)
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 0, 4, 1, 1)
self.pushButton_5 = QtGui.QPushButton(Form)
self.pushButton_5.setObjectName("pushButton_5")
self.gridLayout.addWidget(self.pushButton_5, 0, 5, 1, 1)
self.frame_5 = QtGui.QFrame(Form)
self.frame_5.setStyleSheet("None")
self.frame_5.setFrameShape(QtGui.QFrame.NoFrame)
self.frame_5.setFrameShadow(QtGui.QFrame.Raised)
self.frame_5.setObjectName("frame_5")
self.gridLayout_2 = QtGui.QGridLayout(self.frame_5)
self.gridLayout_2.setObjectName("gridLayout_2")
self.pushButton_6 = QtGui.QPushButton(self.frame_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_6.sizePolicy().hasHeightForWidth())
self.pushButton_6.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setUnderline(False)
self.pushButton_6.setFont(font)
self.pushButton_6.setMouseTracking(False)
self.pushButton_6.setFocusPolicy(QtCore.Qt.NoFocus)
self.pushButton_6.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.pushButton_6.setToolTip("None")
self.pushButton_6.setStatusTip("None")
self.pushButton_6.setWhatsThis("None")
self.pushButton_6.setAccessibleName("None")
self.pushButton_6.setAccessibleDescription("None")
self.pushButton_6.setText("None")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/ui/images/but_up_domotica.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_6.setIcon(icon)
self.pushButton_6.setIconSize(QtCore.QSize(100, 70))
self.pushButton_6.setShortcut("None")
self.pushButton_6.setCheckable(False)
self.pushButton_6.setDefault(False)
self.pushButton_6.setFlat(True)
self.pushButton_6.setObjectName("pushButton_6")
self.gridLayout_2.addWidget(self.pushButton_6, 0, 0, 1, 1)
self.gridLayout.addWidget(self.frame_5, 1, 9, 1, 1)
self.frame_6 = QtGui.QFrame(Form)
self.frame_6.setFrameShape(QtGui.QFrame.NoFrame)
self.frame_6.setFrameShadow(QtGui.QFrame.Raised)
self.frame_6.setObjectName("frame_6")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.frame_6)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButton_7 = QtGui.QPushButton(self.frame_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_7.sizePolicy().hasHeightForWidth())
self.pushButton_7.setSizePolicy(sizePolicy)
self.pushButton_7.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/ui/images/but_down_domotica.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_7.setIcon(icon1)
self.pushButton_7.setIconSize(QtCore.QSize(100, 70))
self.pushButton_7.setFlat(True)
self.pushButton_7.setObjectName("pushButton_7")
self.horizontalLayout_2.addWidget(self.pushButton_7)
self.gridLayout.addWidget(self.frame_6, 3, 9, 1, 1)
self.pushButton_8 = QtGui.QPushButton(Form)
self.pushButton_8.setObjectName("pushButton_8")
self.gridLayout.addWidget(self.pushButton_8, 5, 4, 1, 1)
self.pushButton_9 = QtGui.QPushButton(Form)
self.pushButton_9.setObjectName("pushButton_9")
self.gridLayout.addWidget(self.pushButton_9, 5, 5, 1, 1)
self.pushButton_10 = QtGui.QPushButton(Form)
self.pushButton_10.setObjectName("pushButton_10")
self.gridLayout.addWidget(self.pushButton_10, 5, 6, 1, 1)
self.pushButton_11 = QtGui.QPushButton(Form)
self.pushButton_11.setObjectName("pushButton_11")
self.gridLayout.addWidget(self.pushButton_11, 5, 7, 1, 1)
self.pushButton_12 = QtGui.QPushButton(Form)
self.pushButton_12.setObjectName("pushButton_12")
self.gridLayout.addWidget(self.pushButton_12, 5, 8, 1, 1)
self.time_lcd = QtGui.QLCDNumber(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.time_lcd.sizePolicy().hasHeightForWidth())
self.time_lcd.setSizePolicy(sizePolicy)
self.time_lcd.setFrameShape(QtGui.QFrame.StyledPanel)
self.time_lcd.setFrameShadow(QtGui.QFrame.Raised)
self.time_lcd.setLineWidth(1)
self.time_lcd.setMidLineWidth(1)
self.time_lcd.setNumDigits(10)
self.time_lcd.setSegmentStyle(QtGui.QLCDNumber.Flat)
self.time_lcd.setObjectName("time_lcd")
self.gridLayout.addWidget(self.time_lcd, 5, 9, 1, 1)
self.graphicsView = QtGui.QGraphicsView(Form)
self.graphicsView.setMinimumSize(QtCore.QSize(800, 600))
self.graphicsView.setObjectName("graphicsView")
self.gridLayout.addWidget(self.graphicsView, 1, 4, 4, 5)
self.label_2 = QtGui.QLabel(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 5, 1, 1, 1)
self.map_zoom = QtGui.QSlider(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.map_zoom.sizePolicy().hasHeightForWidth())
self.map_zoom.setSizePolicy(sizePolicy)
self.map_zoom.setMinimum(50)
self.map_zoom.setMaximum(150)
self.map_zoom.setProperty("value", 100)
self.map_zoom.setOrientation(QtCore.Qt.Horizontal)
self.map_zoom.setInvertedAppearance(False)
self.map_zoom.setInvertedControls(False)
self.map_zoom.setObjectName("map_zoom")
self.gridLayout.addWidget(self.map_zoom, 5, 2, 1, 1)
self.level_combo = QtGui.QComboBox(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.level_combo.sizePolicy().hasHeightForWidth())
self.level_combo.setSizePolicy(sizePolicy)
self.level_combo.setObjectName("level_combo")
self.gridLayout.addWidget(self.level_combo, 0, 2, 1, 2)
self.label = QtGui.QLabel(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setWeight(50)
font.setBold(False)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 1, 1, 1)
self.zoom_lab = QtGui.QLabel(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.zoom_lab.sizePolicy().hasHeightForWidth())
self.zoom_lab.setSizePolicy(sizePolicy)
self.zoom_lab.setObjectName("zoom_lab")
self.gridLayout.addWidget(self.zoom_lab, 5, 3, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setStyleSheet(QtGui.QApplication.translate("Form", "font: 75 9pt \"Sans Serif\";\n"
"background-color:transparent;\n"
"color: rgb(0, 0, 255);", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("Form", "Luces", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_5.setStyleSheet(QtGui.QApplication.translate("Form", "font: 75 9pt \"Sans Serif\";\n"
"background-color:transparent;\n"
"color: rgb(0, 0, 255);", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_5.setText(QtGui.QApplication.translate("Form", "Temperatura", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_6.setStyleSheet(QtGui.QApplication.translate("Form", "background-color: transparent;", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_7.setStyleSheet(QtGui.QApplication.translate("Form", "background-color:transparent;", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_8.setText(QtGui.QApplication.translate("Form", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_9.setText(QtGui.QApplication.translate("Form", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_10.setText(QtGui.QApplication.translate("Form", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_11.setText(QtGui.QApplication.translate("Form", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_12.setText(QtGui.QApplication.translate("Form", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.time_lcd.setStyleSheet(QtGui.QApplication.translate("Form", "border-width:thin;\n"
"color: rgb(0, 0, 0);\n"
"border-color:grey;\n"
"border-style:dotted", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Form", "Zoom", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "Nivel", None, QtGui.QApplication.UnicodeUTF8))
self.zoom_lab.setText(QtGui.QApplication.translate("Form", "100%", None, QtGui.QApplication.UnicodeUTF8))
import resources_rc
|
[
"juanchitot@gmail.com"
] |
juanchitot@gmail.com
|
4791043d856e97b2b100ec57dea56971d0aeed70
|
d88b70150c2b4f840b5d240fc52cf5fdc320fbba
|
/snewpdag/plugins/renderers/TimeProfile.py
|
2d61277fd121152be0696c77272baa6bd1bf53af
|
[
"BSD-3-Clause"
] |
permissive
|
woonsinglau/snewpdag
|
b091fd3a1139f3d36e2a7306b7cdf58f6bdc4c02
|
6ea4795828b03d83b7756e37c789c0997b46b17a
|
refs/heads/master
| 2023-07-10T16:41:32.043099
| 2021-08-23T19:20:13
| 2021-08-23T19:20:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
"""
Time profile renderer.
Configuration options:
in_field: optional, name of dictionary of input data
(otherwise look in payload dictionary itself)
in_xfield: name of data field for x values
in_yfield: name of data field for y values
title: profile title (top of plot)
xlabel: x axis label
ylabel: y axis label
filename: output filename, with fields
{0} renderer name
{1} count index, starting from 0
{2} burst_id from update data (default 0 if no such field)
{3} source name (which this renderer observes)
Plots y vs x.
"""
import matplotlib.pyplot as plt
import numpy as np
from snewpdag.dag import Node
class TimeProfile(Node):
def __init__(self, in_xfield, in_yfield, title, xlabel, ylabel, filename, **kwargs):
self.xfield = in_xfield
self.yfield = in_yfield
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
self.filename = filename # include pattern to include index
self.in_field = kwargs.pop('in_field', None)
self.count = 0 # number of histograms made
super().__init__(**kwargs)
def render(self, burst_id, source, x, y, subtitle):
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_xlabel(self.xlabel)
ax.set_ylabel(self.ylabel)
ax.set_title(self.title + '(' + subtitle + ')')
fig.tight_layout()
fname = self.filename.format(self.name, self.count, burst_id, source)
plt.savefig(fname)
self.count += 1
def alert(self, data):
burst_id = data.get('burst_id', 0)
d = data[self.in_field] if self.in_field else data
nm = d['name']
if 'comment' in d:
nm += ": " + d['comment']
self.render(burst_id, self.last_source,
d[self.xfield], d[self.yfield], nm)
return True
def report(self, data):
return self.alert(data)
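# Hedged configuration sketch matching the docstring's filename fields
# (renderer name, count, burst_id, source); all values are illustrative:
# node = TimeProfile(in_xfield='t', in_yfield='n', title='Event rate',
#                    xlabel='t [s]', ylabel='events/bin',
#                    filename='{0}-{1}-burst{2}-{3}.png', name='prof0')
# The first alert from source 'det1' would then save 'prof0-0-burst0-det1.png'.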
|
[
"jeff.tseng@physics.ox.ac.uk"
] |
jeff.tseng@physics.ox.ac.uk
|
8b94756c525f05c760d71a62298896c0205a615d
|
973f552142a150f24d8602cf91e45d5c764e1ddc
|
/wallpaper.py
|
302b3f786853b369845406938a8a274679511cc4
|
[] |
no_license
|
fjcarnevale/redditwalls
|
534d5f7267b8f37f34b97089f3ef1849a69c619f
|
791eeb817fbb22123d0f121f631a3cc8d41954b5
|
refs/heads/master
| 2021-05-15T01:47:52.724836
| 2017-02-22T02:27:39
| 2017-02-22T02:27:39
| 19,913,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,245
|
py
|
from google.appengine.ext import ndb
from google.appengine.api import images
import logging
import urllib
import urllib2
from reddit import RedditPost
from Imgur import Imgur
class Wallpaper(ndb.Model):
name = ndb.StringProperty()
height = ndb.IntegerProperty()
width = ndb.IntegerProperty()
reddit_link = ndb.StringProperty()
image_link = ndb.StringProperty()
@staticmethod
def get_by_id(wall_id):
return wallpaper_key(wall_id).get()
@staticmethod
def from_post(post):
url = post.link_url
if Imgur.is_imgur_link(url):
if Imgur.is_image(url):
# If the URL already has the image extension, just use the URL
# Otherwise, look it up
link = url
if not any(ext in url for ext in Imgur.extensions):
info = Imgur.Image.from_url(url)
if not info:
return None
link = info.link
w = Wallpaper(key=wallpaper_key(post.name))
w.name = post.name
w.reddit_link = post.post_url
w.image_link = link
w.put()
return w
else:
# TODO handle albums
      # probably just grab the cover photo; maybe add a class for albums
# or convert wallpaper class to Post class as a catch-all for reddit image posts
pass
else:
pass # don't upload new stuff for now
# Try and upload from the url
#info = Imgur.upload_image_from_url(url)
#if info is not None:
# logging.info('Uploaded image id:%s\tdeletehash:%s' % (info.img_id, info.deletehash))
# w = Wallpaper(key=wallpaper_key(post.name))
# w.name = post.name
# w.reddit_link = post.post_url
# w.image_link = info.link
# w.put()
# return w
return None
def wallpaper_key(name):
"""Generates datastore key for name"""
return ndb.Key('Wallpaper',name)
def create_wallpapers(posts):
"""Creates and commits wallpapers from the given reddit posts"""
wallpapers = []
for post in posts:
# see if this wallpaper exists
wallpaper = Wallpaper.get_by_id(post.name)
if not wallpaper:
wallpaper = Wallpaper.from_post(post)
if wallpaper is not None:
wallpapers.append(wallpaper)
return wallpapers
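# Hypothetical usage sketch (how the RedditPost objects are obtained depends
# on the reddit module and is not shown here):
#   posts = fetch_top_posts()            # some iterable of RedditPost objects
#   walls = create_wallpapers(posts)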
|
[
"fjcarnevale@gmail.com"
] |
fjcarnevale@gmail.com
|
ba30ef35957bc511059b6499e195315c68c62807
|
9d12082ad67b4f7d8088ea845a4266a3b3a85313
|
/7OOP/useslots.py
|
86108bc0548bfd0158dbb975a80d6ba8441121b5
|
[] |
no_license
|
bberzhou/LearningPython
|
e6f7ee9d44dae3547008aae33874639970a269a3
|
aee82c60696a0ef93a351c7a9cf899387eeb9ce0
|
refs/heads/master
| 2023-05-14T03:38:44.251665
| 2021-06-06T13:49:59
| 2021-06-06T13:49:59
| 319,500,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,473
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from types import MethodType
"""
正常情况下,当我们定义了一个class,创建了一个class的实例后,
我们可以给该实例绑定任何属性和方法,这就是动态语言的灵活性。先定义class
s.set_age = MethodType(set_age, s) # 给实例绑定一个方法
但是,如果我们想要限制实例的属性怎么办?比如,只允许对Student实例添加name和age属性。
为了达到限制的目的,Python允许在定义class的时候,定义一个特殊的__slots__变量,来限制该class实例能添加的属性
"""
class Student(object):
__slots__ = ('name', 'age') # 用tuple定义允许绑定的属性名称
# 尝试给实例绑定一个属性
s = Student()
s.name = 'Michael' # 动态给实例绑定一个属性
print(s.name) # Michael
# 还可以尝试给实例绑定一个方法
def set_age(self, age): # 定义一个函数作为实例方法
self.age = age
s.set_age = MethodType(set_age, s) # 给实例绑定一个方法
s.set_age(25)
print(s.age) # 25
# 但是,给一个实例绑定的方法,对另一个实例是不起作用的
s2 = Student() # 创建新的实例
# s2.set_age(25) # AttributeError: 'Student' object has no attribute 'set_age'
# 如果要给所有实例都绑定方法,可以给class绑定方法
def set_score(self, score):
self.score = score
Student.set_score = set_score
# 给class绑定方法后,所有实例均可调用
s.set_score(100)
print(s.score) # 100
s2.set_score(80)
print(s2.score) # 80
# 通常情况下,上面的set_score方法可以直接定义在class中,但动态绑定允许我们在程序运行的过程中动态给class加上功能,这在静态语言中很难实现。
# __slots__ = ('name', 'age') # 用tuple定义允许绑定的属性名称
s3 = Student() # 创建新的实例
s3.name = 'Michael'
s3.age = 18 # 绑定属性'age'
# s3.score = 99 # AttributeError: 'Student' object has no attribute 'set_age'
# 由于'score'没有被放到__slots__中,所以不能绑定score属性,试图绑定score将得到AttributeError的错误
# 使用__slots__要注意,__slots__定义的属性仅对当前类实例起作用,对继承的子类是不起作用的
class GraduateStudent(Student):
pass
g = GraduateStudent()
g.score = 100
# 除非在子类中也定义__slots__,这样,子类实例允许定义的属性就是自身的__slots__加上父类的__slots__
|
[
"bberzhou@gmail.com"
] |
bberzhou@gmail.com
|
5a0cd00525e3c3cb0b52c1675cad1f2f129425d9
|
8bd69d678c49a2c8948238c5d40b6926e74d1b85
|
/ijosephproject/ijosephproject/wsgi.py
|
1ddbe7a7b2778fe66fc4800b6e6377702ef7e0c4
|
[] |
no_license
|
CarolineMadison/I_Joseph_Capstone_API
|
925a84340d7627ff5b0fd14da3105b9310e13d65
|
4dff9410768d4af001b63987c8a87da4b1f46bef
|
refs/heads/master
| 2023-08-28T23:23:40.783938
| 2020-04-03T03:09:48
| 2020-04-03T03:09:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for ijosephproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ijosephproject.settings')
application = get_wsgi_application()
|
[
"brownleecaroline@gmail.com"
] |
brownleecaroline@gmail.com
|
271feb2b84b4112e4bd627459950a98dc3607a91
|
7ebde4e79f33057df38f22b14cf1932da45884b5
|
/Python/Container With Most Water.py
|
2d17e89d9630eaae4ef79c0404c53e2b7e08d24f
|
[] |
no_license
|
xiaochenai/leetCode
|
1400fae8c3033fee71ba0f7ea36acf6555323403
|
acca8ed2e9628787468eb15b27f4bd552ee2bffd
|
refs/heads/master
| 2021-01-20T00:58:55.956367
| 2014-10-29T02:44:31
| 2014-10-29T02:44:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
# Given n non-negative integers a1, a2, ..., an, where each represents a point at coordinate (i, ai). n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0). Find two lines, which together with x-axis forms a container, such that the container contains the most water.
# Note: You may not slant the container
class Solution:
    # @return an integer
    def maxArea(self, height):
        lenH = len(height)
        if lenH < 2:
            return 0
        i = 0
        end = lenH - 1
        maxV = 0
        while i < end:
            contain = min(height[i], height[end]) * (end - i)
            maxV = max(maxV, contain)
            # Move the pointer at the shorter line inward: the shorter line
            # cannot form a larger container with any closer partner.
            if height[i] <= height[end]:
                i = i + 1
            else:
                end = end - 1
        return maxV
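# Example: for height = [1, 8, 6, 2, 5, 4, 8, 3, 7] the best pair is the 8 at
# index 1 and the 7 at index 8, giving min(8, 7) * (8 - 1) = 49.
#   Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7])  # -> 49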
|
[
"xzl0036@auburn.edu"
] |
xzl0036@auburn.edu
|
42b8973417a853d323cb5d8ef0b0f89525ed9a6d
|
f8981c67954828e4a1a0249fbdcb36d099090cd9
|
/Module6/running_system_commands.py
|
04cf75695cfd3473026925398e6b279ed502b06d
|
[] |
no_license
|
shreyakapadia10/Using-Python-to-Interact-with-the-Operating-System
|
fc67a82bc0950c0d9b2faa39f33c25459f72b505
|
57aef8af9137f6df5bdff76f4d138e527b5b6cbf
|
refs/heads/master
| 2022-12-27T18:39:37.837440
| 2020-10-03T07:04:54
| 2020-10-03T07:04:54
| 300,818,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
import subprocess
subprocess.run(["date"])
print("subprocess.run(['sleep', '2']) will wait for 2 seconds")
subprocess.run(["sleep", "2"])
print("Trying to list a file that doesn't exist using ls")
result = subprocess.run(["ls", "no_such_file.txt"])
print(result.returncode)
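# Optional extension: capture a command's output instead of letting it print
# to the terminal (capture_output requires Python 3.7+).
# result = subprocess.run(["uname", "-a"], capture_output=True)
# print(result.stdout.decode())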
|
[
"shreyakapadia8@gmail.com"
] |
shreyakapadia8@gmail.com
|
295d9752bf723b60685cdbca89a38e56b90d8dc3
|
e59fe240f0359aa32c59b5e9f581db0bfdb315b8
|
/galaxy-dist/lib/galaxy/jobs/runners/cli_shell/rsh.py
|
b0f8f686cedc4a0ec40089a03c1d706686c805a4
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
subway/Galaxy-Distribution
|
dc269a0258471597d483687a0f1dd9e10bd47448
|
d16d6f9b6a8b7f41a218c06539863c8ce4d5a73c
|
refs/heads/master
| 2021-06-30T06:26:55.237251
| 2015-07-04T23:55:51
| 2015-07-04T23:55:51
| 15,899,275
| 1
| 2
| null | 2020-10-07T06:17:26
| 2014-01-14T10:47:28
|
Groff
|
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
"""
Interface for remote shell commands (rsh, rcp) and derivatives that use the same syntax (ssh, scp)
"""
import logging
import os
import subprocess
import tempfile
import time
from galaxy.util.bunch import Bunch
from galaxy.jobs.runners.cli_shell import BaseShellExec
log = logging.getLogger( __name__ )
__all__ = ('RemoteShell', 'SecureShell', 'GlobusSecureShell')
class RemoteShell(BaseShellExec):
def __init__(self, rsh='rsh', rcp='rcp', hostname=None, username=None, **kwargs):
self.rsh = rsh
self.rcp = rcp
self.hostname = hostname
self.username = username
self.sessions = {}
def copy(self, rcp_cmd, files, dest):
pass
def execute(self, cmd, persist=False, timeout=60):
# TODO: implement persistence
if self.username is None:
fullcmd = '%s %s %s' % (self.rsh, self.hostname, cmd)
else:
fullcmd = '%s -l %s %s %s' % (self.rsh, self.username, self.hostname, cmd)
# Read stdout to a tempfile in case it's large (>65K)
outf = tempfile.TemporaryFile()
p = subprocess.Popen(fullcmd, shell=True, stdin=None, stdout=outf, stderr=subprocess.PIPE)
# poll until timeout
        for i in range(timeout // 3):  # integer division; timeout is in seconds
r = p.poll()
if r is not None:
break
time.sleep(3)
else:
pid = int(p.pid)
            for sig in (15, 9):
                try:
                    os.kill(pid, sig)
                    time.sleep(3)
                except OSError:
                    log.warning('Killing pid %s (cmd: "%s") with signal %s failed' % (p.pid, fullcmd, sig))
            return Bunch(stdout='', stderr='Execution timed out', returncode=-1)
outf.seek(0)
return Bunch(stdout=outf.read(), stderr=p.stderr.read(), returncode=p.returncode)
class SecureShell(RemoteShell):
SSH_NEW_KEY_STRING = 'Are you sure you want to continue connecting'
def __init__(self, rsh='ssh', rcp='scp', **kwargs):
rsh += ' -oStrictHostKeyChecking=yes -oConnectTimeout=60'
rcp += ' -oStrictHostKeyChecking=yes -oConnectTimeout=60'
super(SecureShell, self).__init__(rsh=rsh, rcp=rcp, **kwargs)
class GlobusSecureShell(SecureShell):
    def __init__(self, rsh='gsissh', rcp='gsiscp', **kwargs):
        # super(SecureShell, self) here would skip SecureShell.__init__ and
        # drop the StrictHostKeyChecking/ConnectTimeout options it appends
        super(GlobusSecureShell, self).__init__(rsh=rsh, rcp=rcp, **kwargs)
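# A minimal usage sketch (hostname/username/command are placeholders):
#   shell = SecureShell(hostname='cluster.example.org', username='galaxy')
#   result = shell.execute('uptime')
#   if result.returncode == 0:
#       print result.stdout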
|
[
"sabba_88@hotmail.com"
] |
sabba_88@hotmail.com
|
6cb46e4ef46fcf6d1ad70681126112b9aa1a7a2f
|
96c7d259859593efa2db89f7434603f941dcc1d1
|
/main/admin.py
|
23fd591572d9c84f6055b46656185584e2051029
|
[] |
no_license
|
nishadprinja/squawker-django
|
b688387bf466d942f4914c251c0f2f2d20683a40
|
b493223651b9f31f497831bbddaee0b16e356f51
|
refs/heads/master
| 2021-01-11T11:27:45.074632
| 2016-12-07T05:13:57
| 2016-12-07T05:13:57
| 72,485,860
| 0
| 0
| null | 2016-10-31T23:12:55
| 2016-10-31T23:12:55
| null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from django.contrib import admin
from .models import Squawk
class SquawkAdmin(admin.ModelAdmin):
list_display = ('message', 'time')
# Register your models here.
admin.site.register(Squawk, SquawkAdmin)
|
[
"np327@cornell.edu"
] |
np327@cornell.edu
|
8036f2a754eff7c3f8a0f966552344286da13109
|
fb76a350867ce54c238eb308072863ba30ca963f
|
/blogweb/templatetags/__init__.py
|
164a81f67d0807fd8adf841b37d12d59c7ea29a8
|
[] |
no_license
|
Mayankmansha61/blogproject
|
3b241889794e644fc6a57dc20a717742d2c10a39
|
a9582a1bec3f0220197dd83e1e5c7c0d301e9b98
|
refs/heads/master
| 2020-06-23T23:34:38.523475
| 2019-07-25T08:07:06
| 2019-07-25T08:07:06
| 198,785,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
from .query_data import *
|
[
"mayankbhargav1919@gmail.com"
] |
mayankbhargav1919@gmail.com
|
c9c25fcb8aae1505003866b2b770810e6a17a3dc
|
5f0c43ee76bf8465aa66c51183b2d4aae83c8294
|
/cml/models.py
|
dff17ebe32ba20f2470998a9021c33949d085fc5
|
[
"BSD-2-Clause"
] |
permissive
|
mikamiel/django-cml
|
9769012956f34ea6cea5e1aef275ed680d0853a3
|
2f7bd597db937365e2d642f81063cf891e312b0d
|
refs/heads/master
| 2020-08-07T10:22:40.515044
| 2019-10-02T07:42:43
| 2019-10-02T07:42:43
| 213,410,148
| 0
| 0
| null | 2019-10-07T14:46:49
| 2019-10-07T14:46:49
| null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
from __future__ import absolute_import
from django.db import models
from django.conf import settings
class Exchange(models.Model):
class Meta:
verbose_name = 'Exchange log entry'
verbose_name_plural = 'Exchange logs'
    # choices must be an ordered sequence of (value, label) pairs; a set
    # literal has no defined order, so use a tuple
    exchange_type_choices = (
        ('import', 'import'),
        ('export', 'export'),
    )
exchange_type = models.CharField(max_length=50, choices=exchange_type_choices)
timestamp = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
filename = models.CharField(max_length=200)
    @classmethod
    def log(cls, exchange_type, user, filename=u''):
        ex_log = cls(exchange_type=exchange_type, user=user, filename=filename)
        ex_log.save()
        return ex_log
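# Example call (assuming a logged-in user and an uploaded file name):
#   Exchange.log('import', request.user, filename='orders.xml')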
|
[
"artem.merkulov@gmail.com"
] |
artem.merkulov@gmail.com
|
9e883ec19a4d9b1f4eb852f53c30ab1e8894bf01
|
83303b3d43e4f2090e42f337f4aeb535dd1fbc7a
|
/a_power_N.py
|
af0ebf0c6a6a2d97ca502367ee0ed5e3b33018b0
|
[] |
no_license
|
chen2319/python_knight_dialer
|
00630bce7027db7d4ed77924b2895185d9a9f5d0
|
71dc96d4abb5224cc73f41fa49db18e6af88256e
|
refs/heads/master
| 2020-04-02T20:14:14.760950
| 2018-11-20T03:55:10
| 2018-11-20T03:55:10
| 154,762,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
# Problem: compute a to the power n, for non-negative integer n
import time

# Algorithm 1: brute force -- multiply a into the result n times.
# O(n) multiplications (3**400000 takes about 6 seconds on a Mac Pro)
def a_power_n_1(a, n):
    res = 1
    i = 0
    while i < n:
        res = res * a
        i = i + 1
    return res

# Algorithm 2: divide and conquer on the parity of n.
# Even n: f(n) = f(n/2) * f(n/2); odd n: f(n) = f((n-1)/2) * f((n-1)/2) * a.
# Computing the half power once gives O(log n) recursive multiplications
# (3**400000 takes about 0.3 seconds on a Mac Pro).
def a_power_n_2(a, n):
    if n == 0:
        return 1
    if n == 1:
        return a
    if n % 2 == 0:
        half = a_power_n_2(a, n // 2)  # integer division keeps n an int
        return half * half
    else:
        half = a_power_n_2(a, (n - 1) // 2)
        return half * half * a

# Algorithm 3: binary exponentiation -- walk the bits of n and multiply in the
# running square wherever a bit is 1, e.g. 3**9 = 3**1 * 3**8 (9 = 0b1001).
# O(log n) (3**400000 takes about 0.07 seconds on a Mac Pro)
def a_power_n_3(a, n):
    res = 1
    square = a  # a to the power 1
    while n != 0:
        if n & 1 == 1:
            res = square * res
        square = square * square
        n = n >> 1
    return res
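# Quick self-check (cheap; runs at import time): all three implementations
# must agree on a small case, 3**10 == 59049.
assert a_power_n_1(3, 10) == a_power_n_2(3, 10) == a_power_n_3(3, 10) == 59049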
if __name__ == '__main__':
    # time.clock() was removed in Python 3.8; perf_counter() is the replacement
    start1 = time.perf_counter()
    res1 = a_power_n_1(3, 400000)
    end1 = time.perf_counter()
    print('1: Running time: %s Seconds %d' % (end1 - start1, res1))
    start2 = time.perf_counter()
    res2 = a_power_n_2(3, 400000)
    end2 = time.perf_counter()
    print('2: Running time: %s Seconds %d' % (end2 - start2, res2))
    start3 = time.perf_counter()
    res3 = a_power_n_3(3, 400000)
    end3 = time.perf_counter()
    print('3: Running time: %s Seconds %d' % (end3 - start3, res3))
|
[
"yangch3@cisco.com"
] |
yangch3@cisco.com
|
5f417589e0415b83f561b35af56742aa54ca7b5a
|
aa6e885d78fd68f9132bfe373eb58c42117cb11e
|
/MitsuiSumitomo/A.py
|
e0648ad9006215dfc0a3e9d24e0c4eb3301c86eb
|
[] |
no_license
|
Arcprm4/HelloGit
|
a7beb3ab6a3d0b82e7aa23147e86ce2c0eadb5bd
|
84ec5c8ebee2e9d7fe676445775e566bae694e1c
|
refs/heads/master
| 2020-11-30T06:27:05.243440
| 2020-02-01T23:10:42
| 2020-02-01T23:10:42
| 230,331,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
import math
import collections
import itertools
import sys
import bisect
from heapq import heappop,heappush,heapify
sys.setrecursionlimit(10**6)
def MAP(): return list(map(int,input().split()))
def INT(): return int(input())
def FLOAT(): return float(input())
MOD = 10**9+7
m1,d1 = MAP()
m2,d2 = MAP()
if m1!=m2:
print(1)
else:
print(0)
|
[
"te2dshin@gmail.com"
] |
te2dshin@gmail.com
|
e7cba753f66de4c18380a281e36285c32cab048c
|
35235f241d8626db8dc9a8acb47942bfc6130782
|
/main/middlewares.py
|
f2ac9fb94c8d06986d46cb17b799d0f408c593b9
|
[] |
no_license
|
shbviews/RESTOCK-MONITOR
|
b771bddfe9c7d0240296ecb1272f35ccbe1b3db6
|
d02fbdef60e8db0f7cdd8317d2322523675db4a3
|
refs/heads/master
| 2021-04-24T20:54:17.780167
| 2017-11-03T06:54:02
| 2017-11-03T06:54:02
| 117,135,194
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Sneaker Notify
# author - Yu Lin
# https://github.com/yulin12345
# admin@yulin12345.site
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from colorama import Fore, Style
from scrapy import signals
class CrawlerSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
    def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
    def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
    def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
    def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info(Fore.RED + 'Spider opened: %s' % spider.name + Style.RESET_ALL)
|
[
"admin@yulin12345.site"
] |
admin@yulin12345.site
|
e6fc96486cceb79968c2bdc05c4bca8b68bd2196
|
d5e7d991dc853612e74869d81059ab5f162d7f59
|
/gen2-human-pose/main.py
|
638c123245a6b3888ac020ed31d8532b2fe04219
|
[
"MIT"
] |
permissive
|
AMB-technology-playground/depthai-experiments
|
b0ca1d3e7f531db7d51b4edac95d8d5aa5fd5fa3
|
b3f72a2a1dfc27f3c55d987482141db6815c0f0c
|
refs/heads/master
| 2023-09-05T23:00:21.804683
| 2021-10-15T09:42:17
| 2021-10-15T09:42:17
| 415,680,320
| 0
| 0
|
MIT
| 2021-10-10T19:25:12
| 2021-10-10T19:25:11
| null |
UTF-8
|
Python
| false
| false
| 6,923
|
py
|
import argparse
import threading
from pathlib import Path
from depthai_sdk.managers import PipelineManager, NNetManager, BlobManager, PreviewManager
from depthai_sdk import FPSHandler, Previews, getDeviceInfo, downloadYTVideo
from pose import getKeypoints, getValidPairs, getPersonwiseKeypoints
import cv2
import depthai as dai
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('-nd', '--no-debug', action="store_true", help="Prevent debug output")
parser.add_argument('-cam', '--camera', action="store_true", help="Use DepthAI 4K RGB camera for inference (conflicts with -vid)")
parser.add_argument('-vid', '--video', type=str, help="Path to video file to be used for inference (conflicts with -cam)")
args = parser.parse_args()
if not args.camera and not args.video:
raise RuntimeError("No source selected. Please use either \"-cam\" to use RGB camera as a source or \"-vid <path>\" to run on video")
debug = not args.no_debug
device_info = getDeviceInfo()
if args.camera:
    blob_path = "models/human-pose-estimation-0001_openvino_2021.2_6shave.blob"
else:
    blob_path = "models/human-pose-estimation-0001_openvino_2021.2_8shave.blob"
    # Video-only checks; args.video is None in camera mode
    if str(args.video).startswith('https'):
        args.video = downloadYTVideo(str(args.video))
        print("YouTube video downloaded.")
    if not Path(args.video).exists():
        raise ValueError("Path {} does not exist!".format(args.video))
colors = [[0, 100, 255], [0, 100, 255], [0, 255, 255], [0, 100, 255], [0, 255, 255], [0, 100, 255], [0, 255, 0],
[255, 200, 100], [255, 0, 255], [0, 255, 0], [255, 200, 100], [255, 0, 255], [0, 0, 255], [255, 0, 0],
[200, 200, 0], [255, 0, 0], [200, 200, 0], [0, 0, 0]]
POSE_PAIRS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13],
[1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 17], [5, 16]]
running = True
pose = None
keypoints_list = None
detected_keypoints = None
personwiseKeypoints = None
nm = NNetManager(inputSize=(456, 256))
pm = PipelineManager()
pm.setNnManager(nm)
if args.camera:
fps = FPSHandler()
pm.createColorCam(previewSize=(456, 256), xout=True)
else:
cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute()))
fps = FPSHandler(cap)
nn = nm.createNN(pm.pipeline, pm.nodes, source=Previews.color.name if args.camera else "host", blobPath=Path(blob_path), fullFov=True)
pm.addNn(nn=nn)
def decode_thread(in_queue):
global keypoints_list, detected_keypoints, personwiseKeypoints
while running:
try:
raw_in = in_queue.get()
except RuntimeError:
return
fps.tick('nn')
heatmaps = np.array(raw_in.getLayerFp16('Mconv7_stage2_L2')).reshape((1, 19, 32, 57))
pafs = np.array(raw_in.getLayerFp16('Mconv7_stage2_L1')).reshape((1, 38, 32, 57))
heatmaps = heatmaps.astype('float32')
pafs = pafs.astype('float32')
outputs = np.concatenate((heatmaps, pafs), axis=1)
new_keypoints = []
new_keypoints_list = np.zeros((0, 3))
keypoint_id = 0
for row in range(18):
probMap = outputs[0, row, :, :]
probMap = cv2.resize(probMap, nm.inputSize) # (456, 256)
keypoints = getKeypoints(probMap, 0.3)
new_keypoints_list = np.vstack([new_keypoints_list, *keypoints])
keypoints_with_id = []
for i in range(len(keypoints)):
keypoints_with_id.append(keypoints[i] + (keypoint_id,))
keypoint_id += 1
new_keypoints.append(keypoints_with_id)
valid_pairs, invalid_pairs = getValidPairs(outputs, w=nm.inputSize[0], h=nm.inputSize[1], detected_keypoints=new_keypoints)
newPersonwiseKeypoints = getPersonwiseKeypoints(valid_pairs, invalid_pairs, new_keypoints_list)
detected_keypoints, keypoints_list, personwiseKeypoints = (new_keypoints, new_keypoints_list, newPersonwiseKeypoints)
def show(frame):
global keypoints_list, detected_keypoints, personwiseKeypoints, nm
if keypoints_list is not None and detected_keypoints is not None and personwiseKeypoints is not None:
scale_factor = frame.shape[0] / nm.inputSize[1]
offset_w = int(frame.shape[1] - nm.inputSize[0] * scale_factor) // 2
def scale(point):
return int(point[0] * scale_factor) + offset_w, int(point[1] * scale_factor)
for i in range(18):
for j in range(len(detected_keypoints[i])):
cv2.circle(frame, scale(detected_keypoints[i][j][0:2]), 5, colors[i], -1, cv2.LINE_AA)
for i in range(17):
for n in range(len(personwiseKeypoints)):
index = personwiseKeypoints[n][np.array(POSE_PAIRS[i])]
if -1 in index:
continue
B = np.int32(keypoints_list[index.astype(int), 0])
A = np.int32(keypoints_list[index.astype(int), 1])
cv2.line(frame, scale((B[0], A[0])), scale((B[1], A[1])), colors[i], 3, cv2.LINE_AA)
print("Starting pipeline...")
with dai.Device(pm.pipeline, device_info) as device:
if args.camera:
pv = PreviewManager(display=[Previews.color.name], nnSource=Previews.color.name, scale={"color": 0.37}, fpsHandler=fps)
pv.createQueues(device)
nm.createQueues(device)
seq_num = 1
t = threading.Thread(target=decode_thread, args=(nm.outputQueue, ))
t.start()
def should_run():
return cap.isOpened() if args.video else True
try:
while should_run():
fps.nextIter()
if args.camera:
pv.prepareFrames()
frame = pv.get(Previews.color.name)
if debug:
show(frame)
cv2.putText(frame, f"RGB FPS: {round(fps.tickFps(Previews.color.name), 1)}", (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
cv2.putText(frame, f"NN FPS: {round(fps.tickFps('nn'), 1)}", (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
pv.showFrames()
if not args.camera:
read_correctly, frame = cap.read()
if not read_correctly:
break
nm.sendInputFrame(frame)
fps.tick('host')
if debug:
show(frame)
cv2.putText(frame, f"RGB FPS: {round(fps.tickFps('host'), 1)}", (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
cv2.putText(frame, f"NN FPS: {round(fps.tickFps('nn'), 1)}", (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
cv2.imshow("rgb", frame)
key = cv2.waitKey(1)
if key == ord('q'):
break
except KeyboardInterrupt:
pass
running = False
t.join()
fps.printStatus()
if not args.camera:
cap.release()
|
[
"noreply@github.com"
] |
AMB-technology-playground.noreply@github.com
|
3386aa1a676bffd3f77461d5c78939e3d8d3b997
|
74b97e20b06a58ada94278f82ce511403fcddf21
|
/test/scenarios/kusto/output/ext_default_folder/src/kusto/azext_kusto/generated/action.py
|
cd132ab68ad34d41cac5d438142449e4f78d287f
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/autorest.az
|
b171eb495efdb815dc051dface3800c3e5e35b8e
|
64f403a5fe74be28e46a90b6b77f8d2bc9a12baf
|
refs/heads/master
| 2023-09-01T13:22:21.784354
| 2022-11-01T02:34:12
| 2022-11-01T02:34:12
| 226,059,721
| 24
| 17
|
MIT
| 2023-02-08T00:46:07
| 2019-12-05T09:04:00
|
Python
|
UTF-8
|
Python
| false
| false
| 10,230
|
py
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=protected-access
# pylint: disable=no-self-use
import argparse
from collections import defaultdict
from knack.util import CLIError
class AddTrustedExternalTenants(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddTrustedExternalTenants, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string):
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'value':
d['value'] = v[0]
else:
raise CLIError(
'Unsupported Key {} is provided for parameter trusted-external-tenants. All possible keys are:'
' value'.format(k)
)
return d
class AddOptimizedAutoscale(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
namespace.optimized_autoscale = action
def get_action(self, values, option_string):
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'version':
d['version'] = v[0]
elif kl == 'is-enabled':
d['is_enabled'] = v[0]
elif kl == 'minimum':
d['minimum'] = v[0]
elif kl == 'maximum':
d['maximum'] = v[0]
else:
raise CLIError(
'Unsupported Key {} is provided for parameter optimized-autoscale. All possible keys are: version,'
' is-enabled, minimum, maximum'.format(k)
)
return d
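# Illustrative CLI shape for this action (parameter spelling is hypothetical):
#   --optimized-autoscale version=1 is-enabled=true minimum=2 maximum=10
# Each KEY=VALUE token is split once on '=' and folded into the dict above.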
class AddVirtualNetworkConfiguration(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
namespace.virtual_network_configuration = action
def get_action(self, values, option_string):
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'subnet-id':
d['subnet_id'] = v[0]
elif kl == 'engine-public-ip-id':
d['engine_public_ip_id'] = v[0]
elif kl == 'data-management-public-ip-id':
d['data_management_public_ip_id'] = v[0]
else:
raise CLIError(
'Unsupported Key {} is provided for parameter virtual-network-configuration. All possible keys are:'
' subnet-id, engine-public-ip-id, data-management-public-ip-id'.format(k)
)
return d
class AddKeyVaultProperties(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
namespace.key_vault_properties = action
def get_action(self, values, option_string):
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'key-name':
d['key_name'] = v[0]
elif kl == 'key-version':
d['key_version'] = v[0]
elif kl == 'key-vault-uri':
d['key_vault_uri'] = v[0]
else:
raise CLIError(
'Unsupported Key {} is provided for parameter key-vault-properties. All possible keys are:'
' key-name, key-version, key-vault-uri'.format(k)
)
return d
class AddClustersValue(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddClustersValue, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string):
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'language-extension-name':
d['language_extension_name'] = v[0]
else:
raise CLIError(
'Unsupported Key {} is provided for parameter value. All possible keys are: language-extension-name'
.format(k)
)
return d
class AddReadWriteDatabase(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
namespace.read_write_database = action
def get_action(self, values, option_string):
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'soft-delete-period':
d['soft_delete_period'] = v[0]
elif kl == 'hot-cache-period':
d['hot_cache_period'] = v[0]
elif kl == 'location':
d['location'] = v[0]
else:
raise CLIError(
'Unsupported Key {} is provided for parameter read-write-database. All possible keys are:'
' soft-delete-period, hot-cache-period, location'.format(k)
)
d['kind'] = 'ReadWrite'
return d
class AddReadOnlyFollowingDatabase(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
namespace.read_only_following_database = action
def get_action(self, values, option_string):
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'hot-cache-period':
d['hot_cache_period'] = v[0]
elif kl == 'location':
d['location'] = v[0]
else:
raise CLIError(
'Unsupported Key {} is provided for parameter read-only-following-database. All possible keys are:'
' hot-cache-period, location'.format(k)
)
d['kind'] = 'ReadOnlyFollowing'
return d
class AddDatabasesValue(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddDatabasesValue, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string):
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
d = {}
for k in properties:
kl = k.lower()
v = properties[k]
if kl == 'role':
d['role'] = v[0]
elif kl == 'name':
d['name'] = v[0]
elif kl == 'type':
d['type'] = v[0]
elif kl == 'fqn':
d['fqn'] = v[0]
elif kl == 'email':
d['email'] = v[0]
elif kl == 'app-id':
d['app_id'] = v[0]
else:
raise CLIError(
'Unsupported Key {} is provided for parameter value. All possible keys are: role, name, type, fqn,'
' email, app-id'.format(k)
)
return d
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
85de6259bd21760c8e6bdb9417861277e6c5a064
|
bdf7bc061652e3683d0fc9d021ff30357bf2aeb5
|
/spinoffs/oryx/oryx/core/interpreters/inverse/core.py
|
b624cc2f4853479ef46f16d61e7c1851cc3389f8
|
[
"Apache-2.0"
] |
permissive
|
danbaker76/probability
|
1843e8f81d93579ceca25080ff7ac5ce3024482d
|
64203e2668703220168abeae82c3ac1e9b50424c
|
refs/heads/master
| 2022-11-29T06:29:17.878450
| 2020-08-05T05:23:10
| 2020-08-05T05:24:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,316
|
py
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Core logic for the inverse transformation."""
from typing import Iterable
import jax
from jax import abstract_arrays
from jax import core as jax_core
from jax import linear_util as lu
from jax import tree_util
from jax import util as jax_util
from jax.interpreters import partial_eval as pe
from jax.interpreters import pxla
from jax.interpreters import xla
import jax.numpy as np
from oryx.core import primitive
from oryx.core import trace_util
from oryx.core.interpreters import harvest
from oryx.core.interpreters import propagate
from oryx.core.interpreters.inverse import slice as slc
safe_map = jax_core.safe_map
safe_zip = jax_core.safe_zip
Cell = propagate.Cell
NDSlice = slc.NDSlice
Slice = slc.Slice
class InverseAndILDJ(Cell):
"""Propagates inverse value slices and their ILDJs.
An InverseAndILDJ instance keeps track of a set of slices of a value. In the
simplest case, the slice's indices capture the entire value, in which case the
cell is "top". Partial information is represented with slices that do not
  capture the entire value. No information, i.e. "bottom", is represented with a
  cell that has no slices.
  Joining two cells creates a set of slices, and if we detect that the slices
  can be concatenated, we combine them into a single slice. As propagation
  progresses, we hope to accumulate enough slices to concatenate them all into
  this cell's `val`. ILDJs are tracked in the same way, except that we keep
  track of the diagonal of the Jacobian, since split operations may also split
  up the Jacobian.
"""
def __init__(self,
aval: jax_core.AbstractValue,
slices: Iterable[NDSlice]):
super().__init__(aval)
self.slices = frozenset(slices)
def top(self) -> bool:
"""Returns if this cell represents the top of the slice lattice.
An InverseAndILDJ is at the top if its slice represents the entire array.
"""
if len(self.slices) != 1:
return False
if self.aval == jax_core.abstract_unit:
return True
return list(self.slices)[0].value.shape == self.aval.shape
def bottom(self) -> bool:
"""Returns if this cell represents the bottom of the slice lattice.
An InverseAndILDJ is at the bottom if we have no slices.
"""
return len(self.slices) == 0 # pylint: disable=g-explicit-length-test
def __lt__(self, other: 'InverseAndILDJ') -> bool:
if self.top() or other.bottom():
return False
return all(any(s1 < s2 for s2 in other.slices) for s1 in self.slices)
def __eq__(self, other: 'InverseAndILDJ') -> bool:
if self.aval != other.aval:
return False
return self.slices == other.slices
def join(self, other: 'InverseAndILDJ') -> 'InverseAndILDJ':
if other.top():
return other
if other.bottom():
return self
if self == other:
return self
if other < self:
return self
if self < other:
return other
all_slices = sorted(self.slices | other.slices,
key=lambda slc: tuple(s.start for s in slc.slices))
new_slices = set()
active = all_slices.pop(0)
while all_slices:
for dim in range(len(self.aval.shape)):
if active.can_concatenate(all_slices[0], dim):
active = active.concatenate(all_slices.pop(0), dim)
break
else:
new_slices.add(active)
active = all_slices.pop(0)
new_slices.add(active)
return InverseAndILDJ(self.aval, new_slices)
@property
def val(self):
if not self.top():
raise AssertionError('Cannot get value from non-top lattice value: ',
f'{self.aval}, {self.slices}')
return list(self.slices)[0].value
@property
def ildj(self):
if not self.top():
raise AssertionError('Cannot get ildj from non-top lattice value: ',
f'{self.aval}, {self.slices}')
return list(self.slices)[0].ildj
@classmethod
def unknown(cls, aval):
return InverseAndILDJ(aval, [])
@classmethod
def new(cls, val):
val = np.array(val)
aval = jax_core.get_aval(val)
if aval is jax_core.abstract_unit:
return InverseAndILDJ.unknown(aval)
aval = abstract_arrays.raise_to_shaped(aval)
ndslice = NDSlice.new(val, np.zeros_like(val))
return InverseAndILDJ(aval, frozenset([ndslice]))
def flatten(self):
slices = list(sorted(self.slices))
return slices, (self.aval,)
@classmethod
def unflatten(cls, data, slices):
return InverseAndILDJ(data[0], frozenset(slices))
def inverse_and_ildj(f, *trace_args):
"""Inverse and ILDJ function transformation."""
def wrapped(*args, **kwargs):
"""Function wrapper that takes in inverse arguments."""
forward_args = trace_args if len(trace_args) else args
jaxpr, (in_tree, _) = trace_util.stage(f)(*forward_args, **kwargs)
flat_forward_args, _ = tree_util.tree_flatten(forward_args)
flat_args, _ = tree_util.tree_flatten(args)
flat_constcells = safe_map(InverseAndILDJ.new, jaxpr.literals)
flat_forward_avals = [
trace_util.get_shaped_aval(arg)
for arg in flat_forward_args]
flat_incells = [InverseAndILDJ.unknown(aval) for aval in flat_forward_avals]
flat_outcells = safe_map(InverseAndILDJ.new, flat_args)
env = propagate.propagate(InverseAndILDJ, ildj_registry, jaxpr.jaxpr,
flat_constcells, flat_incells, flat_outcells)
flat_incells = [env.read(invar) for invar in jaxpr.jaxpr.invars]
if any(flat_incell.is_unknown() for flat_incell in flat_incells):
raise ValueError('Cannot invert function.')
flat_cells, flat_ildjs = jax_util.unzip2([
(flat_incell.val, flat_incell.ildj) for flat_incell in flat_incells
])
vals = tree_util.tree_unflatten(in_tree, flat_cells)
ildjs = tree_util.tree_unflatten(in_tree, flat_ildjs)
ildj_ = sum(np.sum(i) for i in ildjs)
if len(forward_args) == 1:
vals = vals[0]
return vals, ildj_
return wrapped
def inverse(f, *trace_args):
def wrapped(*args, **kwargs):
return inverse_and_ildj(f, *trace_args)(*args, **kwargs)[0]
return wrapped
def ildj(f, *trace_args):
def wrapped(*args, **kwargs):
return inverse_and_ildj(f, *trace_args)(*args, **kwargs)[1]
return wrapped
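# A small sketch of the transformation (assumes f is bijective in its single
# argument; the example values are illustrative):
#   f = lambda x: np.exp(x)
#   x, ildj_val = inverse_and_ildj(f, 1.)(np.exp(1.))
#   # x recovers ~1.0; the ILDJ is log|d log(y)/dy| at y = e, i.e. -1.0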
def default_rule(prim, invals, outvals, **params):
"""Default inversion rule that only does forward eval."""
if all(outval.bottom() for outval in outvals):
if all(inval.top() for inval in invals):
vals = [inval.val for inval in invals]
ans = prim.bind(*vals, **params)
if not prim.multiple_results:
ans = [ans]
      # Propagate can only invert functions that are constructed
      # autoregressively, and therefore the Jacobians of propagate-invertible
      # functions are lower-triangular. We are therefore safe to assign outvals
      # an ILDJ value of 0: they are part of forward propagation, fill in an
      # off-diagonal entry of the Jacobian, and will not contribute to the
      # log-det Jacobian.
outvals = safe_map(InverseAndILDJ.new, ans)
return invals, outvals, None
if any(outval.bottom() for outval in outvals):
return invals, outvals, None
raise NotImplementedError(f'No registered inverse for `{prim}`.')
class InverseDict(object):
"""Default rules dictionary that uses a default rule for inverse."""
def __init__(self):
self.rules = {}
def __getitem__(self, prim):
if prim not in self.rules:
self[prim] = jax_util.partial(default_rule, prim)
return self.rules[prim]
def __setitem__(self, prim, val):
self.rules[prim] = val
def register_elementwise(prim):
"""Registers an elementwise primitive with ILDJ."""
def make_rule(f):
"""Accepts an inverse function for a primitive."""
def ildj_rule(incells, outcells, **params):
"""General InverseAndILDJ rule for elementwise functions."""
outcell, = outcells
incell, = incells
if incell.is_unknown() and not outcell.is_unknown():
val = outcell.val
f_sum = lambda x: f(x).sum()
ildj_ = outcell.ildj + np.log(jax.grad(f_sum)(val))
ndslice = NDSlice.new(f(val), ildj_)
incells = [InverseAndILDJ(outcell.aval, [ndslice])]
elif outcell.is_unknown() and not incell.is_unknown():
outcells = [InverseAndILDJ.new(prim.bind(incell.val, **params))]
return incells, outcells, None
ildj_registry[prim] = ildj_rule
return make_rule
def register_binary(prim):
"""Registers an binary primitive with ILDJ."""
def make_rule(f_left, f_right):
def ildj_rule(incells, outcells, **params):
outcell, = outcells
left, right = incells
if not outcell.bottom():
val, ildj_ = outcell.val, outcell.ildj
if not left.bottom():
right_val, right_ildj = f_left(left.val, val, ildj_)
ndslice = NDSlice.new(right_val, right_ildj)
incells = [left, InverseAndILDJ(right.aval, [ndslice])]
elif not right.bottom():
left_val, left_ildj = f_right(right.val, val, ildj_)
ndslice = NDSlice.new(left_val, left_ildj)
incells = [InverseAndILDJ(left.aval, [ndslice]), right]
elif (outcell.bottom() and not left.bottom() and
not right.bottom()):
out_val = prim.bind(left.val, right.val, **params)
outcells = [InverseAndILDJ.new(out_val)]
return incells, outcells, None
ildj_registry[prim] = ildj_rule
return make_rule
ildj_registry = InverseDict()
@lu.transformation_with_aux
def flat_propagate(tree, *flat_invals):
invals, outvals = tree_util.tree_unflatten(tree, flat_invals)
subenv = yield ((invals, outvals), {})
subenv_vals, subenv_tree = tree_util.tree_flatten(subenv)
yield subenv_vals, subenv_tree
def call_ildj(prim, incells, outcells, **params):
"""InverseAndILDJ rule for call primitives."""
f, incells = incells[0], incells[1:]
flat_vals, in_tree = tree_util.tree_flatten((incells, outcells))
new_params = dict(params)
if 'donated_invars' in params:
new_params['donated_invars'] = (False,) * len(flat_vals)
f, aux = flat_propagate(f, in_tree)
subenv_vals = prim.bind(f, *flat_vals, **new_params)
subenv_tree = aux()
subenv = tree_util.tree_unflatten(subenv_tree, subenv_vals)
new_incells = [subenv.read(var) for var in subenv.jaxpr.invars]
new_outcells = [subenv.read(var) for var in subenv.jaxpr.outvars]
return new_incells, new_outcells, subenv
ildj_registry[xla.xla_call_p] = jax_util.partial(call_ildj, xla.xla_call_p)
ildj_registry[jax_core.call_p] = jax_util.partial(call_ildj, jax_core.call_p)
ildj_registry[pe.remat_call_p] = jax_util.partial(call_ildj, pe.remat_call_p)
ildj_registry[harvest.nest_p] = jax_util.partial(call_ildj, harvest.nest_p)
def hop_inverse_rule(prim):
ildj_registry[prim] = jax_util.partial(call_ildj, prim)
primitive.register_hop_transformation_rule('inverse', hop_inverse_rule)
def map_ildj(prim, incells, outcells, **params):
"""InverseAndILDJ rule for the map primitives."""
f, incells = incells[0], incells[1:]
def slice_aval(aval):
return abstract_arrays.ShapedArray(aval.shape[1:], aval.dtype,
aval.weak_type)
def add_slice(cell, old_cell):
new_slices = [
NDSlice(ndslice.value, ndslice.ildj, Slice(0, old_cell.aval.shape[0]),
*ndslice.slices) for ndslice in cell.slices
]
return InverseAndILDJ(old_cell.aval, new_slices)
def remove_slice(cell):
new_slices = [
NDSlice(ndslice.value, ndslice.ildj, *ndslice.slices[1:])
for ndslice in cell.slices
]
aval = slice_aval(cell.aval)
return InverseAndILDJ(aval, new_slices)
mapped_incells = safe_map(remove_slice, incells)
mapped_outcells = safe_map(remove_slice, outcells)
flat_vals, in_tree = tree_util.tree_flatten((mapped_incells, mapped_outcells))
f, aux = flat_propagate(f, in_tree)
# Assume all invars as mapped
new_mapped_invars = (True,) * len(flat_vals)
new_params = dict(params, mapped_invars=new_mapped_invars)
subenv_vals = prim.bind(f, *flat_vals, **new_params)
subenv_tree = aux()
subenv = tree_util.tree_unflatten(subenv_tree, subenv_vals)
new_incells = [subenv.read(var) for var in subenv.jaxpr.invars]
new_outcells = [subenv.read(var) for var in subenv.jaxpr.outvars]
new_incells = [add_slice(v, old_v)
for old_v, v in safe_zip(incells, new_incells)]
new_outcells = [add_slice(v, old_v)
for old_v, v in safe_zip(outcells, new_outcells)]
return new_incells, new_outcells, subenv
ildj_registry[pxla.xla_pmap_p] = jax_util.partial(map_ildj, pxla.xla_pmap_p)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
fe7aaf443019bb4a2a55ba525bbd0e0e5ae91a3f
|
3965ebf2125cfe1ee375f9ed6c4be393e780f482
|
/process/CubeConstants.py
|
61b613bc8bcf6a5c97876680c977a4dee4cf1ced
|
[] |
no_license
|
treephones/RubikMosaic
|
37a909b0e50685635d8d6800e735b994c99adf8b
|
989830b97ed4dd77cb9765b789a084f763914675
|
refs/heads/master
| 2023-07-05T16:48:18.270905
| 2021-08-14T21:15:50
| 2021-08-14T21:15:50
| 373,982,965
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
VERTICES = (
(1, -1, -1),
(1, 1, -1),
(-1, 1, -1),
(-1, -1, -1),
(1, -1, 1),
(1, 1, 1),
(-1, -1, 1),
(-1, 1, 1),
)
EDGES = (
(0,1),
(0,3),
(0,4),
(2,1),
(2,3),
(2,7),
(6,3),
(6,4),
(6,7),
(5,1),
(5,4),
(5,7)
)
FACES = (
(0,1,2,3), #RED
(3,2,7,6), #YELLOW
(6,7,5,4), #ORANGE
(4,5,1,0), #WHITE
(1,5,7,2), #BLUE
(4,0,3,6) #GREEN
)
|
[
"moezbajwa@hotmail.com"
] |
moezbajwa@hotmail.com
|
f6710cab1e177e9f9db7f1ca51161835ab62137c
|
bf4b911f97aa4796508bddc1331d0ac8c0ae9dd5
|
/quickreport/gui_utils.py
|
59c7ebd0010bb66861efc54d6b83c159bc09d8c2
|
[
"ISC"
] |
permissive
|
ricpol/quickreport
|
d8b42944c2c3d25c552fbb8d105d8b7fb838718c
|
d59c860f128a723b4b024242bb0bc49c62f8a688
|
refs/heads/master
| 2021-03-13T00:01:57.530891
| 2012-12-04T14:23:34
| 2012-12-04T14:23:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
# -*- coding: utf8 -*-
"""Quickreport
Utilita' per la gui di Quickreport.
==========================================
:version: vedi quickreport.__version.py__
:copyright: Riccardo Polignieri 2012
:license: ISC
"""
import wx
import wx.lib.newevent
def ask_path(parent_window):
dlg = wx.FileDialog(parent_window, message='Save report', style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
return dlg.GetPath()
else:
return None
ParamChangedEvt, EVT_PARAM_CHANGED = wx.lib.newevent.NewCommandEvent()
def post_evt_param_changed(event):
widget = event.GetEventObject()
e = ParamChangedEvt(widget.GetId(), param_name=widget.GetName())
wx.PostEvent(widget, e)
    # event.Skip()  # TODO: is this needed? In practice I never seem to need it...
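# Typical wiring sketch (widget and handler names are illustrative): each input
# widget forwards its native change event through post_evt_param_changed, and
# the enclosing panel listens for the custom event, e.g.
#   self.Bind(wx.EVT_TEXT, post_evt_param_changed)
#   self.Bind(EVT_PARAM_CHANGED, self.on_param_changed)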
|
[
"ric.pol@libero.it"
] |
ric.pol@libero.it
|
a58977ab0eab755a78944597ba8efe93299a9cc7
|
cb47dc98d6789b17398ec12b9c27905a9ab2ff1f
|
/scanner.py
|
77d6df1fc028a71f4246f380187f1b3ee3a28358
|
[] |
no_license
|
nebstarmalala/network_scanner
|
5b0ca1b517d3bab6d0ec097e38b9387e2e6e8dd8
|
cac91d397e0182ec427bec811fd1d1651050e740
|
refs/heads/master
| 2022-12-18T16:10:47.778524
| 2020-08-30T18:57:28
| 2020-08-30T18:57:28
| 291,532,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
#!/usr/bin/env python
import subprocess
def bash(command):
    # check_output returns bytes on Python 3; decode so callers work with str
    return subprocess.check_output(['bash', '-c', command]).decode()
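# Quick illustration of the helper (runs a shell pipeline, returns its output):
#   bash('echo hello').strip()  # -> 'hello'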
def nmap_scan(ip):
print("Scanning TCP ports on " + ip)
print("------------------------------------\n")
    res = bash('nmap -T4 -p1-65535 %s | grep "open"' % ip).splitlines()
ports = []
for port in res:
print(port)
ports.append(port.split("/")[0])
port_list = ",".join(ports)
print("Running Intense scan on open ports...\n")
bash("nmap -T4 -A -sV -p%s -oN output.txt %s" % (port_list, ip))
print("Nmap Intense scan result logged in output.txt")
exit()
ip_string = bash('ifconfig wlan0 | grep "inet "')
ip = ip_string.strip().split(" ")[1]
octets = ".".join(ip.split(".")[:-1])
subnet = octets + ".0/24"
print("\nRunning netdiscover on local subnet: %s " % subnet)
print("----------------------------------------------------\n")
ips = bash('netdiscover -P -r %s | grep "1" | cut -d " " -f2' % subnet).splitlines()
for i in range(0, len(ips)):
ip = ips[i]
print("%s. %s" %(i + 1, ip))
choice = int(input("Enter an option 1 - %s or 0 to exit: " % len(ips)))
if choice != 0:
    nmap_scan(ips[choice - 1])
|
[
"nebstartrizzle@gmail.com"
] |
nebstartrizzle@gmail.com
|
da01e6382b6d4724c849c3608ea60a9335ad79bf
|
49d904eda93c11c4563d6d9b257246d5907b562c
|
/food_platform/migrations/0001_initial.py
|
9a0735fdee6e77b18b1d2428ed5c8f1278c79a48
|
[] |
no_license
|
abrusebas1997/realreal
|
b182586e31051b5a1e8e8f8e7010c98f0b4033d1
|
3f3771e44cd0c6e7150bc986e2b38eff404bff11
|
refs/heads/master
| 2022-11-18T04:49:53.218561
| 2020-07-20T07:50:40
| 2020-07-20T07:50:40
| 278,265,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,804
|
py
|
# Generated by Django 2.2.7 on 2020-07-20 06:56
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('is_foodriver', models.BooleanField(default=False)),
('is_foodonator', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Interested_area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('color', models.CharField(default='#007bff', max_length=7)),
],
),
migrations.CreateModel(
name='Pickup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('interested_area', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pickups', to='food_platform.Interested_area')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pickups', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Foodriver',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('area', models.ManyToManyField(related_name='interested_foodrivers', to='food_platform.Interested_area')),
],
),
migrations.CreateModel(
name='PickupTime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=255, verbose_name='PickupTime')),
('pickup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pickup_times', to='food_platform.Pickup')),
],
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=255, verbose_name='Answer')),
('is_correct', models.BooleanField(default=False, verbose_name='Correct answer')),
('pickup_time', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='food_platform.PickupTime')),
],
),
migrations.CreateModel(
name='TakenPickup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('score', models.FloatField()),
('date', models.DateTimeField(auto_now_add=True)),
('pickup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taken_pickups', to='food_platform.Pickup')),
('foodriver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taken_pickups', to='food_platform.Foodriver')),
],
),
migrations.CreateModel(
name='FoodriverAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='food_platform.Answer')),
('foodriver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pickup_answers', to='food_platform.Foodriver')),
],
),
migrations.AddField(
model_name='foodriver',
name='pickups',
field=models.ManyToManyField(through='food_platform.TakenPickup', to='food_platform.Pickup'),
),
]
|
[
"sebastianabarca@Sebastians-MacBook-Pro.local"
] |
sebastianabarca@Sebastians-MacBook-Pro.local
|
f0843180bc9b65d61f04683f3da57a11775d3631
|
f0b8a36f343ff94b04a8dda3cf1629f9faf11c83
|
/Simple Text classifiers/20Newsgroup dataset based basic DNN Classifiers/20ng_classifier - RNN.py
|
53b9868ad73dbbab96cf2cd30f4990068370fa41
|
[
"MIT"
] |
permissive
|
tejasurya/Text_Classification_using_Neural_Networks
|
0731c82c867b59fb4682a1482b104f1b0f61215c
|
d4852780e6c86843aee768d306d19428c8cb9c7f
|
refs/heads/master
| 2022-04-25T11:01:54.775855
| 2020-04-28T10:09:06
| 2020-04-28T10:09:06
| 259,592,758
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,693
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 15:55:01 2018
@author: HP
"""
import os
import pandas as pd
import nltk
import gensim
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from gensim.models.keyedvectors import KeyedVectors as KV
from numpy import asarray
from numpy import zeros
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten, LSTM ,Dropout,GRU, Bidirectional, SimpleRNN
from keras.layers import Embedding
from collections import defaultdict
from keras.layers import Conv1D, MaxPooling1D
import random
from sklearn.datasets import fetch_20newsgroups
batch_size=32
embedding_size=128
nclass=20
# Convolution
kernel_size = 5
filters1 = 64
filters2 =128
filters3=256
filters4=512
filters5=1024
pool_size = 4
# GRU
gru_output_size = 70
#LSTM
lstm_output_size = 70
trim_len=200
sample_cnt=500
trainer = fetch_20newsgroups(subset='train')
tester = fetch_20newsgroups(subset='test')
#input - output
train_ip=trainer.data
train_op=list(trainer.target)
test_ip=tester.data
test_op=list(tester.target)
ip=train_ip+test_ip
op=train_op+test_op
ip=ip[0:sample_cnt]
for ty in range(len(ip)):
ip[ty]=ip[ty][0:trim_len]
op=op[0:sample_cnt]
len_finder=[]
for dat in ip:
len_finder.append(len(dat))
#Splitting train and test
input_train=[]
input_test=[]
input_valid=[]
j = 0
for zz in ip:
    j = j + 1
    if j % 5 == 0:  # 'is' checks identity, not equality; use == for ints
        input_test.append(zz)
    elif j % 5 == 1:
        input_valid.append(zz)
    else:
        input_train.append(zz)
label_train = []
label_test = []
label_valid = []
j = 0
for zz in op:
    j = j + 1
    if j % 5 == 0:
        label_test.append(zz)
    elif j % 5 == 1:
        label_valid.append(zz)
    else:
        label_train.append(zz)
#one hot encoding
i=0
y_train=np.zeros((len(label_train),max(label_train)+1))
for x in label_train:
y_train[i][x]=1
i=i+1
i=0
y_test=np.zeros((len(label_test),max(label_test)+1))
for x in label_test:
y_test[i][x]=1
i=i+1
i=0
y_valid=np.zeros((len(label_valid),max(label_valid)+1))
for x in label_valid:
y_valid[i][x]=1
i=i+1
t = Tokenizer()
t.fit_on_texts(input_train)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(input_train)
#print(encoded_docs)
# pad all documents to the length of the longest document
max_length = max(len_finder)
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
# load the whole embedding into memory
embeddings_index = dict()
f = open("G:\\NLP\\Dataset\\GloVe\\glove.6B.100d.txt", encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
#print('Loaded %s word vectors.' % len(embeddings_index))
# create a weight matrix for words in training docs
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
#Validating the model
# Reuse the tokenizer fitted on the training docs: a freshly fitted tokenizer
# would produce word indices that do not match the embedding matrix.
vencoded_docs = t.texts_to_sequences(input_valid)
# pad validation documents to the same max_length as training
vpadded_docs = pad_sequences(vencoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
#Testing the model
# again reuse the training tokenizer for consistent word indices
tencoded_docs = t.texts_to_sequences(input_test)
#print(encoded_docs)
# pad documents to the length of the longest document
tpadded_docs = pad_sequences(tencoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
#print(padded_docs)
# define model
model = Sequential()
e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=max_length, trainable=False)
model.add(e)
model.add(SimpleRNN(lstm_output_size,dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(nclass, activation='softmax'))
# compile the model (categorical_crossentropy matches the 20-class softmax output;
# binary_crossentropy would score each output unit as an independent binary decision)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# summarize the model
print(model.summary())
# fit the model
model.fit(padded_docs,y_train, epochs=1, verbose=0, validation_data=(vpadded_docs, y_valid))
# evaluate the model
loss, accuracy = model.evaluate(tpadded_docs, y_test, verbose=0)
print('Accuracy: %f' % (accuracy*100))
|
[
"hltejasurya@hotmail.com"
] |
hltejasurya@hotmail.com
|
cd2cb50e8b49ee90f5cbf9eeb526f2f1166169e7
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/pytest-labs/.venv/lib/python3.6/site-packages/facebook_business/adobjects/adruleevaluationspec.py
|
4480f554fadb01249a0b941d34d8e43f3d4e8247
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class AdRuleEvaluationSpec(
AbstractObject,
):
def __init__(self, api=None):
super(AdRuleEvaluationSpec, self).__init__()
self._isAdRuleEvaluationSpec = True
self._api = api
class Field(AbstractObject.Field):
evaluation_type = 'evaluation_type'
filters = 'filters'
trigger = 'trigger'
class EvaluationType:
schedule = 'SCHEDULE'
trigger = 'TRIGGER'
_field_types = {
'evaluation_type': 'EvaluationType',
'filters': 'list<AdRuleFilters>',
'trigger': 'AdRuleTrigger',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['EvaluationType'] = AdRuleEvaluationSpec.EvaluationType.__dict__.values()
return field_enum_info
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
b5de1597546ebf95d936671f6c05e9fd990fff3f
|
e49edb54a747c33b95108872de2536cce9ee3ec1
|
/src/evaluate/non_rg_metrics.py
|
1216e61dd116063b993a0f848f2ea8345b284651
|
[] |
no_license
|
anusha66/TextGen-Deep-Learning
|
74714f4bd2f0728664863f1f5acc1b8cb5c68856
|
a1cfad2d40d3bd5067508696acffddb74b0d5421
|
refs/heads/master
| 2021-04-05T23:59:33.562388
| 2018-05-27T00:03:53
| 2018-05-27T00:03:53
| 124,617,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,331
|
py
|
import sys
from pyxdameraulevenshtein import normalized_damerau_levenshtein_distance
full_names = ['Atlanta Hawks', 'Boston Celtics', 'Brooklyn Nets', 'Charlotte Hornets',
'Chicago Bulls', 'Cleveland Cavaliers', 'Detroit Pistons', 'Indiana Pacers',
'Miami Heat', 'Milwaukee Bucks', 'New York Knicks', 'Orlando Magic',
'Philadelphia 76ers', 'Toronto Raptors', 'Washington Wizards', 'Dallas Mavericks',
'Denver Nuggets', 'Golden State Warriors', 'Houston Rockets', 'Los Angeles Clippers',
'Los Angeles Lakers', 'Memphis Grizzlies', 'Minnesota Timberwolves', 'New Orleans Pelicans',
'Oklahoma City Thunder', 'Phoenix Suns', 'Portland Trail Blazers', 'Sacramento Kings',
'San Antonio Spurs', 'Utah Jazz']
cities, teams = set(), set()
ec = {} # equivalence classes
for team in full_names:
pieces = team.split()
if len(pieces) == 2:
ec[team] = [pieces[0], pieces[1]]
cities.add(pieces[0])
teams.add(pieces[1])
elif pieces[0] == "Portland": # only 2-word team
ec[team] = [pieces[0], " ".join(pieces[1:])]
cities.add(pieces[0])
teams.add(" ".join(pieces[1:]))
else: # must be a 2-word City
ec[team] = [" ".join(pieces[:2]), pieces[2]]
cities.add(" ".join(pieces[:2]))
teams.add(pieces[2])
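# Illustrative entries produced by the loop above:
# ec['Boston Celtics'] == ['Boston', 'Celtics']
# ec['Portland Trail Blazers'] == ['Portland', 'Trail Blazers']  (the only 2-word team name)
# ec['Golden State Warriors'] == ['Golden State', 'Warriors']  (a 2-word city)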
def same_ent(e1, e2):
if e1 in cities or e1 in teams:
return e1 == e2 or any((e1 in fullname and e2 in fullname for fullname in full_names))
else:
return e1 in e2 or e2 in e1
def trip_match(t1, t2):
return t1[1] == t2[1] and t1[2] == t2[2] and same_ent(t1[0], t2[0])
def dedup_triples(triplist):
"""
this will be inefficient but who cares
"""
dups = set()
for i in range(1, len(triplist)):
for j in range(i):
if trip_match(triplist[i], triplist[j]):
dups.add(i)
break
return [thing for i, thing in enumerate(triplist) if i not in dups]
def get_triples(fi):
all_triples = []
curr = []
with open(fi) as f:
for line in f:
if line.isspace():
all_triples.append(dedup_triples(curr))
curr = []
else:
pieces = line.strip().split('|')
curr.append(tuple(pieces))
if len(curr) > 0:
all_triples.append(dedup_triples(curr))
return all_triples
def calc_precrec(goldfi, predfi):
gold_triples = get_triples(goldfi)
pred_triples = get_triples(predfi)
total_tp, total_predicted, total_gold = 0, 0, 0
assert len(gold_triples) == len(pred_triples)
for i, triplist in enumerate(pred_triples):
tp = sum((1 for j in range(len(triplist))
if any(trip_match(triplist[j], gold_triples[i][k])
for k in range(len(gold_triples[i])))))
total_tp += tp
total_predicted += len(triplist)
total_gold += len(gold_triples[i])
avg_prec = float(total_tp)/total_predicted
avg_rec = float(total_tp)/total_gold
print("totals:", total_tp, total_predicted, total_gold)
print("prec:", avg_prec, "rec:", avg_rec)
return avg_prec, avg_rec
def norm_dld(l1, l2):
ascii_start = 0
assert len(l1) + len(l2) <= 128
# make a string for l1
# all triples are unique...
s1 = ''.join((chr(ascii_start+i) for i in range(len(l1))))
s2 = ''
for j in range(len(l2)):
next_char = chr(ascii_start+len(s1)+j)
for k in range(len(l1)):
if trip_match(l2[j], l1[k]):
next_char = s1[k]
break
s2 += next_char
# return 1- , since this thing gives 0 to perfect matches etc
return 1.0-normalized_damerau_levenshtein_distance(s1, s2)
def calc_dld(goldfi, predfi):
gold_triples = get_triples(goldfi)
pred_triples = get_triples(predfi)
assert len(gold_triples) == len(pred_triples)
total_score = 0
for i, triplist in enumerate(pred_triples):
total_score += norm_dld(triplist, gold_triples[i])
avg_score = float(total_score)/len(pred_triples)
print("avg score:", avg_score)
return avg_score
calc_precrec(sys.argv[1], sys.argv[2])
calc_dld(sys.argv[1], sys.argv[2])
# usage python non_rg_metrics.py gold_tuple_fi pred_tuple_fi
|
[
"ubuntu@ip-172-31-10-194.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-10-194.us-west-2.compute.internal
|
5a0258d2f04e9677afeaca8f0973b94b89eac1e4
|
1063018a00e9aef63402a4e76102c6ab0dce9302
|
/my3_1.py
|
88bd705027147a51c80df1166aef959b71aa5988
|
[] |
no_license
|
seoeugenee/algorithmStudy
|
8b48864633b322786c8346730e29b8bc00127457
|
00e2d8c84097155eb4bd4c236e3afa3e3ac53977
|
refs/heads/main
| 2023-07-17T23:55:37.826323
| 2021-08-31T16:31:48
| 2021-08-31T16:31:48
| 400,388,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
n = 1260
count = 0
money = [500, 100, 50, 10]
for m in money:
count += n // m
n %= m
print(count)
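# Illustrative trace of the greedy loop above:
# 1260 // 500 = 2 (remainder 260), 260 // 100 = 2 (remainder 60),
# 60 // 50 = 1 (remainder 10), 10 // 10 = 1 -> count == 6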
|
[
"noreply@github.com"
] |
seoeugenee.noreply@github.com
|
56d35a738ab699c2cc0e2d1cfdf90e576b4a8f65
|
7507795830d4113bfd63c73fb764bb967edec266
|
/desafio_032.py
|
32431ab67f9b2487c5e3c036d834d184290b1113
|
[] |
no_license
|
OtherU/Python_Cursos_online
|
e631ed99ddb04d0e69465d1c7be3147fa8678209
|
796c044a33a227cdc368c27f8fc984413f075a18
|
refs/heads/master
| 2020-05-18T12:52:32.543779
| 2019-05-01T13:39:48
| 2019-05-01T13:39:48
| 184,421,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
# ------ Modules ------ #
from datetime import date
# ------ Header & Footers ------ #
header = str(' Desafio 032 ')
subfooter = ('-'*68)
footer = ('='*68)
# ------ Header ------ #
print('{:=^68}'.format(header))
# ------ Body ------ #
ano = int(input('Type 0 to check the current year, or type a year: '))
print()
if ano == 0:
ano = date.today().year
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
print('The year {} is a leap year!'.format(ano))
else:
print('The year {} is NOT a leap year!'.format(ano))
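# Quick illustrative checks of the rule above:
# 2000 -> leap (divisible by 400); 1900 -> not leap (divisible by 100 but not 400); 2024 -> leap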
# ------ Footers ------ #
print(subfooter)
print(footer)
|
[
"annuit-coeptis@hotmail.co.jp"
] |
annuit-coeptis@hotmail.co.jp
|
204abf2e1db41b95502b3d8ec44918d588887b80
|
6721542d3cc44c50a5d40e3f3a6ac49b266069d6
|
/Python_algorithms_and_functions/sync_v1.py
|
5ad281b0d568940d4c02c3988a396f0613d1fb4a
|
[] |
no_license
|
LuizaM21/Learn_python
|
c943245b5c2101bf93bb4704dc56c72213b99098
|
e9c6f1213de287b671fd6eff20453843cfcd2786
|
refs/heads/master
| 2021-07-11T19:21:38.613300
| 2020-07-10T19:54:31
| 2020-07-10T19:54:31
| 151,253,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,094
|
py
|
"""Modul de sincronizare a doua fisere."""
from __future__ import print_function
import sys
import os
import time
import shutil
if len(sys.argv) < 3:
print("Nu am primit 2 parametri")
sys.exit(1)
parametru_1 = sys.argv[1]
parametru_2 = sys.argv[2]
# verifcia daca primul parametru este director
if not os.path.isdir(parametru_1):
print("Primul parametru nu este director ")
sys.exit(1)
if not os.path.isdir(parametru_2):
print("Al doilea parametru nu este director ")
sys.exit(1)
if parametru_1 == parametru_2:
print("Nu pot sincroniza acelasi director cu el insusi")
sys.exit(1)
print("Incepem sincronizarea ...")
def get_file(director):
fisiere_director = []
for item_name in os.listdir(director):
full_path = os.path.join(os.path.abspath(director), item_name)
if os.path.isfile(full_path):
fisiere_director.append(item_name)
return fisiere_director
def get_dir(director):
dirs = []
for item_name in os.listdir(director):
full_path = os.path.join(os.path.abspath(director), item_name)
if os.path.isdir(full_path):
dirs.append(item_name)
return dirs
def sincronizeaza_fisiere(sursa, destinatie, prefix=""):
"""Sincronizam directorul sursa cu directorul destinatie."""
print(prefix, "++ ", sursa, " ... ", destinatie)
fisiere_sursa = get_file(sursa)
fisiere_destinatie = get_file(destinatie)
dirs_sursa = get_dir(sursa)
dirs_destinatie = get_dir(destinatie)
print(prefix, "Sursa : ")
for item in fisiere_sursa:
print(prefix, " - {}".format(item))
print(prefix, "Destinatie: ")
for item in fisiere_destinatie:
print(prefix, " - {}".format(item))
print("\n")
# sincronizare I - daca un fisier exista in sursa
# - dar nu exista in destinatie
# ->
# * copie fisierul din sursa in destinatie
for item in fisiere_sursa:
item_to_copy = os.path.join(os.path.join(sursa, item))
dest_item = os.path.join(os.path.join(destinatie, item))
if item not in fisiere_destinatie:
print(prefix, item_to_copy, " - copy - > ", dest_item)
shutil.copy(item_to_copy, dest_item)
else:
# verific daca continutul difera
continut_sursa = open(item_to_copy, "r").read()
continut_destinatie = open(dest_item, "r").read()
if continut_sursa != continut_destinatie:
print(prefix, item_to_copy, " - modify - > ", dest_item)
shutil.copy(item_to_copy, dest_item)
# sincronizare II - daca un fisier exista in destinatie
# - dar nu exista in sursa
# ->
# * sterge fisierul din destinatie
for item in fisiere_destinatie:
if item not in fisiere_sursa:
item_to_remove = os.path.abspath(os.path.join(destinatie, item))
print(prefix, item_to_remove, " - remove - > ")
os.remove(item_to_remove)
# ----- dirs logic
print(prefix, "Dirs Sursa : ")
for item in dirs_sursa:
print(prefix, " - {}".format(item))
print(prefix, "Dirs Destinatie: ")
for item in dirs_destinatie:
print(prefix, " - {}".format(item))
print("\n")
for dir_name in dirs_destinatie:
if dir_name not in dirs_sursa:
item_to_remove = os.path.join(
os.path.abspath(destinatie),
dir_name)
print(prefix, item_to_remove, " - remove dir - >")
shutil.rmtree(item_to_remove)
for item in dirs_sursa:
dir_sursa = os.path.join(os.path.join(sursa, item))
dir_to_create = os.path.join(os.path.join(destinatie, item))
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
sincronizeaza_fisiere(dir_sursa, dir_to_create, prefix=prefix + " ")
while True:
time.sleep(1)
os.system("clear")
print("Start sincronizare ...")
sincronizeaza_fisiere(parametru_1, parametru_2)
|
[
"luiza.mihaiuc@gmail.com"
] |
luiza.mihaiuc@gmail.com
|
35114d1d0d2e97b10c9a8423a1abe59454670407
|
6e331f2713952958669aaeaf95bf653582390b54
|
/.idea/3.py
|
1f4bfb51c3bf243f125498dacd6b23fb69e836e5
|
[] |
no_license
|
XQ96/huawei_online_programming
|
be1576abde9624588a64f382cc8f1fde12d842d5
|
3e0070fc8bfeaa66fcefb5dcb6dab83b0cf6abc2
|
refs/heads/master
| 2020-04-27T20:38:57.382389
| 2019-03-20T11:24:18
| 2019-03-20T11:24:18
| 174,665,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
# -*- coding:utf-8 -*-
# @Author:xuqi
# @time:2019/3/7 10:39
# @File:3.py
'''
Problem description:
Mingming wants to ask some classmates at school to take a survey. To keep the experiment
objective, he first uses a computer to generate N random integers between 1 and 1000
(N <= 1000). For duplicated numbers only one copy is kept and the rest are removed, so
each remaining number corresponds to a different student ID. The numbers are then sorted
in ascending order and the classmates are surveyed in that order. Please help Mingming
finish the "dedup" and "sort" work (one test case may contain several groups of data;
make sure they are all handled correctly).
Input Param
n: the number of random integers
inputArray: an array made up of n random integers
Return Value
OutputArray: the processed random integers
'''
import sys
while True:
try:
n=int(sys.stdin.readline().strip('\n'))
L=[]
for i in range(n):
num=int(sys.stdin.readline().strip('\n'))
L.append(num)
a=list(set(L))
b=sorted(a)
for i in b:  # note: Python can iterate over a list directly, which is very convenient; use range for index-based loops
print(i)
except:
break
|
[
"mf1832199@smail.nju.edu.cn"
] |
mf1832199@smail.nju.edu.cn
|
d981be4bb28ee793707e5fb9eda4573c006d75af
|
8d0b4e03c605f517bd92615975806588d4770034
|
/tracking/twodim/matutil.py
|
0dbfb3e2063ace1e3d159b7e94f1644f14e8252d
|
[] |
no_license
|
ezhou7/CS563
|
0c2b4c4db94de44a50e2444744c175f39019a0ea
|
223cfb5be34d8a8dbf652bb9c9462c34079760a8
|
refs/heads/master
| 2021-08-23T14:06:04.140224
| 2017-12-05T05:35:56
| 2017-12-05T05:35:56
| 107,323,625
| 0
| 0
| null | 2017-10-18T22:37:22
| 2017-10-17T21:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
import numpy as np
from tracking.twodim import bcell
def many_to_many_dists(group1: np.ndarray, group2: np.ndarray) -> np.ndarray:
p1 = group1[:, bcell.BEG_POS_INDEX:bcell.END_POS_INDEX]
p2 = group2[:, bcell.BEG_POS_INDEX:bcell.END_POS_INDEX]
p1 = p1.reshape((p1.shape[0], p1.shape[1], 1))
p2 = p2.reshape((p2.shape[0], p2.shape[1], 1))
p1_repmat = np.tile(p1, (1, 1, p2.shape[0]))
p2_repmat = np.tile(p2, (1, 1, p1.shape[0])).swapaxes(0, 2)
return np.linalg.norm(p1_repmat - p2_repmat, axis=1).astype("float32")
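# Shape trace for the broadcasted computation above (illustrative; assumes the position
# slice holds the 2-D (x, y) columns): for group1 with N rows and group2 with M rows,
# p1_repmat and p2_repmat are both (N, 2, M), so the norm over axis=1 yields an (N, M)
# matrix of pairwise Euclidean distances.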
|
[
"noreply@github.com"
] |
ezhou7.noreply@github.com
|
8be61fd1d4a1401aa590c93db9ba7735e637b88c
|
b09359f45057a91a4f532c5cb5b1cc44bc86a8e1
|
/app.py
|
2746a449b48ced0a42534f949fddc1c895208806
|
[] |
no_license
|
AnitaVaish/cosmosTracker
|
374f081e116aa9e2896b16d7fde23f76c24bcf89
|
109acd614991de801fdf6c21a76d8a391f4f729e
|
refs/heads/main
| 2023-04-22T00:46:19.601138
| 2021-05-15T10:13:34
| 2021-05-15T10:13:34
| 367,356,946
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,805
|
py
|
from config import application, scheduler
from schedule_message import schedule_message
from schedule_report import schedule_report
from schedule_users_reset import schedule_users_reset
import tracker_application
from utils.constant_variables import INITIAL_MESSAGE_HOUR, INITIAL_MESSAGE_MINUTES, REPORT_HOUR, REPORT_MINUTES, \
DAY_OF_WEEKS, USER_RESET_HOUR, USER_RESET_MINUTES
if __name__ == "__main__":
"""
Scheduler to reset all users states
"""
scheduler.add_job(id='schedule_users_reset',
func=schedule_users_reset,
trigger='cron',
day_of_week=DAY_OF_WEEKS,
hour=int(USER_RESET_HOUR),
minute=int(USER_RESET_MINUTES))
"""
Scheduler for the initial (default) message
Method arguments:
id = unique id (string) the scheduler
func = which method should be executed
trigger = how should the scheduler be repeated - "cron" (24 hours)
day_of_week = on which days should the scheduler be executed
hour, minute = exact time of the scheduler
"""
scheduler.add_job(id='schedule_message',
func=schedule_message,
trigger='cron',
day_of_week=DAY_OF_WEEKS,
hour=int(INITIAL_MESSAGE_HOUR),
minute=int(INITIAL_MESSAGE_MINUTES))
"""
Scheduler for the final report message
"""
scheduler.add_job(id='schedule_report',
func=schedule_report,
trigger='cron',
day_of_week=DAY_OF_WEEKS,
hour=int(REPORT_HOUR),
minute=int(REPORT_MINUTES))
application.run(host='0.0.0.0',
port=6000,
debug=False)
|
[
"anita.vaish@cosmosthrace.com"
] |
anita.vaish@cosmosthrace.com
|
d3593d8c300ad70e69bb82d3d61b0b2704d1da09
|
6b687ec14f44d5724f5f58696291dcf8f98d8c55
|
/lesson2/exmpl_for_3.py
|
8ba2987194fa5c769a28a4f857e1ebbde4cd8e9b
|
[] |
no_license
|
SvyatZanozdra/LP_projects
|
b215305199b454d74aa070f0586744b4c124a02c
|
2e648c700d275e77bc7652ed5690c7699d9db837
|
refs/heads/master
| 2020-09-21T10:20:24.526915
| 2019-12-08T09:56:16
| 2019-12-08T09:56:16
| 224,763,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
from exmpl_if_3 import discounted
stock = [
{'name': 'iPhone Xs Plus', 'stock': 24, 'price': 65432.1, 'discount': 25},
{'name': 'Samsung Galaxy S10', 'stock': 8, 'price': 50000.0, 'discount': 10},
{'name': '', 'stock': 18, 'price': 10000.0, 'discount': 10}
]
for phone in stock:
phone['final_price'] = discounted(phone['price'], phone['discount'], name=phone['name'])
print(stock)
|
[
"ZanozdraSV@yandex.ru"
] |
ZanozdraSV@yandex.ru
|
1eafb2a7ad82c3c27f689f825cced52edcbf1a0c
|
46b432cd3557038c454601367b878f889c9b6a8f
|
/naomi/tutorial13/test_hmm_beam.py
|
627af3b1bcdc7893c2fcc0165fd28222731d08bd
|
[] |
no_license
|
tmu-nlp/NLPtutorial2019
|
84ceec06568fd9d899a686658fb8851466133375
|
d77d199c50cd37d70e462209a7bfcd4dee9140a1
|
refs/heads/master
| 2020-05-14T13:34:05.336594
| 2019-09-25T02:25:41
| 2019-09-25T02:25:41
| 181,814,723
| 1
| 0
| null | 2019-08-01T18:53:54
| 2019-04-17T04:04:06
|
Python
|
UTF-8
|
Python
| false
| false
| 5,641
|
py
|
from collections import defaultdict
import numpy as np
# Input
# train-input.txt
# a_X b_Y a_Z
# train-answer.txt
# T <s> X 1.000000
# E X a 0.666667
def train_hmm():
in_path = '../../test/05-train-input.txt'
in_path = '../../data/wiki-en-train.norm_pos'  # overrides the toy test path above with the real data
out_path = 'trained_model.txt'
emission = defaultdict(lambda: 0)
transition = defaultdict(lambda: 0)
context = defaultdict(lambda: 0)
for line in open(in_path, 'r', encoding='utf-8'):
word_tag_list = line.rstrip().split()
# sentence-start symbol
previous = '<s>'
# process one word_tag pair at a time
for word_tag in word_tag_list:
# emission count (current word -> current tag)
emission[word_tag] += 1
word, tag = word_tag.split('_')
# transition count (previous tag -> current tag)
transition[f'{previous} {tag}'] += 1
# count of the previous tag
context[previous] += 1
# save for the next step
previous = tag
# sentence-end symbol
context[previous] += 1
transition[f'{tag} </s>'] += 1
with open(out_path, 'w+', encoding='utf-8') as f:
# compute transition probabilities (previous POS -> current POS)
for (key, value) in transition.items():
[previous, current] = key.split()
print('T {0} {1:.5f}'.format(key, value/context[previous]), file=f)
# compute emission probabilities (POS -> word)
for (key, value) in emission.items():
[word, tag] = key.split('_')
print('E {0} {1} {2:.5f}'.format(tag, word, value/context[tag]), file=f)
def test_hmm_beam():
modelpath = 'trained_model.txt'
prob_e = defaultdict(lambda: 0)
prob_t = defaultdict(lambda: 0)
possible_tags = defaultdict(lambda: 0)
for line in open(modelpath, 'r', encoding='utf-8'):
TE, key1, key2, prob = line.split()
possible_tags[key1] += 1
if TE == 'T':
prob_t[f'{key1} {key2}'] = float(prob)
else:
prob_e[f'{key1} {key2}'] = float(prob)
l1 = 0.9
# vocabulary size, including unknown words
V = 1e6
tags_list = []
testpath = '../../test/05-test-input.txt'
testpath = '../../data/wiki-en-test.norm'  # overrides the toy test path above with the real data
# forward step
for line in open(testpath, 'r', encoding='utf-8'):
best_score = defaultdict(lambda: 0)
best_edge = defaultdict(lambda: 0)
active_tags = defaultdict(lambda: 0)
best_score['0 <s>'] = 0  # start from <s>
best_edge['0 <s>'] = None
active_tags[0] = ['<s>']
words = line.rstrip().split()
for i, word in enumerate(words):
my_best = {}
for prev in active_tags[i]:
for nxt in possible_tags:
if f'{i} {prev}' not in best_score or\
f'{prev} {nxt}' not in prob_t:
continue
# transition probability
Pt = prob_t[f'{prev} {nxt}']
# emission probability, smoothed to handle unknown words
Pe = l1 * prob_e[f'{nxt} {word}'] + (1-l1)/V
score = (best_score[f'{i} {prev}']
- np.log2(Pt)
- np.log2(Pe))
if f'{i+1} {nxt}' in best_score and\
best_score[f'{i+1} {nxt}'] <= score:
continue
best_score[f'{i+1} {nxt}'] = score
best_edge[f'{i+1} {nxt}'] = f'{i} {prev}'
my_best[nxt] = score
sorted_tags = [k for k in sorted(my_best, key=my_best.get, reverse=False)]
active_tags[i+1] = sorted_tags[:3]
# consider the transition to the sentence-end symbol
for tag in possible_tags:
if '{0} {1}'.format((i+1), tag) not in best_score \
or tag + ' </s>' not in prob_t:
continue
# transition probability
Pt = prob_t[tag + ' </s>']
# emission probability, smoothed for unknown words
Pe = l1 * prob_e[tag + ' </s>'] + (1-l1)/V
# compute the score
score = (best_score['{0} {1}'.format(i+1, tag)]
- np.log2(Pt)
- np.log2(Pe))
# check against the best score (smaller is better)
if f'{i+1+1} </s>' in best_score \
and best_score[f'{i+1+1} </s>'] <= score:
continue
# update the best score
best_score[str(i+1+1)+' </s>'] = score
best_edge[str(i+1+1)+' </s>'] = '{0} {1}'.format(i+1, tag)
# backward (backtracking) step
tags = []
next_edge = best_edge[str(i+1+1)+' </s>']
while next_edge != '0 <s>':
# append this edge's POS tag to the output
position, tag = next_edge.split()
tags.append(tag)
next_edge = best_edge[next_edge]
# reverse the order (tags were collected back-to-front)
tags = tags[::-1]
tags_list.append(' '.join(tags))
return tags_list
if __name__ == "__main__":
train_hmm()
tags_list = test_hmm_beam()
with open('tutorial13.txt', 'w+', encoding='utf-8') as fout:
for tags in tags_list:
print(tags, file=fout)
# Accuracy: 90.51% (4130/4563)
# Most common mistakes:
# NNS --> NN 55
# NN --> JJ 29
# NNP --> NN 25
# JJ --> DT 24
# JJ --> NN 15
# VBN --> NN 12
# JJ --> VBN 11
# NN --> IN 10
# NN --> DT 10
# VBG --> NN 9
|
[
"naomi@komachi.live"
] |
naomi@komachi.live
|
d37a223d39efa8a3b2a59efcd47746197fd813f0
|
59754dd50b71346da2b26d77eb5ad33d55ec4cbc
|
/models.py
|
5d529df7f1d044a98f66dcdf3b6b01eec45383d6
|
[] |
no_license
|
AllenCall/ICBC
|
4380d431d40b45b13ef4d5494a4302d7fa9df12f
|
d1ac3037dd352d7cd0b399f0bb868a48d470177e
|
refs/heads/master
| 2020-09-21T09:54:01.795286
| 2019-12-04T01:34:30
| 2019-12-04T01:34:30
| 224,552,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
from exts import db
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer,autoincrement=True,primary_key=True)
email = db.Column(db.String(50))
userName = db.Column(db.String(50))
passWord = db.Column(db.String(10))
balance = db.Column(db.Float,default=0)
article_tag_table = db.Table(
'article_tag',
db.Column('article_id',db.Integer,db.ForeignKey('article.id'),primary_key=True),
db.Column('tag_id',db.Integer,db.ForeignKey('tag.id'),primary_key=True)
)
class Article(db.Model):
__tablename__ = 'article'
id = db.Column(db.Integer,autoincrement=True,primary_key=True)
articleName = db.Column(db.String(50))
author_id = db.Column(db.Integer,db.ForeignKey('user.id'))
author = db.relationship('User',backref = 'articles')
tags = db.relationship('Tag',secondary = article_tag_table,backref = 'articles')
class Tag(db.Model):
__tablename__ = 'tag'
id = db.Column(db.Integer, autoincrement=True,primary_key=True)
tag = db.Column(db.String(12))
if __name__ == '__main__':
pass
|
[
"310315734@qq.com"
] |
310315734@qq.com
|
2f095febfca64956c65edf32419e1d73c16ff423
|
88928147ef247c4112caa08cc4a20d262d614066
|
/src/reportgen/reportgen.py
|
67c891f410ebd1b961b7ca2b9822700dc2b1a116
|
[] |
no_license
|
HSIYJND/TreeCrownDelineation
|
0d5671f7be846b9a7af32b84b03617f921e57335
|
0d628a80fe055556488ac7b119d79c22297782f2
|
refs/heads/master
| 2021-06-22T04:54:44.792478
| 2017-08-27T21:49:51
| 2017-08-27T21:49:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,076
|
py
|
'''
Created on May 1, 2017
@author: arvind
'''
from jinja2 import Environment, FileSystemLoader
import datetime
import numpy as np
from weasyprint import HTML
import cv2
import georasters as gr
from matplotlib import pyplot as plt
import gdal
import shapefile
from osgeo import osr
import pylab as py
from os import listdir
from os.path import isfile, join
from eval_itcd_task import DelineationMetric
from eval_align_task import AlignMetric
from eval_classify_task import ClassifyMetric
class ReportGenerator(object):
'''
classdocs
'''
def __init__(self, config):
'''
Constructor
'''
params = config.template_vars
date = datetime.datetime.now()
params['time_submitted'] = str(date)
self.template_vars = params
pwd = config.datadir
sf = shapefile.Reader(pwd + 'vector/Final Untagged Trees.shp')
shapes = sf.shapes()
sf = shapefile.Reader(pwd + 'vector/Final Tagged Trees.shp')
self.shapes = shapes + sf.shapes()
self.number_of_plots = 42
self.config = config
self.is_debug = False
self.resolution_of_graphs = 100
def generate(self):
report_template = self.config.template_dir
env = Environment(loader=FileSystemLoader(report_template))
template = env.get_template('DSEReportTemplate.html')
self.generate_task_1()
self.generate_task_2()
self.generate_task_3()
date = datetime.datetime.now()
self.template_vars['time_evaluated'] = str(date)
html_out = template.render(self.template_vars)
HTML(string=html_out, base_url=report_template).write_pdf(self.config.outdir + 'report.pdf', zoom=1.0, stylesheets=["/home/arvind/Desktop/style.css"])
def generate_task_3(self):
task3_evaluator = ClassifyMetric(self.config)
self.template_vars['t3_score'] = '%.3f' % (task3_evaluator.evaluate()* 100)
self.template_vars['t3_r1_score'] = '%.3f' % (task3_evaluator.rank_1_acc * 100)
precision_map = task3_evaluator.get_precision_map()
self.plot_and_save(precision_map, #map
'Species', #xlabel
'Precision', #ylabel
'Species Classification Precision', #title
'species_classification_precision.png'); #filename
recall_map = task3_evaluator.get_recall_map()
self.plot_and_save(recall_map, #map
'Species', #xlabel
'Recall', #ylabel
'Species Classification Recall', #title
'species_classification_recall.png'); #filename
self.draw_confusion_matrix_table(task3_evaluator.confusion_matrix, task3_evaluator.species_list)
self.template_vars['confusion_matrix_table'] = task3_evaluator.confusion_matrix
self.template_vars['species_list'] = task3_evaluator.species_list
def intersects(self, r1, r2):
return (r1[0] < r2[2] and r1[2] > r2[0] and r1[1] < r2[3] and r1[3] > r2[1] )
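# Illustrative: rectangles are (xmin, ymin, xmax, ymax) tuples, so
# intersects((0, 0, 2, 2), (1, 1, 3, 3)) -> True and intersects((0, 0, 1, 1), (2, 2, 3, 3)) -> False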
def generate_task_1(self):
pwd = self.config.indir
bpwd = self.config.datadir
task1_evaluator = DelineationMetric()
files = [f for f in listdir(pwd) if isfile(join(pwd, f)) and f.endswith('shp')]
self.number_of_plots = len(files)
for f in files:
sf_pred = shapefile.Reader(pwd+f)
plotno = f.split('_')[1].split('.')[0]
filepath = bpwd + 'raster/chm/OSBS_' + plotno + '_chm.tif'
chmimg = gr.from_file(filepath)
#loading image
filepath = bpwd + 'raster/camera/OSBS_' + plotno + '_camera.tif'
camera_file = gdal.Open(filepath)
b = np.flipud(camera_file.GetRasterBand(1).ReadAsArray(0, 0, camera_file.RasterXSize, camera_file.RasterYSize).astype(np.uint8))
g = np.flipud(camera_file.GetRasterBand(2).ReadAsArray(0, 0, camera_file.RasterXSize, camera_file.RasterYSize).astype(np.uint8))
r = np.flipud(camera_file.GetRasterBand(3).ReadAsArray(0, 0, camera_file.RasterXSize, camera_file.RasterYSize).astype(np.uint8))
img = cv2.merge([r,g,b])
sf = shapefile.Reader(bpwd+'/vector/Final Tagged Trees.shp')
#reading projection extent from chmimg
plot_extent = [chmimg.xmin,chmimg.ymin,chmimg.xmax,chmimg.ymax]
dataset = gdal.Open(filepath)
sr = dataset.GetProjectionRef()
osrobj = osr.SpatialReference()
osrobj.ImportFromWkt(sr)
srs = osr.SpatialReference()
srs.ImportFromWkt('GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]')
ct = osr.CoordinateTransformation( osrobj, srs )
ct1 = osr.CoordinateTransformation( srs, osrobj )
_plot = ct.TransformPoint(plot_extent[0], plot_extent[1])
_plot = _plot[0:2] + ct.TransformPoint(plot_extent[2], plot_extent[3])
_plot = _plot[:-1]
_plot = np.array(_plot)
shp_cont = []
# checks for polygons which intersect with the current region of interest (plot)
# also converts world coordinates to pixel coordinates to have a standard form
# which enables Jaccard computation. Since the transform is affine it shouldn't matter!
for shape in sf.shapes():
if self.intersects(_plot, shape.bbox):
points = []
for p in shape.points:
l = ct1.TransformPoint(p[0], p[1])[:-1] - np.array(plot_extent[:2])
l[0] = (l[0] * img.shape[0])/(plot_extent[2] - plot_extent[0])
l[1] = (l[1] * img.shape[1])/(plot_extent[3] - plot_extent[1])
points.append(np.int32(np.ceil(l)))
shp_cont.append(np.array([points]))
sf = shapefile.Reader(bpwd+'/vector/Final Untagged Trees.shp')
for shape in sf.shapes():
if self.intersects(_plot, shape.bbox):
points = []
for p in shape.points:
l = ct1.TransformPoint(p[0], p[1])[:-1] - np.array(plot_extent[:2])
l[0] = (l[0] * img.shape[0])/(plot_extent[2] - plot_extent[0])
l[1] = (l[1] * img.shape[1])/(plot_extent[3] - plot_extent[1])
points.append(np.int32(np.ceil(l)))
shp_cont.append(np.array([points]))
shp_pred = []
for shape in sf_pred.shapes():
points = []
for p in shape.points:
points.append(np.int32(p))
shp_pred.append(np.array([points]))
_, base, pred = task1_evaluator.calculateHungarianAssignment(plotno, shp_cont, shp_pred)
if self.is_debug and plotno=='006':
img1 = img.copy()
for pair in task1_evaluator.assigmentMap:
cv2.drawContours(img1, [base[pair[0]]], -1, thickness=1, color=[0,255,0])
cv2.drawContours(img1, [pred[pair[1]]], -1, thickness=1, color=[255,0,0])
plt.imshow(img1)
plt.show()
i = 0
bottom5, top5 = task1_evaluator.getTopPolygons()
for entry in top5:
plotno = entry[0][0]
contour = entry[1][1]
self.draw_contour_and_save(plotno, contour, 'top5', i)
i+=1
i = 0
for entry in bottom5:
plotno = entry[0][0]
contour = entry[1][1]
self.draw_contour_and_save(plotno, contour, 'bottom5', i)
i+=1
ind = np.arange(self.number_of_plots)
width = 0.2
p1 = plt.bar(ind, task1_evaluator.plotLevelTruePositives, width, color='g')
p2 = plt.bar(ind, task1_evaluator.plotLevelFalsePositives, width, color='r', bottom=task1_evaluator.plotLevelTruePositives)
p3 = plt.bar(ind, task1_evaluator.plotLevelFalseNegatives, width, color='b', bottom=task1_evaluator.plotLevelFalsePositives)
plt.ylabel('Scores')
plt.title('Plot Level Confusion Matrix')
plt.ylim(1,np.max([np.max(task1_evaluator.plotLevelTruePositives), np.max(task1_evaluator.plotLevelFalsePositives), np.max(task1_evaluator.plotLevelFalseNegatives)]))
plt.legend((p1[0], p2[0], p3[0]), ('TruePositive', 'FalsePositive','FalseNegative'))
py.savefig(self.config.outdir + 'confusionMatrix.png', bbox_inches='tight', dpi=self.resolution_of_graphs)
self.template_vars['t1_score'] = '%.3f' % (task1_evaluator.getFinalJaccardScore())
plt.clf()
task1_evaluator.getHistogramForRecall()
py.savefig(self.config.outdir + 'histogramMatrix.png', bbox_inches='tight', dpi=self.resolution_of_graphs)
tp, fp, fn = task1_evaluator.getConfusionMatrix()
self.template_vars['true_positive'] = tp
self.template_vars['false_positive'] = fp
self.template_vars['true_negative'] = '-'
self.template_vars['false_negative'] = fn
def generate_task_2(self):
task2_evaluator = AlignMetric(self.config)
self.template_vars['t2_score'] = '%.3f' % (task2_evaluator.evaluate()* 100)
count_correct_pred = task2_evaluator.plotwise_accuracy
self.plot_and_save(count_correct_pred, #map
'Plot No.', #xlabel
'Count of Correct Alignment', #ylabel
'Crown Alignment Accuracy', #title
'crown_alignment.jpg'); #filename
def plot_and_save(self, val_map, xlab, ylab, title, filename):
plt.clf()
plt.bar(range(len(val_map)), val_map.values(), align='center')
_, labels = plt.xticks(range(len(val_map)), val_map.keys())
plt.setp(labels, rotation=90)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
py.savefig(self.config.outdir + filename, bbox_inches='tight', dpi=self.resolution_of_graphs)
def draw_contour_and_save(self, plotno, contour, filepfx, count):
filepath = self.config.datadir + 'raster/camera/OSBS_' + plotno + '_camera.tif'
camera_file = gdal.Open(filepath)
col = [0,0,255]
if filepfx.find('top')!=-1:
col = [0, 255, 0]
b = np.flipud(camera_file.GetRasterBand(1).ReadAsArray(0, 0, camera_file.RasterXSize, camera_file.RasterYSize).astype(np.uint8))
g = np.flipud(camera_file.GetRasterBand(2).ReadAsArray(0, 0, camera_file.RasterXSize, camera_file.RasterYSize).astype(np.uint8))
r = np.flipud(camera_file.GetRasterBand(3).ReadAsArray(0, 0, camera_file.RasterXSize, camera_file.RasterYSize).astype(np.uint8))
img = cv2.merge([r,g,b])
cv2.drawContours(img, [contour], -1, color=col, thickness=2)
cv2.imwrite(self.config.outdir + filepfx + '_'+str(count)+'.jpg', img)
def draw_confusion_matrix_table(self, confusion_matrix, species_list):
plt.clf()
_, axs =plt.subplots(1,1)
col_width=.070
axs.axis('tight')
axs.axis('off')
tab = axs.table(cellText=np.int32(confusion_matrix),loc='center')
tab.auto_set_font_size(False)
tab.set_fontsize(10)
tab.scale(1.2, 1.5)
hoffset=-0.07
voffset=1.12
count=0
for s in species_list:
axs.annotate(' '+s , xy=(hoffset+count*col_width,voffset),
xycoords='axes fraction', ha='left', va='bottom',
rotation=90, size=10)
count+=1
py.savefig(self.config.outdir + 'confusion_matrix_table.jpg', bbox_inches='tight', dpi=self.resolution_of_graphs)
|
[
"Nishant Agarwal"
] |
Nishant Agarwal
|
21f0e590ada159ab80fccd55fd6750e45b800aad
|
d75c1f9645c8c80ca33c0c461e3c39ea3ec30b9b
|
/app/recipe/tests/test_tags_api.py
|
d48d403914c6f5977bd13278c1c83f52672fca68
|
[
"MIT"
] |
permissive
|
ahrav/recipe-app-api
|
f47af2c99ced8cf6e9e193744b56557a81922379
|
31c0ff9bda5b12113c78eb314549c2cb0243db25
|
refs/heads/master
| 2020-05-27T05:50:21.674371
| 2019-06-22T13:12:50
| 2019-06-22T13:12:50
| 188,508,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,018
|
py
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test the public available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@tes.com',
'password'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Carnivore')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for authenticated users"""
user2 = get_user_model().objects.create_user(
'other@go.com',
'password22'
)
Tag.objects.create(user=user2, name='Savory')
tag = Tag.objects.create(user=self.user, name='Fried Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_success(self):
"""Test creating a new tag"""
payload = {"name": "Mexican Food"}
res = self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
def test_create_tag_invalid(self):
"""Test creating tag with invalid payload"""
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
"""Test filtering tags by those assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Dinner')
recipe = Recipe.objects.create(
title='Breakfast jam',
time_minutes=4,
price=5,
user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_tags_assigned_unique(self):
"""Test filtering tags by assigned return unique items"""
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Dinner')
recipe1 = Recipe.objects.create(
title='Waffles',
time_minutes=2,
price=.50,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Oatmeal',
time_minutes=2,
price=.75,
user=self.user
)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
|
[
"ahravdutta02@gmail.com"
] |
ahravdutta02@gmail.com
|
efae90dda347c3f09c8f1505cdd2780152074765
|
7a5ddd25569adc8d48fb5abdb5786dafd760e0e2
|
/env/bin/pyrcc5
|
8bb2f6faec39aa12284c097f045778c77c4134d6
|
[] |
no_license
|
roblivesinottawa/MoviesDatabasePyQt
|
07c064be7a0cb187a4e287a61b9e426b81856edb
|
cf10721cee7075b12afe2b77520830382f389dba
|
refs/heads/main
| 2023-05-01T18:39:54.426668
| 2021-05-12T18:31:50
| 2021-05-12T18:31:50
| 366,515,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
#!/Users/macbookpro/Desktop/programming/may(2021)/MovieDatabasePyQt/movies_database_gui/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from PyQt5.pyrcc_main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"tech.rob@icloud.com"
] |
tech.rob@icloud.com
|
|
81fee8688ffc59c38c493d59ce1bb7103d87c201
|
fc7023a9c35ca34e682e9fb1b4d97f4a0a7064d4
|
/demo/classes.py
|
1806e9ffd858257725ec6c748f7d8a8b442c52db
|
[] |
no_license
|
fredcollman/sublime-config
|
22472551806f7f9b9e3cd009cfdced49930878c5
|
bcee840b95e05e37b445724c2e576bae6b6b16e9
|
refs/heads/master
| 2021-01-19T09:02:36.379880
| 2017-08-11T14:12:20
| 2017-08-11T14:12:20
| 87,714,232
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
class Hello:
"""Remember to include docs"""
def __init__(self, arg):
self.arg = arg
class Goodbye:
some_attr = False
def scope(self):
return "this is a method"
def nest_scope(self):
for thing in range(10):
for another in "string":
yield another, thing
print(thing)
yield thing + 1
for another in "string":
print("hello")
yield "char is: {}".format(another)
def flip(func):
def wrapped(*args, **kwargs):
# unpack with * so the positional arguments are actually passed to func in reverse order
return func(*reversed(args), **kwargs)
return wrapped
|
[
"fredcollman@gmail.com"
] |
fredcollman@gmail.com
|
fff80535ba22ef4b7f28c7d8d2d614570d6ad082
|
52c35b0715b216a3bf901d4a468fa74b104953b6
|
/3-绘图函数/ellipse.py
|
a63676e249413ca982a16ca5706a334edcdb2a95
|
[] |
no_license
|
McFlyWYF/opencv-for-python
|
3d05886b4eb02352f9d1c3dd6cf58d18570c9e6c
|
478c103bb422e25547fe0683dfb4b4043cdbbc20
|
refs/heads/master
| 2020-03-27T15:01:21.736794
| 2018-09-29T13:55:18
| 2018-09-29T13:55:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
import cv2
import numpy as np
'''
Ellipse
'''
img = np.zeros((512,512,3),np.uint8)
# center coordinates, lengths of the major and minor axes, and the rotation angle (counter-clockwise)
cv2.ellipse(img, center=(256, 256), axes=(100, 50), angle=0, startAngle=0, endAngle=180, color=255,
thickness=-1)
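# Note: thickness=-1 fills the shape instead of drawing an outline,
# and startAngle=0 / endAngle=180 draws only half of the ellipse.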
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"1650043869@qq.com"
] |
1650043869@qq.com
|
94dedfc1b2dffc2c11e82f14a5410aa0e7d7c4f1
|
2b033667a8b0b97d7080f55575169179e48c8cd8
|
/D01.py
|
c33083251fb32bce8f1d026235ad79e758e3582e
|
[] |
no_license
|
aa033793336/python
|
277c4413b7d0f396a87495464bd6fb1c9f6b50f6
|
30776659c12354de75c02daa6a1db4ebde98eea9
|
refs/heads/main
| 2023-03-07T17:51:23.188021
| 2021-02-19T01:53:04
| 2021-02-19T01:53:04
| 330,838,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
import numpy as np
#D01#1: the integers 0..20
a = np.arange(0,21,1)
print(a)
#D01#2: every second element
b=a[::2]
print(b)
#D01#3: every third element
c=a[::3]
print(c)
|
[
"75462113+aa033793336@users.noreply.github.com"
] |
75462113+aa033793336@users.noreply.github.com
|
dd04c8572915ba1c43e29a3fe90bfbfb03645720
|
144565da9ebb7dc07e781fc09dbb7d83fc434eb4
|
/lintcode algo ladder/608. Two Sum II - Input array is sorted.py
|
e5fe6c00e94890a42634716e88b0943978dbbf39
|
[] |
no_license
|
liulehui/LintcodeSolution
|
853ed81667dfa5aabbb20fd6e677285f90716ef6
|
b9bf9b4192bd2130824193a0088c4f2ab396310f
|
refs/heads/master
| 2020-04-14T00:06:40.704674
| 2019-08-23T06:03:39
| 2019-08-23T06:03:39
| 163,524,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# coding:utf-8
def twoSum(self, nums, target):
# write your code here
# use elif/else so exactly one pointer moves per iteration and the
# equality check always sees a consistent pair of indices
left, right = 0, len(nums) - 1
while left < right:
if nums[left] + nums[right] < target:
left += 1
elif nums[left] + nums[right] > target:
right -= 1
else:
return left + 1, right + 1
return -1
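# Illustrative check (hypothetical call, not part of the LintCode template):
# twoSum(None, [2, 7, 11, 15], 9) -> (1, 2), since nums[0] + nums[1] == 9 and answers are 1-indexed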
|
[
"imliulehui@gmail.com"
] |
imliulehui@gmail.com
|
f057ed39140d159384543a563041daf49702ac65
|
eeacfabfb918c9b0f922a4f6a96e50e63f029fad
|
/search_engine.py
|
419fe701ec44fda3b35011c0be75a83a1e1fdee9
|
[] |
no_license
|
lch743/Python
|
f36af505f24cd88ab9900354d14f6a62f71f108c
|
c5bf64def9703842eefab2423347d16a9ae4478d
|
refs/heads/master
| 2021-01-20T15:44:20.024352
| 2012-12-11T08:07:16
| 2012-12-11T08:07:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,291
|
py
|
#Feeling Lucky
#In Unit 6, we implemented a page ranking algorithm, but didn't finish the final
#step of using it to improve our search results. For this question, you will use
#the page rankings to produce the best output for a given query.
#Define a procedure, lucky_search, that takes as input an index, a ranks
#dictionary (the result of compute_ranks), and a keyword, and returns the one
#URL most likely to be the best site for that keyword. If the keyword does not
#appear in the index, lucky_search should return None.
def lucky_search(index, ranks, keyword):
tmp = 0
result = ""
if keyword in index:
# keep the URL with the highest page rank among the pages containing the keyword
for e in index[keyword]:
if ranks[e] > tmp:
tmp = ranks[e]
result = e
return result
return None
cache = {
'http://udacity.com/cs101x/urank/index.html': """<html>
<body>
<h1>Dave's Cooking Algorithms</h1>
<p>
Here are my favorite recipies:
<ul>
<li> <a href="http://udacity.com/cs101x/urank/hummus.html">Hummus Recipe</a>
<li> <a href="http://udacity.com/cs101x/urank/arsenic.html">World's Best Hummus</a>
<li> <a href="http://udacity.com/cs101x/urank/kathleen.html">Kathleen's Hummus Recipe</a>
</ul>
For more expert opinions, check out the
<a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>
and <a href="http://udacity.com/cs101x/urank/zinc.html">Zinc Chef</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/zinc.html': """<html>
<body>
<h1>The Zinc Chef</h1>
<p>
I learned everything I know from
<a href="http://udacity.com/cs101x/urank/nickel.html">the Nickel Chef</a>.
</p>
<p>
For great hummus, try
<a href="http://udacity.com/cs101x/urank/arsenic.html">this recipe</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/nickel.html': """<html>
<body>
<h1>The Nickel Chef</h1>
<p>
This is the
<a href="http://udacity.com/cs101x/urank/kathleen.html">
best Hummus recipe!
</a>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/kathleen.html': """<html>
<body>
<h1>
Kathleen's Hummus Recipe
</h1>
<p>
<ol>
<li> Open a can of garbonzo beans.
<li> Crush them in a blender.
<li> Add 3 tablesppons of tahini sauce.
<li> Squeeze in one lemon.
<li> Add salt, pepper, and buttercream frosting to taste.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/arsenic.html': """<html>
<body>
<h1>
The Arsenic Chef's World Famous Hummus Recipe
</h1>
<p>
<ol>
<li> Kidnap the <a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>.
<li> Force her to make hummus for you.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/hummus.html': """<html>
<body>
<h1>
Hummus Recipe
</h1>
<p>
<ol>
<li> Go to the store and buy a container of hummus.
<li> Open it.
</ol>
</body>
</html>
""",
}
def get_page(url):
if url in cache:
return cache[url]
return ""
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
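# Illustrative: get_next_target('<a href="http://udacity.com">x</a>')
# returns ('http://udacity.com', 27), where 27 is the index of the closing quote.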
def get_all_links(page):
links = []
while True:
url, endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def union(a, b):
for e in b:
if e not in a:
a.append(e)
def add_page_to_index(index, url, content):
words = content.split()
for word in words:
add_to_index(index, word, url)
def add_to_index(index, keyword, url):
if keyword in index:
index[keyword].append(url)
else:
index[keyword] = [url]
def lookup(index, keyword):
if keyword in index:
return index[keyword]
else:
return None
def crawl_web(seed): # returns index, graph of inlinks
tocrawl = [seed]
crawled = []
graph = {} # <url>, [list of pages it links to]
index = {}
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
content = get_page(page)
add_page_to_index(index, page, content)
outlinks = get_all_links(content)
graph[page] = outlinks
union(tocrawl, outlinks)
crawled.append(page)
return index, graph
def compute_ranks(graph):
d = 0.8 # damping factor
numloops = 10
ranks = {}
npages = len(graph)
for page in graph:
ranks[page] = 1.0 / npages
for i in range(0, numloops):
newranks = {}
for page in graph:
newrank = (1 - d) / npages
for node in graph:
if page in graph[node]:
newrank = newrank + d * (ranks[node] / len(graph[node]))
newranks[page] = newrank
ranks = newranks
return ranks
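# The loop above implements the damped PageRank update:
# newrank(page) = (1 - d)/npages + d * sum(ranks[node] / len(graph[node])
#                                          for every node that links to page)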
#Here's an example of how your procedure should work on the test site:
index, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
ranks = compute_ranks(graph)
print(index['Hummus'])
print(ranks)
print(lucky_search(index, ranks, 'Hummus'))
#>>> http://udacity.com/cs101x/urank/kathleen.html
print(lucky_search(index, ranks, 'the'))
#>>> http://udacity.com/cs101x/urank/nickel.html
print(lucky_search(index, ranks, 'babaganoush'))
#>>> None
|
[
"lch743@gmail.com"
] |
lch743@gmail.com
|
749cd9a3101a7cb68ab10d36816e911dddd50b77
|
3118d4fc5078e96fd71b408f03aa5ed9aecdc2df
|
/common/common_fun.py
|
2cb04b34046c85f4bd3d5f575cfef83c919d80cd
|
[] |
no_license
|
chengming0719/zhanyebao_app002
|
28bcf651f43a2838d6f3c82f89ca76bb8047edb7
|
37883e2643c69b9b06192ad31682fdda93fee665
|
refs/heads/master
| 2023-01-07T11:41:23.489839
| 2020-11-09T07:37:49
| 2020-11-09T07:37:49
| 311,258,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,606
|
py
|
from baseView.baseView import BaseView
from common.desired_caps import appium_desired
from selenium.common.exceptions import NoSuchElementException
import logging, time, csv
from selenium.webdriver.common.by import By
from os import path
class Common(BaseView):
cancelBtn=(By.ID,'android:id/button2')
skipBtn=(By.ID,'com.tal.kaoyan:id/tv_skip')
wemedia_cacel=(By.ID,'com.tal.kaoyan:id/view_wemedia_cacel')
def check_cancelBtn(self):
logging.info('===========check cancelBtn==========')
try:
cancelBtn=self.driver.find_element(*self.cancelBtn)
except NoSuchElementException:
logging.info('no checkBtn')
else:
cancelBtn.click()
def check_skipBtn(self):
logging.info('============check skipBtn==========')
try:
skipBtn=self.driver.find_element(*self.skipBtn)
except NoSuchElementException:
logging.info('no skipBtn')
else:
skipBtn.click()
def get_size(self):
x = self.driver.get_window_size()['width']
y = self.driver.get_window_size()['height']
return x, y
def swipeLeft(self):
l = self.get_size()
x1 = int(l[0]*0.5)
y1 = int(l[1]*0.5)
x2 = int(l[0]*0.1)
self.swipe(x1, y1, x2, y1, 1000)
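# Illustrative (assuming a 1080x1920 screen): get_size() -> (1080, 1920),
# so swipeLeft() swipes from (540, 960) to (108, 960) over 1000 ms.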
def getTime(self):
self.now = time.strftime("%Y-%m-%d %H_%M_%S")
return self.now
def getScreenShot(self, module):
time = self.getTime()
image_file = path.dirname(path.dirname(__file__)) + '/screenshots/%s_%s.png' %(module,time)
logging.info('get %s screenshot' %module)
self.driver.get_screenshot_as_file(image_file)
# check whether an advertising pop-up dialog is present
def check_market_ad(self):
logging.info('=====check_market_ad=====')
try:
element=self.driver.find_element(*self.wemedia_cacel)
except NoSuchElementException:
pass
else:
logging.info('=====click_wemedia_cancel=====')
element.click()
def get_csv_data(self, csv_file, line):
with open(csv_file, 'r', encoding='utf-8-sig') as file:
reader = csv.reader(file)
for index, row in enumerate(reader, 1):
if index == line:
return row
if __name__ == '__main__':
driver = appium_desired()
com = Common(driver)
# com.check_cancelBtn()
# # com.check_skipBtn()
# com.swipeLeft()
# com.getScreenShot('start_APP')
csv_file = '../data/account.csv'
data = com.get_csv_data(csv_file, 1)
print(data)
|
[
"18296158516@163.com"
] |
18296158516@163.com
|
e03af5270cca1a0058b33f9fa5933d1aee3d2b07
|
77e78a55375bc515ed6b2c247d8cf4bec05d3bc8
|
/sloane_amazing_graphs/fly_straight_dammit.py
|
4f5ac9d6e2cc4e84999405af1872cbbff1fd685f
|
[] |
no_license
|
GuidoDipietro/python_art
|
63079df616726fee21ee13cbe501c4b01aa0bd5b
|
5d9be0f1c697922c111eb3417157fb65516bb68c
|
refs/heads/master
| 2022-12-09T03:32:05.037248
| 2020-09-07T14:27:34
| 2020-09-07T14:27:34
| 289,156,751
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# Fly straight, dammit!
# https://oeis.org/A133058
# 31/aug/2020 | Guido Dipietro
import matplotlib.pyplot as plt
# CONSTS #
COLOR = "#b20000"
POINTS = 1200
# SEQUENCE DEFINITION #
def divs(n):
return set([x for x in range(2,n+1) if n%x==0])
def genner(ind, n):
div = divs(n) & divs(ind)
if ind<2: num = 1
else:
num = ind+n+1 if div==set() else n//max(div)
return ind+1, num
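# Illustrative first terms produced by genner (starting from ind=0, n=1): 1, 1, 4, 8, 2, ...
# e.g. genner(2, 1) finds no common divisor >= 2, so num = 2 + 1 + 1 = 4;
# genner(4, 8) finds common divisors {2, 4}, so num = 8 // 4 = 2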
# SEQUENCE GENERATION #
y = []
ind, n = 0, 1
for _ in range(POINTS):
ind, n = genner(ind, n)
y.append(n)
# PLOT #
plt.scatter(range(POINTS), y, s=1, color=COLOR)
plt.show()
|
[
"dipietroguido@gmail.com"
] |
dipietroguido@gmail.com
|
6576f6ade43f4f4218e63728c5722a9762a7b0f2
|
86cb696cc44c45ecd9f707d620e9b92f9eebb91f
|
/main/examples/grid.py
|
f5f63e5b4e6eb30653cb0201c51e3448bc5b90ed
|
[] |
no_license
|
etschgi1/GOL
|
1032c9b9e9f28c3ff6f125f770581a4a77c88670
|
bdbe5fc31066195cab356f88e1f158193eaa6c62
|
refs/heads/master
| 2023-02-09T01:53:30.586337
| 2021-01-05T21:02:07
| 2021-01-05T21:02:07
| 326,028,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from tkinter import *
root = Tk() # window initialises
test_label = Label(root, text="Hello World!").grid(
row=0, column=0) # create a widget, also valid bc obj oriented
test_label2 = Label(root, text="Test Text") # create a widget
# use grids
# same as row = 1 col = 5 only relative position
test_label2.grid(row=1, column=5) # or in 2 steps
root.mainloop()
|
[
"elias.wachmann@gmail.com"
] |
elias.wachmann@gmail.com
|
e6d928ad07d679a7f2fe24fcd018ad39e11ec5c1
|
305567bc6e1f8c771fcf299a743f26e9e90ef268
|
/character/character_eval.py
|
7decbf79a3477af57dae5e6d047d26e59df1ba9c
|
[] |
no_license
|
gugug/tensorflow_demo
|
ee6a1a5d53dc84fb7f4f359dc6843aa0ee57d763
|
c027dbf2f85be6f6ada740240db9b5082268e42e
|
refs/heads/master
| 2021-04-26T22:21:59.658150
| 2018-09-05T14:37:56
| 2018-09-05T14:37:56
| 124,079,845
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,440
|
py
|
# coding=utf-8
"""
测试过程
"""
import character
__author__ = 'gu'
import time
import tensorflow as tf
import character_inference
import numpy as np
import input_data
from crawl_textmind_data import input_textmind_data
from Emotion_Lexicon import data_helper
MOVING_AVERAGE_DECAY = 0.99  # moving-average decay rate
MODEL_SAVE_PATH = "character_model/tfidf/"
MODEL_NAME = "character_model"
print(MODEL_SAVE_PATH)
# interval, in seconds, between evaluation runs.
EVAL_INTERVAL_SECS = 2
# load the d2v and tf-idf features
train_list_side, train_list_tag, text_list_side, text_list_tag = input_data.load_data_label('')
# train_list_side1, train_list_tag1, text_list_side1, text_list_tag1 = input_data.load_data_label1('')
# # load the TextMind features
# train_list_side1, train_list_tag1, text_list_side1, text_list_tag1 = \
# input_textmind_data.load_textmind_data_label_with_normalization('../crawl_textmind_data')
# # # load the emotion-lexicon features
# train_list_side, train_list_tag, text_list_side, text_list_tag = \
# data_helper.load_emotion_data_label('../Emotion_Lexicon')
#
# # combine the feature sets
# train_list_side, text_list_side = input_data. \
# load_data_label_combine(X_train=train_list_side, X_test=text_list_side, X1_train=train_list_side1,
# X1_test=text_list_side1)
def evaluate(character):
with tf.Graph().as_default() as g:
x = tf.placeholder(tf.float32, [None, character_inference.INPUT_NODE], name='x-input')
y_ = tf.placeholder(tf.float32, name='y-input')
y = character_inference.inference(x, None)
# loss function used during training
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=y, targets=y_)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
loss = cross_entropy_mean
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
dict_acc = {}
dict_precision = {}
dict_recall = {}
dict_f1 = {}
dict_acc_lsit = {}
while True:
with tf.Session() as sess:
validate_feed = {x: text_list_side, y_: text_list_tag}
# tf.train.get_checkpoint_state automatically finds the newest model checkpoint file in the directory
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
# accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
# accuracy_score = get_acc(sess,true_y, pred_y)
# print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
# print("the input data are \n%s" % test_list_side)
# print("the truly answer are \n%s" % test_list_tag)
eval_aws = sess.run(y, feed_dict=validate_feed)
eval_loss = sess.run(loss, feed_dict=validate_feed)
print("========the evaluate eval_loss are %s" % eval_loss)
# print("the evaluate answer are \n%s" % eval_aws)
accuracy_score, acc_list = get_acc(sess, text_list_tag, eval_aws)
print("After %s training step(s), all validation accuracy = %g" % (global_step, accuracy_score))
print("After %s training step(s), 5 validation accuracy = %s" % (global_step, acc_list))
precision_list = get_precision(text_list_tag, eval_aws)
print("After %s training step(s), 5 precision = %s" % (global_step, precision_list))
recall_list = get_recall(text_list_tag, eval_aws)
print("After %s training step(s), 5 recall = %s" % (global_step, recall_list))
f1_list = get_f1(precision_list, recall_list)
print("After %s training step(s), 5 f1 = %s" % (global_step, f1_list))
print("==========================================")
if int(global_step) > 1:
dict_acc[global_step] = accuracy_score
dict_precision[global_step] = precision_list
dict_recall[global_step] = recall_list
dict_f1[global_step] = f1_list
dict_acc_lsit[global_step] = acc_list
if int(global_step) == 29001:
# print("================全部准确率===================")
# sort_dict(dict_acc)
print("================5个准确率===================")
sort_dict(dict_acc_lsit)
print("================5个精准率===================")
sort_dict(dict_precision)
print("================5个召回率===================")
sort_dict(dict_recall)
print("================5个f1===================")
sort_dict(dict_f1)
break
else:
print('No checkpoint file found')
return
time.sleep(EVAL_INTERVAL_SECS)
def get_acc(sess, true_y, pred_y):
"""
计算总的准确率和5个标签的准确率
:param sess:
:param true_y:
:param pred_y:
:return:
"""
pred_y_ = np.where(pred_y > 0, 1, 0)
correct_prediction = tf.equal(true_y, pred_y_)
accuracy = sess.run(tf.reduce_mean(tf.cast(correct_prediction, tf.float32)))
acc_list = []
for clazz in range(5):
true_class1 = true_y[:, clazz]
pred_class1 = pred_y[:, clazz]
pred_class1_ = np.where(pred_class1 > 0, 1, 0)
acc = 0
for i in range(len(true_class1)):
if true_class1[i] == pred_class1_[i]:
acc += 1
acc_list.append(acc * 1.0 / len(true_class1))
return accuracy, acc_list
def get_precision(true_y, pred_y):
    """
    Return the precision for each of the five labels
    :param true_y:
    :param pred_y:
    :return:
    """
    precision_list = []
    for clazz in range(5):
        true_class1 = true_y[:, clazz]
        pred_class1 = pred_y[:, clazz]
        pred_class1_ = np.where(pred_class1 > 0, 1, 0)
        true_positives = 0
        for i in range(len(true_class1)):
            if true_class1[i] == 1 and pred_class1_[i] == 1:
                true_positives += 1
        precision_list.append(true_positives * 1.0 / np.sum(pred_class1_))
    return precision_list
def get_recall(true_y, pred_y):
    """
    Return the recall for each of the 5 labels
    :param true_y:
    :param pred_y:
    :return:
    """
    recall_list = []
    for clazz in range(5):
        true_class1 = true_y[:, clazz]
        pred_class1 = pred_y[:, clazz]
        pred_class1_ = np.where(pred_class1 > 0, 1, 0)
        true_positives = 0
        for i in range(len(true_class1)):
            if true_class1[i] == 1 and pred_class1_[i] == 1:
                true_positives += 1
        recall_list.append(true_positives * 1.0 / np.sum(true_class1))
    return recall_list
def get_f1(precision_list, recall_list):
    """
    Return the f1 score for each of the 5 labels
    :param precision_list:
    :param recall_list:
    :return:
    """
    f1_list = []
    for i in range(5):
        precision = precision_list[i]
        recall = recall_list[i]
        if precision + recall == 0:
            # guard against division by zero when both metrics are 0
            f1_list.append(0.0)
        else:
            f1_list.append((2 * precision * recall) / (precision + recall))
    return f1_list
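# Toy example (illustrative, not part of the original script): for one label
# with true = [1, 0, 1, 1] and thresholded predictions [1, 1, 0, 1] there are
# 2 true positives, 3 predicted positives and 3 actual positives, so
# precision = 2/3, recall = 2/3 and f1 = 2*(2/3)*(2/3)/((2/3)+(2/3)) = 2/3.
# (Raw logits are thresholded at 0, which corresponds to sigmoid(0) = 0.5.)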
def mymean(acc_list):
acc_set = set(acc_list[1:])
mean_acc = np.average(list(acc_set))
print('After 20091 training steps mean_acc', mean_acc)
def sort_dict(d):
    sorted_dict = sorted(d.items(), key=lambda e: e[0], reverse=False)
print(sorted_dict)
item0 = 0
item1 = 0
item2 = 0
item3 = 0
item4 = 0
for ke in sorted_dict:
k = ke[1]
# print(k)
item0 = item0 + k[0]
item1 = item1 + k[1]
item2 = item2 + k[2]
item3 = item3 + k[3]
item4 = item4 + k[4]
le = len(sorted_dict)
print([item0 / le, item1 / le, item2 / le, item3 / le, item4 / le])
def main(argv=None):
evaluate(character)
# mymean([1, 2, 1, 1, 2])
if __name__ == '__main__':
tf.app.run()
|
[
"734093894@qq.com"
] |
734093894@qq.com
|
ade516ca679c9523f360daeceed1aef37c571bbd
|
6a52627de2f88be818d3517847d84f20827bfa99
|
/clients/models.py
|
2eb0aeb32ed5f363e45e26228fbffdb74fb6b2cb
|
[] |
no_license
|
rizwanmeo/sabzi_mandi
|
46a8a3c6516a3aa4b234342dbd2328e96ce86a24
|
cd801fd9ffebceff051791ecee6486468d9cdbf7
|
refs/heads/master
| 2023-04-29T05:49:17.670068
| 2023-02-05T16:02:32
| 2023-02-05T16:02:32
| 231,114,753
| 0
| 0
| null | 2023-04-21T22:34:15
| 2019-12-31T16:04:41
|
Python
|
UTF-8
|
Python
| false
| false
| 274
|
py
|
from django.db import models
from shops.models import Shop
from sabzi_mandi.models import BasicInfo
class Client(BasicInfo):
shop = models.ForeignKey(Shop, on_delete=models.CASCADE)
class Meta:
unique_together = [('shop', 'name'), ('shop', 'identifier')]
|
[
"rizwan_meo@rocketmail.com"
] |
rizwan_meo@rocketmail.com
|
3488f61c7f8dbd0d52a96c6bd8d46df6dbcdb89e
|
b6542f17c21b76aa237fefcfa303105922602ce5
|
/Tasks/Statistic/81_linear_regression.py
|
c667538f6e0009b989be1626c6a0c45f82dc1538
|
[] |
no_license
|
AV272/Python
|
9b1963c798c491f86e0f73b2b684ec444b761d6f
|
b66e79bd9c4814cc286e48ecad70591865024baf
|
refs/heads/master
| 2023-06-08T14:47:26.292007
| 2021-07-05T11:28:39
| 2021-07-05T11:28:39
| 283,837,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
# Simple linear regression (least squares) on five (x, y) pairs
x = [95, 85, 80, 70, 60]
y = [85, 95, 70, 65, 70]
n = len(x)
sumx = sum(x)
sumy = sum(y)
mx = sumx/n  # mean of x
my = sumy/n  # mean of y
sumx2 = sum(map(lambda x: x**2, x))  # sum of squared x values
xy = sum([x[i]*y[i] for i in range(n)])  # sum of x*y products
# slope b = (n*Sxy - Sx*Sy) / (n*Sxx - Sx^2), intercept a = mean(y) - b*mean(x)
b = (n*xy - sumx*sumy)/(n*sumx2 - sumx**2)
a = my - b*mx
# predicted y at x = 80, rounded to 3 decimal places
print(round(a + 80*b,3))
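# Illustrative cross-check (not part of the original script): numpy's polyfit
# fits the same least-squares line, so a and b can be verified against it.
# import numpy as np
# b_np, a_np = np.polyfit(x, y, 1)  # returns slope first, then intercept
# assert abs(b_np - b) < 1e-9 and abs(a_np - a) < 1e-9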
|
[
"noreply@github.com"
] |
AV272.noreply@github.com
|
0fa68f4a8ee766434f5c0958d2a0b2b6b4571a32
|
83ef08951ce01c53a3a794bdf0f03a1a2ba195ea
|
/ml_api.py
|
5bfee834172a3567e3fbf3f9c63c53c157c7bd44
|
[] |
no_license
|
chart90/movielens-stream-search
|
8f4e81dd8b8f56a16a6913cc9e93302e3a99ecaa
|
442d665627ac7ff0ddd892709aedf85e95c0f059
|
refs/heads/master
| 2022-12-10T06:21:46.597721
| 2018-02-25T23:30:43
| 2018-02-25T23:30:43
| 122,883,585
| 0
| 0
| null | 2021-06-01T21:49:57
| 2018-02-25T22:27:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,788
|
py
|
import requests
import json
import os
from urllib.parse import urlencode
import pickle as pkl
import config
from time import time
class MovieLens:
def __init__(self):
self.headers = {
'Accept': 'application/json, text/plain, */*',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Content-Type': 'application/json;charset=utf-8',
'DNT': '1',
'Host': 'movielens.org',
'Pragma': 'no-cache',
}
self.core_url = 'https://movielens.org'
self.auth_cookie = None
self.get_auth_cookie()
def set_auth_cookie(self, cookie):
with open('config/cookies.pkl', 'wb') as f:
pkl.dump(cookie, f)
self.auth_cookie = cookie
def get_auth_cookie(self):
if os.path.isfile('config/cookies.pkl'):
with open('config/cookies.pkl', 'rb') as f:
auth_cookie = pkl.load(f)
now = time()
cookie_data = [k for k in auth_cookie][0]
if cookie_data.expires > now:
self.auth_cookie = auth_cookie
return
username = config.USERNAME
password = config.PASSWORD
cookie = self.login(username, password)
if cookie is None:
print('Invalid login! Please check username and password.')
else:
self.set_auth_cookie(cookie)
def login(self, username, password):
auth = {
'userName': username,
'password': password
}
auth = json.dumps(auth)
path = '/api/sessions'
url = self.core_url + path
headers = self.headers
headers['Referer'] = 'https://movielens.org/login'
r = requests.post(url, data=auth, headers=headers)
print(f'Request status: {r.status_code}, {r.text}')
if r.json()['status'] == 'success':
return r.cookies
return None
def request_getter(self, path, query_str=''):
url = self.core_url + path + '?' + query_str
req = requests.get(url, cookies=self.auth_cookie)
res = req.json()
if res['status'] == 'success':
res = res['data']
return res
def get_genres(self):
return self.request_getter('/api/movies/genres')
def get_me(self):
return self.request_getter('/api/users/me')
def get_mytags(self):
return self.request_getter('/api/users/me/tags')
def explore(self, params):
return self.request_getter('/api/movies/explore', urlencode(params))
def top_picks(self):
params = {
'hasRated': 'no',
'sortBy': 'prediction'
}
return self.explore(params)
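# Example usage (illustrative; assumes config.USERNAME and config.PASSWORD are
# set and a config/ directory exists for the cached cookie):
# ml = MovieLens()
# print(ml.top_picks())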
|
[
"chrishart90@gmail.com"
] |
chrishart90@gmail.com
|
503fa6b7abb0cfb33690e14f68ad1975d785840f
|
b4efe7a85bbde01cd47189bcc0298594baae7a14
|
/code/89.py
|
4d92fda40c156d24ca30d75475b4fd4045bc92f6
|
[] |
no_license
|
HarshaaArunachalam/guvi
|
c200e05dc0c259bfabfc0ee58c1ab9b6412b89a7
|
67c87e8fe50d645036333649759b5b1a40369004
|
refs/heads/master
| 2020-05-31T06:27:27.104793
| 2019-06-11T18:31:43
| 2019-06-11T18:31:43
| 190,141,812
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
name=input()
name=list(name)
let=sorted(name)
for i in let:
print(i,end="")
|
[
"noreply@github.com"
] |
HarshaaArunachalam.noreply@github.com
|
197073fdfefb60a514635179e44d1d165ed7e417
|
b5ca937ce81b8a45e35e57b4a1781816889f4705
|
/FactorInt.py
|
e64d5311c56aa5b595b0bd920e91a00f296f2a26
|
[] |
no_license
|
susuk4/Homework2_CholYoon
|
4fe5c0d2375fcf7c815a23e9304b9f1d98e5ec60
|
3c3f8f2b7c8c318c101560f213a11aadc07b9b84
|
refs/heads/master
| 2020-12-24T13:29:00.690774
| 2013-04-22T06:00:27
| 2013-04-22T06:00:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
class FactorInt:
    def __init__(self, integer):
        # initialize class with the integer to factor and check that it is an integer
        if type(integer) != int:
            # raise ValueError if it is not an integer
            raise ValueError("Argument is not an integer")
        self.n = integer
        self.negative = False
        if self.n == 0:
            raise ValueError("0 cannot be factored")
        if self.isNegative(self.n):
            self.negative = True
            self.n = -1 * self.n
        if self.isAlreadyPrime():
            # the given value is already a prime number
            raise ValueError(str(self.n) + " is already a prime number")
        self.denominator = [7, 5, 3, 2]
        self.string = ""
        self.lastnumber = self.n
    # check if the remaining number is divisible by a specific divisor
    def isDivisible(self, d):
        return self.lastnumber % d == 0
    # check if the integer is already a prime number (trial division by small primes)
    def isAlreadyPrime(self):
        for prime in [2, 3, 5, 7, 11]:
            if self.n != prime and self.n % prime == 0:
                return False
        return True
    # check to see if the input is negative
    def isNegative(self, g1):
        return g1 < 0
    # loop through 2, 3, 5, 7, which are the prime numbers between 1 and 10 except 1,
    # and return the resulting string
    def toString(self):
        for de in self.denominator:
            # for each prime, divide while the remainder is divisible by it
            # and prepend the divisor at the front of the string
            while self.isDivisible(de):
                self.lastnumber = self.lastnumber // de
                self.string = str(de) + "*" + self.string
        self.string = self.string + str(self.lastnumber)
        if self.negative:  # if negative, prepend the factor -1
            self.string = str(-1) + "*" + self.string
        return str(self.n) + ": " + self.string
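# Example usage (illustrative, assuming the reconstruction above):
# f = FactorInt(-84)
# print(f.toString())   # prints "84: -1*2*2*3*7*1"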
|
[
"susuk4@uw.edu"
] |
susuk4@uw.edu
|
5234a4cc2cf0abf2e9f870ed1fb4f98ab451e479
|
5636cb0c282d03e91a830d30cec3bd54c225bd3b
|
/Divers/TP_SPE_Supplementaires/TD_euler/TD_euler_implicite_explicite.py
|
b48efbfc92b6aac69464492e094bfc63101c73d2
|
[] |
no_license
|
xpessoles/Informatique
|
24d4d05e871f0ac66b112eee6c51cfa6c78aea05
|
3cb4183647dc21e3acbcbe0231553a00e41e4e55
|
refs/heads/master
| 2023-08-30T21:10:56.788526
| 2021-01-26T20:57:51
| 2021-01-26T20:57:51
| 375,464,331
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,909
|
py
|
### TD on the explicit and implicit Euler methods, with a study of stability and speed
# Module imports
import matplotlib.pyplot as plt
import math
import numpy as np
import time
### question 1
# recurrence relation of the explicit Euler method: yk+1 = yk + h*f(yk, tk)
# for the chosen equation: domega/dt = (Kc*U - omega)/tau
# hence omega = omega + h*(Kc*U - omega)/tau
# Parameter definitions
T=2
U0=5
omega0=0
### question 2
def euler1_explicite(T,h,U0,omega0):
    ''' Returns the list of ordinates for a first-order system
    subjected to a voltage step u '''
s=omega0
sortie=[omega0]
for i in range(1,int(T/h)):
f=(0.5*U0-s)/0.2
s=s+f*h
sortie.append(s)
return sortie
### question 3
### plot of the exact solution and the approximate solutions
def ordre1(t):
return (0.5*5*(1-np.exp(-t/0.2)))
# plot of the solutions for different time steps
marqueurs = ['^', '+', '*', '.', '', 'o'] # the markers
plt.figure(1)
k=0
for i in (0.5,0.2,0.1,0.01,0.005):
x=np.linspace(0,T,int(T/i))
y=euler1_explicite(T,i,U0,omega0)
    plt.plot(x,y,'--',color='b',marker=marqueurs[k],label='euler, step = '+str(i))
k=k+1
x=np.linspace(0,2,100)
y=ordre1(x)
plt.plot(x,y,'r',label='exact')
plt.title('Explicit Euler, order 1')
plt.legend()
plt.show()
# The speed of the explicit Euler method depends only on the operations in the recurrence equation and on the desired number of iterations n. The computation time is therefore directly proportional to n.
# In conclusion, the time step of an explicit scheme must be chosen sufficiently small compared with the time constants of the differential equation to avoid numerical instabilities.
# it is the factor h/tau that creates the instability
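# Quick illustration of that limit (not in the original TD): here tau = 0.2 s,
# so the explicit scheme is stable only for h < 2*tau = 0.4 s; for instance
# euler1_explicite(2, 0.5, U0, omega0) produces values that oscillate and grow.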
### question 4, over a time interval of 2 s
T=2
temps=[]
for i in (0.1,0.01,0.001,0.0001,0.00001,0.000001):
x=np.linspace(0,T,int(T/i))
    td=time.perf_counter()  # time.clock() was removed in Python 3.8
y=euler1_explicite(T,i,U0,omega0)
    tf=time.perf_counter()-td
temps.append(tf)
# print (temps)
# [2.7479030685469386e-05, 0.0001433544612868463, 0.001265690774825897, 0.013448171402936946, 0.08863642759358183, 0.8244762016695993]
### question 5: consistency error
# Computing the local error (or consistency error):
# ci = (h**2/2) * (second derivative of the exact solution at ti)
def consistance(t,h):
return (-(h**2)/2*5*0.5/(0.2**2)*np.exp(-t/0.2))
T=2
erreur=[]
for h in (0.1,0.01,0.001,0.0001,0.00001,0.000001):
erreur.append(consistance(T-h,h))
# print (erreur)
# [-2.3391196839906476e-05, -1.4914885605250613e-07, -1.4258593080448336e-09, -1.4194573563532274e-11, -1.4188187442413721e-13, -1.4187548988344106e-15]
### question 6...
fichier=open('euler_explicite.csv','w')
fichier.write("time step h"+';'+"10**(-1)"+';'+'10**(-2)'+';'+'10**(-3)'+';'+'10**(-4)'+';'+'10**(-5)'+';'+'10**(-6)'+'\n')
fichier.write("number of time steps"+';'+str(2//0.1)+';'+str(2//0.01)+';'+str(2//0.001)+';'+str(2//0.0001)+';'+str(2//0.00001)+';'+str(2//0.000001)+';'+'\n')
fichier.write("error"+';'+'\n')
fichier.write("computation time"+';'+'\n')
fichier.close()
pas_de_temps=[1e-01, 1e-02, 1e-03,1e-04, 1e-05, 1e-06]
fichier=open('euler_explicite2.csv','w')
fichier.write("pas de temps h"+';'+"N pas de temps"+';'+"erreur"+';'+"temps de calcul"+'\n')
for i in range(len(pas_de_temps)):
N=2//pas_de_temps[i]
fichier.write(str(pas_de_temps[i])+';'+str(N)+';'+str(erreur[i])+';'+str(temps[i])+';'+'\n')
fichier.close()
### explicit Euler for a second-order differential equation
### question 7: done on paper
### question 8
# Data
J=0.015
mu=0.01
dmg=0.6
theta0=np.pi/4
def euler2_explicite(T,Dt):
    # time interval
    n=int(T/Dt)
    # initialization
    theta=[theta0,theta0]
    # solve
    for i in range(2,n):
        theta.append((2-Dt*mu/J)*theta[i-1]+(Dt*mu/J-1)*theta[i-2]-(dmg*Dt**2/J)*np.sin(theta[i-2]))
    return theta
### with the sup 2014 example
def ordre2_euler(w_0,xi,K,u,temps):
    ''' Returns the list of ordinates for a first-order system
    subjected to a voltage step u,
    for a given list of time abscissas '''
v=0
a=0
vitesse=[0]
acc=[0]
for i in range(1,len(temps)):
f1=(K*u-v-2*xi*a/w_0)*((w_0)**2)
f2=a
a2=a+f1*(temps[i]-temps[i-1])
v2=v+f2*(temps[i]-temps[i-1])
vitesse=vitesse + [v2]
acc=acc+[a2]
v=v2
a=a2
return vitesse, acc
#Question 9
def ordre2_exacte(t):
    '''for small theta, solution of J*theta'' + mu*theta' + dmg*theta = 0'''
    z=0.01/(2*np.sqrt(1.5*10**(-2)*0.6))
    om=np.sqrt(0.6*100/1.5)
    # the damped sine term is also scaled by pi/4 and the decaying exponential
    return (np.pi/4*np.exp(-z*om*t)*(np.cos((np.sqrt(1-z**2))*om*t)+z/(np.sqrt(1-z**2))*np.sin((np.sqrt(1-z**2))*om*t)))
T=15
Dt=0.001 # also try Dt=0.1 to see the solution diverge
plt.figure(2)
for i in [0.001,0.01]:
temps=np.linspace(0,T,int(T/i))
plt.plot(temps,euler2_explicite(T,i))
les_t=np.linspace(0,T,1000)
y=ordre2_exacte(les_t)
plt.plot(les_t,y,'r',label='exact')
plt.title('Explicit Euler, order 2')
plt.legend()
plt.show()
### part 3: implicit Euler method, using Newton's method
### question 10
### recurrence relation of the implicit Euler method: Yi+1 = Yi + h*F(ti+1, Yi+1)
### question 11
# Yi+1 - Yi - h*F(ti+1, Yi+1) = 0
# if the function is not linear, the zero of the function must be approximated with Newton's method
# if the function is linear but involves matrices (or vectors), the matrix must be inverted (a significant time cost)
# definition of the derivative function
def dP(x):
return 3*x**2-8*x+2
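# (The polynomial P itself is not defined in this file; for illustration, a
# cubic whose derivative matches dP above would be, up to a constant,)
# def P(x):
#     return x**3 - 4*x**2 + 2*x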
def zero_newton(f,df,u0,epsilon):
x=float(u0)
y=float(u0)-f(u0)/df(u0)
i=0
while abs(y-x)>epsilon:
x=y
y=y-f(y)/df(y)
i=i+1
return [y,f(y),i]
# In the linear case at least one inversion is needed, which is also very expensive for high-dimensional problems. The takeaway is that an implicit method is generally more costly than an explicit method at an equal time step.
### question 12
### implicit Euler
def euler1_implicite(T,h,U0,omega0):
s=omega0
sortie=[omega0]
for i in range(1,int(T/h)):
s=(s*0.2+h*0.5*U0)/(0.2+h)
sortie.append(s)
return sortie
### question 13
marqueurs = ['^', '+', '*', '.', '', 'o'] # the markers
plt.figure(3)
k=0
T=3
for i in (1,0.5,0.2,0.1,0.01,0.005):
x=np.linspace(0,T,int(T/i))
y=euler1_implicite(T,i,U0,omega0)
    plt.plot(x,y,'--',color='b',marker=marqueurs[k],label='euler, step = '+str(i))
k=k+1
x=np.linspace(0,T,100)
y=ordre1(x)
plt.plot(x,y,'r',label='exact')
plt.title('Implicit Euler')
plt.legend()
plt.show()
### question 14
# no divergence of the curves: with s = (s*0.2 + h*0.5*U0)/(0.2 + h) there is tau + h in the denominator
|
[
"xpessoles.ptsi@free.fr"
] |
xpessoles.ptsi@free.fr
|
de8048db0aac82d89d7511c3c607134e5810c5f8
|
f82f71fc98c0fb0f26ee05206857be2569140349
|
/backend/app.py
|
249f3c8e096ea8b815c652913d11cc676e135e3f
|
[] |
no_license
|
JasonGilman18/Spotify-Mood-Search
|
6a43b83b23375d4e71990419f6a98c41a3ba9d9b
|
e73dd58c37bf12c0cdc839735fcc141abb829c4b
|
refs/heads/master
| 2023-01-10T16:52:58.003698
| 2020-11-17T16:54:11
| 2020-11-17T16:54:11
| 303,192,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,574
|
py
|
from flask import Flask, jsonify, render_template
from flask.helpers import send_file
from flask_restful import Resource, Api
from flask_cors import CORS
import os.path
import rank
app = Flask(__name__, static_folder="frontend/static", template_folder='frontend')
CORS(app)
api = Api(app)
class frontend(Resource):
def get(self):
dir = os.path.join(app.template_folder, 'index.html')
return send_file(dir)
class rank_api(Resource):
def get(self, acousticness, danceability, energy, instrumentalness, liveness, speechiness, valence):
#USER_PREFS = {"acousticness": 0.051, "danceability": .901, "energy": .4, "instrumentalness": 0.0, "liveness": .0599, "speechiness": .126, "valence": .346}
USER_PREFS = {"acousticness": float(acousticness), "danceability": float(danceability), "energy": float(energy), "instrumentalness": float(instrumentalness), "liveness": float(liveness), "speechiness": float(speechiness), "valence": float(valence)}
        #call loadDataset to load data from excel into a list of dictionaries. Each dictionary is a row in the excel
        list_of_songs = rank.loadDataset()
        #create average vectors for artists and albums
        centroid_vectors = rank.createCentriods(list_of_songs)
        artist_centroids = centroid_vectors[0]
        album_centroids = centroid_vectors[1]
        #call rankArtists to rank the artists according to the user's prefs
        ranked_list_of_artists = rank.rankArtists(artist_centroids, USER_PREFS)
        #call rankAlbums to rank the albums according to the user's prefs
        ranked_list_of_albums = rank.rankAlbums(album_centroids, USER_PREFS)
        #call rankSongs to rank the songs according to the user's prefs
        ranked_list_of_songs = rank.rankSongs(list_of_songs, USER_PREFS)
ranked_lists = (ranked_list_of_songs[:100], ranked_list_of_artists, ranked_list_of_albums[:100])
return jsonify({"ranked_songs": ranked_lists[0], "ranked_artists": ranked_lists[1], "ranked_albums": ranked_lists[2]})
class img(Resource):
def get(self, id):
filename = id + ".png"
dir = os.path.join(app.static_folder, 'img', filename)
return send_file(dir)
api.add_resource(frontend, '/')
api.add_resource(rank_api, '/rank/<string:acousticness>/<string:danceability>/<string:energy>/<string:instrumentalness>/<string:liveness>/<string:speechiness>/<string:valence>')
api.add_resource(img, '/img/<string:id>')
if __name__=="__main__":
app.run(host='0.0.0.0', debug=False, port=os.environ.get('PORT', 80))
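# Example request (illustrative), using the preference vector from the
# commented-out USER_PREFS above:
#   GET /rank/0.051/0.901/0.4/0.0/0.0599/0.126/0.346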
|
[
"jasongilman18@gmail.com"
] |
jasongilman18@gmail.com
|
c9a5516f0128c5bc1c1d48f74acd33bed59f97fd
|
537d28fb2142331e27c84ebf2c16bad77aceb24e
|
/cslee201907/bit0729/test0809_pca.py
|
c2fd4b09ccd0c675214da1d7269552e87374abb8
|
[] |
no_license
|
gema0000/bit2019
|
c27c3cec8d8d3a0907ade41523ce1c5ee86337b6
|
2f44ad3956b387186935374d9a488ad40a13bcaf
|
refs/heads/master
| 2020-07-03T05:19:41.051447
| 2019-10-26T23:56:25
| 2019-10-26T23:56:25
| 201,796,021
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target,
random_state=1)
print(X_train.shape)
print(X_test.shape)
from sklearn.svm import SVC
svm = SVC(C=100)
svm.fit(X_train, y_train)
print("테스트 세트 정확도: {:.2f}".format(svm.score(X_test, y_test)))
# 0.63
from sklearn.preprocessing import MinMaxScaler, StandardScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# train the SVM on the scaled data
svm.fit(X_train_scaled, y_train)
# accuracy on the scaled test set
print("Scaled test set accuracy: {:.2f}".format(svm.score(X_test_scaled, y_test)))
# 0.97
# scale the data to zero mean and unit variance
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# train the SVM on the scaled data
svm.fit(X_train_scaled, y_train)
# accuracy on the scaled test set
print("SVM test accuracy: {:.2f}".format(svm.score(X_test_scaled, y_test)))
# 0.97
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
scaler = StandardScaler()
scaler.fit(cancer.data)
X_scaled = scaler.transform(cancer.data)
from sklearn.decomposition import PCA
# keep only the first two principal components of the data
pca = PCA(n_components=2)
# fit the PCA model on the breast cancer data
pca.fit(X_scaled)
# transform the data onto the first two principal components
X_pca = pca.transform(X_scaled)
print("Original data shape:", str(X_scaled.shape))
print("Reduced data shape:", str(X_pca.shape))
|
[
"gema0000@naver.com"
] |
gema0000@naver.com
|
dce3ff7ebba6c050682e6e0bac39c91f54dca34f
|
0054d1400952895b3480bc5c912ee2f19bea122c
|
/plugin.video.yatp/server.py
|
f6642f62ea459f46896786f28e354eff2a930047
|
[] |
no_license
|
Inter95/tutvguia
|
a143bd2525d218882f6e7431c998eadbd47f229f
|
49ca0e894a8a28bf79e995221025c96171ea25a0
|
refs/heads/master
| 2022-12-25T17:58:54.353580
| 2017-08-25T08:44:59
| 2017-08-25T08:44:59
| 44,966,893
| 2
| 1
| null | 2022-12-14T13:06:23
| 2015-10-26T12:29:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
# coding: utf-8
# Module: server
# Created on: 01.07.2015
# Author: Roman Miroshnychenko aka Roman V.M. (romanvm@yandex.ua)
# License: GPL v.3 https://www.gnu.org/copyleft/gpl.html
"""
Torrent streamer WSGI server
"""
import sys
import xbmc
from libs.server.addon import Addon
addon = Addon()
if not addon.start_server:
addon.log('Torrent Server is disabled in Settings.', xbmc.LOGWARNING)
sys.exit()
from time import sleep
import xbmcgui
from libs.server import wsgi_app
from libs.server.wsgi_server import create_server
sleep(2.0)
addon.log('***** Starting Torrent Server... *****')
if addon.enable_limits:
wsgi_app.limits_timer.start()
if addon.persistent:
wsgi_app.save_resume_timer.start()
wsgi_app.log_torrents_timer.start()
httpd = create_server(wsgi_app.app, port=addon.server_port)
httpd.timeout = 0.2
start_trigger = True
while not xbmc.abortRequested:
httpd.handle_request()
if start_trigger:
addon.log('***** Torrent Server started *****', xbmc.LOGNOTICE)
xbmcgui.Dialog().notification('YATP', addon.get_localized_string(32028), addon.icon, 3000, False)
start_trigger = False
addon.log('***** Torrent Server stopped *****', xbmc.LOGNOTICE)
wsgi_app.torrent_client.abort_buffering()
if addon.enable_limits:
wsgi_app.limits_timer.abort()
if addon.persistent:
wsgi_app.save_resume_timer.abort()
wsgi_app.log_torrents_timer.abort()
del wsgi_app.torrent_client
|
[
"inter95@netzero.com"
] |
inter95@netzero.com
|
7ced65cfc3ef47676dd8e95513ae0b9a0d7fa43f
|
ffd2ce990cf15efa5705a30ae3a1ebe5b44eaef9
|
/hoth/base.py
|
43afa46f6c81a2a7b14b721dba96ff45abddf012
|
[] |
no_license
|
andre-deregle/hoth_framework
|
8ad6277f73582bd9fd378a056f3b75f4951a55c5
|
998ef9a78ee019787e040ae533f2659345c3ccd6
|
refs/heads/master
| 2021-01-10T14:30:34.744408
| 2015-12-15T15:09:49
| 2015-12-16T10:09:53
| 47,707,565
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,078
|
py
|
import os
import time
from bs4 import BeautifulSoup
from selenium import webdriver
class Base:
DRIVER = None
PAGE_HTML = None
def start_driver(self, driver_type="Firefox", path=None, remote=False):
"""!
Method that starts Selenium webdriver.
Args:
driver_type: string, browser type - Firefox, Chrome, etc.;
path: string, path in your file system to executable driver;
remote: boolean, trigger driver on remote machine.
Returns:
Selenium webdriver instance.
"""
        if remote:
            pass
        else:
            if path:
                os.environ['webdriver.chrome.driver'] = path
            global DRIVER
            # look the driver class up by name; getattr avoids eval and passes
            # the executable path as a real argument when one is given
            driver_class = getattr(webdriver, driver_type)
            DRIVER = driver_class(path) if path else driver_class()
        return DRIVER
def visit_page(self, page_url):
"""!
Method that visit page with "page_url" URL.
Args:
page_url: string, http(-s) address.
"""
DRIVER.get(page_url)
self.get_current_page_html()
def get_current_page_html(self):
"""!
Method that returns HTML of current page.
Returns:
PAGE_HTML - html of current page.
"""
html = DRIVER.page_source
global PAGE_HTML
PAGE_HTML = BeautifulSoup(html, 'html.parser')
return PAGE_HTML
def get_page_html(self):
return PAGE_HTML
def get_driver(self):
return DRIVER
def close_driver(self):
DRIVER.close()
def quit_driver(self):
DRIVER.quit()
def maximize_window(self):
DRIVER.maximize_window()
def screen(self, location='./tmp/screenshots'):
"""! Saves screenshot."""
timestamp = time.strftime('%d_%b_%Y_%H_%M')
filename = timestamp + '.png'
path = os.path.abspath(location)
if not os.path.exists(path):
os.makedirs(path)
full_path = path + '/' + filename
DRIVER.get_screenshot_as_file(full_path)
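# Example usage (illustrative):
# b = Base()
# b.start_driver("Firefox")
# b.visit_page("https://example.com")
# b.screen()
# b.quit_driver()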
|
[
"abon.lits@gmail.com"
] |
abon.lits@gmail.com
|
c865ab16fb282ca2fcd60d1db0446fbe307de26c
|
9b5d06139061f6de33d81d9611495600d2c86df0
|
/newsfeed_template.py
|
ca32da9441830095fad3bb168b2db67afe1d03dc
|
[] |
no_license
|
rakesh82/Flask-Example
|
28fa0795ce344f2d55f40b87b1cefe0cd6ef5231
|
ebe1aac105ba32aa5e0e535722ed535efc8bbe44
|
refs/heads/master
| 2022-12-18T04:03:59.156295
| 2020-09-25T11:24:58
| 2020-09-25T11:24:58
| 298,550,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
###
# newsfeed_template.py - multiple news feeds and uses of Jinja templates
###
import feedparser
from flask import Flask
from flask import render_template
app = Flask(__name__)
# Feeds channel
NEWS_FEED = {
'bbc': "http://feeds.bbci.co.uk/news/rss.xml",
'cnn': "http://rss.cnn.com/rss/edition.rss",
'fox': "http://feeds.foxnews.com/foxnews/latest",
'iol': "http://www.iol.co.za/cmlink/1.640"
}
# routing
@app.route("/")
@app.route("/<publication>")
# functions
def get_news(publication='bbc'):
feed = feedparser.parse(NEWS_FEED[publication])
#first_article = feed['entries'][0]
return render_template("home.html",
news=publication.upper(), articles=feed['entries'])
if __name__ == '__main__':
app.run(port=5000, debug=True)
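# Example (illustrative): with the app running, http://localhost:5000/cnn
# renders the CNN feed; the bare root path defaults to BBC.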
|
[
"noreply@github.com"
] |
rakesh82.noreply@github.com
|
e70e33f6e6b8c38ee246fdd13f744f958336a9b2
|
c50e7b5a9597980f7b659fa4294f065b152ef9cf
|
/jogo_da_velha_utilizando_arrays_funções_e_estrutura_para_faça.py
|
f319d511dae456ffad5e8ed63f5958fbd7eeb6ae
|
[] |
no_license
|
crishonsou/modern_python3_bootcamp
|
aab60ede8f8bec4cb38dc81e896823795460ea5b
|
086e0e97dacaf05862fa8d73184b217cf5688114
|
refs/heads/main
| 2022-12-24T00:00:03.621358
| 2020-10-06T15:35:26
| 2020-10-06T15:35:26
| 301,769,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,954
|
py
|
import sys
def menu():
    continuar=1
    while continuar:
        continuar = int(input("0. Exit \n"+
                        "1. Play again\n"))
        if continuar:
            jogador_1 = input('Enter the name of player 1: ')
            jogador_2 = input('Enter the name of player 2: ')
            game()
        else:
            print("Exiting...")
def game():
    jogada=0
    while ganhou() == 0:
        print("\nPlayer ", jogada%2 + 1)
        exibe()
        linha = int(input("\nRow   :"))
        coluna = int(input("Column:"))
        if board[linha-1][coluna-1] == 0:
            if(jogada%2+1)==1:
                board[linha-1][coluna-1]=1
            else:
                board[linha-1][coluna-1]=-1
        else:
            print("That square is not empty")
            jogada -=1
        if ganhou():
            print("Player ",jogada%2 + 1," won after ", jogada+1," rounds")
        jogada +=1
def ganhou():
    # check rows
for i in range(3):
soma = board[i][0]+board[i][1]+board[i][2]
if soma==3 or soma ==-3:
return 1
    # check columns
for i in range(3):
soma = board[0][i]+board[1][i]+board[2][i]
if soma==3 or soma ==-3:
return 1
    # check diagonals
diagonal1 = board[0][0]+board[1][1]+board[2][2]
diagonal2 = board[0][2]+board[1][1]+board[2][0]
if diagonal1==3 or diagonal1==-3 or diagonal2==3 or diagonal2==-3:
return 1
return 0
def exibe():
for i in range(3):
for j in range(3):
if board[i][j] == 0:
print(" _ ", end=' ')
elif board[i][j] == 1:
print(" X ", end=' ')
elif board[i][j] == -1:
print(" O ", end=' ')
print()
board = [ [0,0,0],
[0,0,0],
[0,0,0] ]
menu()
|
[
"noreply@github.com"
] |
crishonsou.noreply@github.com
|
43bacb73308ce17131c7af16d237c8beaddd291b
|
0c8e23c3bc701cd043edcc99dc61c0a910985406
|
/main.py
|
b085f31d1c6def8eecf4ac856e4b7ad6edbdd4ce
|
[
"MIT"
] |
permissive
|
talisonfc/DevOpsAssistent
|
55137d8596c286aeb057338bed36c5e12d455ea8
|
0a9b41c65d3dfbaec5210bdc7794be83042f1b52
|
refs/heads/master
| 2021-04-11T21:36:31.683842
| 2020-03-21T20:45:17
| 2020-03-21T20:45:17
| 249,056,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
# -*- coding: utf-8 -*-
import speech_recognition as sr
from controllers import DBController
dbController = DBController.DBController()
mapOfHandlers = {
"adicionar banco de dados": dbController.addDB,
"meus bancos de dados": dbController.show,
"remover banco de dados": dbController.removeDB,
"conectar com banco de dados": dbController.connectDB,
"criar banco de dados": dbController.createDB,
"excluir banco de dados": dbController.dropDB,
"carregar banco de dados": dbController.loadDump
}
def dispatch(command):
    # if command == "adicionar banco de dados":
    #     dbController.addDB()
    try:
        # command is already a str in Python 3, so compare it directly;
        # calling .encode() would make this a bytes-in-str check and raise TypeError
        if command in "O que você sabe fazer":
            print("Here is what I can do:")
            for action in mapOfHandlers.keys():
                print('- '+action)
        else:
            mapOfHandlers[command]()
    except KeyError:
        print("no handler available")
def ouvir_microfone():
    microfone = sr.Recognizer()
    with sr.Microphone() as source:
        microfone.adjust_for_ambient_noise(source)
        print('listening')
        audio = microfone.listen(source)
        try:
            frase = microfone.recognize_google(audio, language='pt-BR')
            print('you said: ' + frase)
            if frase != '':
                dispatch(frase)
            if frase != 'tchau':
                ouvir_microfone()
            else:
                print("Thank you, sir.")
        except sr.UnknownValueError:
            ouvir_microfone()
# mode = 'text'
mode = 'voz'
if mode == 'text':
dispatch("carregar banco de dados")
else:
ouvir_microfone()
|
[
"tfccomputation@gmail.com"
] |
tfccomputation@gmail.com
|
94cc4e4ccf09fb0aac8357be15b814d889a2fdba
|
778607e1cc3636326057b43f90713e683f05621c
|
/pyUtil/pimon_temp.py
|
df90a5702f77c3ad3d07850d1699b3ddce964d48
|
[] |
no_license
|
fgrehl/esxi-raspi
|
f9ddb2419aed901e324efd4d833bd11b4b6ee674
|
639d7c2838f4a7c5d615c39164d4586cfdfdadbf
|
refs/heads/main
| 2023-01-09T05:29:22.682287
| 2020-11-15T12:21:10
| 2020-11-15T12:21:10
| 311,383,029
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
#!/usr/bin/python
import time
import sys
from pimonLib import *
def main(argv):
if (argv):
SECONDS = int(argv[0])
else:
SECONDS = 10
print('Polling CPU temperature every {} seconds...'.format(str(SECONDS)))
pimon = PiMon()
while True:
timestamp = int(time.time())
print('CPU Temperature: {} C'.format(pimon.getTemp()))
time.sleep(SECONDS)
if __name__ == '__main__':
main(sys.argv[1:])
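# Run (illustrative): "./pimon_temp.py 5" polls every 5 seconds; with no
# argument the interval defaults to 10 seconds.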
|
[
"git@virten.net"
] |
git@virten.net
|
54634da9ef0f9cbf589a83574d5307aaa0f2557e
|
83a1c7251ff2d4d0090760227245856e28edfeac
|
/10/ascii_knot.py
|
54b56fdfed56f7e07e21971d65e74f5b4d063a4a
|
[] |
no_license
|
mkolas/advent2017
|
db6e6c758247a3d1f153608227ed3ffcf8cd9c23
|
40706b7e4af6f1e9f08f1a3ce3d28749a3cc661a
|
refs/heads/master
| 2021-09-01T05:41:10.714619
| 2017-12-25T05:44:48
| 2017-12-25T05:44:48
| 112,755,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
list_ = [x for x in range(256)]
size = len(list_)
lengths = [ord(x) for x in "183,0,31,146,254,240,223,150,2,206,161,1,255,232,199,88"]
lengths.extend([17, 31, 73, 47, 23])
position = 0
skip = 0
def wrap_slice(list_, position, size):
if position + size > len(list_):
return list_[position:] + list_[:(position+size)%len(list_)]
return list_[position:position+size]
# do this 64 times now
for x in range(64):
for length in lengths:
# first, reverse length
sublist = wrap_slice(list_, position, length)
sublist.reverse()
for i in range(len(sublist)):
list_[(position+i)%size] = sublist[i]
# move position forward...
position = (position+length+skip)%size
skip += 1
# generate dense hash
hash_array = []
val = 0
for x in range(16):
to_hash = list_[:16]
val = 0
for y in to_hash:
val = val ^ y
hash_array.append(val)
list_ = list_[16:]
# convert to hex string
hash_string = ""
for x in hash_array:
no_pad = hex(x)[2:]
if len(no_pad) == 1:
no_pad = "0" + no_pad
hash_string += no_pad
print("hash {}".format(hash_string))
|
[
"barrelrolled@gmail.com"
] |
barrelrolled@gmail.com
|
a4b85e3f638f3efabd6247858c62d1c3b21e8722
|
42de376efe849a6c205b56f3f1093aec4742bfc4
|
/battleship.py
|
4592ec5aa24b3e3c41df9ac90abcc6694e616962
|
[] |
no_license
|
Ihsara/battleship
|
397551d70eedd92be8698eda0b98b702b71a457b
|
346cbcb20d272bccf311ce1280e558165a485a6b
|
refs/heads/master
| 2021-06-21T01:06:13.687996
| 2019-09-19T02:48:14
| 2019-09-19T02:48:14
| 209,442,670
| 0
| 0
| null | 2021-04-20T18:41:25
| 2019-09-19T02:12:40
|
Python
|
UTF-8
|
Python
| false
| false
| 264
|
py
|
DEFAULT_SIZE = (10, 10)
PVE = 'pve'
PVP = 'pvp'
MODES = [PVE, PVP]
def play_battleship(board_size=DEFAULT_SIZE, playmode=PVE):
return 1
if __name__ == '__main__':
play_battleship(board_size=DEFAULT_SIZE, playmode=PVE)
|
[
"longchau21@gmail.com"
] |
longchau21@gmail.com
|
de3cc0898496f0289168f3bbfa565960cb719813
|
dbe0db719e6d8a8c7e9974c590a3199e5792fde8
|
/blog/views.py
|
bf70355cfd8d1ffceadc2eac61be622ae9e9a1db
|
[] |
no_license
|
PurunStar/Hosting
|
bbde7f069cf780554ad999e4c811699e04888d06
|
71f5aea468351b394d1e653d145289b545075a5e
|
refs/heads/master
| 2023-04-28T23:58:49.774782
| 2019-06-28T02:23:05
| 2019-06-28T02:23:05
| 194,189,733
| 0
| 0
| null | 2023-04-21T20:33:00
| 2019-06-28T02:03:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,648
|
py
|
from django.shortcuts import render, get_object_or_404, redirect, HttpResponse
from .models import Blog , Comment
from django.utils import timezone
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from .forms import BlogForm
# Create your views here.
def home(request):
blogs = Blog.objects
blog_list = Blog.objects.all()
paginator = Paginator(blog_list, 3)
page = request.GET.get('page')
posts = paginator.get_page(page)
return render(request, 'home.html', {'blogs' :blogs, 'posts':posts})
def details(request, blog_id):
details = get_object_or_404(Blog, pk=blog_id)
return render(request, 'details.html', {'detail': details})
def new(request):
return render(request, 'new.html')
def delete(request, blog_id):
blog = get_object_or_404(Blog, pk=blog_id)
blog.delete()
return redirect('/')
def create(request):
    # blog = Blog()
    # blog.title = request.GET['title']
    # blog.body = request.GET['body']
    # blog.pub_date = timezone.datetime.now()
    # blog.save()
    # return redirect('/blog/' + str(blog.id))
    if request.method == 'POST':
        form = BlogForm(request.POST)
        if form.is_valid():
            bloggroup = form.save(commit=False)
            # set the publication date on the new instance before saving
            bloggroup.pub_date = timezone.datetime.now()
            bloggroup.save()
            return redirect('/blog/' + str(bloggroup.id))
    else:
        form = BlogForm()
    return render(request, 'new.html', {'form' : form})
def edit(request,blog_id):
# blog = get_object_or_404(Blog, pk=blog_id)
# if request.method == "POST":
# blog.title = request.POST['title']
# blog.body = request.POST['body']
# blog.pub_date = timezone.datetime.now()
# blog.save()
# return redirect('/blog/' + str(blog.id))
# return render(request,'edit.html',{'blog':blog})
blog = get_object_or_404(Blog, pk=blog_id)
if request.method == 'POST':
form = BlogForm(request.POST, instance=blog)
if form.is_valid():
blog.pub_date = timezone.datetime.now()
blog = form.save(commit=False)
blog.save()
            return redirect('details', blog_id=blog.id)  # the details view takes blog_id, not pk
else:
form = BlogForm(instance=blog)
return render(request, 'edit.html', {'form': form, 'blog': blog})
#blog/views.py
@login_required
def comment_add(request,blog_id):
if request.method == "POST":
post = Blog.objects.get(pk=blog_id)
comment = Comment()
comment.user = request.user
comment.body = request.POST['body']
comment.post = post
comment.save()
return redirect('/blog/' + str(blog_id))
else:
        return HttpResponse('Invalid access.')
@login_required
def comment_edit(request,comment_id):
comment = get_object_or_404(Comment,pk=comment_id)
if request.user == comment.user:
if request.method =="POST":
comment.body = request.POST['body']
comment.save()
return redirect('/blog/' + str(comment.post.id))
elif request.method=="GET":
context ={
'comment' : comment
}
return render(request,'comment_edit.html', context)
@login_required
def comment_delete(request, comment_id):
comment = get_object_or_404(Comment,pk=comment_id)
if request.user == comment.user:
        if request.method=="POST":
post_id = comment.post.id
comment.delete()
return redirect('/blog/' + str(post_id))
    return HttpResponse('Invalid access.')
|
[
"maxlevel1324@naver.com"
] |
maxlevel1324@naver.com
|
71475e313cdac46415bf31dbe3f90cf158e2b1bb
|
6b5bbdbc3e6f54b312738103b8c348a05b8f81db
|
/Code/calibration.py
|
e606912ded5d664deb0ed1f26928680ea689b3a4
|
[
"MIT"
] |
permissive
|
irvingliao/CarND-Advanced-Lane-Lines
|
39ec388828391b19c979bb4cc78eb641b6638dcf
|
f9d136e0440285efb564d41c05c1346a2cd7c5c5
|
refs/heads/master
| 2020-04-12T18:44:07.744844
| 2018-12-27T08:34:11
| 2018-12-27T08:34:11
| 162,688,995
| 0
| 0
|
MIT
| 2018-12-21T08:39:36
| 2018-12-21T08:39:36
| null |
UTF-8
|
Python
| false
| false
| 2,673
|
py
|
#%% [markdown]
# ## First, I'll compute the camera calibration using chessboard images
#%%
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from IPython import get_ipython
#%%
get_ipython().run_line_magic('matplotlib', 'qt')
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
cv2.imshow('img',img)
cv2.waitKey(500)
cv2.destroyAllWindows()
#%% [markdown]
# ## Undistort a test image
#%%
import pickle
get_ipython().run_line_magic('matplotlib', 'inline')
# Test undistortion on an image
img = cv2.imread('../camera_cal/calibration1.jpg')
img_size = (img.shape[1], img.shape[0])
# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
pickle.dump( dist_pickle, open( "../camera_cal/dist_pickle.p", "wb" ) )
dst = cv2.undistort(img, mtx, dist, None, mtx)
# Visualize undistortion
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(dst)
ax2.set_title('Undistorted Image', fontsize=30)
#%%
import pickle
import re
# Apply Distortion Correction to test images
test_imgs = glob.glob('../test_images/*.jpg')
pattern = re.compile('/test_images/(.*).jpg')
dist_pickle = pickle.load( open( "../camera_cal/dist_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
for fname in test_imgs:
image = cv2.imread(fname)
name = pattern.search(fname).group(1)
path = '../test_images/' + name + '_undist.jpg'
test_dst = cv2.undistort(image, mtx, dist, None, mtx)
cv2.imwrite(path, test_dst)
|
[
"irvingliao@gmail.com"
] |
irvingliao@gmail.com
|
1674fe95e66f1a1515e0f9f2041ce6e82087a29a
|
2537756cb251008eacd247db82a26c43967e1381
|
/sdk/python/test_data/pipelines/pipeline_with_importer_and_gcpc_types.py
|
98431d362cbd686875944fc8fdff87db3846f011
|
[
"Apache-2.0"
] |
permissive
|
TheMichaelHu/pipelines
|
074edf3b08e2877a996061bacdf4b9563270cc5b
|
4c6abe5afcc1b0511f0d0ab91b7ec4522da5a1df
|
refs/heads/master
| 2022-12-10T18:34:08.540736
| 2022-11-02T00:31:45
| 2022-11-02T00:31:45
| 241,268,119
| 0
| 0
|
Apache-2.0
| 2020-02-18T04:00:31
| 2020-02-18T04:00:30
| null |
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
# Copyright 2022 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline using dsl.importer and GCPC types."""
from kfp import compiler
from kfp import components
from kfp import dsl
from kfp.dsl import importer
class VertexDataset(dsl.Artifact):
"""An artifact representing a GCPC Vertex Dataset."""
schema_title = 'google.VertexDataset'
consumer_op = components.load_component_from_text("""
name: consumer_op
inputs:
- {name: dataset, type: google.VertexDataset}
implementation:
container:
image: dummy
command:
- cmd
args:
- {inputPath: dataset}
""")
@dsl.pipeline(
name='pipeline-with-importer-and-gcpc-type', pipeline_root='dummy_root')
def my_pipeline():
importer1 = importer(
artifact_uri='gs://ml-pipeline-playground/shakespeare1.txt',
artifact_class=VertexDataset,
reimport=False,
metadata={'key': 'value'})
consume1 = consumer_op(dataset=importer1.output)
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
package_path=__file__.replace('.py', '.yaml'))
|
[
"noreply@github.com"
] |
TheMichaelHu.noreply@github.com
|
dff6ff1934280c768a14f7a374a59512e2cf35bb
|
c7f4cc258c3f18a80bd94378bc826b59d0b6bb1d
|
/python/modules/argparse/test.py
|
c13d50c6a84db48a0ab3a7719ff84104042c66df
|
[] |
no_license
|
CoptimT/basic
|
92799e48e5ac264b4af61bc65041320d9c928489
|
e98a9bd3efcaf9cccb6514996c0318a833ae7b06
|
refs/heads/master
| 2020-03-14T22:56:05.207429
| 2018-08-01T09:53:02
| 2018-08-01T09:53:02
| 131,832,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
#!/bin/env python
# -*- coding: UTF-8 -*-
import argparse
def test1():
parser = argparse.ArgumentParser()
parser.add_argument('echo')
args = parser.parse_args()
print(args)
print(args.echo)
# > python test.py ofo
# Namespace(echo='ofo')
# ofo
def test2():
parser = argparse.ArgumentParser(description = 'this is a description')
parser.add_argument('--ver', '-v', action = 'store_true', help = 'hahaha')
    # parse_args stores the options on the args namespace as label-value pairs
args = parser.parse_args()
print(args)
if args.ver:
print("Ture")
else:
print("False")
# > python test.py -v
# > python test.py --ver
# Namespace(ver=True)
# True
# > python test.py -h
# usage: test.py [-h] [--ver]
# this is a description
# optional arguments:
# -h, --help show this help message and exit
# --ver, -v hahaha
def test3():
parser = argparse.ArgumentParser(description = 'this is a description')
parser.add_argument('--ver', '-v', required = True, type = int)
args = parser.parse_args()
print(args)
if args.ver:
print("Ture")
else:
print("False")
# > python test.py -v
# usage: test.py [-h] --ver VER
# test.py: error: argument --ver/-v: expected one argument
# > python test.py --ver 10
# Namespace(ver=10)
# True
if __name__ == "__main__":
test3()
|
[
"zhangxw17@lenovo.com"
] |
zhangxw17@lenovo.com
|
6c9607cf91f8f75cb437f6ebc6042a0b2bc5cbd7
|
3c124cd9758886b331e6dd1ee146ac3ab0b369a4
|
/period3/32414845dailyahead/XGB.py
|
c665eafed23d815ba0acdece459166289b61d755
|
[] |
no_license
|
WzcTHU/LMP_Forecast
|
3ff109fb4da402cfd7d093d9ee13586e4082497a
|
c5794cfef0b3c778e35502aa5ddbaac1446e65b2
|
refs/heads/master
| 2020-05-17T01:30:12.789552
| 2019-04-25T13:00:39
| 2019-04-25T13:00:39
| 183,426,832
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
from DataStandardScaler import *
from DataCut import *
from SummaryResults import *
from xgboost import XGBRegressor
from sklearn.externals import joblib
import scipy.io as sio
print('Cutting dataset...')
data = DataCut('data/x.csv', 'data/y.csv')
data.cut()
print('Data standardizating...')
data_scaler = DataStandardScaler(data.train_xset, data.train_yset,
data.validation_xset, data.validation_yset)
print('XGB training...')
# regressor = XGBRegressor(n_estimators=100)
regressor = XGBRegressor()
regressor.fit(data_scaler.x_train_standard, data_scaler.y_train_standard.ravel())
joblib.dump(regressor, 'models/xgb_model.m')
y_fore_train = regressor.predict(data_scaler.x_train_standard)
y_fore_validation = regressor.predict(data_scaler.x_validation_standard)
data_scaler.reverse_trans(y_fore_train, y_fore_validation)
print('Getting results...')
sum_res_train = SummaryResults(data.train_yset, data_scaler.rev_y_train)
sum_res_validation = SummaryResults(data.validation_yset, data_scaler.rev_y_validation)
sio.savemat('ForecastResult/Validation/XGB.mat', {'XGBfore': data_scaler.rev_y_validation})
sum_res_train.get()
sum_res_validation.get()
res_list = sum_res_validation.cal_residual()
sio.savemat('res/XGBres.mat', {'XGB_res': res_list})
print(sum_res_validation.cal_variance())
|
[
"zhengche16@mails.tsinghua.edu.cn"
] |
zhengche16@mails.tsinghua.edu.cn
|