| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
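Each record below follows this schema, one field per line with `|` separators. A minimal sketch of filtering such records with pandas, assuming the rows were exported to a hypothetical `rows.parquet` file:

import pandas as pd

df = pd.read_parquet("rows.parquet")  # hypothetical export of the records shown below
python_rows = df[(df["language"] == "Python") & ~df["is_vendor"] & ~df["is_generated"]]
print(python_rows[["repo_name", "path", "length_bytes", "license_type"]].head())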
a827c73237bc593d7776fbc61aafe15c97776895
|
28b0f19a30aae14925d24a55d0dbe91da8fb4f44
|
/Main.py
|
efe5bf0ce7dbbb33e7b2fe35c6ebaa44b1eae7f5
|
[] |
no_license
|
rowan-maclachlan/IPDSDGA
|
c5b342ba86c25018e968c67a85d3a1cff6b1f745
|
c91d02e52c8aa1217e3ae4cd54001a6c213d439d
|
refs/heads/master
| 2021-06-16T15:43:54.423167
| 2017-01-08T05:28:15
| 2017-01-08T05:28:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,782
|
py
|
import Cell
import Position as ps
GENERATIONS = 2
SIMULATION_STEPS = 8
def get_largest(cells):
    return cells[len(cells)-1], cells[len(cells)-2]
if __name__ == "__main__":
    # Print and recombinate tests
    cellID = 0
    cell_a = Cell.Cell(cellID, ps.Position(0, cellID))
    cellID += 1
    cell_b = Cell.Cell(cellID, ps.Position(0, cellID))
    cellID += 1
    cell_c = Cell.Cell(cellID, ps.Position(0, cellID))
    cellID += 1
    cell_d = Cell.Cell(cellID, ps.Position(0, cellID))
    cellID += 1
    cell_e = Cell.Cell(cellID, ps.Position(0, cellID), cell_a, cell_b)
    cellID += 1
    cell_f = Cell.Cell(cellID, ps.Position(0, cellID), cell_c, cell_d)
    cellID += 1
    # Interaction Tests
    allCells = [cell_a, cell_b, cell_c, cell_d, cell_e, cell_f]
    avg_def = 0
    initial_move_percent = 0
    totalScore = 0
    for cell in allCells:
        avg_def += cell.get_gene().get_defect_fraction()
        if 'd' == cell.get_gene().get_choice_at(1):
            initial_move_percent += 1
        totalScore += cell._score
    if not 0 == len(allCells):
        print("\nAverage %defect: " + str(avg_def / len(allCells)))
        print("Initial move %defect: " + str(float(initial_move_percent) / float(len(allCells))))
        print("Average score: " + str(float(totalScore) / float(len(allCells))))
    for i in range(GENERATIONS):
        for cell in allCells:
            cell.reset_score()
        for x in range(SIMULATION_STEPS):
            for cell in allCells:
                cell.clear_interactions()
            for cell in allCells:
                cell.interact(allCells)
        for cell in allCells:
            if cell.is_dead():
                allCells.remove(cell)
        allCells.sort(key=lambda c: c._score)
        allCells.remove(allCells[0])
        best_cell_a, best_cell_b = get_largest(allCells)
        allCells.append(Cell.Cell(cellID, cellID, best_cell_a, best_cell_b))
        cellID += 1
        avg_def = 0
        initial_move_percent = 0
        totalScore = 0
        for cell in allCells:
            avg_def += cell.get_gene().get_defect_fraction()
            if 'd' == cell.get_gene().get_choice_at(1):
                initial_move_percent += 1
            totalScore += cell._score
        if not 0 == len(allCells):
            print("\nAverage %defect: " + str(avg_def/len(allCells)))
            print("Initial move %defect: " + str(float(initial_move_percent)/float(len(allCells))))
            print("Average score: " + str(float(totalScore)/float(len(allCells))))
    allCells.sort(key=lambda c: c._score)
    cell_1, cell_2 = get_largest(allCells)
    print("\nBest Cells: \n")
    print("a:" + str(cell_1))
    print("b: " + str(cell_2))
    allCells.sort(key=lambda c: c._score)
    for cell in allCells:
        print(str(cell))
|
[
"rdm695@mail.usask.ca"
] |
rdm695@mail.usask.ca
|
e93926a3af6d7c201d8c06ab19c7dc14984d1529
|
e43bf421edc060d5b3767adf2826cfd71472442c
|
/Python_function_challenges.py
|
bc0e74ef49efabe4cd609a2e14b07fd3ec0c29aa
|
[] |
no_license
|
NatrezC/Unit-4_deliverables
|
dc8e4e2e253354d3bbf66710263dfe656db01a18
|
b3050b940077d956f0c08f758f67f030364ded6c
|
refs/heads/main
| 2023-03-01T00:37:07.011022
| 2021-01-17T06:20:19
| 2021-01-17T06:20:19
| 330,251,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
#1 Write a function named sum_to() that takes a number parameter n and returns the sum of the numbers from 1 to n. For example:
def sum_to(n):
    sum = 0
    for i in range(1 + n):
        sum += i
    print(sum)
sum_to(6)
sum_to(10)
#2 Write a function named largest() that takes a list parameter and returns the largest element in that list. You can assume the list contents are all positive numbers. For example:
num_list_1 = [10, 4, 2, 231, 91, 54]
# largest = max(num_list_1)
# print(largest)
num_list_2 = [1,2,3,4,0]
# largest_two = max(num_list_2)
# print(largest_two)
def largest(num_list_1):
    largest_num = 0
    for num in num_list_1:
        if num > largest_num:
            largest_num = num
    print(largest_num)
largest(num_list_1)
def largest_two(num_list_2):
    largest_num = 0
    for num in num_list_2:
        if num > largest_num:
            largest_num = num
    print(largest_num)
largest_two(num_list_2)
#3 Write a function named occurances() that takes two string parameters and counts the number of occurrances of the second string inside the first string.
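# The exercise above is left unimplemented in this file; a minimal sketch of the
# described helper (name taken from the comment, overlapping matches counted) could be:
# def occurances(big, small):
#     count = 0
#     for i in range(len(big) - len(small) + 1):
#         if big[i:i + len(small)] == small:
#             count += 1
#     return count
# print(occurances("banana", "an"))  # 2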
#Write a function named product() that takes an arbitrary number of parameters, multiplies them all together, and returns the product. (HINT: Review your notes on *args).
numbers = [1,2,3,4,5]
def product(numbers):
    product = 1
    for number in numbers:
        product *= number
    print(product)
product(numbers)
|
[
"cnatrez@gmail.com"
] |
cnatrez@gmail.com
|
fbf23f0b0fd82074e65b27b87e5b411567df6d1c
|
0446602db0d7d65f19c2488b4379e8477890057c
|
/prac_02/string_formatting_examples.py
|
8fe96be07def54052843b92cb1ad09dbe0a47992
|
[] |
no_license
|
zacgilby/cp1404practicals
|
9b57966e62bded312230a29b3c070e78af43537f
|
6d0455c8ef89a60ae7e1c0e869b46e0da26084b9
|
refs/heads/master
| 2020-03-26T23:04:05.241861
| 2018-09-11T05:40:48
| 2018-09-11T05:40:48
| 145,507,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
"""
CP1404/CP5632 - Practical
Various examples of using Python string formatting with the str.format() method
Want to read more about it? https://docs.python.org/3/library/string.html#formatstrings
"""
name = "Gibson L-5 CES"
year = 1922
cost = 16035.40
numbers = [0, 50, 100]
for i in range(len(numbers)):
print("{:>3}".format(numbers[i]))
# The ‘old’ manual way to format text with string concatenation:
# print("My guitar: " + name + ", first made in " + str(year))
# A better way - using str.format():
# print("My guitar: {}, first made in {}".format(name, year))
# print("My guitar: {0}, first made in {1}".format(name, year))
# print("My {0} was first made in {1} (that's right, {1}!)".format(name, year))
# Formatting currency (grouping with comma, 2 decimal places):
# print("My {} would cost ${:,.2f}".format(name, cost))
# Aligning columns:
# numbers = [1, 19, 123, 456, -25]
# for i in range(len(numbers)):
# print("Number {0} is {1:>5}".format(i + 1, numbers[i]))
# Another (nicer) version of the above loop using the enumerate function
# for i, number in enumerate(numbers):
# print("Number {0} is {1:>5}".format(i + 1, number))
|
[
"zachary.gilby@my.jcu.edu.au"
] |
zachary.gilby@my.jcu.edu.au
|
02704e22f9b22b1a65a44d8ae9eb70844c824f17
|
5a75799f34488b263c9d1587578ada907e40f5bc
|
/mytest/socket/single/single_s_socket.py
|
ae6ee0d744f9e5b0e70bd4f864165f3e73109f11
|
[] |
no_license
|
zenwuyuan/mytest
|
b4e3f4a67c83e339f163b87dcff8737836daca78
|
838d2791bb75d8bd8b6057bf74c34b14d84bb418
|
refs/heads/master
| 2021-08-16T13:07:53.361691
| 2020-04-03T08:49:35
| 2020-04-03T08:49:35
| 146,848,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
#!/usr/bin/env python3
from socket import *
from time import ctime
HOST = '127.0.0.1'
PORT = 8080
BUFSIZ = 1024
ADDR = (HOST,PORT)
tcpSerSock = socket(AF_INET,SOCK_STREAM)
tcpSerSock.bind(ADDR)
tcpSerSock.listen(2)
while True:
    print('Waiting for connection...')
    tcpCliSock,addr = tcpSerSock.accept()
    print('...connect from :',addr)
    while True:
        data = tcpCliSock.recv(BUFSIZ).decode()
        print('server_data :',data)
        if not data:
            break
        tcpCliSock.send(('[%s] %s' %(ctime(),data)).encode())
    tcpCliSock.close()
tcpSerSock.close()
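# A matching client-side sketch (hypothetical companion script, not part of this file),
# using the same host, port and buffer size as the server above:
# from socket import *
# tcpCliSock = socket(AF_INET, SOCK_STREAM)
# tcpCliSock.connect(('127.0.0.1', 8080))
# tcpCliSock.send('hello'.encode())
# print(tcpCliSock.recv(1024).decode())  # server echoes back '[<timestamp>] hello'
# tcpCliSock.close()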
|
[
"jf871030@gmail.com"
] |
jf871030@gmail.com
|
9d8dcc8421ab1d253140e90d08e521e68fefc2b6
|
bf75656248b0b0def53807648fb35b658e48412b
|
/examples/Dynamic/TRMM/rise/plot.py
|
65882fe41aba5651e197a1bc23866e5e9cbfb12d
|
[] |
no_license
|
ilhamv/MC-old
|
5942d771c33583f0f8f0f64ff2108b3a0adfd7a3
|
1ef3e89ef147f65dcc7fc0321657fc80b8194ed1
|
refs/heads/master
| 2021-09-14T18:45:21.713131
| 2018-05-17T13:24:02
| 2018-05-17T13:24:02
| 117,119,085
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,374
|
py
|
import h5py
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
f_mc = h5py.File('output.h5', 'r');
f = h5py.File('output_TRMM.h5', 'r');
#===============================================================================
# alpha eigenvalues
#===============================================================================
alpha = np.array(f['alpha']).transpose()[0]
alpha_adj = np.array(f['alpha_adj']).transpose()[0]
N = len(alpha)
J = 6;
G = N - J;
idx = alpha.argsort()
idx_adj = alpha_adj.argsort()
alpha = alpha[idx]
alpha_adj = alpha_adj[idx_adj]
plt.plot(alpha.real,alpha.imag,'o');
plt.plot(alpha_adj.real,alpha_adj.imag,'x');
plt.xlabel("Re");
plt.ylabel("Im");
plt.grid();
plt.show();
#===============================================================================
# eigenvectors
#===============================================================================
phi_mode = np.array(f['phi_mode'])
phi_mode_adj = np.array(f['phi_mode_adj'])
phi_mode = phi_mode[:,idx]
phi_mode_adj = phi_mode_adj[:,idx_adj]
# Inverse speed
v_inv = np.array(f_mc['inverse_speed'])
# Energy bin and lethargy step
energy_grid = np.array(f_mc['TRM_simple/energy'])
energy = energy_grid
du = np.log(energy[-1] / energy[-2])
energy = np.array(energy)*1E-6;
energy = (energy[1:] + energy[:-1])/2;
#===============================================================================
# Verification with TDMC
#===============================================================================
# Initial condition
phi_initial = np.zeros(N)
psi_initial = np.array(f_mc['psi_initial'])
C_initial = np.array(f_mc['C_initial'])
for g in range(G):
    phi_initial[g] = psi_initial[g]
for j in range(J):
    phi_initial[G+j] = C_initial[j]
# Expansion coefficients
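# Each A[i] below is the adjoint-weighted projection of the initial condition onto mode i:
# the numerator sums phi_mode_adj * (1/v) * phi_initial over the G flux groups (plus
# phi_mode_adj * phi_initial over the J precursor groups, which carry no 1/v weight),
# and gamma is the same sum with phi_mode in place of phi_initial as the normalization.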
A = np.zeros(N,dtype=complex)
for i in range(N):
    num = complex(0,0)
    gamma = complex(0,0)
    for g in range(G):
        num = num + phi_mode_adj[g][i] * phi_initial[g] * v_inv[g]
        gamma = gamma + phi_mode_adj[g][i] * v_inv[g] * phi_mode[g][i]
    for g in range(G,G+J):
        num = num + phi_mode_adj[g][i] * phi_initial[g]
        gamma = gamma + phi_mode_adj[g][i] * phi_mode[g][i]
    A[i] = num / gamma
#===============================================================================
# animation
#===============================================================================
energy = np.array(f_mc['TRM_simple/energy'])
energy = np.array(energy)*1E-6;
energy = (energy[1:] + energy[:-1])/2;
time = np.logspace(-9,3,500)
fig = plt.figure()
ax = plt.axes(xlim=(1E-9, 20), ylim=(1E-4, 1E5))
ax.set_xscale('log')
ax.set_yscale('log')
line, = ax.plot([], [], '-', lw=2)
time_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)
plt.xlabel("Energy, MeV");
plt.ylabel("Scalar flux");
def init():
    line.set_data([], [])
    time_text.set_text('')
    return time_text, line
def animate(i):
    phi = np.zeros(N,dtype=complex)
    for g in range(N):
        for n in range(N):
            phi[g] = phi[g] + A[n] * phi_mode[g][n] * np.e**(alpha[n] * time[i])
    phi = phi# / du
    line.set_data(energy, phi[:G])
    time_text.set_text('time = %.9f s' %time[i])
    return time_text, line
inter = 5000 / len(time)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=len(time), interval=inter, blit=True)
plt.show()
|
[
"ilhamv@umich.edu"
] |
ilhamv@umich.edu
|
8b23311b26580c0b5fa81c0651c11cf941b65e96
|
53be839ec30082e9e49e7593ddc5f508466ea413
|
/tests/functional_test/dumb_test.py
|
dca12a75b28ef388229fdf6291b7143570d73409
|
[] |
no_license
|
timmartin19/ripozo-html
|
c0f62fad333f1a25b351eb6f9e4e817f8ebd0542
|
1455723ac1074c8b8081542df46c1797d0169fc4
|
refs/heads/master
| 2021-01-10T02:36:56.984431
| 2016-01-19T06:09:42
| 2016-01-19T06:09:42
| 48,511,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import webbrowser
import unittest2
from ripozo import ResourceBase, restmixins, apimethod
from ripozo_html import HTMLAdapter
class DumbTests(unittest2.TestCase):
    file_name = 'temp.html'
    def tearDown(self):
        # os.remove(self.file_name)
        pass
    def test_simple(self):
        class MyResource(restmixins.ResourceBase):
            pks = 'id',
            @apimethod(methods=['GET'])
            def something(cls, request):
                return cls(properties=dict(id=1, value='It Worked'))
        res = MyResource(properties=dict(id=1, value='It Worked!'))
        adapter = HTMLAdapter(res, base_url='http://localhost:5000/')
        with open(self.file_name, 'w') as html_page:
            html_page.write(adapter.formatted_body)
        resp = webbrowser.open(self.file_name)
        assert False
|
[
"tim.martin@vertical-knowledge.com"
] |
tim.martin@vertical-knowledge.com
|
95ae895102b27133c7b5f434b0a386a15765b914
|
2214f236157cf8fcee24f5d28d81e73eeb43bd9d
|
/movement.py
|
bf84887ff05dd96160810b9e5ab97bbdee5f9bfa
|
[
"MIT"
] |
permissive
|
deadrobots/StackOverBot-17
|
fc867ec13ef111e243ea8993f7624d0975f61d27
|
370bb06131d810338614c4143ccb73c32ba88ff3
|
refs/heads/master
| 2021-06-15T00:16:27.856311
| 2017-04-01T02:17:28
| 2017-04-01T02:17:28
| 83,097,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
from wallaby import *
def driveTimed(left, right, time):
"""
create_drive_direct(left, right)
:rtype: object
"""
create_drive_direct(-right, -left)
msleep(time)
create_drive_direct(0, 0)
def driveTimedStraight(power, time):
"""
create_drive_straight(power)
:rtype: object
"""
create_drive_straight(power)
msleep(time)
create_drive_straight(0)
|
[
"botball@deadrobots.com"
] |
botball@deadrobots.com
|
7363da0178ffb226331388981d853a728cf55149
|
ad75e11f50facf025417979caeb37957b009b38e
|
/pmgsimproapi/api.py
|
76afce57d2bf8fee1d4c8e266a412f9d497b40df
|
[] |
no_license
|
pckmsolutions/pmgsimproapi
|
d429e70904ce0824237cb2e7587f4e57a407136a
|
8d43aff5237a422a0f83ef825d12ab56641cdc50
|
refs/heads/main
| 2023-06-09T16:17:51.319354
| 2021-06-19T16:21:06
| 2021-06-19T16:21:06
| 258,010,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,871
|
py
|
from collections import namedtuple
from datetime import timezone
from logging import getLogger
from functools import wraps
from pmgaiorest import ApiBase
from typing import Optional, Dict
from logging import getLogger
logger = getLogger(__name__)
Page = namedtuple('Page', 'items page_number number_of_pages total_count')
DEFAULT_PAGE_SIZE = 50
logger = getLogger(__name__)
def params_add_columns(*columns, params:Optional[Dict]=None):
if not params:
params = {}
params['columns'] = ','.join(columns)
return params
class SimProApi(ApiBase):
def __init__(self, aiohttp_session, base_url, header_args,
handle_reconnect=None):
super().__init__(aiohttp_session,
base_url,
header_args=header_args,
handle_reconnect=handle_reconnect)
# Setup
async def get_setup_project_custom_fields(self,*,
name: Optional[str] = None,
show_for_leads: Optional[bool] = None,
show_for_quotes: Optional[bool] = None,
show_for_jobs: Optional[bool] = None,
show_for_recurring: Optional[bool] = None,
):
params={}
if name is not None:
params['Name'] = name
def add_show_for(subtype, show):
if show is not None:
params[f'ShowFor.{subtype}'] = 'true' if show else 'false'
add_show_for('Leads', show_for_leads)
add_show_for('Quotes', show_for_quotes)
add_show_for('Jobs', show_for_jobs)
add_show_for('Recurring', show_for_recurring)
return await self.get(f'setup/customFields/projects/',
params=params)
async def create_setup_project_custom_fields(self, *,
name: str,
type: Optional[str] = "Text",
show_for_leads: Optional[bool] = False,
show_for_quotes: Optional[bool] = False,
show_for_jobs: Optional[bool] = False,
show_for_recurring: Optional[bool] = False,
is_mandatory: Optional[bool] = False,
):
return await self.post(f'setup/customFields/projects/', json={
"Name": name,
"Type": type,
"IsMandatory": is_mandatory,
"ShowFor": {
"Leads": show_for_leads,
"Quotes": show_for_quotes,
"Jobs": show_for_jobs,
"Recurring": show_for_recurring
}
})
# Invoices
async def get_invoice_pages(self, *,
page_size=None, params=None, modified_since=None):
async for page in self._get_pages(
self.get_invoice_page,
page_size=page_size,
params=params,
modified_since=modified_since):
yield page
async def get_invoice_page(self, *,
page_number=1, page_size=None, params=None, modified_since=None):
return await self._get_page('customerInvoices/',
page_number, page_size, params, modified_since)
# sites
async def get_site(self, site_id):
return await self.get(f'sites/{site_id}')
# Prebuilds
async def get_prebuild_group_pages(self, *,
page_size=None, params=None, modified_since=None):
async for page in self._get_pages(
self.get_prebuild_group_page,
page_size=page_size,
params=params,
modified_since=modified_since):
yield page
async def get_prebuild_group_page(self, *,
page_number=1, page_size=None, params=None, modified_since=None):
return await self._get_page('prebuildGroups/',
page_number, page_size, params, modified_since)
async def get_prebuild_group(self, *, name: Optional[str] = None,
parent_id: Optional[int] = None):
assert name is not None
params={'Name': name}
if parent_id is not None:
params['ParentGroup.ID'] = parent_id
prebuild_page = await self.get_prebuild_group_page(params=params)
if prebuild_page.total_count > 1:
logger.error('Got multiple prebuild groups')
return None
if prebuild_page.total_count < 1:
logger.error('Prebuild group not found')
return None
return prebuild_page.items[0]
async def create_prebuild_group(self, *, name, parent_id):
return await self.post('prebuildGroups/', json={
'Name': name,
'ParentGroup': parent_id,
})
async def get_prebuild_std_price_pages(self, *,
page_size=None, params=None, modified_since=None,
group_id: Optional[int] = None):
if params is None:
params = {}
if group_id is not None:
params['Group.ID'] = group_id
async for page in self._get_pages(
self.get_prebuild_std_price_page,
page_size=page_size,
params=params,
modified_since=modified_since):
yield page
async def get_prebuild_std_price_page(self, *,
page_number=1, page_size=None, params=None, modified_since=None):
return await self._get_page('prebuilds/standardPrice/',
page_number, page_size, params, modified_since)
async def get_prebuild_std_price(self, *,
prebuild_id: Optional[int] = None,
part_no: Optional[str] = None,
group_id: Optional[int] = None,
params: Optional[Dict] = None):
if prebuild_id is not None:
return await self.get(f'prebuilds/standardPrice/{prebuild_id}', params=params)
assert part_no is not None
if params is None:
params = {}
params['PartNo'] = part_no
if group_id is not None:
params['Group.ID'] = group_id
return await self.get('prebuilds/standardPrice/', params=params)
async def create_prebuild_std_price(self, *,
group_id, part_no, name, description):
return await self.post('prebuilds/standardPrice/', json={
'Group': group_id,
'PartNo': part_no,
'Name': name,
'Description': description,
})
async def update_prebuild_std_price(self, prebuild_id:int, *,
group_id: Optional[int] = None,
part_no: Optional[str] = None,
name: Optional[str] = None,
description: Optional[str] = None,
total_ex: Optional[float] = None):
json = {}
def add_setter(name, val):
if val is not None:
json[name] = val
add_setter('Group', group_id)
add_setter('PartNo', part_no)
add_setter('Name', name)
add_setter('Description', description)
add_setter('TotalEx', total_ex)
return await self.patch(f'prebuilds/standardPrice/{prebuild_id}', json=json)
async def get_prebuild_catalogs(self, prebuild_id:int):
return await self.get(f'prebuilds/{prebuild_id}/catalogs/')
async def create_prebuild_catalog(self, prebuild_id:int, *,
catalog_id, quantity):
return await self.post(f'prebuilds/{prebuild_id}/catalogs/', json={
'Catalog': catalog_id,
'Quantity': quantity,
})
async def del_prebuild_catalog(self, prebuild_id:int, catalog_id:int):
return await self.delete(f'prebuilds/{prebuild_id}/catalogs/{catalog_id}')
async def get_prebuild_attachments(self, prebuild_id:int):
return await self.get(f'prebuilds/{prebuild_id}/attachments/files/')
async def del_prebuild_attachment(self, prebuild_id:int, attachment_id:int):
return await self.delete(f'prebuilds/{prebuild_id}/attachments/files/{attachment_id}')
async def add_prebuild_attachment(self, prebuild_id:int, *, name, content, default):
return await self.post(f'prebuilds/{prebuild_id}/attachments/files/', json={
'Filename': name,
'Base64Data': content,
'Default': default
})
# Catalog
async def get_catalog_pages(self, *,
page_size=None, params=None, modified_since=None):
async for page in self._get_pages(
self.get_catalog_page,
page_size=page_size,
params=params,
modified_since=modified_since):
yield page
async def get_catalog_page(self, *,
page_number=1, page_size=None, params=None, modified_since=None):
return await self._get_page('catalogs/',
page_number, page_size, params, modified_since)
async def get_catalog(self, *, part_no:str, params=None):
params = params or {}
params['PartNo'] = part_no
catalogs = await self.get(f'catalogs/', params=params)
if len(catalogs) > 1:
logger.error('Got multiple catalogs for part no %s', part_no)
return None
if len(catalogs) < 1:
logger.error('Catalog part no %s not found', part_no)
return None
return catalogs[0]
async def update_catalog(self, *, catalog_id: int,
estimated_time: Optional[int] = None):
json = {}
def add_setter(name, val):
if val is not None:
json[name] = val
add_setter('EstimatedTime', estimated_time)
if not json:
logger.error('Required at least 1 field to update catalog' +
'(catalog_id: %d)', catalog_id)
return None
return await self.patch(f'catalogs/{catalog_id}', json=json)
# Quotes
async def get_quote_pages(self, *,
page_size=None, params=None, modified_since=None):
async for page in self._get_pages(
self.get_quote_page,
page_size=page_size,
params=params,
modified_since=modified_since):
yield page
async def get_quote_page(self, *,
page_number=1, page_size=None, params=None, modified_since=None):
return await self._get_page('quotes/',
page_number, page_size, params, modified_since)
async def get_quote_timeline(self, quote_id: int, *, part_no:str):
return await self.get(f'quotes/{quote_id}/timelines/')
# Leads
async def get_lead_pages(self, *,
page_size=None, params=None, modified_since=None):
async for page in self._get_pages(
self.get_lead_page,
page_size=page_size,
params=params,
modified_since=modified_since):
yield page
async def get_lead_page(self, *,
page_number=1, page_size=None, params=None, modified_since=None):
return await self._get_page('leads/',
page_number, page_size, params, modified_since)
async def get_lead(self, lead_id: int):
return await self.get(f'leads/{lead_id}')
async def get_lead_custom_fields(self, lead_id):
return await self.get(f'leads/{lead_id}/customFields/')
async def get_lead_custom_field(self, lead_id, custom_field_id):
return await self.get(f'leads/{lead_id}/customFields/{custom_field_id}')
async def update_lead_custom_field(self,
lead_id: int,
custom_field_id: int,
value: str):
return await self.patch(
f'leads/{lead_id}/customFields/{custom_field_id}',
json={
"Value": value
}
)
# Utilities
async def _get_pages(self, page_callable, *,
page_size=None, params=None, modified_since=None):
page_number = 1
while True:
page = await page_callable(
page_number=page_number,
page_size=page_size,
params=params,
modified_since=modified_since)
yield page
page_number += 1
if page_number >= page.number_of_pages:
break
async def _get_page(self, path, page_number, page_size, params, modified_since):
params = params or {}
params['page'] = page_number or 1
params['pageSize'] = page_size or DEFAULT_PAGE_SIZE
in_headers = {}
if modified_since is not None:
mod_time = modified_since.astimezone(tz=timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT')
in_headers = {'If-Modified-Since': mod_time}
json, headers = await self.get_with_headers(path, params=params, headers=in_headers)
return Page(items=json, page_number=page_number, number_of_pages=int(headers['Result-Pages']),
total_count=int(headers['Result-Total']))
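# Minimal usage sketch for the client above (illustrative only; the base URL and the
# contents of header_args are assumptions, not defined in this module):
# import asyncio
# import aiohttp
# async def dump_invoices(header_args):
#     async with aiohttp.ClientSession() as session:
#         api = SimProApi(session, 'https://example.simprosuite.com/api/v1.0', header_args)
#         async for page in api.get_invoice_pages(page_size=DEFAULT_PAGE_SIZE):
#             print(page.page_number, page.total_count)
# asyncio.run(dump_invoices({...}))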
|
[
"possemeeg@gmail.com"
] |
possemeeg@gmail.com
|
28a47d6d8040b10d7b3ffebe51e2276eb44c4cec
|
9e7239bd96c4ca1b691d487817cd3e341feb5b54
|
/NTWebsite/migrations/0056_auto_20190107_0922.py
|
894535e8891ce1012b8ca1ca7eaf05e5f0cd75d4
|
[] |
no_license
|
mw8888/NagetiveWebsite-Django
|
50e7e3fe05fae4361f26cf0474e0edabb52e0e5c
|
8689dbfc7a5e6447965d9e3189332dd237c91a13
|
refs/heads/master
| 2020-05-02T17:32:04.098752
| 2019-01-08T11:54:10
| 2019-01-08T11:54:10
| 178,101,720
| 2
| 0
| null | 2019-03-28T01:22:35
| 2019-03-28T01:22:34
| null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
# Generated by Django 2.0.6 on 2019-01-07 01:22
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('NTWebsite', '0055_auto_20190106_1539'),
    ]
    operations = [
        migrations.AlterField(
            model_name='configparams',
            name='CP_ReadsThreshold',
            field=models.CharField(default=10, max_length=20, verbose_name='阅读量阈值'),
        ),
    ]
|
[
"616604060@qq.com"
] |
616604060@qq.com
|
6b3badfcb176a00aae87424890ce4d05493ca53e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2194/60749/255882.py
|
53f47cca7bd3d09ab0df4305df9ac8394bd7b7f2
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
n=int(input())
res=[[] for _ in range(n)]
for t in range(n):
    res[t]=list(map(int,input().split(" ")))
def ifprime(num):
    if num==1:
        return False
    for t in range(2,num):
        if num%t==0:
            return False
    return True
for h in res:
    temp=[]
    str1=""
    for t in range(h[0],h[1]+1):
        if ifprime(t):
            temp.append(t)
    for m in temp:
        str1=str1+str(m)+" "
    print(str1)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
cb348de2589f2f6f5936eee2a08fe78369f68e15
|
e6bc1f55371786dad70313eb468a3ccf6000edaf
|
/Datasets/the-minion-game/Correct/061.py
|
3dd4ff6cd83b958383f9bb0e76bf1c2a98c94fa3
|
[] |
no_license
|
prateksha/Source-Code-Similarity-Measurement
|
9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0
|
fb371b837917794d260a219a1ca09c46a5b15962
|
refs/heads/master
| 2023-01-04T07:49:25.138827
| 2020-10-25T14:43:57
| 2020-10-25T14:43:57
| 285,744,963
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
string = input()
Stuart = 0
Kevin = 0
strlen = 1
vowels = ("a","e","i","o","u")
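# Scoring logic for the loop below: every substring starting at index i contributes
# (len(string) - i) substrings; vowel starts score for Kevin, consonant starts for Stuart.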
for i in range(len(string)):
    if string[i].lower() in vowels:
        Kevin += (len(string)-i)
    else:
        Stuart += (len(string)-i)
if Stuart > Kevin:
    print ("Stuart "+str(Stuart))
elif Stuart < Kevin:
    print ("Kevin "+str(Kevin))
else:
    print ("Draw")
|
[
"pratekshau@gmail.com"
] |
pratekshau@gmail.com
|
ee4e7d3975d86a7ba8a5bc918e35d6d57a44484e
|
5945ccbb9302da14c770f01977eb353456841f32
|
/pydicom_attempt.py
|
a97f6570a93b61bc10af001e348bd2eadb9e8597
|
[] |
no_license
|
QTIM-Lab/Processing_DICOMS_in_couch
|
8604d20c9347135ef6c3e68cabf09eb153c361de
|
f0217a993e3c18e0e520aea02600613b91f8cee5
|
refs/heads/main
| 2023-07-13T05:07:31.343803
| 2021-08-18T03:19:14
| 2021-08-18T03:19:14
| 397,455,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,036
|
py
|
import pydicom, sys, pdb, os, string, re
# pip install dicom or pydicom
from pydicom.data import get_testdata_file
from pydicom import dcmread
import pandas as pd, numpy as np
import multiprocessing, time
fpath = get_testdata_file("CT_small.dcm")
# pd.set_option('display.min_rows', 5) # How many to show
pd.set_option('display.max_rows', 50) # How many to show
pd.set_option('display.width', 200) # How far across the screen
pd.set_option('display.max_colwidth', 10) # Column width in px
pd.set_option('expand_frame_repr', True) # allows for the representation of dataframes to stretch across pages, wrapped over the full column vs row-wise
dicoms = [
"/home_dir/JK/mnt/SeriesNormalizationMCRP/40162299/E17330600/1.2.840.113619.2.312.4120.14254601.14065.1610713757.133/MR.1.2.840.113619.2.312.4120.14254601.13415.1610713779.100.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/1175480/E16862548/1.2.392.200036.9123.100.12.12.22252.90201013131016088690251964/MR.1.2.392.200036.9123.100.12.12.22252.90201013135255097415729871.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/1175480/E16862548/1.2.392.200036.9123.100.12.12.22252.90201013131628089958241481/MR.1.2.392.200036.9123.100.12.12.22252.90201013133037092854731045.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/00203141/E17497993/1.3.12.2.1107.5.2.41.69565.30000020110319572670700004474/SRe.1.3.12.2.1107.5.2.41.69565.30000020110319572670700004503.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/00447321/E17522096/1.3.12.2.1107.5.2.41.69565.30000020110319572670700001434/SRe.1.3.12.2.1107.5.2.41.69565.30000020110319572670700001459.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/00908460/E17347683/1.3.12.2.1107.5.2.41.69565.30000020110319572670700000636/SRe.1.3.12.2.1107.5.2.41.69565.30000020110319572670700000659.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/1175480/E16862548/1.2.392.200036.9123.100.12.12.22252.90201013131015088688889787/MR.1.2.392.200036.9123.100.12.12.22252.90201013131106088861492683.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/1528184/E16452256/1.2.392.200036.9123.100.12.12.22252.90210209154628414348308243/MR.1.2.392.200036.9123.100.12.12.22252.90210209163244423813452088.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/3332344/E17134087/1.3.12.2.1107.5.2.43.167009.2020100416341770716921488.0.0.0/MR.1.3.12.2.1107.5.2.43.167009.2020100416341816286921505.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/3332344/E17134087/1.3.12.2.1107.5.2.43.167009.2020100416305292834519928.0.0.0/MR.1.3.12.2.1107.5.2.43.167009.202010041633269156320549.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/20097507/E16942169/1.3.12.2.1107.5.2.36.40168.2021012110282074519524686.0.0.0/MR.1.3.12.2.1107.5.2.36.40168.2021012110282347055524867.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/26303339/E17137309/1.3.12.2.1107.5.2.36.40291.2020101315063630311024751.0.0.0/MR.1.3.12.2.1107.5.2.36.40291.2020101315063625119124750.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/26303339/E17137309/1.3.12.2.1107.5.2.36.40291.2020101315051384634324649.0.0.0/MR.1.3.12.2.1107.5.2.36.40291.20201013150540722824741.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/41324419/E17179902/1.3.12.2.1107.5.2.36.40291.2020100807114214639557319.0.0.0/MR.1.3.12.2.1107.5.2.36.40291.2020100807151383698759658.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/41324419/E17179902/1.3.12.2.1107.5.2.36.40291.2020100807114214639657320.0.0.0/MR.1.3.12.2.1107.5.2.36.40291.2020100807151393979959696.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/23992498/E16898511/1.2.840.113619.2.80.0.5682.1601563957.1.13.2/MR.1.2.840.113619.2.80.0.5682.1601563957.19.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/01617612/E16987773/1.3.12.2.1107.5.2.19.45306.2021011111101851354849202.0.0.0/MR.1.3.12.2.1107.5.2.19.45306.202101111113388812554466.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/20097507/E16942169/1.3.12.2.1107.5.2.36.40168.2021012110282074519524686.0.0.0/MR.1.3.12.2.1107.5.2.36.40168.2021012110282347055524867.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/2751554/E17361897/1.2.840.113619.2.5.19231919171116054435021605443502805000/MR.1.2.840.113619.2.311.100196653089223718141359203512546696271.dcm",
# "/home_dir/JK/mnt/SeriesNormalizationMCRP/01220044/E17038400/1.3.12.2.1107.5.2.41.169571.202010041023578162338990.0.0.0/MR.1.3.12.2.1107.5.2.41.169571.2020100410241583705439340.dcm",
]
# files = pd.read_csv("private_tags_0_11.csv")
# files = pd.read_csv("csvs/june/edited_with_mishkas_script/private_tags_1000000_1250000.csv")
# dicoms from csv:
# files = pd.read_csv("csvs/apr/all_dicoms_100000.csv")
files = pd.read_csv("csvs/apr/all_dicoms.csv")
# pdb.set_trace()
# files = files[files["fileNamePath"].str.slice(0,5) == '../JK']
# files[files['fileNamePath'].str.len() < 100].to_csv("deleteme.csv", header=True, index=None)
# dicoms = [i.replace("../","/home_dir/") for i in files['fileNamePath']]
dicoms = [i.replace("../","/home_dir/") for i in files[~pd.isna(files['fileNamePath'])]['fileNamePath']]
# # print("done reading")
# pdb.set_trace()
# files.loc[files['fileNamePath'] == "../JK/mnt/SeriesNormalizationMCRP/4115931/E17154036/1.2.276.0.45.1.7.3.83916715583278.20100909260300023.25318/PSg.1.2.276.0.45.1.7.4.83916715583278.20100909260300024.25318.dcm"]
tag_key_original = {
'EchoTime':'00180081',
'InversionTime':'00180091',
'EchoTrainLength':'00180082',
'RepetitionTime':'00180080',
'TriggerTime':'00181060',
'SequenceVariant':'00180021',
'ScanOptions':'00180022',
'ScanningSequence':'00180020',
'MRAcquisitionType':'00180023',
'ImageType':'00180008',
'PixelSpacing':'00280030',
'SliceThickness':'00180050',
'PhotometricInterpretation':'00280004',
'ContrastBolusAgent':'00180010',
'Modality':'00180060',
'SeriesDescription':'0008103E'
}
tag_key = {
'StudyInstanceUID':'0020000D',
'SeriesInstanceUID':'0020000E',
'SOPInstanceUID':'00080018',
'PatientID':'00100020', #MRN
'AccessionNumber':'00080050',
'SequenceName':'00180024',
'ImageComments':'00204000',
'ProtocolName':'00181030',
'ImagesInAcquisition':'00201002',
'EchoTime':'00180081',
'InversionTime':'00180082',
'EchoTrainLength':'00180091',
'RepetitionTime':'00180080',
'TriggerTime':'00181060',
'SequenceVariant':'00180021',
'ScanOptions':'00180022',
'ScanningSequence':'00180020',
'MRAcquisitionType':'00180023',
'ImageType':'00180008',
'ImageOrientationPatient':'00200037',
'FlipAngle': '00181314',
'DiffusionBValue': '00189087',
'SiemensBValue': '0019100C',
'GEBValue': '0051100B',
'SlopInt6-9': '00431039',
'PulseSeqName': '0019109C',
'InternalPulseSeqName': '0019109E',
'FunctionalProcessingName': '00511002',
'GEFunctoolsParams': '00511006',
'CSA Series Header Info':'00291020',
'Acq recon record checksum':'00211019',
'PixelSpacing':'00280030',
'SliceThickness':'00180050',
'PhotometricInterpretation':'00280004',
'ContrastBolusAgent':'00180010',
'Modality':'00180060',
'SeriesDescription':'0008103E'
}
# pdb.set_trace()
# Concat DFs:
def stich_dfs_together(location="csvs/apr/merge_these_csvs"):
files = os.listdir(location)
# files = ['private_tags_0_200000.csv','private_tags_200000_400000.csv','private_tags_400000_600000.csv','private_tags_600000_800000.csv','private_tags_800000_1000000.csv','private_tags_1000000_1200000.csv','private_tags_1400000_1501855.csv']
total_rows = 0
csv_name = os.path.join(location, "combined.csv")
with open(csv_name, "w") as file:
file.write("fileNamePath|00511006|00291020|0019109E\n")
for file in files:
print(file,"\n")
# pdb.set_trace()
file = pd.read_csv(os.path.join(location,file), sep="|")
total_rows += file.shape[0]
file.to_csv(csv_name, mode='a', sep="|", header=None, index=None)
print(total_rows)
def test_combined_read(location="csvs/june/edited_with_mishkas_script"):
file = pd.read_csv(os.path.join(location,"combined.csv"), sep="|")
print(file.shape)
def pre_format_find_and_replace(s, tag):
# print(tag)
printable = set(string.printable)
# pdb.set_trace()
try:
if s == '':
S = "None"
# pdb.set_trace()
elif type(s) is pydicom.valuerep.IS:
S = s.__str__()
else:
# pdb.set_trace()
for i in s:
if i not in printable:
s = s.replace(i," ")
if s.find("\r") != -1:
s = s.replace("\r"," ")
if s.find("\n") != -1:
s = s.replace("\n"," ")
if s.find("|") != -1:
s = s.replace("|"," ")
S = s
if tag == "00511006":
S = GE_sequence_ID(s)
# pdb.set_trace()
elif tag == "00291020":
S = siemens_sequence_ID(s)
# pdb.set_trace()
elif tag == "0019109E":
S = GE_sequence_ID(s) # Essentially take this as is for now
# pdb.set_trace()
else:
raise Exception("tag not recognized")
except TypeError:
pdb.set_trace()
return S
def GE_sequence_ID(s):
# pdb.set_trace()
S = s
return S
def siemens_sequence_ID(s):
# pdb.set_trace()
"""
Mishka's parser and filter for private GE/Siemens dicom tags
"""
# siemens_tag = open(file) # file like object or str
# siemens_text = siemens_tag.read()
if s.find("\n") != -1:
s = s.replace("\n"," ")
siemens_text = s
if siemens_text.find('tSequenceFileName') != -1:
# start_idx = siemens_text.find('tSequenceFileName')
# start_plus = siemens_text[start_idx:start_idx+100000].find('SiemensSeq')
siemens_idx = siemens_text.find("SiemensSeq")
if siemens_idx != -1:
# Finding all occurrences of substring
inilist = [m.start() for m in re.finditer(r"SiemensSeq%\\", siemens_text)]
longest = ''
for start in inilist:
if len(siemens_text[start:start+30]) > len(longest):
# pdb.set_trace()
longest = siemens_text[start:start+30]
# print("{}".format(siemens_text[start-15:start+30])) # debug
# seqname = np.char.split(np.char.split(siemens_text[siemens_idx:siemens_idx+100], '""').tolist()[0],'\\').tolist()[1]
seqname = np.char.split(np.char.split(longest, '""').tolist()[0],'\\').tolist()[1]
# pdb.set_trace()
elif siemens_text.find("CustomerSeq") != -1:
siemens_idx = siemens_text.find("CustomerSeq")
# Finding all occurrences of substring
inilist = [m.start() for m in re.finditer(r"CustomerSeq%\\", siemens_text)]
longest = ''
for start in inilist:
if len(siemens_text[start:start+30]) > len(longest):
longest = siemens_text[start:start+30]
seqname = np.char.split(np.char.split(longest, '""').tolist()[0],'\\').tolist()[1]
# pdb.set_trace()
else:
raise Exception("Couldn\'t find tSequenceFileName or CustomerSeq")
else:
# pdb.set_trace()
seqname = siemens_text
s = seqname
return s
# dicoms_with_private_tags
def find_private_tags(range=(0,100000)):
n = 0 # loop count
with open("private_tags_{}_{}.csv".format(range[0],range[1]), "w") as file:
file.write("fileNamePath|00511006|00291020|0019109E\n")
for dicom_path in dicoms[range[0]:range[1]]:
# print("Loading dicom: ", dicom_path.split('/')[-1], "and checking for many tags:")
n += 1
ds = dcmread(dicom_path)
private_tags = [tag_key['GEFunctoolsParams'],tag_key['CSA Series Header Info'], tag_key['InternalPulseSeqName']]
# temp_df = pd.DataFrame({'fileNamePath':[None],
# private_tags[0]:[None],
# private_tags[1]:[None]})
# if ds.get(int("0x"+private_tags[2],16)) != None:
# pdb.set_trace()
if ds.get(int("0x"+private_tags[0],16)) != None or ds.get(int("0x"+private_tags[1],16)) != None or ds.get(int("0x"+private_tags[2],16)) != None:
# ptag = private_tags[0] if ds.get(int("0x"+private_tags[1],16)) == None else private_tags[1]
# temp_df['fileNamePath'] = dicom_path
# temp_df[ptag] = ds.get(int("0x"+ptag,16)).value
# dwpt = pd.concat((dwpt,temp_df), axis=0)
# Mishka func...to parse tags...
# siemens_sequence_ID()
tag1 = "None" if ds.get(int("0x"+private_tags[0],16)) == None else ds.get(int("0x"+private_tags[0],16)).value
tag2 = "None" if ds.get(int("0x"+private_tags[1],16)) == None else ds.get(int("0x"+private_tags[1],16)).value
tag3 = "None" if ds.get(int("0x"+private_tags[2],16)) == None else ds.get(int("0x"+private_tags[2],16)).value
tag1 = pre_format_find_and_replace(tag1, private_tags[0])
tag2 = pre_format_find_and_replace(tag2, private_tags[1])
tag3 = pre_format_find_and_replace(tag3, private_tags[2])
# pdb.set_trace()
# experimental
frame = {'fileNamePath':pd.Series(dicom_path),'00511006':pd.Series(tag1),'00291020':pd.Series(tag2),'0019109E':pd.Series(tag3)}
file = pd.DataFrame(frame)
file[['fileNamePath','00511006','00291020','0019109E']].to_csv("csvs/apr/merge_these_csvs/private_tags_{}_{}.csv".format(range[0],range[1]), mode="a", header=False, sep="|", index=None)
# pdb.set_trace()
# experimental
# original
# with open("private_tags_{}_{}.csv".format(range[0],range[1]), "a") as file:
# file.write("{}|{}|{}\n".format(dicom_path,tag1,tag2))
# original
if n % 1000 == 0:
print("batch:{}-{}, {}% complete".format(range[0], range[1], float(n)/(range[1]-range[0])*100))
# if n == 3000:
# pdb.set_trace()
# Keys
# for key in tag_key.keys():
# try:
# t = pydicom.datadict.tag_for_keyword(key)
# print("{}: {}".format(key, ds[t]))
# except:
# print("{}: Not found".format(key))
def multiprocessing_find_private_tags():
global dicoms
starttime = time.time()
processes = []
process_count=40
# dicoms = dicoms[0:14783]
chunks = len(dicoms)/process_count
remainder = len(dicoms) % process_count
for i in range(0,process_count):
start_images = i * chunks
end_images = (i+1) * chunks
# pdb.set_trace()
print("{} - {}".format(start_images,end_images))
p = multiprocessing.Process(target=find_private_tags, args=((start_images,end_images),))
processes.append(p)
p.start()
if remainder != 0:
start_images = process_count * chunks
end_images = process_count * chunks + remainder
print("{} - {}".format(start_images,end_images))
p = multiprocessing.Process(target=find_private_tags, args=((start_images,end_images),))
processes.append(p)
p.start()
for process in processes:
process.join() # means wait for this to complete
print('Time taken = {} seconds'.format(time.time() - starttime))
if __name__ == "__main__":
os.chmod("pydicom_attempt.py",0x777)
if sys.argv[1] == "combine":
stich_dfs_together()
test_combined_read()
elif sys.argv[1] == "multiprocess":
multiprocessing_find_private_tags()
else:
range_low=sys.argv[1]
range_high=sys.argv[2]
print("{}, {}".format(range_low,range_high))
find_private_tags(range=(int(range_low),int(range_high)))
# sudo docker run --rm -it --name=ben_wks_python2_worker_1 -v /home/ben.bearce/:/home_dir -v /home/jayashree.kalpathy:/home_dir/JK ben_wks_python2 bash
# sudo docker run --rm -it --name=ben_wks_python2_worker_2 -v /home/ben.bearce/:/home_dir -v /home/jayashree.kalpathy:/home_dir/JK ben_wks_python2 bash
# sudo docker run --rm -it --name=ben_wks_python2_worker_3 -v /home/ben.bearce/:/home_dir -v /home/jayashree.kalpathy:/home_dir/JK ben_wks_python2 bash
# sudo docker run --rm -it --name=ben_wks_python2_worker_4 -v /home/ben.bearce/:/home_dir -v /home/jayashree.kalpathy:/home_dir/JK ben_wks_python2 bash
# sudo docker run --rm -it --name=ben_wks_python2_worker_5 -v /home/ben.bearce/:/home_dir -v /home/jayashree.kalpathy:/home_dir/JK ben_wks_python2 bash
# 750000 - 1253140,
# sudo docker run --rm -it --name=ben_wks_python2_worker_6 -v /home/ben.bearce/:/home_dir -v /home/jayashree.kalpathy:/home_dir/JK ben_wks_python2 bash"
|
[
"bbearce@gmail.com"
] |
bbearce@gmail.com
|
f8a2aa48568e27abc071b474a7337ae271771cc4
|
cdb21ded35ad572807cdf6384d43d97971263f9f
|
/fouriergraph.py
|
c981ffe3e08d1a1bd279cb7ef44693352a34e392
|
[] |
no_license
|
cdceballor/marconi
|
0eb1444bca2841b9c35fa99907985842dadd6175
|
50360248e042098fe109e588b683a11dcab926db
|
refs/heads/master
| 2022-09-22T02:05:52.224580
| 2020-06-05T01:18:33
| 2020-06-05T01:18:33
| 250,641,614
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,272
|
py
|
import scipy.io.wavfile as wavfile
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [16,12]
plt.rcParams.update({'font.size' : 18})
def main():
    s_rate, signal =wavfile.read('songs/bad_songs/not_good_song.wav')
    sample_badRate, badSample = wavfile.read('songs/bad_songs/not_good_song.wav')
    sample_rate, samples = wavfile.read('songs/hakuna_matata.wav')
    dt = 0.001
    t = np.arange(0,1,dt)
    f_clean = samples[5000000:5001000]
    f = badSample[5000000:5001000]
    n = len(t)
    fhat = np.fft.fft(f,n)
    psd = fhat * np.conj(fhat) / n
    print(psd)
    freq = (1/(dt*n)) * np.arange(n)
    l = np.arange(1,np.floor(n/2),dtype = 'int')
    print(freq)
    fig,axs = plt.subplots(2,1)
    # Same calculations as above but for the clean song, to show how the solution compares with the original
    Lhat = np.fft.fft(f_clean,n)
    lsd = Lhat * np.conj(Lhat) / n
    # Create several inverses with filters at different thresholds to see different possible results
    #---------Filter 1------------------------
    indices1 = psd >100000000
    psdclean1 = psd * indices1
    fhat1 = indices1 * fhat
    ffilt1 = np.fft.ifft(fhat1)
    #---------Filter 2------------------------
    indices2 = psd >150000000
    psdclean2 = psd * indices2
    fhat2 = indices2 * fhat
    ffilt2 = np.fft.ifft(fhat2)
    #---------Filter 3------------------------
    indices3 = psd >200000000
    psdclean3 = psd * indices3
    fhat3 = indices3 * fhat
    ffilt3 = np.fft.ifft(fhat3)
    plt.sca(axs[0])
    plt.plot(freq[l],psd[l],color = 'c', LineWidth = 2, label = "Noisy")
    plt.plot(freq[l],lsd[l],color = 'k', LineWidth = 2, label = "Clean")
    plt.axhline(y=1000000000,color = 'y', LineWidth = 2, label = 'Filtered1')
    plt.axhline(y=1500000000,color = 'b', LineWidth = 2, label = 'Filtered2')
    plt.axhline(y=2000000000,color = 'r', LineWidth = 2, label = 'Filtered3')
    plt.xlim(freq[l[0]],freq[l[-1]])
    plt.ylabel("Espectro de poder")
    plt.xlabel("Frecuencia")
    plt.legend()
    plt.sca(axs[1])
    plt.plot(t,f_clean,color = 'k', LineWidth = 1.5,label = 'Clean')
    plt.plot(t,ffilt1,color = 'y', LineWidth = 2, label = 'Filtered1')
    plt.plot(t,ffilt2,color = 'b', LineWidth = 2, label = 'Filtered2')
    plt.plot(t,ffilt3,color = 'r', LineWidth = 2, label = 'Filtered3')
    plt.ylabel("Amplitud")
    plt.xlabel("Tiempo")
    plt.legend()
    fig,axs = plt.subplots(2,1)
    plt.sca(axs[0])
    plt.plot(t,f_clean,color = 'g', LineWidth = 2, label = 'clean')
    plt.plot(t,f,color = 'r', LineWidth = 1.5, label = 'noisy')
    plt.xlim(t[0],t[-1])
    plt.ylabel("Amplitud")
    plt.xlabel("Tiempo")
    plt.legend()
    plt.sca(axs[1])
    plt.plot(t,Lhat,color = 'c', LineWidth = 2, label = 'Clean')
    plt.plot(t,fhat,color = 'k', LineWidth = 2, label = 'Noisy')
    plt.xlim(t[0],t[-1])
    plt.ylabel("Amplitud")
    plt.xlabel("Frecuencia")
    plt.legend()
    # plt.sca(axs[1])
    # plt.plot(freq[l],psd[l],color = 'c', LineWidth = 1.5, label = 'noisy')
    # plt.plot(freq[l],psdclean1[l],color = 'k', LineWidth = 2, label = 'Filtered')
    # plt.xlim(freq[0],freq[-1])
    # plt.legend()
    plt.show()
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
cdceballor.noreply@github.com
|
37b8fd87664fa7347b4b9809d6ac322d6b822bff
|
cb91cd103c0042094472f690aabff26637b30f98
|
/cpu_stat.py
|
3224a2b05f02cea723fb565be2aa0edff1659073
|
[
"MIT"
] |
permissive
|
sebastian-code/system_overview
|
0d10f55992331c58b7bc59efbb4380f1cbcdfef5
|
0340e163cfff89f8532a06f28ee1cbbc9f96049d
|
refs/heads/master
| 2020-05-03T22:51:45.242235
| 2015-07-29T11:41:37
| 2015-07-29T11:41:37
| 39,890,180
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,834
|
py
|
#!/usr/bin/env python
#
# Copyright (c) 2010-2013 Corey Goldberg (http://goldb.org)
#
# This file is part of linux-metrics
#
# License :: OSI Approved :: MIT License:
# http://www.opensource.org/licenses/mit-license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
"""
cpu_stat - Python Module for CPU Stats on Linux
requires:
- Python 2.6+
- Linux 2.6+
"""
import time
def cpu_times():
"""Return a sequence of cpu times.
each number in the sequence is the amount of time, measured in units
of USER_HZ (1/100ths of a second on most architectures), that the system
spent in each cpu mode:
(user, nice, system, idle, iowait, irq, softirq, [steal], [guest]).
on SMP systems, these are aggregates of all processors/cores.
"""
with open('/proc/stat') as f:
line = f.readline()
cpu_times = [int(x) for x in line.split()[1:]]
return cpu_times
def cpu_percents(sample_duration=1):
"""Return a dictionary of usage percentages and cpu modes.
elapsed cpu time samples taken at 'sample_time (seconds)' apart.
cpu modes: 'user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'
on SMP systems, these are aggregates of all processors/cores.
"""
deltas = __cpu_time_deltas(sample_duration)
total = sum(deltas)
percents = [100 - (100 * (float(total - x) / total)) for x in deltas]
return {
'user': percents[0],
'nice': percents[1],
'system': percents[2],
'idle': percents[3],
'iowait': percents[4],
'irq': percents[5],
'softirq': percents[6],
}
def procs_running():
"""Return number of processes in runnable state."""
return __proc_stat('procs_running')
def procs_blocked():
"""Return number of processes blocked waiting for I/O to complete."""
return __proc_stat('procs_blocked')
def file_desc():
"""Return tuple with the number of allocated file descriptors,
allocated free file descriptors, and max allowed open file descriptors.
The number of file descriptors in use can be calculated as follows:
fd = file_desc()
in_use = fd[0] - fd[1]
"""
with open('/proc/sys/fs/file-nr') as f:
line = f.readline()
fd = [int(x) for x in line.split()]
return fd
def load_avg():
"""Return a sequence of system load averages (1min, 5min, 15min)."""
with open('/proc/loadavg') as f:
line = f.readline()
load_avgs = [float(x) for x in line.split()[:3]]
return load_avgs
def cpu_info():
"""Return the logical cpu info. On SMP systems, the values are
representing a single processor. The key processor_count has the number
of processors.
"""
with open('/proc/cpuinfo') as f:
cpuinfo = {'processor_count': 0}
for line in f:
if ':' in line:
fields = line.replace('\t', '').strip().split(': ')
# count processors and filter out core specific items
if fields[0] == 'processor':
cpuinfo['processor_count'] += 1
elif fields[0] != 'core id':
try:
cpuinfo[fields[0]] = fields[1]
except IndexError:
pass
return cpuinfo
def __cpu_time_deltas(sample_duration):
"""Return a sequence of cpu time deltas for a sample period.
elapsed cpu time samples taken at 'sample_time (seconds)' apart.
each value in the sequence is the amount of time, measured in units
of USER_HZ (1/100ths of a second on most architectures), that the system
spent in each cpu mode: (user, nice, system, idle, iowait, irq, softirq, [steal], [guest]).
on SMP systems, these are aggregates of all processors/cores.
"""
with open('/proc/stat') as f1:
with open('/proc/stat') as f2:
line1 = f1.readline()
time.sleep(sample_duration)
line2 = f2.readline()
deltas = [int(b) - int(a) for a, b in zip(line1.split()[1:], line2.split()[1:])]
return deltas
def __proc_stat(stat):
with open('/proc/stat') as f:
for line in f:
if line.startswith(stat):
return int(line.split()[1])
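# Minimal usage sketch for the helpers above (illustrative only; assumes this module is
# importable as cpu_stat on a Linux host):
# import cpu_stat
# print(cpu_stat.cpu_percents(sample_duration=1))  # dict of cpu mode -> usage percent
# print(cpu_stat.load_avg())                       # [1min, 5min, 15min]
# print(cpu_stat.procs_running(), cpu_stat.procs_blocked())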
|
[
"sebaslander@gmail.com"
] |
sebaslander@gmail.com
|
6b6e175ca8bf64af929209040b404c04032022a6
|
88f8b9b43702217aa2c93e97a862ddee0b71f9a1
|
/keyboardhackerapp/views/sync.py
|
6736cc17542787ebdf5397a0682511fb1620cab8
|
[] |
no_license
|
nathantheinventor/keyboard-hacker
|
c630221bc288b7247fe6e21620ee5e9da58ea911
|
f4c61d1b2d7d3db25862cc95865764aa3f1abe47
|
refs/heads/master
| 2020-07-25T23:32:39.110317
| 2019-09-16T00:20:32
| 2019-09-16T00:20:32
| 208,457,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
from django.shortcuts import render
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from time import time
def sync(req: HttpRequest) -> HttpResponse:
    return HttpResponse(time() * 1000.0)
|
[
"nathantheinventor@gmail.com"
] |
nathantheinventor@gmail.com
|
fdef9e1142b7a2ebd63155a29a40524d52f3573b
|
e2addeff623464e25364a3b07910b6c77f014dd5
|
/news_feed/migrations/0001_initial.py
|
40e1c5c81bdca73964c7c50d34ae72a71df89485
|
[
"MIT"
] |
permissive
|
Soigres/es-pl7
|
1282eb4e4abbc5303c19e6c8d4a52aa2fadaa652
|
739d0c766a2ab70f2bc87d597041921d9845cac9
|
refs/heads/master
| 2020-03-29T13:00:49.997822
| 2018-09-22T01:58:35
| 2018-09-22T01:58:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
# Generated by Django 2.1.1 on 2018-09-13 14:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=500)),
                ('pubdate', models.DateTimeField(auto_now_add=True)),
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-pubdate'],
                'permissions': (('can_change_status', 'Can see and change articles'),),
            },
        ),
    ]
|
[
"pedro.alex.ribeiro@hotmail.com"
] |
pedro.alex.ribeiro@hotmail.com
|
f6185cf19a64bb63530d452c58615df8a2baaff6
|
ccd7ca1c542c7fc0c60cd1e2c5723f5c505ce418
|
/CEPACClusterLib.py
|
75d4ec98b815fc2cc5fe169a6f5cc167ebf208c0
|
[] |
no_license
|
fervion/Cluster-Tool
|
3860b8a5afe77e5e5dbcf2612e058231b036b2a7
|
2da4b3e8024b4cc6a0e7b4aab5b1b658155b6792
|
refs/heads/master
| 2020-03-28T00:24:13.343880
| 2018-09-04T20:04:41
| 2018-09-04T20:04:41
| 147,408,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,307
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 10:10:08 2015
CEPAC Cluster tool library
STD list:
* objectify things
* GUI
* make zipped download work
* fix jobname and folder name thing
* kill jobs
@author: Taige Hou (thou1@partners.org)
@author: Kai Hoeffner (khoeffner@mgh.harvard.edu)
"""
from __future__ import print_function
import os
import sys
import glob
import paramiko
import md5
import re
import zipfile
import threading
import time
from stat import S_ISDIR
import getpass
#A list of clusters
CLUSTER_NAMES = ("MGH", "Orchestra", "Custom")
#Maximum number of concurrent connections
MAX_CONNECTIONS = 8
#Mapping of cluster names to hostname, runfolder path and model folder path
#For run_folder use only relative path from home directory (this is required as lsf and cepac are picky about paths)
#For model_folder can use either absolute path or relative path from home directory
#do not use ~ in path to represent home directory as the ftp client cannot find the directory
CLUSTER_INFO = {"MGH":{'host':'erisone.partners.org',
'run_folder':'runs',
'model_folder':'/data/cepac/modelVersions',
'default_queues':("medium", "long", "vlong", "big")},
"Orchestra":{'host':'orchestra.med.harvard.edu',
'run_folder':'runs',
'model_folder':'/groups/freedberg/modelVersions',
'default_queues':("freedberg_2h", "freedberg_12h", "freedberg_1d", "freedberg_7d", "freedberg_unlim",
"short", "long")},
"Custom":{'host':'',
'run_folder':'runs',
'model_folder':'',
'default_queues':()},
}
#---------------------------------------------
class UploadThread(threading.Thread):
"""Thread used to upload runs and submit jobs"""
def __init__(self, cluster, dir_local, dir_remote, lsfinfo, update_func, glob_pattern="*.in" ):
threading.Thread.__init__(self)
self.cluster = cluster
self.args = [self, dir_local, dir_remote, lsfinfo, update_func, glob_pattern]
self.abort = False
def stop(self):
self.abort = True
def run(self):
while self.cluster.num_connections >= MAX_CONNECTIONS:
time.sleep(.2)
self.cluster.num_connections+=1
jobfiles = self.cluster.sftp_upload(*self.args)
if not self.abort:
self.cluster.pybsub(jobfiles)
self.cluster.num_connections-=1
#---------------------------------------------
class DownloadThread(threading.Thread):
"""Thread used to download runs"""
def __init__(self, cluster, run_folder, dir_remote, dir_local, update_func):
threading.Thread.__init__(self)
self.cluster = cluster
self.args = [self, dir_remote, dir_local, update_func]
self.abort = False
self.run_folder = run_folder
#Total number of files to download
self.total_files = 0
#current progress of download
self.curr_files = 0
def stop(self):
self.abort = True
def run(self):
while self.cluster.num_connections >= MAX_CONNECTIONS:
time.sleep(.2)
#counts total number of files in folder recursively
stdin, stdout, stderr = self.cluster.ssh.exec_command("find {} -type f | wc -l"
.format(clean_path(self.cluster.run_path+"/"+self.run_folder)))
#wait for command to finish
stdout.channel.recv_exit_status()
self.total_files = int(stdout.read().strip())
if self.total_files == 0:
self.total_files = 1
self.cluster.num_connections+=1
#self.cluster.sftp_get_compressed(*self.args)
self.cluster.sftp_get_recursive(*self.args)
self.cluster.num_connections-=1
#---------------------------------------------
class JobInfoThread(threading.Thread):
"""Thread used to get detailed job info"""
def __init__(self, cluster, jobid, post_func):
threading.Thread.__init__(self)
self.cluster = cluster
self.jobid = jobid
#function which tells thread how to post results
self.post_func = post_func
def run(self):
while self.cluster.num_connections >= MAX_CONNECTIONS:
time.sleep(.2)
self.cluster.num_connections+=1
job_info = self.cluster.get_job_info(self.jobid)
self.post_func(jobid = self.jobid, data = job_info)
self.cluster.num_connections-=1
#---------------------------------------------
class CEPACClusterApp:
"""Basic class for the desktop interface with the CEPAC cluster"""
def __init__(self,):
self.port = 22
#SSH Client
self.ssh = paramiko.SSHClient()
#Dictionary of available model versions with model type as keys
self.model_versions = None
#List of available run queues
self.queues = None
#number of currently open connections
self.num_connections = 0
#thread for uploading
self.upload_thread = None
#threads for downloads
self.download_threads = []
def bind_output(self, output=print):
"""
output is a function used to write messages from the app.
Defaults to the print function for the console version.
Any calls to print should use the self.output function instead
"""
self.output = output
#print initiation message
self.output("="*40, False)
self.output("Initiating Cepac Cluster App", False)
def connect(self, hostname='erisone.partners.org',
username=None, password=None,
run_path=None, model_path=None, clustername=None):
"""
Starts connection to host.
Should be called once per client.
"""
#Close any previous connections
self.close_connection()
#Need to convert to string for paramiko because input could be unicode
self.hostname = str(hostname)
self.username = str(username)
self.password = str(password)
self.run_path = str(run_path)
self.model_path = str(model_path)
self.clustername = str(clustername)
self.output("\nConnecting to {} as user: {}...".format(self.hostname, self.username), False)
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self.ssh.connect(self.hostname,port=22,username=self.username,password=self.password)
except paramiko.AuthenticationException:
#Login failed
self.output("\tLogin Failed", False)
else:
#Get model and queue information
            self.output("\tLogin Successful", False)
self.update_cluster_information()
def create_upload_thread(self, *args, **kwargs):
self.upload_thread = UploadThread(self, *args, **kwargs)
self.upload_thread.start()
def create_download_thread(self, *args, **kwargs):
thread = DownloadThread(self, *args, **kwargs)
thread.start()
def sftp_get_recursive(self, thread, dir_remote, dir_local, progress_func, sftp = None):
"""Recursively Downloads folder including subfolders"""
if not sftp:
#Create sftp object. Should only do this once per download.
with paramiko.Transport((self.hostname, self.port)) as t:
t.connect(username=self.username, password=self.password)
t.use_compression()
                #recommended window size from https://github.com/paramiko/paramiko/issues/175
t.window_size = 134217727
                sftp = paramiko.SFTPClient.from_transport(t)
progress_func(0, thread.run_folder)
self.output("\nDownloading from folder {} to folder {}...".format(dir_remote, dir_local))
self.sftp_get_recursive(thread, dir_remote, dir_local, progress_func, sftp)
self.output("\tDownload Complete")
else:
item_list = sftp.listdir(dir_remote)
dir_local = str(dir_local)
dir_local = os.path.join(dir_local, os.path.basename(dir_remote))
if not os.path.isdir(dir_local):
os.makedirs(dir_local)
for item in item_list:
item = str(item)
if isdir(dir_remote + "/" + item, sftp):
self.sftp_get_recursive(thread, dir_remote + "/" + item, dir_local, progress_func, sftp)
else:
sftp.get(dir_remote + "/" + item, os.path.join(dir_local,item))
thread.curr_files+=1
progress_func(thread.curr_files/float(thread.total_files)*100, thread.run_folder)
# def sftp_get_compressed(self, dir_remote, dir_local, sftp = None):
# """Download everything as one file"""
# compfile = "compfile.tar.gz"
# self.output("\nCompressing {}".format(dir_remote))
# stdin, stdout, stderr = self.ssh.exec_command("tar -zcf ~/{} ~/{} ".format(compfile, dir_remote))
#
#
# if not stdout.readlines():
# #Create sftp object
# with paramiko.Transport((self.hostname, self.port)) as t:
# t.connect(username=self.username, password=self.password)
# t.use_compression()
# #recomended window size from https://github.com/paramiko/paramiko/issues/175
# t.window_size = 134217727
# sftp = t.open_session()
# sftp = paramiko.SFTPClient.from_transport(t)
# # This should be stored somewhere locally for faster access!
# dir_temp_local='C:\Temp'
#
# sftp.get("~/{}".format(compfile), dir_temp_local+"\CEPACclusterdownload.zip")
# self.output("\nDownload complete!")
#
# stdin, stdout, stderr = ssh.exec_command("rm {}".format(compfile))
# self.output("\nExtracting files")
# with zipfile.ZipFile(dir_temp_local+"CEPACclusterdownload.zip", "r") as z:
# z.extractall(dir_local)
# else:
# self.output(stdout.readlines())
def sftp_upload(self, thread, dir_local, dir_remote, lsfinfo, progress_func, glob_pattern = "*.in"):
"""Uploads local directory to remote server and generates a job file per subfolder and returns the list of job files."""
files_copied = 0
jobfiles = []
#Create sftp object
with paramiko.Transport((self.hostname, self.port)) as t:
t.connect(username=self.username, password=self.password)
t.use_compression()
            #recommended window size from https://github.com/paramiko/paramiko/issues/175
t.window_size = 134217727
            sftp = paramiko.SFTPClient.from_transport(t)
self.output("\nSubmitting runs from folder {} ...".format(dir_local))
#list of tuples (local_file, remote file) that will be uploaded
files_to_upload = []
for dirpath, dirnames, filenames in os.walk(dir_local):
matching_files = [f for f in glob.glob(dirpath + os.sep + glob_pattern) if not os.path.isdir(f)]
if not matching_files:
continue
# Fix foldername
remote_base = dir_remote + '/' + os.path.basename(dir_local)
if not os.path.relpath(dirpath, dir_local)=='.':
curr_dir_remote = remote_base + '/' + os.path.relpath(dirpath,dir_local).replace("\\","/")
else:
curr_dir_remote = remote_base
# Create folder and subfolders
stdin, stdout, stderr = self.ssh.exec_command("mkdir -p '{}'".format(curr_dir_remote))
#wait for command to finish
stdout.channel.recv_exit_status()
self.output("\tCreating {}".format(curr_dir_remote))
# Write and collect job files
if thread.abort:
return None
self.write_jobfile(curr_dir_remote, lsfinfo, sftp)
jobfiles.append(curr_dir_remote + '/job.info')
# Upload files
for fpath in matching_files:
is_up_to_date = False
fname = os.path.basename(fpath)
local_file = fpath
remote_file = curr_dir_remote + '/' + fname
# if remote file exists
try:
sftp.stat(remote_file)
except IOError:
pass
else:
local_file_data = open(local_file, "rb").read()
remote_file_data = sftp.open(remote_file).read()
md1 = md5.new(local_file_data).digest()
md2 = md5.new(remote_file_data).digest()
if md1 == md2:
is_up_to_date = True
if not is_up_to_date:
files_to_upload.append((local_file, remote_file))
progress_func(0)
#upload files
for local_file, remote_file in files_to_upload:
if thread.abort:
return None
self.output('\tCopying {} to {}'.format(local_file, remote_file))
sftp.put(local_file, remote_file)
files_copied += 1
#update progress bar
progress_func(files_copied/float(len(files_to_upload))*100)
self.output('\tFinished Upload')
return jobfiles
def write_jobfile(self, curr_dir_remote, lsfinfo, sftp):
"""
Write job files for the current folder.
lsfinfo is a dictionary which contains
queue - the queue to submit to
email - email address to send upon job completion (optional)
modeltype - should be either treatm, debug, or transm
modelversion - name of the model version to run
"""
self.output('\tWriting Job file: {}'.format(curr_dir_remote + '/job.info'))
with sftp.open(curr_dir_remote + '/job.info', 'wb') as f:
jobcommand = "#!/bin/bash\n" +\
"#BSUB -J \"" + lsfinfo['jobname'] + "\"\n" +\
"#BSUB -q " + lsfinfo['queue'] + "\n"
if 'email' in lsfinfo:
jobcommand += "#BSUB -u " + lsfinfo['email'] + "\n" + \
"#BSUB -N\n"
if lsfinfo['modeltype'] != "smoking":
jobcommand += self.model_path + "/" + lsfinfo['modeltype'] + "/" + lsfinfo['modelversion'] + " ~/" + clean_path(curr_dir_remote)
else:
jobcommand += "/data/cepac/python/bin/python3.6 " + self.model_path + "/" + lsfinfo['modeltype'] + "/"+ \
lsfinfo['modelversion'] + "/sim.py" + " ~/" + clean_path(curr_dir_remote)
f.write(jobcommand)
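    # For illustration, with hypothetical lsfinfo values
    # {'jobname': 'R6', 'queue': 'medium', 'modeltype': 'treatm', 'modelversion': 'cepac45c'}
    # and curr_dir_remote 'runs/R6' on the MGH cluster, the job.info written above would
    # look roughly like:
    #   #!/bin/bash
    #   #BSUB -J "R6"
    #   #BSUB -q medium
    #   /data/cepac/modelVersions/treatm/cepac45c ~/runs/R6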
def pybsub(self, jobfiles):
"""Submit jobs for job list to LSF"""
for job in jobfiles:
stdin, stdout, stderr = self.ssh.exec_command("bash -lc bsub < '{}'".format(job))
stdout.read()
err = stderr.read()
if err.strip():
self.output('Error: {}'.format(err))
self.output('\tSubmitted :{}'.format(job))
def get_run_folders(self):
"""
Gets the names of all the folders in the run_folder on the cluster
        and returns as a list
"""
self.output("\nRetrieving run folders ...", False)
#use ls -1 {}| awk '{$1=$2=""; print 0}' to get long form data but not very useful
stdin, stdout, stderr = self.ssh.exec_command("ls -1 {}".format(self.run_path))
run_folders = stdout.readlines()
self.output("\tFound {} run folders".format(len(run_folders)), False)
return run_folders
def delete_run_folders(self, folderlist):
"""Deletes the list of folders from the cluster"""
self.output("\nDeleting Run Folders ...", False)
for folder in folderlist:
self.output("\tDeleting {}".format(folder), False)
stdin, stdout, stderr = self.ssh.exec_command("rm -rf {}".format(self.run_path+"/"+clean_path(folder)))
self.output("\tFinished Deleting", False)
def get_job_list(self):
"""
Gets some basic information about currently running jobs
Returns jobid, status and queue
For detailed job info use get_job_info
"""
self.output("\nGetting job listing ...", False)
#Get job listing and format the result
stdin, stdout, stderr = self.ssh.exec_command("bash -lc bjobs | awk '{if (NR!=1) print $1,$3,$4}'")
#Each entry in Job data will be a list [jobid, status, queue]
job_data = [line.split() for line in stdout.readlines()]
return job_data
def get_job_info(self, jobid):
"""
Returns detailed job information by running bjobs -l
Returns a tuple of (jobname, modelname, runfolder)
"""
stdin, stdout, stderr = self.ssh.exec_command("bash -lc 'bjobs -l {}'".format(jobid))
#read here to add delay and avoid being blocked by server
#wait for command to finish
stdout.channel.recv_exit_status()
#read job info and get rid of extra spaces
job_data = re.sub("\n\s*","",stdout.read())
#get jobname, modelname, runfolder from job info
re_pattern ="Job Name <(.*?)>.*" +\
"Command <.*?{}/.*?/(.*?)".format(self.model_path) +\
"~/{}/(.*?)>".format(self.run_path)
match = re.search(re_pattern, job_data)
if match:
job_name, model_version, run_folder = match.groups()
run_folder = reverse_clean_path(run_folder)
model_version = model_version.strip()
return (job_name, model_version, run_folder)
else:
return None
def kill_jobs(self, joblist):
"""Kills jobs with jobids given in joblist"""
self.output("\nKilling Jobs...", False)
for jobid in joblist:
stdin, stdout, stderr = self.ssh.exec_command("bash -lc 'bkill {}'".format(jobid))
stdout.channel.recv_exit_status()
self.output("\t {} jobs killed".format(len(joblist)), False)
def update_cluster_information(self):
"""
Updates the names of all model versions along with model type(debug, treatm, transm)
Updates the lists of available queues
Should be called when logging in
"""
self.output("\tRetrieving model and queue information...", False)
stdin, stdout, stderr = self.ssh.exec_command("ls -1 {}".format(self.model_path))
model_types = [m_type.strip() for m_type in stdout.readlines()]
model_versions = {}
for m_type in model_types:
#For each model type get the associated model versions
stdin, stdout, stderr = self.ssh.exec_command("ls -1 {}".format(self.model_path+"/"+m_type))
model_versions[m_type] = [m_version.strip() for m_version in stdout.readlines()]
self.model_versions = model_versions
stdin, stdout, stderr = self.ssh.exec_command("ls -1 {}".format(self.model_path))
model_types = [m_type.strip() for m_type in stdout.readlines()]
model_versions = {}
#Gets a list of queues by calling bqueues and filtering the output
if CLUSTER_INFO[self.clustername]['default_queues']:
self.queues = CLUSTER_INFO[self.clustername]['default_queues']
else:
stdin, stdout, stderr = self.ssh.exec_command("bash -lc bqueues -w | awk '{if (NR!=1) print $1}'")
self.queues = [q.strip() for q in stdout.readlines()]
self.output("\tDone", False)
def close_connection(self):
self.ssh.close()
def __del__(self):
#closes SSH connection upon exit
self.close_connection()
#---------------------------------------------
# Helper function
def isdir(path, sftp):
try:
return S_ISDIR(sftp.stat(path).st_mode)
except IOError:
#Path does not exist, so by definition not a directory
return False
#---------------------------------------------
# Helper function
def clean_path(path):
"""Cleans a filepath for use on cluster by adding escape characters"""
esc_chars = ['&',';','(',')','$','`','\'',' ']
for c in esc_chars:
path = path.replace(c, "\\"+c)
return path
#---------------------------------------------
# Helper function
def reverse_clean_path(path):
"""Removes escape characters from path"""
return path.replace("\\","")
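# Illustrative round trip for the two helpers above (hypothetical path):
#   clean_path("my runs/run (1)")             -> 'my\ runs/run\ \(1\)'
#   reverse_clean_path('my\ runs/run\ \(1\)') -> 'my runs/run (1)'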
#---------------------------------------------
#---------------------------------------------
#----------------------------------------------------------------------
if __name__ == "__main__":
hostname = 'erisone.partners.org'
username = 'kh398'
password = getpass.getpass("Password: ")
port = 22
    glob_pattern='*.*' # can be used to only copy a specific type of file, e.g. '*.in'
lsfinfo = {
'email' : "khoeffner@mgh.harvard.edu",
'modelversion' : "cepac45c",
'jobname' : "R6",
'queue' : "medium"
}
dir_local = 'Z:\CEPAC - International\Projects\Hoeffner\Ongoing Projects\DTG-1stART-RLS\Analysis\DEV0\Run1_3\R6'
dir_remote = "runs/" + lsfinfo['jobname']
if sys.argv[1].lower() == 'upload':
with paramiko.Transport((hostname, port)) as t:
t.connect(username=username, password=password)
sftp = t.open_session()
sftp = paramiko.SFTPClient.from_transport(t)
jobfiles = sftp_upload(dir_local, dir_remote, glob_pattern, lsfinfo, sftp)
if len(sys.argv) > 2 and sys.argv[2].lower() == 'submit':
with paramiko.SSHClient() as ssh:
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=username, password=password)
pybsub(jobfiles, ssh)
if sys.argv[1].lower() == 'status':
# Get job status
with paramiko.SSHClient() as ssh:
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=username, password=password)
stdin, stdout, stderr = ssh.exec_command("bjobs")
if stdout.readlines():
for line in stdout.readlines():
print(line.split())
else:
print(stderr.readlines())
if sys.argv[1].lower() == 'download':
# Download everything - Use this after the runs are done
with paramiko.Transport((hostname, port)) as t:
t.connect(username=username, password=password)
t.use_compression()
sftp = t.open_session()
sftp = paramiko.SFTPClient.from_transport(t)
sftp_get_recursive(dir_remote, dir_local, sftp)
print("Download complete!")
# Download everything in a zip file - Still needs to be fixed because the path is wrong
# with paramiko.SSHClient() as ssh:
# ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# ssh.connect(hostname, username=username, password=password)
# stdin, stdout, stderr = ssh.exec_command("zip -9 -y -r -q ~/runs/R500.zip "+dir_remote)
#
# if not stdout.readlines():
# with paramiko.Transport((hostname, port)) as t:
# t.connect(username=username, password=password)
# t.use_compression()
# sftp = t.open_session()
# sftp = paramiko.SFTPClient.from_transport(t)
# # This should be stored somewhere locally for faster access!
# dir_local='C:\MyTemp'
# sftp.get("runs/R500.zip", dir_local+"\R500.zip")
# print("Download complete!")
#
# stdin, stdout, stderr = ssh.exec_command("rm runs/R500.zip")
#
# print("Extracting files")
# with zipfile.ZipFile(dir_local+"\R500.zip", "r") as z:
# z.extractall(dir_local)
|
[
"noreply@github.com"
] |
fervion.noreply@github.com
|
79fb2c539e4809067334c47c08ee9e341dbf6de2
|
16e01a94dc0d8f729be7684c9f0f249836fce359
|
/build/lib/netabio/__init__.py
|
38e23c1563bc75675aa55cce4affccf08b928708
|
[] |
no_license
|
Nurtal/NETABIO
|
959be136aee32229b01de78f3787eca6609a7f9c
|
dfd1d5d5f593c433b34634972d9f61197fb0a7e2
|
refs/heads/master
| 2021-01-20T21:16:37.423112
| 2021-01-13T09:21:50
| 2021-01-13T09:21:50
| 101,761,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
"""
"""
import os
__version__ = "0.0.1"
_ROOT = os.path.abspath(os.path.dirname(__file__))
def get_script_path(path):
return os.path.join(_ROOT, 'scripts', path)
def get_data_path(path):
return os.path.join(_ROOT, 'data', path)
## Get path for R script
CORRELATIONMATRIX_SCRIPT = get_script_path('fs_correlation_matrix_analysis.R')
ATTRIBUTEIMPORTANCE_SCRIPT = get_script_path('fs_attribute_importance_evaluation.R')
RFE_SCRIPT = get_script_path('fs_RFE_analysis.R')
BORUTA_SCRIPT = get_script_path('fs_Boruta_analysis.R')
## Get path for exemple data
TEST_DATA = get_data_path('test.txt')
|
[
"nathan.foulquier.pro@gmail.com"
] |
nathan.foulquier.pro@gmail.com
|
df74748eca1ff878707ff9e11abd7a90123f6957
|
510724320e1e4032f976fdc5c6e1cc20c632475d
|
/practica2-8.py
|
ed85c91d731badfd4f7a9ad181b1a953f9adfec1
|
[] |
no_license
|
beloplv/entrega_python
|
ca129b832c403b9b1484c0193ebeeed44dc89396
|
05dc7a84721627cdd86f1670698dad1dd6fc9e38
|
refs/heads/master
| 2021-05-25T16:31:22.073058
| 2020-04-07T16:10:35
| 2020-04-07T16:10:35
| 253,826,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
lista = []
letras_primas = []
palabra = input('enter a word: ').lower()
#clean special characters
palabra = palabra.replace(' ', '')#strip out the spaces so they are not counted as letters
letras_sin_repetir = set(palabra)
for l in letras_sin_repetir:
lista.append([l,palabra.count(l)])
print(lista)
def es_primo (num):
if num <= 1:
return False
else:
for i in range(2,num):
if num % i == 0 and 1 != num:
return False
return True
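# Quick illustration: es_primo(2) and es_primo(3) return True; es_primo(1) and es_primo(4) return False.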
aux=0
for i in lista:
ok= es_primo(lista[aux][1])
if ok:
letras_primas.append(lista[aux][0])
        print('the letter ', lista[aux][0], ' appeared ', lista[aux][1], 'times')
aux+=1
dato = ' - '.join(letras_primas)
print('the letters ', dato, ' appeared a prime number of times')
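# Worked example (illustrative): for the input "banana" the counts are
# [['b', 1], ['a', 3], ['n', 2]] (order may vary); 3 and 2 are prime, so the
# script reports that the letters a and n appeared a prime number of times.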
|
[
"belo.lp@hotmail.com"
] |
belo.lp@hotmail.com
|
cf17aa81d5fd56cbaddfd67b56f8347f0dcc4ba9
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_managed_database_tables_operations.py
|
80cd58d73f50f63f89756880a71ad6973b217cc9
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 10,127
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_database_tables_operations import build_get_request, build_list_by_schema_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedDatabaseTablesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.sql.aio.SqlManagementClient`'s
:attr:`managed_database_tables` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_schema(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
schema_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.DatabaseTable"]:
"""List managed database tables.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance. Required.
:type managed_instance_name: str
:param database_name: The name of the database. Required.
:type database_name: str
:param schema_name: The name of the schema. Required.
:type schema_name: str
:param filter: An OData filter expression that filters elements in the collection. Default
value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DatabaseTable or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.DatabaseTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
cls: ClsType[_models.DatabaseTableListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_schema_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
schema_name=schema_name,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list_by_schema.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DatabaseTableListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_schema.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/schemas/{schemaName}/tables"
}
@distributed_trace_async
async def get(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
schema_name: str,
table_name: str,
**kwargs: Any
) -> _models.DatabaseTable:
"""Get managed database table.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance. Required.
:type managed_instance_name: str
:param database_name: The name of the database. Required.
:type database_name: str
:param schema_name: The name of the schema. Required.
:type schema_name: str
:param table_name: The name of the table. Required.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatabaseTable or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.DatabaseTable
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
cls: ClsType[_models.DatabaseTable] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
schema_name=schema_name,
table_name=table_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DatabaseTable", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}"
}
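# Hypothetical usage sketch (the client construction below is an assumption, not part of this
# generated module; it must run inside an async function):
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.sql.aio import SqlManagementClient
#   client = SqlManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   async for table in client.managed_database_tables.list_by_schema(
#           "my-rg", "my-managed-instance", "my-db", "dbo"):
#       print(table.name)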
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
428eee51e5773f507ac8d5e499d588f20cd89741
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02239/s467624321.py
|
e99fe13b3bd0e7848a7182a747adec5f254305d8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
n = int(input())
G = []
G.append([])
for i in range(n):
v = list(map(int, input().split()))
G.append(v[2:])
q = []
q.append(1)
checked = [False] * (n + 1)
checked[1] = True
d = [-1] * (n + 1)
d[1] = 0
while q:
current = q.pop(0)
for v in G[current]:
if not checked[v]:
q.append(v)
d[v] = d[current] + 1
checked[v] = True
for i in range(1, n + 1):
print(i, d[i])
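# Worked example (hypothetical input): for
#   4
#   1 2 2 4
#   2 1 4
#   3 0
#   4 1 3
# the BFS from vertex 1 prints the distances "1 0", "2 1", "3 2", "4 1".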
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
34c7f9943b99e7e6a7c9523b927c981eb06cb18f
|
6b3a706484d1ea07b595846f965f172f9be2dd3b
|
/flaskr/auth.py
|
0c8770ce46adb492ecd603402801caafb5e5cb04
|
[] |
no_license
|
tflucker/python-blog
|
1b97c79a72cc79e51bb7447a10a687d1f35be817
|
39de62c4ef6f7ac693b1cf7933a7da6299942b5f
|
refs/heads/main
| 2023-05-27T11:57:36.553086
| 2021-06-17T01:55:32
| 2021-06-17T01:55:32
| 377,334,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,408
|
py
|
import functools
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.security import check_password_hash, generate_password_hash
from flaskr.db import get_db
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.route('/register', methods=('GET', 'POST'))
def register():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db = get_db()
error = None
if not username:
error = 'Username is required'
elif not password:
error = 'Password is required'
elif db.execute('SELECT id FROM user WHERE username = ?', (username,)).fetchone() is not None:
error = f"User {username} is already registered."
if error is None:
db.execute('INSERT INTO user (username, password) VALUES (?, ?)', (username, generate_password_hash(password)))
db.commit()
return redirect(url_for('auth.login'))
flash(error)
return render_template('auth/register.html')
@bp.route('/login', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db = get_db()
error = None
user = db.execute('SELECT * FROM user WHERE username = ?', (username,)).fetchone()
if user is None:
error = 'Incorrect username.'
elif not check_password_hash(user['password'], password):
error = 'Incorrect password'
if error is None:
session.clear()
session['user_id'] = user['id']
return redirect(url_for('index'))
flash(error)
return render_template('auth/login.html')
@bp.before_app_request
def load_logged_in_user():
user_id = session.get('user_id')
if user_id is None:
g.user = None
else:
g.user = get_db().execute('SELECT * FROM user WHERE id = ?', (user_id,)).fetchone()
@bp.route('/logout')
def logout():
session.clear()
return redirect(url_for('index'))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for('auth.login'))
return view(**kwargs)
return wrapped_view
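# Typical usage sketch (the decorated view is hypothetical and would normally live in
# another blueprint):
#   @login_required
#   def create():
#       ...  # only reachable when g.user is set; anonymous users are redirected to auth.login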
|
[
"tflucker@gwmail.gwu.edu"
] |
tflucker@gwmail.gwu.edu
|
e5f226f2a01768eceeb8b8f7eeb9c4302a6a0c45
|
7aab309dee577d4df80f69b47dc4ec48d2ed451a
|
/python_tasks_advanced/cels.py
|
035d80e282735e05854446a90eea799be114e058
|
[] |
no_license
|
omitiev/python_lessons
|
64f657e32d4414e75c6510ec111240018ed6b7d5
|
97561e56814c514756e207c2b323ca8e1366994e
|
refs/heads/master
| 2021-10-09T06:32:30.154748
| 2018-12-22T12:02:28
| 2018-12-22T12:02:28
| 97,019,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,285
|
py
|
'''
Implement a Celsius descriptor that converts degrees Fahrenheit into degrees Celsius.
class Temperature:
    celsius = Celsius()
    def __init__(self, initial):
        self.fahrenheit = initial
To convert a temperature from Fahrenheit to Celsius, subtract 32 from the value and multiply the result by 5/9: ((f-32) / 1.8)
To convert a temperature from Celsius to Fahrenheit, multiply the value by 9/5 and add 32: (c * 1.8 + 32)
100 F = 37.78 C
'''
class Celsius:
def __get__(self, instance, owner):
return (float(instance.fahrenheit) - 32) / 1.8
def __set__(self, instance, value):
instance.fahrenheit = float(value) * 1.8 + 32
return instance.fahrenheit
class Temperature:
celsius = Celsius()
def __init__(self, initial):
self.fahrenheit = initial
def transform_to_celsius(self, obj):
self.celsius = obj
temp = Temperature(100)
print(temp.fahrenheit)
print(temp.celsius)
temp.celsius = 37.78
print(temp.fahrenheit)
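# Expected output of the demo above (approximately):
#   100
#   37.778   i.e. (100 - 32) / 1.8
#   100.004  i.e. 37.78 * 1.8 + 32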
|
[
"oleksii.mitiev@gmail.com"
] |
oleksii.mitiev@gmail.com
|
d599a32015509383a8cc397300fcaa9ee27645c3
|
0f7e3f0a74192bdf48810325469b8acd7e9b5541
|
/rent.py
|
aaad8113fd337f4762099caf28c0afcfdf7d1232
|
[] |
no_license
|
TelmanH/RentVehicleSystem
|
d97279cba970df5cee27ff7aecf4edc8476c6a46
|
01750a8d7c690699525efa5780b0c450a6f47d5e
|
refs/heads/master
| 2023-03-29T14:25:47.916959
| 2021-04-04T22:50:00
| 2021-04-04T22:50:00
| 322,725,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,763
|
py
|
import datetime
# parent class
class VehicleRent:
def __init__(self, stock):
self.stock = stock
self.now = 0
def displayStock(self):
"""
display stock
"""
print("{} vehicle available to rent".format(self.stock))
return self.stock
def rentHourly(self, n):
"""
rent hourly
"""
if n <= 0:
print("Number should be positive")
return None
elif n > self.stock:
print("Sorry, {} vehicle available to rent".format(self.stock))
return None
else:
self.now = datetime.datetime.now()
print("Rented a {} vehicle for hourly at {} hours".format(n, self.now.hour))
self.stock -= n
return self.now
def rentDaily(self, n):
"""
rent daily
"""
if n <= 0:
print("Number should be positive")
return None
elif n > self.stock:
print("Sorry, {} vehicle available to rent".format(self.stock))
return None
else:
self.now = datetime.datetime.now()
print("Rented a {} vehicle for daily at {} hours".format(n, self.now.hour))
self.stock -= n
return self.now
def returnVehicle(self, request, brand):
"""
return a bill
"""
car_h_price = 10
car_d_price = car_h_price * 8 / 10 * 24
bike_h_price = 5
bike_d_price = bike_h_price * 7 / 10 * 24
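        # Worked example (illustrative): 2 cars returned after exactly 2 hours on an hourly
        # basis cost 2 h * $10/h * 2 cars = $40; since 2 cars meet the threshold below, a
        # 20% discount brings the bill to $32.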
rentalTime, rentalBasis, numOfVehicle = request
bill = 0
if brand == "car":
if rentalTime and rentalBasis and numOfVehicle:
self.stock += numOfVehicle
now = datetime.datetime.now()
rentalPeriod = now - rentalTime
if rentalBasis == 1: # hourly
bill = rentalPeriod.seconds / 3600 * car_h_price * numOfVehicle
elif rentalBasis == 2: # daily
bill = rentalPeriod.seconds / (3600 * 24) * car_d_price * numOfVehicle
if (2 <= numOfVehicle):
print("You have extra 20% discount")
bill = bill * 0.8
print("Thank you for returning your car")
print("Price: $ {}".format(bill))
return bill
elif brand == "bike":
if rentalTime and rentalBasis and numOfVehicle:
self.stock += numOfVehicle
now = datetime.datetime.now()
rentalPeriod = now - rentalTime
if rentalBasis == 1: # hourly
bill = rentalPeriod.seconds / 3600 * bike_h_price * numOfVehicle
elif rentalBasis == 2: # daily
bill = rentalPeriod.seconds / (3600 * 24) * bike_d_price * numOfVehicle
if (4 <= numOfVehicle):
print("You have extra 20% discount")
bill = bill * 0.8
print("Thank you for returning your bike")
print("Price: $ {}".format(bill))
return bill
else:
            print("You did not rent a vehicle")
return None
# child class 1
class CarRent(VehicleRent):
global discount_rate
discount_rate = 15
def __init__(self, stock):
super().__init__(stock)
def discount(self, b):
"""
discount
"""
bill = b - (b * discount_rate) / 100
return bill
# child class 2
class BikeRent(VehicleRent):
def __init__(self, stock):
super().__init__(stock)
# customer
class Customer:
def __init__(self):
self.bikes = 0
self.rentalBasis_b = 0
self.rentalTime_b = 0
self.cars = 0
self.rentalBasis_c = 0
self.rentalTime_c = 0
def requestVehicle(self, brand):
"""
take a request bike or car from customer
"""
if brand == "bike":
bikes = input("How many bikes would you like to rent?")
try:
bikes = int(bikes)
except ValueError:
                print("Input should be a number")
return -1
if bikes < 1:
print("Number of Bikes should be greater than zero")
return -1
else:
self.bikes = bikes
return self.bikes
elif brand == "car":
cars = input("How many cars would you like to rent?")
try:
cars = int(cars)
except ValueError:
                print("Input should be a number")
return -1
if cars < 1:
print("Number of cars should be greater than zero")
return -1
else:
self.cars = cars
return self.cars
else:
print("Request vehicle error")
def returnVehicle(self, brand):
"""
return bikes or cars
"""
if brand == "bike":
if self.rentalTime_b and self.rentalBasis_b and self.bikes:
return self.rentalTime_b, self.rentalBasis_b, self.bikes
else:
return 0, 0, 0
elif brand == "car":
if self.rentalTime_c and self.rentalBasis_c and self.cars:
return self.rentalTime_c, self.rentalBasis_c, self.cars
else:
return 0, 0, 0
else:
print("Return vehicle Error")
|
[
"noreply@github.com"
] |
TelmanH.noreply@github.com
|
2dc7e2e97203b4b33219cb69dc4ddbb3662c6f49
|
c6a96dedabf27b581a7378123a7124d7d054991c
|
/tester.py
|
ea2b573df178b7443f853bf8115d6b66dfeaf08b
|
[] |
no_license
|
redeye93/ViterbiAlgorithm
|
b445d154d8e0aba812a3445bba2b4fc1dff4af4e
|
f5ca7c927d06685bb4cb3fe9a091b61085a64cdb
|
refs/heads/master
| 2021-04-03T09:12:52.920140
| 2018-03-10T21:46:37
| 2018-03-10T21:46:37
| 124,626,968
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
import sys
import os
from collections import Counter
def count_c(x, y):
if len(x)<len(y):
(x, y) = (y, x)
a = Counter(x)
b = Counter(y)
return sum(min(b[key], value) for (key, value) in a.items())
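# Illustrative example (hypothetical tag sequences): for the token lists
# ['DT', 'NN', 'VB'] and ['DT', 'NN', 'NN'], count_c returns 2 -- two tokens
# match, irrespective of their positions.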
if len(sys.argv) == 3:
file1 = sys.argv[1]
file2 = sys.argv[2]
if not os.path.exists(file1) or not os.path.exists(file2):
print('One of the files is missing')
exit(1)
correct = 0
incorrect = 0
with open(file1, encoding='utf8') as text1:
with open(file2, encoding='utf8') as text2:
for (x, y) in zip(text1, text2):
x = x.strip()
y = y.strip()
x = x.split()
y = y.split()
matching = count_c(x, y)
correct += matching
incorrect += len(x) - matching
print(correct)
print(incorrect)
print(correct + incorrect)
print(1.0 * correct / (correct + incorrect))
text1.close()
text2.close()
else:
print('Insufficient number of arguments')
exit(1)
|
[
"utkarshgera77@gmail.com"
] |
utkarshgera77@gmail.com
|
f05336e22c0ecd4e51379312cb55a58e69b04c87
|
02952ddf96e7960a3faef74485f4ffc12bcf2973
|
/tests/test_projects/test_parallelSDC/test_preconditioner.py
|
721bb0679d9f43391103863777497ff76214f947
|
[
"BSD-2-Clause"
] |
permissive
|
danielru/pySDC
|
5decca37e1ecea643fe21dac0f978e3fdaa24ac6
|
558b2b4db3aeb97e6a87e41cd4958a8a948af37a
|
refs/heads/master
| 2020-12-25T10:58:57.215298
| 2017-03-21T06:45:59
| 2017-03-21T06:45:59
| 31,062,846
| 0
| 0
| null | 2015-02-20T11:52:33
| 2015-02-20T11:52:33
| null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from projects.parallelSDC.preconditioner_playground import main, plot_iterations
def test_main():
main()
plot_iterations()
|
[
"r.speck@fz-juelich.de"
] |
r.speck@fz-juelich.de
|
1e13f89a1a8a0b4e88abe02f70cdd50a0a87215a
|
b9fe15c72998488a9dfe73469cf7eb30659dcfd4
|
/snake_eyes/buffered_distribution.py
|
18a15f102ffa6609fceca4df86e9464e9eba20c1
|
[
"MIT"
] |
permissive
|
bentheiii/snake_eyes
|
d8ea4e76aad5f060b64c55eed3d129a6d3ede01e
|
a5f53c203c6bb536592d9b3dcc270f924cd9ef6a
|
refs/heads/master
| 2020-09-08T00:06:24.856647
| 2020-01-22T08:44:55
| 2020-01-22T08:44:55
| 220,951,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,676
|
py
|
from __future__ import annotations
from itertools import chain
from numbers import Number
from typing import Generic, TypeVar, Iterable, Type, Any, Tuple, Dict, Optional, Mapping, Union
from dyndis import Self
import numpy as np
from snake_eyes.support_space import DiscreteFiniteSupportSpace
try:
from scipy import stats
except ImportError:
stats = None
from snake_eyes.bufferer import Bufferer, ChoiceBufferer
from snake_eyes.distribution import Distribution, add, mul, div, ConstDistribution, ReciprocalDistribution, \
SumDistribution, ProductDistribution, _maybe_parenthesise
from snake_eyes.util import prod
T = TypeVar('T')
class BufferedDistribution( Distribution[T], Generic[T]):
"""
A generic distribution that takes a bufferer and adapts it into a distribution
"""
def __init__(self, bufferer: Bufferer):
self.bufferer = bufferer
def get(self) -> T:
return next(self.bufferer)
def reciprocal(self):
return ReciprocalBufferedDistribution(self)
@add.implementor()
def add(self, other: Self):
return SumBufferedDistribution((self, other))
@add.implementor()
def add(self, other: Number):
return SumConstBufferedDistribution(self, other)
@add.implementor()
def add(self, other: ConstDistribution):
return self + other.value
@mul.implementor()
def mul(self, other: Self):
return ProductBufferedDistribution((self, other))
@mul.implementor()
def mul(self, other: Number):
return ProductConstBufferedDistribution(self, other)
@mul.implementor()
def mul(self, other: ConstDistribution):
return self * other.value
def get_n(self, n):
return self.bufferer.get_n(n)
class ReciprocalBufferedDistribution(BufferedDistribution[T], ReciprocalDistribution, Generic[T]):
def __init__(self, inner: BufferedDistribution[T]):
BufferedDistribution.__init__(self, inner.bufferer.reciprocal())
ReciprocalDistribution.__init__(self, inner)
class SumBufferedDistribution(BufferedDistribution[T], SumDistribution, Generic[T]):
def __init__(self, parts: Iterable[BufferedDistribution[T]]):
SumDistribution.__init__(self, parts)
BufferedDistribution.__init__(self, sum(p.bufferer for p in self.parts))
@add.implementor(symmetric=True)
def add(self, other: BufferedDistribution):
p = self.parts + (other,)
return type(self)(p)
class SumConstBufferedDistribution(BufferedDistribution[T], Generic[T]):
"""
A distribution that is the sum of a buffered distribution and a constant value
"""
def __init__(self, inner: BufferedDistribution[T], const: T):
self.inner = inner
self.const = const
super().__init__(inner.bufferer + const)
def mean(self):
m = self.inner.mean()
if m is None:
return None
return m + self.const
def variance(self):
return self.inner.variance()
def cumulative_density(self, k):
return self.inner.cumulative_density(k - self.const)
def probability(self, k):
return self.inner.probability(k - self.const)
def support_space(self):
iss = self.inner.support_space()
return iss and (iss + self.const)
@add.implementor(symmetric=True)
def add(self, other: BufferedDistribution):
return (self.inner + other) + self.const
@add.implementor(symmetric=True)
def add(self, other: Any):
if isinstance(other, Distribution):
return NotImplemented
return self.inner + (self.const + other)
@add.implementor()
def add(self, other):
return (self.inner + other.inner) + (self.const + other.const)
@add.implementor(symmetric=True)
def add(self, other: SumBufferedDistribution):
return (other + self.inner) + self.const
@mul.implementor(symmetric=True)
def mul(self, other: Any):
return self.inner * other + self.const * other
@div.implementor()
def truediv(self, other: Any):
return self.inner / other + self.const / other
def __eq__(self, other):
return type(self) is type(other) and (self.inner, self.const) == (other.inner, other.const)
def __str__(self):
return f'{_maybe_parenthesise(self.inner)} + {self.const}'
def __hash__(self):
return hash((type(self), self.inner, self.const))
class ProductBufferedDistribution( BufferedDistribution[T], ProductDistribution, Generic[T]):
def __init__(self, parts: Iterable[BufferedDistribution[T]]):
ProductDistribution.__init__(self, parts)
BufferedDistribution.__init__(self, prod(p.bufferer for p in self.parts))
@mul.implementor(symmetric=True)
def mul(self, other: BufferedDistribution):
p = self.parts + (other,)
return type(self)(p)
class ProductConstBufferedDistribution( BufferedDistribution[T], Generic[T]):
"""
A distribution that is the product of a buffered distribution and a constant value
"""
def __init__(self, inner: BufferedDistribution[T], const: T):
self.inner = inner
self.const = const
super().__init__(inner.bufferer * const)
def mean(self):
m = self.inner.mean()
if m is None:
return None
return m * self.const
def variance(self):
m = self.inner.variance()
if m is None:
return None
return m * self.const ** 2
def support_space(self):
iss = self.inner.support_space()
return iss and (iss * self.const)
def cumulative_density(self, k):
return self.inner.cumulative_density(k / self.const)
def probability(self, k):
return self.inner.probability(k / self.const)
@mul.implementor(symmetric=True)
def mul(self, other: BufferedDistribution):
return (self.inner * other) * self.const
@mul.implementor(symmetric=True)
def mul(self, other: Any):
if isinstance(other, Distribution):
return NotImplemented
return self.inner * (self.const * other)
@mul.implementor()
def mul(self, other):
return (self.inner * other.inner) * (self.const * other.const)
@mul.implementor(symmetric=True)
def mul(self, other: ProductBufferedDistribution):
return (other * self.inner) * self.const
def __eq__(self, other):
return type(self) is type(other) and (self.inner, self.const) == (other.inner, other.const)
def __str__(self):
return f'{_maybe_parenthesise(self.inner)} * {self.const}'
def __hash__(self):
return hash((type(self), self.inner, self.const))
class BuffererMakerDistribution( BufferedDistribution[T], Generic[T]):
"""
A specialized bufferer distribution that makes use of already created and cached bufferers using the
bufferer's make method.
"""
def __init__(self, bufferer_cls: Type[Bufferer], args, kwargs=None):
super().__init__(bufferer_cls.make(args, kwargs))
self.args: Tuple[Tuple, Optional[Dict[str, Any]]] = (args, kwargs)
def __repr__(self):
args_str = self.args_str()
args = ()
kwargs = None
if isinstance(args_str, Mapping):
kwargs = args_str
elif len(args_str) != 2 or not isinstance(args_str[1], Mapping) or not isinstance(args_str[0], Iterable):
args = args_str
else:
args, kwargs = args_str
args_str = (repr(a) for a in args)
if kwargs:
args_str = chain(args_str, (f'{k}={v!r}' for (k, v) in kwargs.items()))
return type(self).__name__ + "(" + ", ".join(args_str) + ")"
def args_str(self) -> Union[Tuple[Iterable, Optional[Mapping[str, Any]]], Iterable, Mapping[str, Any]]:
return self.args
def __eq__(self, other):
return type(self) is type(other) and self.args == other.args
def __hash__(self):
return hash(repr(self))
class ChoiceDistribution(BuffererMakerDistribution):
"""
A discrete distribution that chooses from a numpy array as np.random.choice
"""
def __init__(self, choices, p=None):
choices = tuple(choices)
if p is not None:
p = tuple(p)
super().__init__(ChoiceBufferer, (choices,), {'p': p})
self.choices = choices
self.p = p
def mean(self):
if self.p is not None:
return sum(
i * p for (i, p) in zip(self.choices, self.p)
)
return sum(
i for i in self.choices
) / len(self.choices)
    def variance(self):
        # Var[X] = E[X^2] - E[X]^2
        if self.p is not None:
            return sum(
                i ** 2 * p for (i, p) in zip(self.choices, self.p)
            ) - self.mean() ** 2
        return sum(
            i ** 2 for i in self.choices
        ) / len(self.choices) - self.mean() ** 2
def support_space(self):
return DiscreteFiniteSupportSpace(self.choices)
    def cumulative_density(self, k):
        # self.choices and self.p are stored as tuples, so convert to arrays for the masks
        choices = np.asarray(self.choices)
        if self.p is not None:
            return np.sum(np.asarray(self.p)[choices <= k])
        return np.sum(choices <= k) / len(self.choices)
    def probability(self, k):
        choices = np.asarray(self.choices)
        if self.p is not None:
            return np.sum(np.asarray(self.p)[choices == k])
        return np.sum(choices == k) / len(self.choices)
@add.implementor(symmetric=True)
def add(self, other: Number):
choices = [c + other for c in self.choices]
return type(self)(choices, p=self.p)
@mul.implementor(symmetric=True)
def mul(self, other: Number):
choices = [c * other for c in self.choices]
return type(self)(choices, p=self.p)
def reciprocal(self):
choices = [1 / c for c in self.choices]
return type(self)(choices, p=self.p)
def truncate(self, min=None, max=None):
if self.p is not None:
choices = []
probs = []
prob_sum = 0
for c, p in zip(self.choices, self.p):
if (min is None or min <= c) and (max is None or max >= c):
choices.append(c)
probs.append(p)
prob_sum += p
if not prob_sum:
raise ValueError("can't truncate all options")
probs = [p / prob_sum for p in probs]
return type(self)(choices, p=probs)
else:
choices = []
for c in self.choices:
if (min is None or min <= c) and (max is None or max >= c):
choices.append(c)
if not choices:
raise ValueError("can't truncate all options")
return type(self)(choices)
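# Rough usage sketch (illustrative only; assumes the package's bufferers import cleanly):
#   d = ChoiceDistribution([1, 2, 3], p=[0.2, 0.3, 0.5])
#   d.mean()          -> 1*0.2 + 2*0.3 + 3*0.5 = 2.3
#   d.probability(2)  -> 0.3
#   d.get()           -> a single draw, e.g. 3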
|
[
"sample@notreal.fake"
] |
sample@notreal.fake
|
6e91dd8602f65435370b4eed75287e0436e66dba
|
4d1f9c7253d7351227d5d9be45d650d88c2ee75b
|
/dsgd.py
|
590c52f89d8091828e921f27e0577acd1fefc6cb
|
[] |
no_license
|
shaw-stat/MF-under-attack-model
|
0a9a1f64840b050ee3e750b5e065a66ed3a5b844
|
dea44b4d65711125f3468635cca96775a4f6bb4f
|
refs/heads/master
| 2023-02-28T10:23:40.548452
| 2021-02-03T04:34:49
| 2021-02-03T04:34:49
| 335,347,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,149
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 20:50:10 2020
@author: shaw
"""
import sys
import math
from time import time
import random
import csv
import numpy
from pyspark import SparkContext
from scipy import sparse
from sklearn.preprocessing import normalize,scale
import numpy as np
import pandas as pd
from functions import *
#%matplotlib inline
rho0 = 0.2
C = 10 # Number of factors
nbr_iter = 50# number of iterations
block_number = 4 # number of blocks to take from the matrix
sc= SparkContext.getOrCreate()
#mytest = np.loadtxt("D:\\新建文件夹2019\\SG_MCMC\\R3_test.txt", dtype=int)
mytest = np.loadtxt("D:\\新建文件夹2019\\SG_MCMC\\ua.test", dtype=int)
mytrain =np.loadtxt("D:\\新建文件夹2019\\SG_MCMC\\ua.base", dtype=int)
test_mean = mytest[:,2].mean()
train_mean = mytrain[:,2].mean()
mean_rate = (test_mean+train_mean)/2
# eta0 = 0.1
eta0=0.9
def SGD(R, Q, P, mask, Ni, Nj, blockRange):
"""
This function is an implementation of the SGD algorithm described above.
Input : R, Q, P, mask, Ni, Nj, blockRange
Output : Q, P, n, blockRange
"""
global rho0,eta0
#eta = 0.01#first step size
R_new = R.nonzero()
n = R_new[0].size
#eta=eta0
rho=rho0
for i in range(n):
# if i% 10000 == 0:
# eta=eta0/(2**(i/10000))
# rho=rho0/(2**(i/10000))
# if i<n:
# tau = i/n
# eta = eta0*(1-tau)+tau*0.01*eta0
# else:
# eta = 0.01*eta0
eta = eta0/(i+1)
j = random.randint(0, n-1) # Pick randomly an element j
row, col = R_new[0][j], R_new[1][j] # retrieve the row and column of the random j
# take a small blocks from R, mask, Q and P
Ri = R[row,col]
maski = mask[row,col]
Qi = Q[row,:]
Pi = P[:,col]
# compute the gradient of Qi and Pi
_, grad_Q = objective_Q(Pi, Qi, Ri, maski, rho)
_, grad_P = objective_P(Pi, Qi, Ri, maski, rho)
#eta = eta0 * (1 + i) ** (- 0.5)
#eta=eta*0.96
#eta=eta0
# update the blocks of P and Q
Q[row,:] = Qi - eta * grad_Q
P[:,col] = Pi - eta * grad_P
#print(np.linalg.norm(Q[row,:]))
return (Q, P, n, blockRange)
def SGD2(R,mask, test,mask2):
"""
This function is an implementation of the SGD algorithm described above.
Input : R, Q, P, mask, Ni, Nj, blockRange
Output : Q, P, n, blockRange
"""
# Q = numpy.random.random_sample((R.shape[0], C))
# P = numpy.random.random_sample((C, R.shape[1]))
#Q =np.loadtxt('Q3_sgd_new3.csv',delimiter=',')
#P =np.loadtxt('P3_sgd_new3.csv',delimiter=',')
Q = np.ones([R.shape[0],C])*0.3
P = np.ones([C,R.shape[1]])*0.3
global eta0,rho0
#eta = 0.01#first step size
R_new = R.nonzero()
n = R_new[0].size
Rmse = []
T=[]
t0=time()
eta=eta0
rho=rho0
for i in range(10000):
if i<50000:
tau = i/50000
eta = eta0*(1-tau)+tau*0.01*eta0
else:
eta = 0.01*eta0
#eta=eta0
# if i% 20000 == 0:
# eta=eta0/(2**(i/20000))
# #rho=rho0/(2**(i/20000))
# #eta=eta0*(0.96**(i/10000))
# print("... iteration %s, eta %f,rho%f"%(i,eta,rho))
j = random.randint(0, n-1) # Pick randomly an element j
row, col = R_new[0][j], R_new[1][j] # retrieve the row and column of the random j
# take a small blocks from R, mask, Q and P
Ri = R[row,col]
maski= mask[row,col]
Qi = Q[row,:]
Pi = P[:,col]
# compute the gradient of Qi and Pi
_, grad_Q = objective_Q(Pi, Qi, Ri, maski, rho)
_, grad_P = objective_P(Pi, Qi, Ri, maski, rho)
#eta = eta0 * (1 + i) ** (- 0.5)
#eta=eta*0.96
#eta=eta0
#if ((t>0)and(Rmse<))
# update the blocks of P and Q
Q[row,:] = Qi - eta * grad_Q
P[:,col] = Pi - eta * grad_P
#print(np.linalg.norm(Q[row,:]))
nuser = test.shape[0]
nitem = test.shape[1]
pre = np.dot(Q[:nuser,:], P[:,:nitem])
#pre[np.where((pre>0)&(pre<1))] = 1
#pre[np.where(pre>5)] = 5
temp = mask2*(test-pre)
rows, cols = np.nonzero(temp)
Rmse.append(np.sqrt(np.power(temp[rows,cols],2).mean()))
T.append(time()-t0)
return (Q, P, Rmse,T)
def Parallelized_SGD(R, mask,test,mask2):
"""
This function performs the Parallelized SGD algorithm
Input : R, mask
Output : Q, P
"""
T=[]
t0=time()
global nbr_iter, block_number, C,eta0,rho0
# Q = np.ones([R.shape[0],C])*0.5
# P = np.ones([C,R.shape[1]])*0.5
Q = numpy.random.random_sample((R.shape[0], C))
P = numpy.random.random_sample((C, R.shape[1]))
#Q =np.loadtxt('Q3_sgd5.csv',delimiter=',')
#P =np.loadtxt('P3_sgd5.csv',delimiter=',')
block_i = (int(R.shape[0]/block_number), int(R.shape[1]/block_number))
rowRangeList = [[k*block_i[0],(k+1)*block_i[0]] for k in range(block_number)]
colRangeList = [[k*block_i[1],(k+1)*block_i[1]] for k in range(block_number)]
rowRangeList[-1][1] += R.shape[0]%block_number
colRangeList[-1][1] += R.shape[1]%block_number
Rmse = []
for iter_ in range(nbr_iter):
if iter_ % 10 == 0:
print("... iteration %s"%(iter_))
for epoch in range(block_number):
grid = []
for block in range(block_number):
rowRange = [int(rowRangeList[block][0]), int(rowRangeList[block][1])]
colRange = [int(colRangeList[block][0]), int(colRangeList[block][1])]
# The subsamples in each matrix and vector
Rn = R[rowRange[0]:rowRange[1], colRange[0]:colRange[1]]
maskn = mask[rowRange[0]:rowRange[1], colRange[0]:colRange[1]]
Qn = Q[rowRange[0]:rowRange[1],:]
Pn = P[:,colRange[0]:colRange[1]]
Ni = {}
for i in range(rowRange[0],rowRange[1]):
Ni[int(i-int(rowRange[0]))] = R[i,:].nonzero()[0].size
Nj = {}
for i in range(colRange[0],colRange[1]):
Nj[i-colRange[0]] = R[:,i].nonzero()[0].size
if (Rn.nonzero()[0].size != 0):
grid.append([Rn, Qn, Pn, maskn, Ni, Nj, (rowRange, colRange)])
rdd = sc.parallelize(grid, block_number).\
map(lambda x: SGD(x[0],x[1],x[2],x[3],x[4],x[5],x[6])).collect()
for elem in rdd:
rowRange,colRange = elem[3]
Q[rowRange[0]:rowRange[1],:] = elem[0]
P[:,colRange[0]:colRange[1]] = elem[1]
colRangeList.insert(0,colRangeList.pop())
nuser = test.shape[0]
nitem = test.shape[1]
pre = np.dot(Q[:nuser,:], P[:,:nitem])
#pre[np.where((pre>0)&(pre<1))] = 1
#pre[np.where(pre>5)] = 5
temp = mask2*(test-pre)
rows, cols = np.nonzero(temp)
Rmse.append(np.sqrt(np.power(temp[rows,cols],2).mean()))
T.append(time()-t0)
return Q,P,Rmse,T
def outputMatrix(A, path):
"""
This function outputs a matrix to a csv file
"""
f = open(path, 'w', 100)
rows= A.shape[0]
cols = A.shape[1]
for row in range(rows):
for col in range(cols):
if col == cols-1:
f.write(str(A[row,col]))
else:
f.write(str(A[row,col]) + ",")
f.write("\n")
f.flush()
f.close()
def load_data(filename="u.data",scale = True,small_data=False):
"""
This function returns :
R : the matrix user-item containing the ratings
mask : matrix is equal to 1 if a score existes and 0 otherwise
"""
global mean_rate
data = np.loadtxt(filename, dtype=int)[:,:3]
#data = data_norm(data0)
    R = sparse.csr_matrix((data[:, 2], (data[:, 0]-1, data[:, 1]-1)),dtype=float)
mask = sparse.csr_matrix((np.ones(data[:, 2].shape),(data[:, 0]-1, data[:, 1]-1)), dtype=bool )
# #normalization
# R= (R - np.mean(R, axis=0))
# R= (R - np.mean(R, axis=1)) / np.std(R, axis=1)
# take a small part of the whole data for testing
if scale==True:
if filename=="D:\\新建文件夹2019\\SG_MCMC\\ua.base":
R = np.loadtxt('R_a_base_scale.txt',delimiter=',')
mask = sparse.csr_matrix((np.ones(R.nonzero()[0].shape[0]),(R.nonzero()[0], R.nonzero()[1])), dtype=bool )
elif filename=="D:\\新建文件夹2019\\SG_MCMC\\ua.test":
R = np.loadtxt('R_a_test_scale.txt',delimiter=',')
mask = sparse.csr_matrix((np.ones(R.nonzero()[0].shape[0]),(R.nonzero()[0], R.nonzero()[1])), dtype=bool )
else:
print('not scaling')
if small_data == True:
R = (R[0:100, 0:100].copy())
mask = (mask[0:100, 0:100].copy())
# R = R.toarray()
# mask = mask.toarray()
return R, mask
def data_norm(data,mode):
f_data = pd.DataFrame(data)
if mode==1:
        data[:,2] = data[:,2]-np.mean(data[:,2])
return data
def scale_sparse_vector(x):
if x[x!=0].shape[0]>0:
x[x!=0]=scale(x[x!=0],with_mean=True,with_std=True)
def scale_matrix(R):
RR = R.copy()
d_R = pd.DataFrame(RR)
d_R.apply(scale_sparse_vector,axis=1)
return np.array(d_R)
|
[
"1653519@tongji.edu.cn"
] |
1653519@tongji.edu.cn
|
41f32d17361896607a0dbb0526f28df14fc0dd44
|
f3304ceb4407e818d30407937fec1fac2c212307
|
/run2.py
|
142d9ad7bf2bbdd68be68cfc36e6f7405590a2be
|
[] |
no_license
|
kkirsanov/avito-parser
|
d6dc621a93ec54ca988ffd134a48b7cb83260e5c
|
d00f54f706da4734f0a12882ffc59a882fad850a
|
refs/heads/master
| 2020-03-24T21:04:37.376646
| 2018-07-31T12:43:08
| 2018-07-31T12:43:08
| 143,012,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
#!/usr/bin/python
# Deduplicate rows of z.csv by phone number (tab-separated field -5), rewriting a leading '8' to '7'
import leveldb
import json
cnt = 0
f = open("z.csv", "r")
f2 = open("z3.csv", "w")
ph = set()
for l in f:
d = l.split('\t')
try:
if d[-5][0] == '8':
d[-5] = "7"+d[-5][1:]
p = d[-5]
if p not in ph:
st = "\t".join(d)
# print st
f2.write(st)
ph.add(d[-5])
else:
pass
except:
pass
f2.close()
print len(ph)
|
[
"kkirsanov@gmail.com"
] |
kkirsanov@gmail.com
|
65bbdc9338d1742bfe07263040119d0da97205ce
|
d3af72e4c623dffeda95e662d495a95c8f2e317a
|
/scripts/gene_checker/annotations/utils/utils.py
|
ebd9775887739a4562e0953e798ff45a0f24a331
|
[] |
no_license
|
bioinf/bi2014-mycoplasma-genitalium
|
0e2fbf095a461339064ea38f1be4586897f7c2ac
|
bd8eb82bb8d883faeb0492d74deb7a396577b782
|
refs/heads/master
| 2016-09-05T11:34:00.325602
| 2014-12-06T12:37:12
| 2014-12-06T12:37:12
| 24,504,082
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
__author__ = 'nikita_kartashov'
NUCLEOTIDE_COMPLIMENTS = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C'}
CODON_LENGTH = 3
START_CODON = 'AUG'
STOP_CODONS = ['UAA', 'UAG']
PURINES = ['A', 'G']
SHINE_DALGARNO = 'AGGAGG'
def split_into_ns(data, n):
return zip(*(iter(data),) * n)
def fst(x):
return x[0]
def snd(x):
return x[1]
def nucleotide_compliment(nucleotide):
return NUCLEOTIDE_COMPLIMENTS[nucleotide]
def compliment(dna):
return ''.join(map(nucleotide_compliment, dna))
def dna_to_mrna(dna):
def mapper(nucleotide):
return 'U' if nucleotide == 'T' else nucleotide
return ''.join(mapper(nucleotide_compliment(nucleotide)) for nucleotide in dna)
def rna_to_dna(rna):
def mapper(nucleotide):
return 'T' if nucleotide == 'U' else nucleotide
return ''.join(map(mapper, rna))
def reverse_compliment(dna):
return compliment(reversed(dna))
def ORF(code):
mrna = dna_to_mrna(code)
try:
start_index = mrna.index(START_CODON)
def stop_index(codon, starting):
try:
                data = list(map(lambda x: ''.join(x), split_into_ns(mrna[starting:], CODON_LENGTH)))
                return starting + data.index(codon) * 3
except ValueError:
return len(mrna) + 1
return start_index, min(stop_index(codon, start_index) for codon in STOP_CODONS)
except ValueError:
return False
DEFAULT_WINDOW = 6
DEFAULT_DISTANCE = 10
DEFAULT_STEPS = [step - 5 for step in range(0, 10)]
DEFAULT_RICHNESS = 0.7
def purine_richness(area):
if not area:
return 0
return sum((1 if nucleotide in PURINES else 0 for nucleotide in area)) * 1.0 / len(area)
def is_purine_rich(area, richness=DEFAULT_RICHNESS):
if not area:
return False
area_richness = purine_richness(area)
print(area_richness)
return area_richness >= richness
def has_Shine_Dalgarno(code, start, spacer = 6):
return True
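# Hedged usage sketch (the DNA string below is made up for illustration and is
# not part of the original module).
if __name__ == '__main__':
    sample_dna = 'TTACATTTATCT'
    print(dna_to_mrna(sample_dna))  # -> AAUGUAAAUAGA
    print(ORF(sample_dna))          # -> (1, 4): AUG at index 1, first stop codon at index 4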
|
[
"snailandmail@gmail.com"
] |
snailandmail@gmail.com
|
da8bd8f4a014138a3d91dc7d39ac778710cd0c9a
|
e2faae27d29a82c02ccbea3170b6b86d033c2318
|
/bmi_calculator.py
|
4c1ac31d396ece5a12ef26858e8ad4ea94142c46
|
[] |
no_license
|
chrynx/python
|
526087f8a76969e5909146021fe8b2b684f51b9d
|
b4ce4960f7d7d7d2ccdf223adc0cc781a6074c8b
|
refs/heads/master
| 2023-01-11T00:16:43.341877
| 2017-12-16T14:28:46
| 2017-12-16T14:28:46
| 110,001,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
user = raw_input('Hello, What is your name? -> ')
print 'Welcome, ' + user
weight = float(raw_input('Can you please tell me your weight in kilograms? -> '))
height = float(raw_input('And your height in meters? -> '))
def bmi_calculator(w, h):
bmi = w / (h ** 2)
print 'Hello, Your BMI is -> ' + str(bmi)
if bmi < 18.5:
print 'Based on our charts, you are considered underweight, please contact your doctor for a weight gain plan'
if bmi >= 18.5 and bmi < 25:
print 'Based on our charts, you are considered healthy, keep up the good work'
if bmi >= 25 and bmi < 30:
print 'Based on our charts, you are considered overweight, please contact your doctor for a weight loss plan'
if bmi >= 30:
print 'Based on our charts, you are considered obese, please contact your doctor for an immediate weight loss plan'
print 'Thank you for using this program '
bmi_calculator(weight, height)
|
[
"ralphmadriaga@gmail.com"
] |
ralphmadriaga@gmail.com
|
f980dc402cb2aeb058ee68f17487aaa041dc20ba
|
a02a0e814dbb52753def1b62b76c772506578e75
|
/face_tag_video.py
|
1b47bbcd0547ed325a102ba6edb1a5e93a9d19f5
|
[] |
no_license
|
rashmibhaty/Face_Tag_Generator
|
0b7ccb85f1bc665a16729432c85867544d3ada6e
|
527729a68128b2539fcff7d0229fb9e4e6547ad4
|
refs/heads/master
| 2022-11-22T20:32:04.969325
| 2020-07-28T07:07:31
| 2020-07-28T07:07:31
| 267,585,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,592
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 18:11:42 2020
@author: rashmibh
"""
import cv2
from keras.models import load_model
import numpy as np
import os
#Video to use for face tagging
Video_File_Name='VID_20200518_191839.mp4'
MODEL_FILE='model.facedetect_family'
INDICES_FILE='class_indices_saved_family.npy'
list_indices=[]
if os.path.isfile(INDICES_FILE):
class_indices = np.load(INDICES_FILE,allow_pickle=True).item()
[list_indices.extend([[k,v]]) for k,v in class_indices.items()]
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
#Load the saved model
model = load_model(MODEL_FILE)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
#Setup face detection part
cascPath = 'haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(Video_File_Name)
while True:
# Capture frame-by-frame
    ret, frame = video_capture.read()
    # stop when the video has no more frames
    if not ret:
        break
    # print(frame)
    (h, w, d) = frame.shape
#Detection not working well on large images
r = 600.0 / w
dim = (600, int(h * r))
resized = cv2.resize(frame, dim)
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (11, 11), 0)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=7,
minSize=(50, 50),
flags=cv2.CASCADE_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(resized, (x, y), (x+w, y+h), (0, 255, 0), 2)
roi = resized[y:(y+h), (x):(x+w)]
roi = cv2.resize(roi,(64,64))
roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
#Get the prection value for the current face
pred = model.predict(roi[np.newaxis, :, :, np.newaxis]/255)
        print(pred)
        # fall back to a default label in case no class index matches
        name = "unknown"
        for item in list_indices:
            if item[1] == np.argmax(pred):
                name = item[0]
                break
cv2.putText(resized, name, (x, y-5),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
cv2.putText(resized, str(pred.max()), (x, y+h+5),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow(Video_File_Name, resized)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
rashmibhaty.noreply@github.com
|
792b27211442dbcce6022c3d2d48273e4a5ec3ed
|
6ab073e94e4b3241c65614c7a8eecb2afa3d3dc9
|
/app/core/migrations/0003_ingredient.py
|
b8c63d2f176c3e87e3b4ce17236650da531d001a
|
[
"MIT"
] |
permissive
|
shaquibquraish/recipie-app-api
|
c05f90e4c9e0b428c80c1d9d7b96e5159b7199a2
|
e90e63ba053ffea6ff801d476ca4e4c77c5ff946
|
refs/heads/master
| 2020-12-15T08:25:50.796182
| 2020-01-28T14:55:47
| 2020-01-28T14:55:47
| 235,045,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
# Generated by Django 3.0.2 on 2020-01-22 19:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"shaquibquraishi@gmail.com"
] |
shaquibquraishi@gmail.com
|
f99d52f317bd6aca8f1627b1732eab6e46b1c580
|
f62ff90d7850af458d8f12386fc9ee9134dbe7c1
|
/Plots/Showplots/Model_4/Slopes.py
|
4a50fda1d37fcada8edb14639b508ef35b6ea07e
|
[] |
no_license
|
AlexSchmid22191/EIS_R_Sim
|
51b431f078cb455fc38637c192436c0523449565
|
851b061e60811e1e58a5b2fd4e393e529c3f86ac
|
refs/heads/master
| 2023-06-27T17:40:59.177270
| 2021-07-22T11:50:27
| 2021-07-22T11:50:27
| 380,768,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
from matplotlib.pyplot import subplots, show
from matplotlib.style import use
from numpy import load
from Equations import e, k, T
use('../Show.mplstyle')
data = load('../../../Currents_Resistances_Model_4/Slope_Data_Model_4.npy')
fig, axes = subplots(nrows=2, ncols=2, figsize=(12, 8))
for ax in axes.flatten():
ax.set_xscale('log')
ax.set_xlabel('Oxygen partial pressure (bar)')
ax.set_ylabel('Overpotential (V)')
colmesh_p = axes[0, 0].pcolormesh(data['pressure'], data['overpotential'], data['p_slope'], vmin=0, vmax=1)
cbar_p = fig.colorbar(colmesh_p, ax=axes[0, 0], label=r'$\frac{d\ln j}{d\ln p}$')
cbar_p.ax.minorticks_off()
colmesh_n = axes[0, 1].pcolormesh(data['pressure'], data['overpotential'], data['n_slope']*k*T/e, vmin=0, vmax=4)
cbar_n = fig.colorbar(colmesh_n, ax=axes[0, 1], label=r'$\frac{d\ln j}{d\eta} (\frac{e}{kT})$')
cbar_n.ax.minorticks_off()
colmesh_rp = axes[1, 0].pcolormesh(data['pressure'], data['overpotential'], data['rp_slope'], vmin=-1.25, vmax=0.5)
cbar_rp = fig.colorbar(colmesh_rp, ax=axes[1, 0], label=r'$\frac{d\ln R}{d\ln p}$')
cbar_rp.ax.minorticks_off()
colmesh_rn = axes[1, 1].pcolormesh(data['pressure'], data['overpotential'], data['rn_slope']*k*T/e, vmin=-4, vmax=4)
cbar_rn = fig.colorbar(colmesh_rn, ax=axes[1, 1], label=r'$\frac{d\ln R}{d\eta} (\frac{e}{kT})$')
cbar_rn.ax.minorticks_off()
fig.tight_layout()
fig.savefig('Plots/Slopes.png')
show()
|
[
"Alex.Schmid91@gmail.com"
] |
Alex.Schmid91@gmail.com
|
873cd6116a82e9e22ce7abe0b328cb92468b58be
|
ca970e84e2138e3fb57a95706a7ecfed4879bc44
|
/Advanced_python/Object_internals_and_custom_attributes/vector_2.py
|
22e1a671523ae3123202dca29da7a1ad7837f76c
|
[] |
no_license
|
thxa/test_python
|
b67854ddfa2d89c315917e0739a419d085ff7845
|
d8c1e0204d54e2c9e0799a5b41c81399f1487e45
|
refs/heads/master
| 2020-06-26T19:07:28.570894
| 2020-03-10T20:34:55
| 2020-03-10T20:34:55
| 199,723,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,517
|
py
|
class Vector:
def __init__(self, **coords):
private_coords = {'_' + k: v for k, v in coords.items()}
self.__dict__.update(private_coords)
# def __getattr__(self, name):
# return "name = %s" %name
# def __getattr__(self, name):
# private_name = '_' + name
# return getattr(self, private_name)
# https://docs.python.org/3/reference/datamodel.html#object.__getattr__
def __getattr__(self, name):
private_name = '_' + name
try:
return self.__dict__[private_name]
except KeyError:
raise AttributeError("{!r} object has no attribute {!r}".format(
self.__class__, name))
# https://docs.python.org/3/reference/datamodel.html#object.__setattr__
def __setattr__(self, name, value):
raise AttributeError("Can't set attribute {!r}".format(name))
# https://docs.python.org/3/reference/datamodel.html#object.__delattr__
def __delattr__(self, name):
raise AttributeError("Can't delete attribute {!r}".format(name))
def __repr__(self):
return "{}({})".format(
self.__class__.__name__,
', '.join("{k}={v}".format(
k=k[1:],
v=self.__dict__[k])
for k in sorted(self.__dict__.keys())))
class ColoredVector(Vector):
COLOR_INDEXES = ("red", "green", "blue")
def __init__(self, red, green, blue, **coords):
super().__init__(**coords)
self.__dict__["color"] = [red, green, blue]
def __getattr__(self, name):
try:
channel = ColoredVector.COLOR_INDEXES.index(name)
except ValueError:
return super().__getattr__(name)
else:
return self.__dict__["color"][channel]
def __setattr__(self, name, value):
try:
channel = ColoredVector.COLOR_INDEXES.index(name)
except ValueError:
super().__setattr__(name, value)
else:
self.__dict__["color"][channel] = value
# def __delattr__(self, name):
def __repr__(self):
keys = set(self.__dict__.keys())
keys.discard("color")
coords = ', '.join(
"{k}={v}".format(
k=k[1:],
v=self.__dict__[k])
for k in sorted(keys))
return "{cls}({red}, {green}, {blue}, {coords})".format(
cls=self.__class__.__name__,
red=self.red,
green=self.green,
blue=self.blue,
coords=coords)
def main():
v = Vector(p=4, q=2)
print(v)
print(v.__dict__)
print(v.p)
# v.p = 2
print(v._p)
# print(v.x)
# del v.p
# del v._p
# v._p = 1
# v.__dict__['+p']
cv = ColoredVector(red=23, green=44, blue=238, p=9, q=14)
print(cv)
print(cv.red)
print(cv.green)
print(cv.blue)
print(cv.p)
print(cv.q)
print(dir(cv))
print(cv.__dict__)
if __name__ == '__main__':
main()
|
[
"33045743+thxa@users.noreply.github.com"
] |
33045743+thxa@users.noreply.github.com
|
d22257ae400edba16284923f650c9396517b0122
|
f40cd3359ec78bd2723cfdb80de8fe3c77003681
|
/image_processing/process_images.py
|
d931281237ac58982417392c480c0358845f8b85
|
[] |
no_license
|
LanaSina/prednet_gol
|
be1c72173bae171c527483baf36d773bbc9f3033
|
1fdc853e1241a1e4dad782a8d9c48279042cb60f
|
refs/heads/master
| 2020-04-27T17:16:05.842755
| 2019-09-02T07:22:08
| 2019-09-02T07:22:08
| 174,510,932
| 0
| 3
| null | 2019-09-02T07:22:09
| 2019-03-08T09:47:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,857
|
py
|
# Create image datasets.
import argparse
import importlib
import numpy as np
import os
import requests
import urllib.request
import sys
from bs4 import BeautifulSoup
import hickle as hkl
from imageio import imread
from scipy.misc import imresize
usage = 'Usage: python {} DATA_DIR [N_IMAGES] [ORDER] [--help]'.format(__file__)
parser = argparse.ArgumentParser(description='This script is to generate .hkl files for train, test and val images',
usage=usage)
parser.add_argument('data_dir', action='store', nargs=None,
type=str, help='path to directory containing the image _folder_.')
parser.add_argument('n_images', action='store', nargs='?', default=-1,
type=int, help='optional: total number of images to use.')
parser.add_argument('order', action='store', nargs='?', default=0,
type=int, help='optional: 0 for regular order, 1 for inverse ordering of frames.')
args = parser.parse_args()
DATA_DIR = args.data_dir
desired_im_sz = (128, 160)
#train, val, test
split_ratio = np.array([0.8,0.1,0.1])
splits = ["train", "val", "test"]
# Processes images and saves them in train, val, test splits.
# Order : 0 for normal, 1 for reverse
def process_data(n_images=-1, order=0):
im_dir = DATA_DIR + "/images/"
image_list = sorted(os.listdir(im_dir))
if order == 1:
image_list.reverse()
if n_images==-1:
n_images = len(image_list)
s = 0
im_list = []
source_list = []
print(n_images, " images")
    # cumulative integer split boundaries, e.g. [0.8n, 0.9n, n], so each split is saved in turn
    limits = np.round(np.cumsum(split_ratio) * n_images).astype(int)
print(limits)
i = 0
for image_name in image_list:
        while s < len(limits) and limits[s] == 0:
            s = s + 1
im_list += [im_dir + image_name]
# print(image_name)
source_list += [im_dir]
i = i + 1
if i==limits[s]:
split = splits[s]
s = s + 1
# save
print( 'Creating ' + split + ' data: ' + str(len(im_list)) + ' images')
X = np.zeros((len(im_list),) + desired_im_sz + (3,), np.uint8)
            # use a separate index so the outer image counter `i` is not clobbered
            for idx, im_file in enumerate(im_list):
                im = imread(im_file)
                X[idx] = process_im(im, desired_im_sz)
hkl.dump(X, os.path.join(DATA_DIR, 'X_' + split + '.hkl'))
hkl.dump(source_list, os.path.join(DATA_DIR, 'sources_' + split + '.hkl'))
# create empty lists
im_list = []
source_list = []
# resize and crop image
def process_im(im, desired_sz):
target_ds = float(desired_sz[0])/im.shape[0]
im = imresize(im, (desired_sz[0], int(np.round(target_ds * im.shape[1]))))
d = int((im.shape[1] - desired_sz[1]) / 2)
im = im[:, d:d+desired_sz[1]]
return im
if __name__ == '__main__':
process_data(args.n_images, args.order)
|
[
"lana.sinapayen@gmail.com"
] |
lana.sinapayen@gmail.com
|
f26aa2b147563cfbc91ff87d0a1e57cc576b9ead
|
60606b2665de5fdc4a099c7b4489ef9df5f742d8
|
/appBank/src/main.py
|
30882feda9535a39a133b4a5776d30ce26f2b832
|
[] |
no_license
|
frdanwrhdyt/appllikasi-perbankan-dg-python
|
3e95589d3eeb00e900ae3e26cb59beb83f279be8
|
100bf1d8023283729bce4055f442e48f985d8999
|
refs/heads/master
| 2022-07-23T03:23:17.980300
| 2020-05-21T17:57:30
| 2020-05-21T17:57:30
| 265,915,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,276
|
py
|
import json
import os
from collections import Counter
def clearScreen():
input()
os.system('cls')
class Login:
def __init__(self,username,password):
self.__username = username
self.__password = password
def getName(self):
return self.__username
def getLogin(self):
with open ('data.json') as json_file:
data=json.load(json_file)
for i in data['user'] :
if self.__username in i['username'] and self.__password in i['password']:
return True
def getConfirm(self):
with open ('data.json') as json_file:
data=json.load(json_file)
for i in data['user'] :
if self.__username in i['username'] and self.__password in i['password']:
if (i['status'] == False):
return False
else:
return True
def getDebit(self):
with open ('data.json') as json_file:
data=json.load(json_file)
for i in data['user'] :
if self.__username in i['username'] and self.__password in i['password']:
return i['debit']
class Sighup:
def __init__(self,username,password,email):
self.__username = username
self.__password = password
self.__email = email
if not os.path.isfile('data.json'):
data={}
data['user']=[]
data['user'].append({
'username' : self.__username,
'password' : self.__password,
'email' : self.__email,
'debit' : 0,
'status' : False,
})
with open ('data.json','w') as json_file :
json.dump(data,json_file)
else:
data={
'username' : self.__username,
'password' : self.__password,
'email' : self.__email,
'debit' : 0,
'status' : False,
}
with open ('data.json') as json_file :
jsonData=json.load(json_file)
temp=jsonData['user']
temp.append(data)
with open ('data.json','w') as json_file:
json.dump(jsonData,json_file)
class admin:
    def __init__(self):
        j = 0
        # collect every confirmed user instead of overwriting on each iteration
        self.__user = []
        with open('data.json') as json_file:
            data = json.load(json_file)
            for i in data['user']:
                if i['status'] == True:
                    j += 1
                    self.__user.append(i['username'])
                    print(str(j) + '. ' + i['username'])
    def eraserUser(self, nomor):
        # `nomor` is a zero-based index into the confirmed-user list;
        # actual deletion is still unimplemented, this only re-lists the users
        if 0 <= nomor < len(self.__user):
            with open('data.json') as json_file:
                data = json.load(json_file)
                for i in data['user']:
                    if i['status'] == True:
                        print(i['username'])
        else:
            return "Nasabah tidak ada"
def dashboardUser(user):
print('Selamat datang ' + user.getName())
print("---"*8)
print('1. Lihat saldo\n2. Tambah Saldo\n3. Ganti Password\n4. Keluar')
print('---'*8)
pilihan=input('Masukkan pilihan : ')
if(pilihan == '1'):
print('Saldo anda : Rp' + str(user.getDebit()))
def dashboardAdmin():
print('Selamat datang di dashboard admin')
print('---'*8)
print('1. Lihat nasabah\n2. Lihat pendaftar\n3. Keluar')
print('---'*8)
pilihan1=input('Masukkan pilihan : ')
if (pilihan1=='1'):
Admin=admin()
print('\n1. Hapus nasabah\n2. Kembali')
pilihan2=input('Masukkan pilihan : ')
if (pilihan2=='1'):
pilihan3=int(input('Masukkan nomer nasabah'))
pilihan3-=1
Admin.eraserUser(pilihan3)
elif (pilihan2=='2'):
clearScreen()
dashboardAdmin()
def loginDashboard():
clearScreen()
print('Tekan 9 untuk batal')
username = input("Username : ")
if (username=='9'):
return True
password = input("Password : ")
if (password=='9'):
return True
user = Login(username,password)
if user.getLogin():
if user.getConfirm():
clearScreen()
dashboardUser(user)
else:
print('Username belum dikonfirmasi oleh admin')
elif (username == 'admin' and password == 'admin'):
clearScreen()
dashboardAdmin()
else:
print('Username atau password salah')
def daftarDashboard():
clearScreen()
print('Tekan 9 untuk batal')
username = input("Username : ")
if (username=='9'):
return True
password = input("Password : ")
if (password=='9'):
return True
email = input("Email : ")
if (email=='9'):
return True
user = Sighup(username,password,email)
while (True):
print('Selamat datang di applikasi perbankan')
print("---"*8)
print("1. Login\n2. Daftar")
print("---"*8)
pilihan=input("Masukkan Pilihan : ")
if (pilihan=='1'):
loginDashboard()
elif (pilihan=='2'):
daftarDashboard()
else:
print("Pilihan tidak ada")
clearScreen()
|
[
"farid780anwar@gmail.com"
] |
farid780anwar@gmail.com
|
8b5e76af94f8db48d834469d1bd897444bf4d7bf
|
a6172e91bad7c17e699a1253360b82f9cb6188aa
|
/envs.py
|
7d1757888faf1cfc1c9bb9a299972a4989aa9ce7
|
[] |
no_license
|
wangyouzhuo/pytorch-a3c-attention
|
7f2af43869eb9cd20f35d882eeca3828dd718928
|
142da0a377e861bc7226a4a86bdc53b359629941
|
refs/heads/master
| 2020-04-28T05:19:06.695210
| 2019-02-16T14:51:32
| 2019-02-16T14:51:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
import cv2
import gym
import numpy as np
from gym.spaces.box import Box
# Taken from https://github.com/openai/universe-starter-agent
def create_atari_env(env_id):
env = gym.make(env_id)
env = AtariRescale84x84(env)
env = NormalizedEnv(env)
return env
def _process_frame84(frame):
frame = frame[34:34 + 160, :160]
# Resize by half, then down to 84x84 (essentially mipmapping). If
# we resize directly we lose pixels that, when mapped to 84x84,
# aren't close enough to the pixel boundary.
frame = cv2.resize(frame, (84, 84))
# frame = frame.mean(2, keepdims=True)
frame = frame.astype(np.float32)
frame *= (1.0 / 255.0)
frame = np.moveaxis(frame, -1, 0)
return frame
class AtariRescale84x84(gym.ObservationWrapper):
def __init__(self, env=None):
super(AtariRescale84x84, self).__init__(env)
self.observation_space = Box(0.0, 1.0, [3, 84, 84])
def _observation(self, observation):
return _process_frame84(observation)
class NormalizedEnv(gym.ObservationWrapper):
def __init__(self, env=None):
super(NormalizedEnv, self).__init__(env)
self.state_mean = 0
self.state_std = 0
self.alpha = 0.9999
self.num_steps = 0
def _observation(self, observation):
self.num_steps += 1
self.state_mean = self.state_mean * self.alpha + \
observation.mean() * (1 - self.alpha)
self.state_std = self.state_std * self.alpha + \
observation.std() * (1 - self.alpha)
unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))
unbiased_std = self.state_std / (1 - pow(self.alpha, self.num_steps))
return (observation - unbiased_mean) / (unbiased_std + 1e-8)
|
[
"gamrianshani@gmail.com"
] |
gamrianshani@gmail.com
|
a7cc97b5e49ca2c7859a2cb2f70472934c5ad299
|
e00186e71a1f52b394315a0cbc27162254cfffb9
|
/durga/tut/sample/contact/urls.py
|
cbd927b2b80e5d9ff7cf2904f6a45474adb1c169
|
[] |
no_license
|
anilkumar0470/git_practice
|
cf132eb7970c40d0d032520d43e6d4a1aca90742
|
588e7f654f158e974f9893e5018d3367a0d88eeb
|
refs/heads/master
| 2023-04-27T04:50:14.688534
| 2023-04-22T05:54:21
| 2023-04-22T05:54:21
| 100,364,712
| 0
| 1
| null | 2021-12-08T19:44:58
| 2017-08-15T10:02:33
|
Python
|
UTF-8
|
Python
| false
| false
| 137
|
py
|
from . import views
from django.urls import path, include
urlpatterns = [
path('contactme/', views.contactus, name='contact-me'),
]
|
[
"anilkumar.0466@gmail.com"
] |
anilkumar.0466@gmail.com
|
ca4ca0b26337918f518b836b293d681197779508
|
9c7bc80c99cfa42bb5fca431e3fcca6bb712bba7
|
/tests/unit/modules/test_xfs.py
|
4b423d69d124c028313cb910588169ab2bb05064
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
darix/salt
|
678856d84cb53e3390998306ec09de64220a8fc7
|
fae3dfc4f417988f431514f89e398a86fe5541a4
|
refs/heads/openSUSE-2019.2.0
| 2023-01-21T05:38:58.088830
| 2020-02-12T09:05:45
| 2020-02-19T13:37:56
| 241,922,575
| 0
| 1
|
NOASSERTION
| 2022-06-01T18:26:13
| 2020-02-20T15:42:58
| null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import textwrap
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
# Import Salt Libs
import salt.modules.xfs as xfs
@skipIf(NO_MOCK, NO_MOCK_REASON)
@patch('salt.modules.xfs._get_mounts', MagicMock(return_value={}))
class XFSTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.xfs
'''
def setup_loader_modules(self):
return {xfs: {}}
def test__blkid_output(self):
'''
Test xfs._blkid_output when there is data
'''
blkid_export = textwrap.dedent('''
DEVNAME=/dev/sda1
UUID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
TYPE=xfs
PARTUUID=YYYYYYYY-YY
DEVNAME=/dev/sdb1
PARTUUID=ZZZZZZZZ-ZZZZ-ZZZZ-ZZZZ-ZZZZZZZZZZZZ
''')
        # We expect to find only data from /dev/sda1, nothing from
# /dev/sdb1
self.assertEqual(xfs._blkid_output(blkid_export), {
'/dev/sda1': {
'label': None,
'partuuid': 'YYYYYYYY-YY',
'uuid': 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'
}
})
|
[
"dincamihai@users.noreply.github.com"
] |
dincamihai@users.noreply.github.com
|
57b74c58c636552617a86ca776cdc6f914fe0984
|
0c5b84db768690c304e26552a765b277601f6393
|
/jim/config.py
|
d7344285fceac051dfbda5219282afc026ad7b88
|
[] |
no_license
|
Implexx/Messenger
|
09d43d4b3412734e23e1bad8ee74866ae9ee510f
|
75bf09331cc28eb88ee13ff1725ab87351d40d4d
|
refs/heads/master
| 2020-03-26T01:34:32.952377
| 2018-08-13T09:43:58
| 2018-08-13T09:43:58
| 144,373,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
"""Константы (ключи, действия, значения, коды ответов) и настройки"""
ENCODING = 'utf-8'
USERNAME_MAX_LENGTH = 25
MESSAGE_MAX_LENGTH = 500
# Protocol keys (actions)
ACTION = 'action'
TIME = 'time'
USER = 'user'
ERROR = 'error'
ACCOUNT_NAME = 'account_name'
RESPONSE = 'response'
AUTH = 'authenticate'
USER_ID = 'user_id'
ALERT = 'alert'
QUANTITY = 'quantity'
REQUIRED_MESSAGE_KEYS = (ACTION, TIME)
REQUIRED_RESPONSE_KEYS = (RESPONSE,)
# Protocol values
PRESENCE = 'presence'
MSG = 'msg'
QUIT = 'quit'
TO = 'to'
FROM = 'from'
MESSAGE = 'message'
GET_CONTACTS = 'get_contacts'
ADD_CONTACT = 'add_contact'
DEL_CONTACT = 'del_contact'
CONTACT_LIST = 'contact_list'
ACTIONS = (PRESENCE, MSG, GET_CONTACTS, DEL_CONTACT, ADD_CONTACT, CONTACT_LIST)
# Server response codes
BASIC_NOTICE = 100
OK = 200
ACCEPTED = 202
WRONG_REQUEST = 400  # malformed request or JSON object
SERVER_ERROR = 500  # server-side error
RESPONSE_CODES = (BASIC_NOTICE, OK, ACCEPTED, WRONG_REQUEST, SERVER_ERROR)
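# Hedged usage sketch: how the constants above might compose into protocol
# messages (the account name and alert text below are illustrative only).
EXAMPLE_PRESENCE_MESSAGE = {ACTION: PRESENCE, TIME: 0.0, USER: {ACCOUNT_NAME: 'guest'}}
EXAMPLE_SERVER_RESPONSE = {RESPONSE: OK, ALERT: 'presence accepted'}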
|
[
"hurtmind@mail.ru"
] |
hurtmind@mail.ru
|
fcbba6b6aeedfe4d2b974334b93de13ca8113414
|
93634959a1873580b7b9f449f02b338bac9b4dab
|
/php-fpm/php-fpm.py
|
5f6d0a8e6407bc22cc2a632ebcadf68a94548442
|
[] |
no_license
|
saranshjain1/Datadog-Plugins
|
5b12638c1f3f8b77b91b633c49465748d369ba80
|
0d8f8645c07dfd103976024cdd16052ad5473269
|
refs/heads/master
| 2021-01-22T11:47:43.550438
| 2014-04-16T19:14:34
| 2014-04-16T19:14:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,579
|
py
|
#!/usr/bin/python2.6
import sys
import os
import subprocess
from checks import AgentCheck
class details(AgentCheck):
GAUGES = {
'start time':'php-fpm.start time',
'start since':'php-fpm.start since',
'accepted conn': 'php-fpm.connections',
'listen queue': 'php-fpm.listen queue',
'max listen queue': 'php-fpm.max listen queue',
'active processes': 'php-fpm.active processes',
'total processes': 'php-fpm.total processes',
'max active processes': 'php-fpm.max active processes',
'max children reached': 'php-fpm.max children reached',
}
def check(self,instance):
default_timeout = self.init_config.get('default_timeout', 5)
os.environ["SCRIPT_NAME"] = "/status"
os.environ["SCRIPT_FILENAME"] = "/status"
os.environ["REQUEST_METHOD"] = "GET"
req = subprocess.Popen(["cgi-fcgi", "-bind", "-connect", "127.0.0.1:9000"], stdout=subprocess.PIPE).communicate()[0]
metric_count = 0
line = req
for queue in line.split('\n'):
values = queue.split(': ')
if len(values) == 2:
metric, value = values
try:
value = float(value)
except ValueError:
continue
if metric in self.GAUGES:
metric_count +=1
check_fpm = self.GAUGES[metric]
self.gauge(check_fpm, value, tags=['check_php-fpm'])
if __name__ == '__main__':
check, instances = details.from_yaml('/etc/dd-agent/conf.d/php-fpm.yaml')
for instance in instances:
print "\nRunning the check"
        check.check(instance)
print 'Metrics: %s' % (check.get_metrics())
|
[
"saransh@saransh.(none)"
] |
saransh@saransh.(none)
|
52b12b817990f295f9c1dc821dfbaaae87f185e5
|
46548aac69803470361503b005d64bb2ec8cb24c
|
/EulerProjects/project10.py
|
8c934240bbe2732541a74f4f99680c2e0e43f764
|
[] |
no_license
|
hoangnhancs/CS112.L11.KHTN
|
e425141203f2c09e7e45c0125b2241eb5c51598e
|
d1ac96ce5953d1a0b621b802a2bbe1c468ea07c1
|
refs/heads/master
| 2023-02-21T07:35:22.859211
| 2021-01-25T02:23:31
| 2021-01-25T02:23:31
| 294,288,315
| 4
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
def check (n, arr_prime):
for i in arr_prime:
if n%i==0:
return False
return True
arr_prime = [2]
sum =2
for i in range(3, 2000001):
if check(i, arr_prime)==True:
arr_prime.append(i)
sum+=i
print(sum)
#print(arr_prime)
# #print(check(3,arr_prime))
# print(check(9, arr_prime))
|
[
"thaihoangnhantk17lqd@gmail.com"
] |
thaihoangnhantk17lqd@gmail.com
|
2fc719b1fdba27a69f938d9dbb23e5487b3a8aaf
|
6c89cc53812a3d819c5f701b10a7b57343e4d47e
|
/config.py
|
77e679ee2a28a00973a53b6ed25c3777a73d947b
|
[] |
no_license
|
mmg1/DefaultLogExpress-DLE-
|
72f4f85c705a0f4028651701d84b01aa07d7a148
|
6ed93e366be7744b7b7e8f47293e3763aca1691e
|
refs/heads/master
| 2021-09-27T16:01:59.581145
| 2018-11-09T10:53:29
| 2018-11-09T10:53:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
# coding: utf-8
WARNING = "\033[31m[!] \033[0m"
FORBI = "\033[31m[x] \033[0m"
PLUS = "\033[32m[+] \033[0m"
INFO = "\033[34m[?] \033[0m"
LESS = "\033[33m[-] \033[0m"
LINE = "\033[34m=\033[0m" * 20
|
[
"noreply@github.com"
] |
mmg1.noreply@github.com
|
714e831e1d51a913ff27d8ffbcf275abb01e4997
|
cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98
|
/leet/facebook/strings_arrays/498_diagonal_traverse.py
|
49810dc7b8f714f7e2fc3ac3da9c6b1fedb5b7de
|
[] |
no_license
|
arsamigullin/problem_solving_python
|
47715858a394ba9298e04c11f2fe7f5ec0ee443a
|
59f70dc4466e15df591ba285317e4a1fe808ed60
|
refs/heads/master
| 2023-03-04T01:13:51.280001
| 2023-02-27T18:20:56
| 2023-02-27T18:20:56
| 212,953,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,987
|
py
|
from typing import List
# diagonal length
class Solution:
def findDiagonalOrder(self, matrix: List[List[int]]) -> List[int]:
if not matrix:
return []
n = len(matrix)
m = len(matrix[0])
i, j, di, dj = 0, 0, 0, 1
direction = 0
res = [0] * (m * n)
l = 0
while l < m * n:
# this is diagonal length
cnt = min(j + 1, n - i)
x, y = i, j
rng = range(cnt) if direction == 1 else range(cnt)[::-1]
for k in rng:
res[l + k] = matrix[x][y]
x += 1
y -= 1
matrix[i][j] = None
# this is how we go over the top and right border
if matrix[(i + di) % n][(j + dj) % m] is None:
di, dj = dj, di
i += di
j += dj
direction ^= 1
l += cnt
return res
# this is solution with reverse
class Solution:
def findDiagonalOrder(self, matrix: List[List[int]]) -> List[int]:
if not matrix:
return []
n = len(matrix)
m = len(matrix[0])
length = n + m - 1
i = 0
j = 0
direction = 0
di, dj = 0, 1
res = []
l = 0
while l < length:
x, y = i, j
arr = []
while 0<=x<n and 0<=y<m:
arr.append(matrix[x][y])
x+=1
y-=1
if direction == 0:
arr = arr[::-1]
res.extend(arr)
matrix[i][j] = None
if matrix[(i + di) % n][(j + dj) % m] == None:
di, dj = dj, di
i += di
j += dj
direction ^= 1
l+=1
return res
if __name__ == '__main__':
s = Solution()
s.findDiagonalOrder([[2,5],[8,4],[0,-1]])
s.findDiagonalOrder([[3],[2]])
s.findDiagonalOrder([[2,3]])
s.findDiagonalOrder([[1,2,3],[4,5,6],[7,8,9]])
|
[
"ar.smglln@gmail.com"
] |
ar.smglln@gmail.com
|
665b1be081ab338d26b6231b4e8c856ff8a1b2dd
|
0faf534ebb6db6f32279e5bee25b968bd425ce3a
|
/tests/extension/thread_/axi_slave_lite/thread_axi_slave_lite.py
|
1bda522df6d89d386a8ef2f35d2313d7523cd9d5
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
PyHDI/veriloggen
|
e8647cb2d40737d84e31d6b89c5799bab9cbd583
|
f2b1b9567150af097eed1b5e79ba2b412854ef43
|
refs/heads/develop
| 2023-08-09T10:02:35.626403
| 2023-08-09T00:50:14
| 2023-08-09T00:50:14
| 37,813,184
| 282
| 60
|
Apache-2.0
| 2023-07-20T03:03:29
| 2015-06-21T15:05:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,495
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
saxi = vthread.AXISLiteRegister(m, 'saxi', clk, rst, datawidth, length=4)
def blink():
while True:
saxi.wait_flag(0, value=1, resetvalue=0)
saxi.write(1, 1) # set busy
size = saxi.read(2)
sum = 0
for i in range(size):
sum += i
saxi.write(3, sum)
saxi.write(1, 0) # unset busy
vthread.finish()
th = vthread.Thread(m, 'th_blink', clk, rst, blink)
fsm = th.start()
return m
def mkTest(memimg_name=None):
m = Module('test')
# target instance
led = mkLed()
# copy paras and ports
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
# memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
# memory.connect(ports, 'myaxi')
# AXI-Slave controller
_saxi = vthread.AXIMLiteVerify(m, '_saxi', clk, rst, noio=True)
_saxi.connect(ports, 'saxi')
k = 100
expected_sum = 0
for i in range(k):
expected_sum += i
def ctrl():
for i in range(100):
pass
# size
awaddr = 8
_saxi.write_delayed(awaddr, k, 10)
# start
awaddr = 0
_saxi.write_delayed(awaddr, 1, 10)
for _ in range(10):
pass
# busy check
araddr = 4
v = _saxi.read_delayed(araddr, 10)
while v != 0:
v = _saxi.read_delayed(araddr, 10)
# result
araddr = 12
v = _saxi.read_delayed(araddr, 10)
print('result = %d, expected = %d' % (v, expected_sum))
if v == expected_sum:
print('# verify: PASSED')
else:
print('# verify: FAILED')
vthread.finish()
th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)
fsm = th.start()
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
# vcd_name = os.path.splitext(os.path.basename(__file__))[0] + '.vcd'
# simulation.setup_waveform(m, uut, dumpfile=vcd_name)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
    if outputfile is None:
        outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
    memimg_name = 'memimg_' + outputfile
test = mkTest(memimg_name=memimg_name)
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile)
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
e3326e19b9640eea1e9bbfd8cdc74c97f007cbc8
|
6c137e70bb6b1b618fbbceddaeb74416d387520f
|
/spyre/testing/onthefly.py
|
7516351719b40b94613f133f92eb3c6cd36a4dbb
|
[
"BSD-2-Clause"
] |
permissive
|
zhong-lab/code
|
fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15
|
b810362e06b44387f0768353c602ec5d29b551a2
|
refs/heads/master
| 2023-01-28T09:46:01.448833
| 2022-06-12T22:53:47
| 2022-06-12T22:53:47
| 184,670,765
| 2
| 7
|
BSD-2-Clause
| 2022-12-08T21:46:15
| 2019-05-02T23:37:39
|
Python
|
UTF-8
|
Python
| false
| false
| 321
|
py
|
# Device List
devices = {
# 'fungen':[
# 'lantz.drivers.keysight.Keysight_33622A.Keysight_33622A',
# ['USB0::0x0957::0x5707::MY53801461::INSTR'],
# {}
# ]
}
# Experiment List
spyrelets = {
'rabi':[
'spyre.spyrelets.onthefly.OnTheFlySpyrelet',
{},
{}
],
}
|
[
"none"
] |
none
|
46778ad365c9d953b71ac7459414e523c97ebb4b
|
f7485ca051de935b81a249d89cbf6f463e75ba15
|
/apps/user/migrations/0001_initial.py
|
7e4830c3bad4db83b80c6cced4d4fd7acee4bfc6
|
[] |
no_license
|
HJK17/alloygame
|
cf47446bc1581476bddbbb6a85f31aec358a3a3a
|
41cc0a5dba0c62b39efeebe1da42d672678e703c
|
refs/heads/main
| 2023-08-20T23:09:09.262069
| 2021-10-06T07:18:19
| 2021-10-06T07:18:19
| 403,262,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,639
|
py
|
# Generated by Django 3.2.1 on 2021-07-28 07:47
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除基类')),
('image', models.ImageField(upload_to='image', verbose_name='头像')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
'db_table': 'ag_user',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除基类')),
('receiver', models.CharField(max_length=20, verbose_name='收件人')),
('addr', models.CharField(max_length=256, verbose_name='收件地址')),
('zip_code', models.CharField(max_length=6, null=True, verbose_name='邮政编码')),
('phone', models.CharField(max_length=11, verbose_name='联系电话')),
('is_default', models.BooleanField(default=False, verbose_name='是否默认')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='所属账户')),
],
options={
'verbose_name': '地址',
'verbose_name_plural': '地址',
'db_table': 'ag_address',
},
),
]
|
[
"1334535487@qq.com"
] |
1334535487@qq.com
|
037f5e2109dad9ac971742c44a53bf6923ae4d93
|
61c4c220a70a18d179ed77a6b02914045945a85d
|
/knock100/knock38.py
|
ce216d3bfbfca2c9b356f33a56ec73b45b608ce3
|
[] |
no_license
|
hoka-sp/NLP100
|
5839adedc1b8d522d93859cdad07319fc8967679
|
b206c4eb2a43e268fc68fd7b54bf12132389141c
|
refs/heads/master
| 2023-07-13T15:19:44.076759
| 2021-08-09T15:06:47
| 2021-08-09T15:06:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
import sys
import knock30
from collections import defaultdict
import matplotlib.pyplot as plt
def extract_words(block):
return [b['base'] + '_' + b['pos'] + '_' + b['pos1'] for b in block]
def main():
args = sys.argv
args.append('ch04/neko.txt.mecab')
phrase_list = [knock30.parse_mecab(phrase)
for phrase in knock30.make_phrase_list(args[1])]
words = [extract_words(brock) for brock in phrase_list]
print(words)
d = defaultdict(int)
for word in words:
for tag in word:
d[tag] += 1
ans = d.values()
plt.figure(figsize=(8, 8))
plt.hist(ans, bins=100)
plt.savefig('ch04/graph38.png')
if __name__ == '__main__':
main()
|
[
"64342567+hoka-sp@users.noreply.github.com"
] |
64342567+hoka-sp@users.noreply.github.com
|
8d067df9baee38f186b6ab7885a23f86661b7c9f
|
eb712c6945b4eb188da204c6c27b725c99c98c8d
|
/Simple Right Angle Pattern.py
|
092b556c2f2b0934d6961559cba0cbab627b2da2
|
[] |
no_license
|
learncodesdaily/PP-Pattern
|
5d38ea495598e315878577303392547ee2367ec0
|
38820c60ff0f0f93cc6e7f9a003b79c3470ee53f
|
refs/heads/master
| 2022-06-11T08:44:59.456938
| 2020-05-10T16:51:21
| 2020-05-10T16:51:21
| 262,827,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
def alphabetPattern(n):
for i in range(1,n+1):
for j in range(65, 65 + i):
a = chr(j)
print(a, end=" ")
print("\r")
n = int(input("Enter Pattern Size : "))
alphabetPattern(n)
|
[
"noreply@github.com"
] |
learncodesdaily.noreply@github.com
|
711cc710fa9d0f60acbbd8cb2631d8c17a09353d
|
d113ee244b1e140724a983d0ef50c05ea8570de3
|
/lectures/week-eight/lights.py
|
bc6bedc09296a655f99984bf4fd9cd5543f0d7a7
|
[] |
no_license
|
HumnResources/CS50
|
06136552a65f48adbe7874952d5f0320eea61efe
|
050111878bc2725218a013d1f1c38e88be052875
|
refs/heads/master
| 2023-06-04T08:52:48.702331
| 2021-06-24T18:08:57
| 2021-06-24T18:08:57
| 374,654,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
import os
import requests
USERNAME = os.getenv("USERNAME")
IP = os.getenv("IP")
URL = f"http://{IP}/api/{USERNAME}/lights/1/state"
# put adds information to server, using a dict with key 'on' value 'false'
requests.put(URL, json={"on": False})
while True:
requests.put(URL, json={"bri": 254, "on": True}
|
[
"ziskachase@gmail.com"
] |
ziskachase@gmail.com
|
c9d4301742de47bbc0430217c6f470d4ba6ba0ad
|
a8ec52196f76071abe0ed1bae05f87c6abb67be8
|
/chainer/utils/conv_nd.py
|
97efa4af62bc8716fe3075162f0f52259c1b1cf8
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fgnt/chainer
|
92496c551b83fdc5322689113a9f6a647b038bed
|
ad2a51c08981c75f285a4395f283be8bb9b5d301
|
refs/heads/master
| 2021-01-18T11:54:28.620452
| 2016-08-22T07:54:45
| 2016-08-22T07:54:45
| 66,252,118
| 0
| 1
| null | 2016-08-22T08:05:47
| 2016-08-22T08:05:45
| null |
UTF-8
|
Python
| false
| false
| 4,475
|
py
|
import itertools
import numpy
import six
from chainer import cuda
from chainer.utils.conv import get_conv_outsize
from chainer.utils import conv_nd_kernel
def as_tuple(x, n):
if hasattr(x, '__getitem__'):
assert len(x) == n
return tuple(x)
return (x,) * n
def im2col_nd_cpu(img, ksize, stride, pad, pval=0, cover_all=False):
n, c = img.shape[0:2] # (n, c, d_1, d_2, ..., d_N)
dims = img.shape[2:]
ndim = len(dims)
assert ndim == len(ksize) == len(stride) == len(pad)
outs = tuple(get_conv_outsize(d, k, s, p, cover_all)
for (d, k, s, p) in zip(dims, ksize, stride, pad))
# Pad around image.
pad_width = ((0, 0), (0, 0)) + tuple(
(p, p + s - 1) for (s, p) in zip(stride, pad))
img = numpy.pad(img, pad_width, mode='constant', constant_values=(pval,))
# Make patch array with which we will compute correlation with filter.
# shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)
shape = (n, c) + ksize + outs
col = numpy.ndarray(shape, dtype=img.dtype)
# Fill the patch array.
colon = slice(None)
for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):
# col[:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :]
col_index = (colon, colon) + kxs + (colon,) * ndim
# img[:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N]
kx_lims = tuple(kx + s * out
for (kx, s, out) in zip(kxs, stride, outs))
img_index = (colon, colon) + tuple(
slice(kx, kx_lim, s)
for (kx, kx_lim, s) in zip(kxs, kx_lims, stride))
col[col_index] = img[img_index]
return col
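# Worked shape example (illustrative, not from the original source): for img of
# shape (1, 1, 4, 4) with ksize=(2, 2), stride=(1, 1) and pad=(0, 0), each output
# dimension is get_conv_outsize(4, 2, 1, 0) = 3, so the returned patch array has
# shape (1, 1, 2, 2, 3, 3) = (n, c, k_1, k_2, out_1, out_2).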
def im2col_nd_gpu(img, ksize, stride, pad, cover_all=False):
n, c = img.shape[0:2] # (n, c, d_1, d_2, ..., d_N)
dims = img.shape[2:]
ndim = len(dims)
assert ndim == len(ksize) == len(stride) == len(pad)
outs = tuple(get_conv_outsize(d, k, s, p, cover_all)
for (d, k, s, p) in zip(dims, ksize, stride, pad))
# col_shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)
shape = (n, c) + ksize + outs
col = cuda.cupy.empty(shape, dtype=img.dtype)
in_params, out_params, operation, name = \
conv_nd_kernel.Im2colNDKernel.generate(ndim)
cuda.elementwise(in_params, out_params, operation, name)(
img.reduced_view(), *(dims + outs + ksize + stride + pad + (col,)))
return col
def col2im_nd_cpu(col, stride, pad, dims):
# Assured consistency of dimensions of parameters by caller.
n, c = col.shape[:2] # (n, c, kx_1, ..., kx_N, out_1, ..., out_N)
mid = (len(col.shape) - 2) // 2 + 2
ksize = col.shape[2:mid]
outs = col.shape[mid:]
colon = slice(None)
assert len(outs) == len(ksize) == len(stride) == len(pad) == len(dims)
# Image with padded size.
img_shape = (n, c) + tuple(d + 2 * p + s - 1
for (d, p, s) in zip(dims, pad, stride))
img = numpy.zeros(img_shape, dtype=col.dtype)
for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):
# (:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N)
kx_lims = tuple(kx + s * out
for (kx, s, out) in zip(kxs, stride, outs))
img_index = (colon, colon) + tuple(
slice(kx, kx_lim, s)
for (kx, kx_lim, s) in zip(kxs, kx_lims, stride))
# (:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :)
col_index = (colon, colon) + kxs + (colon,) * len(outs)
img[img_index] += col[col_index]
# (:, :, p_1:d_1 + p_1, p_2:d_2 + p_2, ..., p_N:d_N + p_N]
img_index = (colon, colon) + tuple(
slice(p, d + p) for (p, d) in zip(pad, dims))
return img[img_index]
def col2im_nd_gpu(col, stride, pad, dims):
# Assured consistency of dimensions of parameters by caller.
n, c = col.shape[:2] # (n, c, k_1, ..., k_N, out_1, ..., out_N)
mid = (len(col.shape) - 2) // 2 + 2
ksize = col.shape[2:mid]
outs = col.shape[mid:]
ndim = len(dims)
assert len(outs) == len(ksize) == len(stride) == len(pad) == ndim
img_shape = (n, c) + dims # (n, c, d_1, d_2, ..., d_N)
img = cuda.cupy.empty(img_shape, dtype=col.dtype)
in_params, out_params, operation, name = \
conv_nd_kernel.Col2imNDKernel.generate(ndim)
cuda.elementwise(in_params, out_params, operation, name)(
col.reduced_view(), *(dims + outs + ksize + stride + pad + (img,)))
return img
|
[
"kamonama@gmail.com"
] |
kamonama@gmail.com
|
f4ff7b6a3d6fc1fc1ae39399a49474d8d349ada7
|
05f6f98f0b2efeb9578b19015a80121ae1906800
|
/backup/cam.py
|
0756abd07f7c5289c15316637fbb0ccf6cb839f9
|
[] |
no_license
|
anumanu/Augmented-wear
|
494831180eff55560b97bd8eca12137427207521
|
035e1c6cc5c25d1a9b320aa26ff4081aa6225fcd
|
refs/heads/master
| 2020-06-01T23:31:25.870969
| 2019-06-09T05:02:27
| 2019-06-09T05:02:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,564
|
py
|
# python cam.py  (tracks a fixed HSV colour range from the default webcam and drags the mouse pointer)
import cv2
import argparse
import numpy as np
import pyautogui
(screen_width,screen_height) = pyautogui.size()
def callback(value):
pass
def main():
range_filter = 'HSV'
camera = cv2.VideoCapture(0)
while True:
        ret, image = camera.read()
        if not ret:
            break
        image = cv2.flip(image, 1)
        (height, width) = image.shape[:2]
frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
v1_min, v2_min, v3_min, v1_max, v2_max, v3_max = [0, 84, 136, 38, 255, 255]
thresh = cv2.inRange(frame_to_thresh, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max))
kernel = np.ones((5,5),np.uint8)
mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if radius > 10:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(image, (int(x), int(y)), int(radius),(0, 255, 255), 2)
#pyautogui.moveTo(int(x)*(screen_width/width), int(y)*(screen_height/height))
pyautogui.dragTo(int(x)*(screen_width/width), int(y)*(screen_height/height),.3, button='left')
cv2.circle(image, center, 3, (0, 0, 255), -1)
cv2.putText(image,"centroid", (center[0]+10,center[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 255),1)
cv2.putText(image,"("+str(center[0])+","+str(center[1])+")", (center[0]+10,center[1]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0, 0, 255),1)
# show the frame to our screen
cv2.imshow("Original", image)
#cv2.imshow("Thresh", thresh)
#cv2.imshow("Mask", mask)
        # use == (not `is`) when comparing integer key codes
        if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == '__main__':
main()
|
[
"athuldevin@gmail.com"
] |
athuldevin@gmail.com
|
143282840eae55d35e061325759c7e9a140a4f57
|
f56e4bb2d3a91b068292d698388ac5e82a40f078
|
/inkshop/apps/products/migrations/0011_remove_productday_name.py
|
0c27b59784126d3c5812ca071d7fec5a926e7134
|
[] |
no_license
|
inkandfeet/inkshop
|
979064eb902c86dc95a6399e79ac753efbe547d1
|
691187b3eb4435782f8054e6404f1203e7d0c383
|
refs/heads/master
| 2022-12-13T01:26:02.361970
| 2021-11-18T23:01:50
| 2021-11-18T23:01:50
| 175,481,726
| 1
| 1
| null | 2022-12-08T04:59:16
| 2019-03-13T18:59:17
|
Python
|
UTF-8
|
Python
| false
| false
| 329
|
py
|
# Generated by Django 2.2 on 2020-08-17 00:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0010_productday_product'),
]
operations = [
migrations.RemoveField(
model_name='productday',
name='name',
),
]
|
[
"steven@inkandfeet.com"
] |
steven@inkandfeet.com
|
2dde96911891398f787d917e459942675023cc4d
|
6bf6fc3f8634d386dac5b4cc9df9f78beb8d22e2
|
/simple_server/simple_server.py
|
25f499f03c87f649d22a4f782c550ebb4ecf2358
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
loxodromic/pwap8
|
03ff2aa9a9e8752c64ac34e0002129c0a842aa31
|
fda3266c64f1d1c925fc54bacaf2f26e06aa84a6
|
refs/heads/master
| 2020-12-19T07:08:59.647615
| 2020-01-23T23:25:59
| 2020-01-23T23:25:59
| 235,658,878
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
import http.server
import socketserver
#from https://docs.python.org/3/library/http.server.html
PORT = 8000
handler = http.server.SimpleHTTPRequestHandler
handler.extensions_map = {
'.manifest': 'text/cache-manifest',
'.html': 'text/html',
'.png': 'image/png',
'.jpg': 'image/jpg',
'.css': 'text/css',
'.js': 'application/x-javascript',
'': 'application/octet-stream',
}
with socketserver.TCPServer(("", PORT), handler) as httpd:
print("serving at port", PORT)
httpd.serve_forever()
|
[
"matt@example.com"
] |
matt@example.com
|
504ad01ebe00c01f4db6cf21d97386a7a4e93d43
|
2f1d93a17565a2fa6c07799ddefba1bc64776660
|
/dual_transf.py
|
570ae28e9746504ce0ff8d8e371ee142decc3ae5
|
[] |
no_license
|
ishine/TSTNN
|
b6f40680a57f1208b805f340554613cccb370ac5
|
3a1dac4968bdb45d959985c535ab359f3f3cc4ce
|
refs/heads/master
| 2023-02-28T01:32:24.849369
| 2021-02-06T04:44:20
| 2021-02-06T04:44:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,541
|
py
|
import torch.nn as nn
import torch
import numpy as np
from single_trans import TransformerEncoderLayer
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'
class Dual_Transformer(nn.Module):
"""
Deep duaL-path RNN.
args:
rnn_type: string, select from 'RNN', 'LSTM' and 'GRU'.
input_size: int, dimension of the input feature. The input should have shape
(batch, seq_len, input_size).
hidden_size: int, dimension of the hidden state.
output_size: int, dimension of the output size.
dropout: float, dropout ratio. Default is 0.
num_layers: int, number of stacked RNN layers. Default is 1.
bidirectional: bool, whether the RNN layers are bidirectional. Default is False.
"""
def __init__(self, input_size, output_size, dropout=0, num_layers=1):
super(Dual_Transformer, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.input = nn.Sequential(
nn.Conv2d(input_size, input_size // 2, kernel_size=1),
nn.PReLU()
)
# dual-path RNN
self.row_trans = nn.ModuleList([])
self.col_trans = nn.ModuleList([])
self.row_norm = nn.ModuleList([])
self.col_norm = nn.ModuleList([])
for i in range(num_layers):
self.row_trans.append(TransformerEncoderLayer(d_model=input_size//2, nhead=4, dropout=dropout, bidirectional=True))
self.col_trans.append(TransformerEncoderLayer(d_model=input_size//2, nhead=4, dropout=dropout, bidirectional=True))
self.row_norm.append(nn.GroupNorm(1, input_size//2, eps=1e-8))
self.col_norm.append(nn.GroupNorm(1, input_size//2, eps=1e-8))
# output layer
self.output = nn.Sequential(nn.PReLU(),
nn.Conv2d(input_size//2, output_size, 1)
)
def forward(self, input):
# input --- [b, c, num_frames, frame_size] --- [b, c, dim2, dim1]
b, c, dim2, dim1 = input.shape
output = self.input(input)
for i in range(len(self.row_trans)):
row_input = output.permute(3, 0, 2, 1).contiguous().view(dim1, b*dim2, -1) # [dim1, b*dim2, c]
row_output = self.row_trans[i](row_input) # [dim1, b*dim2, c]
row_output = row_output.view(dim1, b, dim2, -1).permute(1, 3, 2, 0).contiguous() # [b, c, dim2, dim1]
row_output = self.row_norm[i](row_output) # [b, c, dim2, dim1]
output = output + row_output # [b, c, dim2, dim1]
col_input = output.permute(2, 0, 3, 1).contiguous().view(dim2, b*dim1, -1) # [dim2, b*dim1, c]
col_output = self.col_trans[i](col_input) # [dim2, b*dim1, c]
col_output = col_output.view(dim2, b, dim1, -1).permute(1, 3, 0, 2).contiguous() # [b, c, dim2, dim1]
col_output = self.col_norm[i](col_output) # [b, c, dim2, dim1]
output = output + col_output # [b, c, dim2, dim1]
del row_input, row_output, col_input, col_output
output = self.output(output) # [b, c, dim2, dim1]
return output
'''
trans = Dual_Transformer(64, 64, num_layers=4)
trans = torch.nn.DataParallel(trans)
trans = trans.cuda()
src = torch.rand(2, 64, 250, 8)
out = trans(src)
print(out.shape)
def numParams(net):
num = 0
for param in net.parameters():
if param.requires_grad:
num += int(np.prod(param.size()))
return num
print(numParams(trans))
'''
|
[
"51517793+key2miao@users.noreply.github.com"
] |
51517793+key2miao@users.noreply.github.com
|
af63f50c7e181b66f8bc77cdc57720f68b212bcd
|
ddf896fb5487228d1f8d56f19d9e69425554b2aa
|
/main/exceptions.py
|
ba5cd92ed3fb073931941911a06c505e800eaf33
|
[] |
no_license
|
quinn-lee/novalinks
|
caf057b60d721cecb92b526bde1647e5db7e658c
|
8bb45cdaff6bde61fe00e41924109fb48c36cbd5
|
refs/heads/main
| 2023-08-25T15:30:49.049926
| 2021-10-28T12:06:27
| 2021-10-28T12:06:27
| 352,111,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# coding:utf-8
# Custom exception class
class ValidationException(Exception):
    def __init__(self, code, msg):
        super().__init__(msg)  # initialize the base Exception with the message
self.code = code
self.msg = msg
def __str__(self):
return "{} {}".format(self.code, self.msg)
|
[
"lifuyuan33@gmail.com"
] |
lifuyuan33@gmail.com
|
c11a20375fcb8269589c69d7a42577b6ff5b69a1
|
4a36ce842a0cdbad127f8e4df245b49754154128
|
/simple_mod_installer/conf/migrator.py
|
908d18202634eef055817e0dfc25aa94b1477eb0
|
[] |
no_license
|
tfinlay/simple-mod-installer
|
4bdedb1bcfc1d000be1d0d1da778de58e372107c
|
7d8de09e7a1cd3a7b5102bc6d6d62f677889da7f
|
refs/heads/master
| 2020-03-30T21:51:48.617068
| 2018-10-04T23:06:55
| 2018-10-04T23:06:55
| 151,644,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
"""
These functions migrate from the key version up to the config version specified by the function's return value.
Each function takes one parameter: an absolute path to the config file
"""
import json
from simple_mod_installer.util import join_path
import logging
logger = logging.getLogger(__name__)
def update_1_0_to_1_1(config_file_path):
# type: (str) -> str
print("upgrading from version 1.0 to 1.1...")
logging.debug("upgrading from version 1.0 to 1.1...")
with open(config_file_path, 'r') as f:
config = json.load(f)
# add new values (set to default)
config["database_path"] = join_path(config["application_root"], "moddata.sqlite")
config["webserver_port"] = 4000
# remove unneeded values
# update modified values
config["version"] = "1.1"
# write out again
with open(config_file_path, 'w') as f:
json.dump(config, f)
return "1.1"
CONFIG_UPDATERS = {
"1.0": update_1_0_to_1_1,
}
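# Illustrative sketch (hypothetical helper, not part of the original project):
# repeatedly look up the current version in CONFIG_UPDATERS and apply the matching
# updater until no further migration is registered, returning the final version.
def migrate_to_latest(config_file_path, current_version):
    while current_version in CONFIG_UPDATERS:
        current_version = CONFIG_UPDATERS[current_version](config_file_path)
    return current_version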
|
[
"12890179+tfinlay@users.noreply.github.com"
] |
12890179+tfinlay@users.noreply.github.com
|
b20b4c24dcadb4f67bba5b69fdbe6e2fb914ae22
|
46e007e62359e7ed3ce118decb63e1b6f8692a83
|
/music/forms.py
|
a2dbf3ecb6a33fdb6c811dc67e873e8ab393a8aa
|
[] |
no_license
|
Kiran-sz/Gana
|
5762513329f785a9f907476a5bcf4c0895c8b020
|
b8dad4b05cbd5af414ce1d58cfe6653f0e1954f8
|
refs/heads/master
| 2020-04-27T15:55:49.869757
| 2019-03-17T15:40:38
| 2019-03-17T15:40:38
| 174,397,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from django import forms
from django.contrib.auth.models import User
from .models import Album, Song
class AlbumForm(forms.ModelForm):
class Meta:
model = Album
fields = ['artist', 'album_title', 'genre', 'album_logo']
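# Illustrative sketch of how AlbumForm is typically used in a Django view.
# The view function, URL name, and template path below are assumptions for the
# example only and are not part of this app:
#
# from django.shortcuts import render, redirect
#
# def create_album(request):
#     form = AlbumForm(request.POST or None, request.FILES or None)
#     if form.is_valid():
#         form.save()
#         return redirect('music:index')
#     return render(request, 'music/create_album.html', {'form': form})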
|
[
"kiranzond9@gmail.com"
] |
kiranzond9@gmail.com
|
a6ac7091cdb000d88943dc783eb219899f67e8eb
|
45b91235051a6bfff0fa2acb6311abf41176ef90
|
/hangman.py
|
abc99282ae4ca22de155bfdac34cc6b84164a795
|
[] |
no_license
|
dereyurtali/Hangman
|
a359ec8e0a66fcdc5d3154653a85ad43ee2b78e2
|
dce0c0c9bd23214cfc4bafbddf3db27ea7bdc2cf
|
refs/heads/master
| 2023-04-02T07:44:20.027370
| 2021-04-09T08:56:42
| 2021-04-09T08:56:42
| 354,659,133
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,526
|
py
|
# Problem Set 2, hangman.py
# Name:
# Collaborators:
# Time spent:
# Hangman Game
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def choose_word(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = load_words()
def is_word_guessed(secret_word, letters_guessed):
counter = 0
is_guessed = False
for letter in secret_word:
for letter_guessed in letters_guessed:
if letter == letter_guessed:
counter+=1
if counter == len(secret_word):
is_guessed = True
return is_guessed
def get_guessed_word(secret_word, letters_guessed):
    return_word = ""
    for letter in secret_word:
        letter_bool = False  # reset for every letter of the secret word
        for letter_guessed in letters_guessed:
            if letter == letter_guessed:
                letter_bool = True
        if letter_bool:
            return_word = return_word + letter
        else:
            return_word = return_word + "_"
    return return_word
def get_available_letters(letters_guessed):
    available_letters = []
    for alphabet_letter in string.ascii_lowercase:
        check = False  # reset for every letter of the alphabet
        for guessed_letter in letters_guessed:
            if alphabet_letter == guessed_letter:
                check = True
        if not check:
            available_letters.append(alphabet_letter)
    return available_letters
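# Quick sanity checks for the two helpers above (expected values worked out by hand):
# get_guessed_word("apple", ["e", "p"]) -> "_pp_e"
# get_available_letters(["e", "p"])     -> all lowercase letters except 'e' and 'p'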
def hangman(secret_word):
secret_word = "apple"
guesses_remaining = 6
# The game starts.
print("Welcome the game Hangman!")
print("--Made by Ali Dereyurt--")
print("------------------------")
print("The secret word has " + str(len(secret_word)) + " letters.")
print("You have " + str(guesses_remaining) + " guesses. Good luck!")
input_letter = input("Please guess a letter: ")
print(string.ascii_lowercase)
def match_with_gaps(my_word, other_word):
'''
my_word: string with _ characters, current guess of secret word
other_word: string, regular English word
returns: boolean, True if all the actual letters of my_word match the
corresponding letters of other_word, or the letter is the special symbol
_ , and my_word and other_word are of the same length;
False otherwise:
'''
# FILL IN YOUR CODE HERE AND DELETE "pass"
pass
def show_possible_matches(my_word):
'''
my_word: string with _ characters, current guess of secret word
returns: nothing, but should print out every word in wordlist that matches my_word
Keep in mind that in hangman when a letter is guessed, all the positions
at which that letter occurs in the secret word are revealed.
Therefore, the hidden letter(_ ) cannot be one of the letters in the word
that has already been revealed.
'''
# FILL IN YOUR CODE HERE AND DELETE "pass"
pass
def hangman_with_hints(secret_word):
'''
secret_word: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secret_word contains and how many guesses s/he starts with.
* The user should start with 6 guesses
* Before each round, you should display to the user how many guesses
s/he has left and the letters that the user has not yet guessed.
* Ask the user to supply one guess per round. Make sure to check that the user guesses a letter
* The user should receive feedback immediately after each guess
about whether their guess appears in the computer's word.
* After each guess, you should display to the user the
partially guessed word so far.
* If the guess is the symbol *, print out all words in wordlist that
matches the current guessed word.
Follows the other limitations detailed in the problem write-up.
'''
# FILL IN YOUR CODE HERE AND DELETE "pass"
pass
# When you've completed your hangman_with_hint function, comment the two similar
# lines above that were used to run the hangman function, and then uncomment
# these two lines and run this file to test!
# Hint: You might want to pick your own secret_word while you're testing.
if __name__ == "__main__":
pass
# To test step 2, comment out the pass line above and
# uncomment the following two lines.
# secret_word = choose_word(wordlist)
# hangman(secret_word)
###############
# To test part 3 re-comment out the above lines and
# uncomment the following two lines.
#secret_word = choose_word(wordlist)
#hangman_with_hints(secret_word)
|
[
"ali.dereyurt@stu.fsm.edu.tr"
] |
ali.dereyurt@stu.fsm.edu.tr
|
d67a175df19a408da2c199f5b0086646347b62b7
|
3319aeddfb292f8ab2602840bf0c1e0c2e5927be
|
/python/fill_mem.py
|
7fffd16b155e85c92196cf16753d72a0476bebc9
|
[] |
no_license
|
slaash/scripts
|
4cc3eeab37f55d822b59210b8957295596256936
|
482fb710c9e9bcac050384fb5f651baf3c717dac
|
refs/heads/master
| 2023-07-09T12:04:44.696222
| 2023-07-08T12:23:54
| 2023-07-08T12:23:54
| 983,247
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
#!/usr/bin/python
import sys,os,random,math
def gen_list(m):
l=list()
for n in range(0,m-1):
try:
l.append(random.randint(0,m))
except MemoryError,err:
print('Out of memory? ',err)
# if (n%int(math.sqrt(m))==0):
# sys.stdout.write('.')
# sys.stdout.flush()
return l
random.seed()
d=dict()
max=10
if (len(sys.argv[1:])>=1):
max=int(sys.argv[1])
l=[0]*max
for i in range(0,max-1):
try:
d[i]=gen_list(max)
except MemoryError,err:
print('Out of memory? ',err)
if (i%int(math.sqrt(max))==0):
sys.stdout.write('+')
sys.stdout.flush()
print("\nDone: %i x %i" % (i+1,max))
os.system('free -m')
print('Press key...')
raw_input()
|
[
"rmoisa@yahoo.com"
] |
rmoisa@yahoo.com
|
8764406ec291bb88670cfa66bd83c286d9b5f3e3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02583/s498399445.py
|
de6b4c39cce18ecbe13a7eedbe9a880f7066f64d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
n = int(input())
l = sorted(map(int,input().split()))
ans = 0
for a in range(n):
for b in range(a+1,n):
for c in range(b+1,n):
if l[a] != l[b] != l[c] and l[a]+l[b] > l[c]:
ans += 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ff8346ce731f72d914f8d0a33d4da1b5cba310f1
|
289646aef1eb3e73fa657d62b77d8664b721a2a2
|
/controllers/controllers.py
|
d5ad470aa76fbdf9f9e0b6b177f0c14c8a75ce85
|
[] |
no_license
|
xmarts/axolot_tracking_beta
|
8a198fe8612b618a7effb29031418675433ca354
|
494780a50da3c470275d2d02a9f286870800aa97
|
refs/heads/master
| 2023-03-19T11:54:57.861380
| 2021-03-11T13:56:49
| 2021-03-11T13:56:49
| 344,592,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
# -*- coding: utf-8 -*-
# from odoo import http
# class MyModule4(http.Controller):
# @http.route('/my_module4/my_module4/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/my_module4/my_module4/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('my_module4.listing', {
# 'root': '/my_module4/my_module4',
# 'objects': http.request.env['my_module4.my_module4'].search([]),
# })
# @http.route('/my_module4/my_module4/objects/<model("my_module4.my_module4"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('my_module4.object', {
# 'object': obj
# })
|
[
"jesusalvarezxmarts@gmail.com"
] |
jesusalvarezxmarts@gmail.com
|
d968d99703c58fc9574ae881f38e3721e6f99fb4
|
741cd58673a025f7ecede5b1f53fc8435501c690
|
/Products_app/admin.py
|
1ccfca2afe910e6a2b9d9640ab386d4099a0d35f
|
[] |
no_license
|
icepablo/store
|
63f71145de0c30c74147f57978378eca025bd9a5
|
838f3493109056ea35f2066a2e412e887f3f09da
|
refs/heads/master
| 2020-03-24T00:33:46.738030
| 2018-09-25T12:34:12
| 2018-09-25T12:34:12
| 142,296,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Product,Category
admin.site.register(Product)
admin.site.register(Category)
|
[
"nontherlight@gmail.com"
] |
nontherlight@gmail.com
|
65b07e72f6b95b846c09908e68d9a63ad7c350f0
|
0157dc1de36498038514fc41f7a49ed0edb7abb6
|
/game.py
|
ff0303ae66d829357b3694d28936835a9c4cf5c9
|
[] |
no_license
|
sistemd/game-in-python
|
d790b25b5fbb2125af932745e113082064f238da
|
ec2a3619976c0af7b90f77fcb4dd924d6b71f659
|
refs/heads/master
| 2023-04-13T04:05:31.187950
| 2018-05-05T19:37:23
| 2018-05-05T19:37:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,772
|
py
|
from typing import Callable, Optional, List, Iterable
import enum
import abc
import sdl
import utils
GRAVITY = 1000
# Redundant type aliases?
Checkbox = sdl.Rectangle
Checkboxes = Iterable[Checkbox]
MainLoopCallback = Callable[[utils.Seconds], None]
def main_loop(cb: MainLoopCallback, fps: int) -> None:
end = start = utils.current_time()
while not sdl.quit_requested():
t = utils.current_time()
delta = utils.Seconds(t - start)
if delta < 1/fps:
continue
cb(delta)
start = end
end = t
class Sprite(abc.ABC):
@abc.abstractmethod
def render(self,
renderer: sdl.Renderer,
position: complex,
flip: Optional[sdl.Flip]=None) -> None:
pass
@property
@abc.abstractmethod
def dimensions(self) -> sdl.Dimensions:
pass
def update(self) -> None:
pass
class Animation(Sprite):
def __init__(self,
sprite_sheet: sdl.Texture,
frames: List[sdl.Rectangle],
frame_delay: utils.Seconds) -> None:
self.sprite_sheet = sprite_sheet
self.frames = frames
self.frame_delay = frame_delay
self.start_time = utils.current_time()
self.current_frame_num = 0
def render(self,
renderer: sdl.Renderer,
position: complex,
flip: Optional[sdl.Flip]=None) -> None:
renderer.render_texture(
self.sprite_sheet,
src=self.current_frame,
dst=sdl.Rectangle(position,
self.current_frame.dimensions),
flip=flip)
@property
def current_frame(self) -> sdl.Rectangle:
return self.frames[self.current_frame_num]
@property
def dimensions(self) -> sdl.Dimensions:
return self.current_frame.dimensions
def update(self) -> None:
self.update_current_frame_num()
def update_current_frame_num(self) -> None:
t = self.time_since_start()
self.current_frame_num = (int(t / self.frame_delay) % len(self.frames))
def done(self) -> bool:
return self.time_since_start() > self.frame_delay * len(self.frames)
def time_since_start(self) -> utils.Seconds:
return utils.Seconds(utils.current_time() - self.start_time)
class Image(Sprite):
def __init__(self,
sprite_sheet: sdl.Texture,
frame: sdl.Rectangle) -> None:
self.sprite_sheet = sprite_sheet
self.frame = frame
def render(self,
renderer: sdl.Renderer,
position: complex,
flip: Optional[sdl.Flip]=None) -> None:
renderer.render_texture(self.sprite_sheet,
src=self.frame,
dst=sdl.Rectangle(position,
self.frame.dimensions),
flip=flip)
@property
def dimensions(self) -> sdl.Dimensions:
return self.frame.dimensions
# TODO We shouldn't need this in the future
def even_frames(first_frame: sdl.Rectangle,
frame_count: int) -> List[sdl.Rectangle]:
return [
sdl.Rectangle(first_frame.width * i, first_frame.dimensions)
for i in range(0, frame_count)
]
@enum.unique
class Direction(enum.Enum):
LEFT = enum.auto()
RIGHT = enum.auto()
def to_flip(self) -> sdl.Flip:
if self == Direction.LEFT:
return sdl.Flip.HORIZONTAL
return sdl.Flip.NONE
class Entity:
def __init__(self, position: complex, sprite: Sprite) -> None:
self.position = position
self.sprite = sprite
@property
def checkbox(self) -> Checkbox:
return sdl.Rectangle(self.position, self.sprite.dimensions)
def update_sprite(self) -> None:
self.sprite.update()
class MovingEntity(Entity):
def __init__(self,
position: complex,
direction: Direction,
velocity: complex,
sprite: Sprite) -> None:
super().__init__(position, sprite)
self.direction = direction
self.velocity = velocity
def update_physics(self,
solid_boxes: Checkboxes,
delta: utils.Seconds) -> None:
self.apply_gravity(delta)
displacement = self.velocity * delta
imag_position_delta = displacement.imag
real_position_delta = displacement.real
        # TODO: find a smarter way to do this without the slight glitching
for box in solid_boxes:
if self.checkbox.vertically_overlaps(box):
if self.checkbox.is_above(box):
d = box.upper_left.imag - self.checkbox.lower_left.imag
if d < displacement.imag:
imag_position_delta = d
else:
d = self.checkbox.upper_left.imag - box.lower_left.imag
if d < -displacement.imag:
imag_position_delta = -d
elif self.checkbox.horizontally_overlaps(box):
if self.checkbox.is_left_from(box):
d = box.upper_right.real - self.checkbox.upper_left.real
if d < displacement.real:
real_position_delta = d
else:
d = self.checkbox.upper_left.real - box.upper_right.real
if d < -displacement.real:
real_position_delta = -d
self.position += real_position_delta + imag_position_delta * 1j
def apply_gravity(self, delta: utils.Seconds) -> None:
self.velocity += GRAVITY * delta * delta * 1j
|
[
"enntheprogrammer@gmail.com"
] |
enntheprogrammer@gmail.com
|
4a017370d471f61cc3780adf58a4ac8e2bb1676e
|
71536013ef36dfca22f43f822d5c8f5c42d763da
|
/functions/messaging.py
|
53cb46a208e2491a8b979526cdbf42d9c73c743e
|
[] |
no_license
|
jerryneal/TradeChart
|
9b179c541778fd3417c80f9e9d89aaf1c068ca42
|
51dbc269bd4697751ad1ad68c3e700b89439e159
|
refs/heads/master
| 2021-01-12T11:27:29.305368
| 2016-12-03T15:47:05
| 2016-12-03T15:47:05
| 72,930,614
| 0
| 0
| null | 2016-11-22T16:12:38
| 2016-11-05T14:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
from twilio.rest import TwilioRestClient
from configParser import *
import smtplib
import logging
class SendMessage():
def __init__(self):
self.cf = ParseConfig()
self.message = None
def sendSMSMessage(self,message):
try:
server = smtplib.SMTP(self.cf.gmSetup,self.cf.gmPort)
server.starttls()
server.login(self.cf.chapUserName,self.cf.chapPassword)
server.sendmail('API Test',self.cf.smsNumber, message)
logging.debug('Message has been sent to Phone')
except Exception as e:
print e
logging.debug('Message Not sent')
def sendTwilioMessage(self,message):
#Run client
client = TwilioRestClient(self.cf.account_sid,self.cf.token)
# try:
# message = client.sms.messages.create(to="+18023772744",from_="+15005550006",
# body='Were having a baby')
# # message = client.sms.messages.create()
# except Exception as e:
# print e
# print message, message.sid
def sendEmailMessage(self,message):
pass
#
# if __name__ == '__main__':
# logging.basicConfig(format='%(asctime)s.%(msecs).03d - %(levelname)s - %(module)s.%(funcName)s: %(message)s \n</br>',datefmt='%d%b%Y %H:%M:%S',level=cf.loglevel)
|
[
"mckenzo12@live.com"
] |
mckenzo12@live.com
|
0ce2eb38c6ab9d4f84d257ae0707be923f6db556
|
8ec6beee190c8abbc4ac69d79c3569cc0c04241b
|
/weather/migrations/0002_auto_20201122_1744.py
|
43bb60e9143cc00e21e6c82ab355bd214818fb62
|
[] |
no_license
|
mkyd-kill/Django-weatherapp
|
ea2141b19876c67e2f5c10763aded306ae06b1f9
|
360c24ba7ac8a0254b2be79a1f3cdaa2cb69e4b7
|
refs/heads/master
| 2023-07-13T13:07:45.063314
| 2021-08-21T14:00:36
| 2021-08-21T14:00:36
| 398,015,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# Generated by Django 3.1 on 2020-11-22 14:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('weather', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='city',
name='name',
field=models.CharField(max_length=50),
),
]
|
[
"romeomureithi@gmail.com"
] |
romeomureithi@gmail.com
|
bcd9ab42112147aa28a990704a1b3f4bd5082c92
|
f68cd225b050d11616ad9542dda60288f6eeccff
|
/testscripts/RDKB/component/PAM/TS_COSAPAM_UpnpDevGetMediaServerState.py
|
fe649ec40dc301d5f710ed0a0a615db7bb1f45f1
|
[
"Apache-2.0"
] |
permissive
|
cablelabs/tools-tdkb
|
18fb98fadcd169fa9000db8865285fbf6ff8dc9d
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
refs/heads/master
| 2020-03-28T03:06:50.595160
| 2018-09-04T11:11:00
| 2018-09-05T00:24:38
| 147,621,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,289
|
py
|
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>6</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the version as 1 -->
<name>TS_COSAPAM_UpnpDevGetMediaServerState</name>
<!-- If you are adding a new script you can specify the script name. Script Name should be unique same as this file name with out .py extension -->
<primitive_test_id> </primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>pam_GetParameterValues</primitive_test_name>
<!-- -->
<primitive_test_version>1</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
<synopsis>This test case returns the status of Upnp dev Media Server</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>1</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- execution_time is the time out time for test execution -->
<remarks></remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
<box_type>RPI</box_type>
<box_type>Broadband</box_type>
<!-- -->
<box_type>Emulator</box_type>
<!-- -->
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_COSAPAM_19</test_case_id>
<test_objective>To Validate PAM API CosaDmlUpnpDevGetMediaServerState</test_objective>
<test_type>Positive</test_type>
<test_setup>Emulator,Broadband</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components.
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>CosaDmlUpnpDevGetMediaServerState</api_or_interface_used>
<input_parameters>Input:
None</input_parameters>
<automation_approch>1.Function which needs to be tested will be configured in Test Manager GUI.
2.Python Script will be generated by Test Manager with arguments provided in configure page.
3.Test manager will load the COSAPAM library via Test agent
4.From python script, invoke COSAPAM_UpnpGetState() stub function to get UPNP dev MediaServer status
5.COSAPAM stub function will call the ssp_CosaDmlUpnpGetState function in TDK component which in turn will call cosa api CosaDmlUpnpGetMediaServerState() of the PAM Agent in RDKB stack.
6.Responses from Cosa API, TDK Component and COSAPAM stub function will be logged in Agent Console log.
7.COSAPAM stub will validate the actual result with the expected result and send the result status to Test Manager.
8.Test Manager will publish the result in GUI as PASS/FAILURE based on the response from COSAPAM stub.</automation_approch>
<except_output>CheckPoint 1:
Values associated with the parameter specified should be logged in the Agent console/Component log and Should get UPNP dev MediaServer status successfully
CheckPoint 2:
Stub function result should be success and should see corresponding log in the agent console log</except_output>
<priority>High</priority>
<test_stub_interface>COSAPAM_UpnpGetState</test_stub_interface>
<test_script>TS_COSAPAM_UpnpDevGetMediaServerState</test_script>
<skipped>No</skipped>
<release_version></release_version>
<remarks></remarks>
</test_cases>
<script_tags />
</xml>
'''
#import statement
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("pam","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with the corresponding Box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_COSAPAM_UpnpDevGetMediaServerState');
#Get the result of connection with test component and STB
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
if "SUCCESS" in loadmodulestatus.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
tdkTestObj = obj.createTestStep('COSAPAM_UpnpGetState');
tdkTestObj.addParameter("MethodName","UpnpMediaServer");
expectedresult="SUCCESS";
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the state of upnpdev MediaServer";
print "EXPECTED RESULT 1: Should get the state of upnpdev MediaServer";
print "ACTUAL RESULT 1: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult;
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the state of upnpdev MediaServer";
print "EXPECTED RESULT 1: Failure in getting the state of upnpdev MediaServer";
print "ACTUAL RESULT 1: %s" %details;
print "[TEST EXECUTION RESULT] : %s" %actualresult;
obj.unloadModule("pam");
else:
print "Failed to load pam module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
|
[
"jim.lawton@accenture.com"
] |
jim.lawton@accenture.com
|
8711eb0a24716d9305861bccc9eba32a5c7f40b0
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_145/697.py
|
3aa7d3d6b3e6a7823876b141a8df00a7a9a03a0d
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
import math
import fractions
T = int(input())
for t in range(1, T+1):
p,q = map(int, input().split('/'))
g = fractions.gcd(p,q)
p //= g
q //= g
plog = math.floor(math.log(p, 2))
qlog = math.log(q, 2)
qflog = math.floor(qlog)
if (qlog != qflog):
ans = "impossible"
else:
ans = qflog-plog
print("Case #{}: {}".format(t, ans))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2a869f0259fa62bd12e0441e047e3f7cd970d99c
|
d4976b3cbec9d017672e98eb762418eb98c03a6f
|
/training/train_cnn_timeseries.py
|
b079b2b0426934dfe9da7bd6c1a10ebd7f378c66
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
haydenshively/Tezos-Prediction
|
e6319b5b250aa1fcb7f329406e1da6e621e77782
|
0718fb45b0f9e12e388110394d251a7d9794d3c0
|
refs/heads/master
| 2022-11-19T17:39:55.270123
| 2020-07-01T21:05:33
| 2020-07-01T21:05:33
| 258,636,839
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,238
|
py
|
import os
from scipy import stats
import numpy as np
from tensorflow.keras.utils import Sequence
from transforms import GAF
from models import CNNTimeSeries
class MyData(Sequence):
def __init__(self, dir, batch_size, n_samples_in, n_samples_out=1, distrib_size=1):
history_chunks = []
files = os.listdir(dir)
files.sort()
for file in files:
filename = os.fsdecode(file)
if filename.endswith('.npy'):
history_chunks.append(np.load(os.path.join(dir, filename)))
# Combine all chunks into one big history
self.history = np.vstack(history_chunks)
# Reshape so that different currencies aren't in separate channels
shape = self.history.shape
self.history = self.history.reshape((shape[0], shape[1] * shape[2]))
# Save other info to instance
self.extremes = [self.history[:, 8].min(), self.history[:, 8].max()]
self.batch_size = batch_size
self.n_samples_in = n_samples_in
self.n_samples_out = n_samples_out
self.distrib_size = distrib_size
assert (self.n_samples_out == 1 or self.distrib_size == 1)
def __len__(self):
return (self.history.shape[0] - self.n_samples_in - self.n_samples_out) // self.batch_size - 2
def __getitem__(self, idx):
batch_start = idx * self.batch_size
X = np.zeros((self.batch_size, self.n_samples_in, self.n_samples_in, 1))
Y = np.zeros((self.batch_size, self.n_samples_out if self.n_samples_out > 1 else self.distrib_size))
for offset in range(self.batch_size):
series_x_0 = batch_start + offset
series_x_n = batch_start + offset + self.n_samples_in
out_n = self.n_samples_out
series = self.history[series_x_0:(series_x_n + out_n), 8]
gaf_series = GAF(series[:-out_n], extremes=[series.min(), series.max()])
gaf_out = GAF(series)
X[offset] = np.expand_dims(gaf_series.encoded, -1)
if self.n_samples_out > 1:
Y[offset] = gaf_out.series[-out_n:]
else:
norm = stats.norm(loc=gaf_out.series[-1], scale=.4)
Y[offset] = norm.pdf(np.linspace(-1.0, 1.0, self.distrib_size))
return X, Y
def main(data_dir):
BATCH_SIZE = 16
N_SAMPLES_IN = 40 # divide by 10 to get # hours the sequence covers
N_SAMPLES_OUT = 5
PROB_DISTRIB = 1
generator = MyData(
data_dir,
BATCH_SIZE,
N_SAMPLES_IN,
N_SAMPLES_OUT,
PROB_DISTRIB
)
cnn = CNNTimeSeries(
(N_SAMPLES_IN, N_SAMPLES_IN, 1),
N_SAMPLES_OUT if N_SAMPLES_OUT > 1 else PROB_DISTRIB,
BATCH_SIZE
)
cnn.build()
cnn.compile()
cnn.model.fit(generator,
epochs=6,
shuffle=True,
verbose=1,
steps_per_epoch=len(generator))
cnn.model.save('models/cnn_timeseries_%d_%d_%d_%d.h5' % (
BATCH_SIZE, N_SAMPLES_IN, N_SAMPLES_OUT, PROB_DISTRIB
))
if __name__ == '__main__':
main('../dataset/train')
|
[
"haydenshively@gmail.com"
] |
haydenshively@gmail.com
|
58362d785398190d27cf4b31b0dc8c4f2fb5fb8e
|
27ba1f72b60a5d2ea4050c292546537c99d3b7d2
|
/setup.py
|
8e0658b4e2880ddcce9a33629a88a915a4e7b02e
|
[] |
no_license
|
michieljmmaas/CoronaSorter
|
4b751e47128a8c96f3975f71c2edd2c2fb5cca2e
|
18cb7f59851a1c47a96192a357ed3f28098370ee
|
refs/heads/master
| 2021-05-20T02:13:06.535071
| 2020-04-20T21:25:02
| 2020-04-20T21:25:02
| 252,142,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from setuptools import setup
setup(name='CoronaSorter', version='1.1', packages=[''], url='', license='', author='MichielMaas', author_email='michieljmmaas@gmail.com', description='Plot van de CSV data geleverd door RIVM')
|
[
"michieljmmaas@gmail.com"
] |
michieljmmaas@gmail.com
|
88890341a60f319b1124a3764a29d3a184e1b5ad
|
7fee13fbe3d9a36ccd928d821966846853fc5d75
|
/proj1/tests/string.unicode-unnamed-character.py
|
135b53667085a742c25e59bd414cb5e2fd36e1c1
|
[] |
no_license
|
sphippen/uofu-compilers-tests
|
8151cba16613a85d68c7a84917036530765ffc32
|
a5869072f1cf468f652f855aeb511c9fa2d67ae3
|
refs/heads/master
| 2021-01-23T20:22:45.542518
| 2015-05-01T06:28:02
| 2015-05-01T06:28:02
| 30,105,673
| 2
| 3
| null | 2015-03-30T19:20:22
| 2015-01-31T07:20:03
|
Python
|
UTF-8
|
Python
| false
| false
| 16
|
py
|
"\N{<control>}"
|
[
"joshkunz@me.com"
] |
joshkunz@me.com
|
ec8df99aa40209f1f2b0cb8c8fabf01bc1520036
|
284566560a7e128573e81e4782d5a97ddcd5320f
|
/src/main/resources/parser/rubocop.py
|
97301a9eb7bdfe4537c064e951ab9d0a6780ba9d
|
[] |
no_license
|
xebialabs-community/xltv-rubocop-plugin
|
41ccc30ef82c8d62765bdd1fac3b5f12df6c57a0
|
efb36c48492a7746e2a923971d8d61bd16071809
|
refs/heads/master
| 2021-01-12T08:46:00.811852
| 2019-05-10T17:12:20
| 2019-05-10T17:12:20
| 76,682,534
| 0
| 1
| null | 2016-12-16T20:35:58
| 2016-12-16T20:11:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,898
|
py
|
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS
# FOR A PARTICULAR PURPOSE. THIS CODE AND INFORMATION ARE NOT SUPPORTED BY XEBIALABS.
#
import json
from parser.xunit import throw_if_some_failed, parse_last_modified, parse_junit_test_results, open_file
def rubocop_validate_files(files):
filtered = []
for file in files:
if str(file).endswith("json"):
filtered.append(file)
throw_if_some_failed(files, filtered)
def rubocop_iterate_test_cases(file):
"""
Iterate all test cases found in `file`.
:param file:
:return: a list/iterator of tuples (test case node, test hierarchy path)
"""
with open_file(file) as data_file:
features = json.load(data_file)
for rubytest in features["files"]:
yield (rubytest, (rubytest['path'], rubytest['path'], '0'))
def rubocop_duration(splitResult):
return 0
def rubocop_result(scenario):
offenses = scenario["offenses"]
for offense in offenses:
severity = offense["severity"]
if severity in ("refactor","convention","warning"):
continue
elif severity in ("error", "fatal"):
return "FAILED"
else:
return "OTHER"
return "PASSED"
def rubocop_failure_reason(scenario):
offenses = scenario["offenses"]
for offense in offenses:
severity = offense["severity"]
if severity not in ("refactor","convention","warning"):
error_message = offense["message"]
unicode_error_message = unicode(error_message, "utf-8")
return unicode_error_message.encode("ascii", "xmlcharrefreplace")
return None
def rubocop_custom_properties(scenario, file):
return {
"path": scenario["path"]
}
def rubocop_last_modified(file):
return file.lastModified()
rubocop_validate_files(files)
last_modified = parse_last_modified(files, extract_last_modified=rubocop_last_modified)
print 'LAST MOD', last_modified, test_run_historian.isKnownKey(str(last_modified))
if not test_run_historian.isKnownKey(str(last_modified)):
events = parse_junit_test_results(files, last_modified,
iterate_test_cases=rubocop_iterate_test_cases,
extract_duration=rubocop_duration,
extract_result=rubocop_result,
extract_failure_reason=rubocop_failure_reason,
extract_custom_properties=rubocop_custom_properties)
print 'built run with events', events
else:
events = []
# Result holder should contain a list of test runs. A test run is a list of events
result_holder.result = [events] if events else []
|
[
"joris.dewinne@gmail.com"
] |
joris.dewinne@gmail.com
|
88e7578e5af18f773fac5562716a88e5c7a6dbdb
|
0d4ec25fb2819de88a801452f176500ccc269724
|
/min_queue.py
|
509cca9eabd345107e70355d3cb50c52554d3d73
|
[] |
no_license
|
zopepy/leetcode
|
7f4213764a6a079f58402892bd0ede0514e06fcf
|
3bfee704adb1d94efc8e531b732cf06c4f8aef0f
|
refs/heads/master
| 2022-01-09T16:13:09.399620
| 2019-05-29T20:00:11
| 2019-05-29T20:00:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
"""
Monotonic max-queue used for the sliding-window maximum problem.
enqueue:
    add ele to the queue
    remove all elements from the max list that are smaller than cur ele
    add ele to the max list
dequeue:
    remove ele from the queue
    remove ele from the front of the max list if it equals the removed element
"""
from collections import deque
class MaxQueue():
def __init__(self):
self.queue = deque([])
self.max_queue = []
    def enqueue(self, x):
self.queue.append(x)
while self.max_queue:
if self.max_queue[-1] < x:
self.max_queue.pop()
else:
self.max_queue.append(x)
break
if not self.max_queue:
self.max_queue.append(x)
def dequeue(self):
ele = self.queue.popleft()
if ele == self.max_queue[0]:
self.max_queue = self.max_queue[1:]
def get_max(self):
return self.max_queue[0]
class Solution:
def maxSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
response = []
if not nums:
return response
l = len(nums)
mx = MaxQueue()
for i in range(0, k):
            mx.enqueue(nums[i])
response.append(mx.get_max())
for i in range(k, l):
mx.dequeue()
            mx.enqueue(nums[i])
response.append(mx.get_max())
return response
s = Solution()
# nms = [1,3,-1,-3,5,3,6,7]
nms = [8,7,6,5,4,3,2]
print(s.maxSlidingWindow(nms, 1))
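# The commented-out list above, run with a window of size 3, gives the classic
# sliding-window-maximum result (traced by hand through the MaxQueue logic):
# s.maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3) -> [3, 3, 5, 5, 6, 7]
# The call with k=1, as written, simply echoes the input list element by element.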
|
[
"rohithiitj@gmail.com"
] |
rohithiitj@gmail.com
|
b325113484a7068d0bd9fb42c74bfdd16fdfa3fc
|
a1cbca323d0436af13bf7d273479a1981791d677
|
/大数据/pyspark/zml_salary_predict/salary_predict_feature/resume_education_feature.py
|
4edef9c1319960847f96d0f11cb9b6578d4c65f8
|
[] |
no_license
|
reganzm/ai
|
a23bce1d448e4d14b3877774e946be291d17c3e1
|
4572975e14bf4e4160f18db9c17717eab2051ea1
|
refs/heads/master
| 2020-03-26T05:23:32.507691
| 2018-12-10T06:25:02
| 2018-12-10T06:25:02
| 144,554,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,074
|
py
|
# -*- coding: utf-8 -*-
# Encode the school, degree, and major fields in the resume education data
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType
if __name__ == '__main__':
spark = SparkSession.builder.master("yarn").appName("resume_educations_data_prepare").config("spark.ui.port",
"44040") \
.config('spark.default.parallelism', '40') \
.config('spark.executor.memory', '6G') \
.config('spark.driver.memory', '6G') \
.config('spark.executor.cores', '10') \
.config('spark.dynamicAllocation.minExecutors', '50') \
.config('spark.dynamicAllocation.initialExecutors', '50') \
.config('spark.task.cpus', '2') \
.config('spark.default.defaultMinPartitions', '1000') \
.config('spark.executor.memoryOverhead', '4G') \
.getOrCreate()
sc = spark.sparkContext
sc.setLogLevel('WARN')
major_df_t = spark.read.csv('/data/datasets/salary_predict/major.csv', header=True).toPandas().to_dict()
major_df_map = dict(zip(major_df_t['name'].values(), major_df_t['id'].values()))
school_df_t = spark.read.csv('/data/datasets/salary_predict/university.csv', header=True).select('sid',
'name').toPandas().to_dict()
school_df_map = dict(zip(school_df_t['name'].values(), school_df_t['sid'].values()))
degree_df_t = spark.read.csv('/data/datasets/salary_predict/degree.csv', header=True).toPandas().to_dict()
degree_df_map = dict(zip(degree_df_t['name'].values(), degree_df_t['id'].values()))
def func_school(name):
if school_df_map.get(name):
return int(school_df_map.get(name))
else:
return int(school_df_map.get('unknown'))
def func_major(name):
if major_df_map.get(name):
return int(major_df_map.get(name))
else:
return int(major_df_map.get('unknown'))
def func_degree(name):
if degree_df_map.get(name):
return int(degree_df_map.get(name))
else:
return int(degree_df_map.get('unknown'))
def register_udf(spark):
udf = spark.udf
udf.register('func_degree', func_degree, returnType=IntegerType())
udf.register('func_school', func_school, returnType=IntegerType())
udf.register('func_major', func_major, returnType=IntegerType())
def work(spark):
df = spark.read.json(
'/user/bigdata/BI/resume_flatten_v1_20180813/resume_educations.json').createOrReplaceTempView('A')
s_sql = """
select *,func_degree(degree) as degree_code,func_school(school) as school_code, func_major(major) as major_code from A
"""
spark.sql(s_sql).repartition(100).write.mode('overwrite').parquet(
'/user/bigdata/BI/resume_flatten_v1_20180813/resume_educations_with_codes.parquet')
register_udf(spark)
work(spark)
spark.stop()
|
[
"zhangmin@shandudata.com"
] |
zhangmin@shandudata.com
|
43c4a5bc0ee159d5d78f69a2595b75a757ec59c3
|
97f2d3e77c562feafc5612b2628bf917ddf9627e
|
/home/urls.py
|
1dba72dc2a0b77562d794244861336a429c317bf
|
[] |
no_license
|
isottellina/p11-purbeurre-fix
|
b2109f85052d6596191905f8db5ca4d9c25f8b4f
|
c6d62f209888316ffc9485af436b5f70fb490e8d
|
refs/heads/master
| 2023-08-17T02:38:42.112947
| 2020-06-17T18:44:20
| 2020-06-17T18:44:20
| 262,383,107
| 0
| 1
| null | 2021-09-22T18:59:42
| 2020-05-08T17:13:12
|
Python
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
# urls.py ---
#
# Filename: urls.py
# Author: Louise <louise>
# Created: Sun Apr 26 21:16:25 2020 (+0200)
# Last-Updated: Mon Apr 27 00:15:10 2020 (+0200)
# By: Louise <louise>
#
from django.urls import path
from . import views
app_name = 'home'
urlpatterns = [
path('', views.index, name='index'),
path('legal', views.legal, name='legal')
]
|
[
"louise@zanier.org"
] |
louise@zanier.org
|
4440815abc0a8366b2c885d6b9a1639c097b19ce
|
c0e0f7fb132dbb9225639044e0daa354789a4101
|
/model/CustInfo.py
|
88febc56bbef470a0ae6e21a531973421c72ea4e
|
[] |
no_license
|
tomdziwood/PreciousClients
|
aff7bde3039795e24cd6ee4c044bf5711ba32ddd
|
582b2a161614a73fc701b1b02ade3b089bf38603
|
refs/heads/master
| 2023-04-29T00:03:47.376082
| 2021-05-24T21:24:32
| 2021-05-24T21:24:32
| 369,925,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 910
|
py
|
class CustInfo:
def __init__(self, id, firstname, lastname, street_address, district, voivodship, postcode, est_income, own_or_rent, date):
self.id = id
self.firstname = firstname
self.lastname = lastname
self.street_address = street_address
self.district = district
self.voivodship = voivodship
self.postcode = postcode
self.est_income = est_income
self.own_or_rent = own_or_rent
self.date = date
def __str__(self):
str_list = [str(self.id),
self.firstname,
self.lastname,
self.street_address,
self.district,
self.voivodship,
str(self.postcode),
str(self.est_income),
self.own_or_rent,
self.date]
return '|'.join(str_list)
|
[
"tomdziwood@gmail.com"
] |
tomdziwood@gmail.com
|
fd4085640fc9754859cef2e874972d817cf51803
|
06f7ffdae684ac3cc258c45c3daabce98243f64f
|
/vsts/vsts/service_hooks/v4_1/models/publisher.py
|
065981f8d9f43c06f237b5a4594cdddc056d4ec9
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
kenkuo/azure-devops-python-api
|
7dbfb35f1c9637c9db10207824dd535c4d6861e8
|
9ac38a97a06ee9e0ee56530de170154f6ed39c98
|
refs/heads/master
| 2020-04-03T17:47:29.526104
| 2018-10-25T17:46:09
| 2018-10-25T17:46:09
| 155,459,045
| 0
| 0
|
MIT
| 2018-10-30T21:32:43
| 2018-10-30T21:32:42
| null |
UTF-8
|
Python
| false
| false
| 2,589
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Publisher(Model):
"""Publisher.
:param _links: Reference Links
:type _links: :class:`ReferenceLinks <service-hooks.v4_1.models.ReferenceLinks>`
:param description: Gets this publisher's localized description.
:type description: str
:param id: Gets this publisher's identifier.
:type id: str
:param input_descriptors: Publisher-specific inputs
:type input_descriptors: list of :class:`InputDescriptor <service-hooks.v4_1.models.InputDescriptor>`
:param name: Gets this publisher's localized name.
:type name: str
:param service_instance_type: The service instance type of the first party publisher.
:type service_instance_type: str
:param supported_events: Gets this publisher's supported event types.
:type supported_events: list of :class:`EventTypeDescriptor <service-hooks.v4_1.models.EventTypeDescriptor>`
:param url: The url for this resource
:type url: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'input_descriptors': {'key': 'inputDescriptors', 'type': '[InputDescriptor]'},
'name': {'key': 'name', 'type': 'str'},
'service_instance_type': {'key': 'serviceInstanceType', 'type': 'str'},
'supported_events': {'key': 'supportedEvents', 'type': '[EventTypeDescriptor]'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, _links=None, description=None, id=None, input_descriptors=None, name=None, service_instance_type=None, supported_events=None, url=None):
super(Publisher, self).__init__()
self._links = _links
self.description = description
self.id = id
self.input_descriptors = input_descriptors
self.name = name
self.service_instance_type = service_instance_type
self.supported_events = supported_events
self.url = url
|
[
"tedchamb@microsoft.com"
] |
tedchamb@microsoft.com
|
664c3548f179f3d8b289a9433cca256bd21dbeb3
|
9a7f5f07c550e02f13c79d88d0a2c39a34e4b891
|
/rules/discovery/rule_artifactory.py
|
75c1e67e7fbe0fdcfa1602c7b965d77cf60ad1be
|
[] |
no_license
|
Saloniimathur/vuln-scanner-flask
|
90901a5167761e0fccf10b6dcf92ad5aeab44333
|
07ab9b715233275f86e3d3328a0feaa0ed6c404a
|
refs/heads/master
| 2023-04-12T09:03:53.189102
| 2021-05-10T07:56:14
| 2021-05-10T07:56:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
from core.redis import rds
from core.triage import Triage
from core.parser import ScanParser
class Rule:
def __init__(self):
self.rule = 'DSC_SSB9'
self.rule_severity = 2
self.rule_description = 'This rule checks for the exposure of Artifactory Panels'
self.rule_confirm = 'Identified an Artifactory Panel'
self.rule_details = ''
self.rule_mitigation = '''Identify whether the application in question is supposed to be exposed to the network.'''
self.rule_match_string = {
'/artifactory/webapp':{
'app':'JFROG_ARTIFACTORY',
'match':['artifactory.ui', 'artifactory_views'],
'title':'Artifactory'
},
'/artifactory/libs-release':{
'app':'JFROG_LIB_RELEASE',
'match':['Index of libs-release/'],
'title':'Artifactory Directory Exposure'
},
}
self.intensity = 1
def check_rule(self, ip, port, values, conf):
t = Triage()
p = ScanParser(port, values)
domain = p.get_domain()
module = p.get_module()
if 'http' in module:
for uri, values in self.rule_match_string.items():
app_title = values['title']
resp = t.http_request(ip, port, uri=uri)
if resp is not None:
for match in values['match']:
if match in resp.text:
self.rule_details = 'Exposed {} at {}'.format(app_title, resp.url)
rds.store_vuln({
'ip':ip,
'port':port,
'domain':domain,
'rule_id':self.rule,
'rule_sev':self.rule_severity,
'rule_desc':self.rule_description,
'rule_confirm':self.rule_confirm,
'rule_details':self.rule_details,
'rule_mitigation':self.rule_mitigation
})
break
return
|
[
"krisna.pranav@gmail.com"
] |
krisna.pranav@gmail.com
|
b56ed93be4561121e0fb330608d921547dad4529
|
9aa776778cf6734c98c63eb390b3766be7e91e49
|
/Assignment 7/train_eval.py
|
bfac93bca86862ded408fc902b00743dcd4a60f2
|
[] |
no_license
|
rushi-the-neural-arch/EVA6-Computer-Vision
|
867749543dd166461f1552d9c0308bce59d9c1f0
|
8fa41fdb17f11d666189a52e2a9939d9ad7a3c20
|
refs/heads/main
| 2023-07-09T03:09:42.468784
| 2021-08-07T13:55:31
| 2021-08-07T13:55:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,652
|
py
|
import torch.nn as nn
import torch
from utils import calculate_accuracy
def train(model, iterator, optimizer, scheduler, criterion, l1_factor, device):
epoch_loss = 0
epoch_acc = 0
model.train()
for (x, y) in iterator:
x = x.to(device)
y = y.to(device)
optimizer.zero_grad()
y_pred = model(x)
#print(y_pred.shape, y.shape)
loss = criterion(y_pred, y)
if l1_factor > 0:
L1_loss = nn.L1Loss(size_average=None, reduce=None, reduction='mean')
reg_loss = 0
for param in model.parameters():
zero_vector = torch.rand_like(param) * 0
reg_loss += L1_loss(param,zero_vector)
loss += l1_factor * reg_loss
acc = calculate_accuracy(y_pred, y)
loss.backward()
optimizer.step()
scheduler.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion, device):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for (x, y) in iterator:
x = x.to(device)
y = y.to(device)
y_pred = model(x)
loss = criterion(y_pred, y)
acc = calculate_accuracy(y_pred, y)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
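# Illustrative sketch of a typical training loop wired to the two functions above.
# The model, data loaders, and hyper-parameters below are placeholders/assumptions,
# not part of this repository, so the sketch is left commented out:
#
# import torch.optim as optim
#
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# model = MyModel().to(device)                      # hypothetical model
# criterion = nn.CrossEntropyLoss()
# optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# scheduler = optim.lr_scheduler.OneCycleLR(
#     optimizer, max_lr=0.1, epochs=20, steps_per_epoch=len(train_loader))
# for epoch in range(20):
#     train_loss, train_acc = train(model, train_loader, optimizer, scheduler,
#                                   criterion, l1_factor=0.0, device=device)
#     val_loss, val_acc = evaluate(model, test_loader, criterion, device)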
|
[
"noreply@github.com"
] |
rushi-the-neural-arch.noreply@github.com
|
a4260a14f3673d699a88bb6b289abad6bd38beb7
|
cbfddfdf5c7fa8354162efe50b41f84e55aff118
|
/venv/lib/python3.7/site-packages/nltk/treeprettyprinter.py
|
9d92c098bfa27d9d8ba042cb447f85f039ae656c
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
tclerico/SAAC
|
8d2245221dd135aea67c5e079ac7eaf542b25e2f
|
2f52007ae8043096662e76da828a84e87f71091e
|
refs/heads/master
| 2022-12-09T21:56:33.430404
| 2019-02-20T14:23:51
| 2019-02-20T14:23:51
| 153,152,229
| 3
| 0
|
MIT
| 2022-09-16T17:52:47
| 2018-10-15T17:13:29
|
Python
|
UTF-8
|
Python
| false
| false
| 24,342
|
py
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: ASCII visualization of NLTK trees
#
# Copyright (C) 2001-2018 NLTK Project
# Author: Andreas van Cranenburgh <A.W.vanCranenburgh@uva.nl>
# Peter Ljunglöf <peter.ljunglof@gu.se>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Pretty-printing of discontinuous trees.
Adapted from the disco-dop project, by Andreas van Cranenburgh.
https://github.com/andreasvc/disco-dop
Interesting reference (not used for this code):
T. Eschbach et al., Orth. Hypergraph Drawing, Journal of
Graph Algorithms and Applications, 10(2) 141--157 (2006)149.
http://jgaa.info/accepted/2006/EschbachGuentherBecker2006.10.2.pdf
"""
from __future__ import division, print_function, unicode_literals
from nltk.util import slice_bounds, OrderedDict
from nltk.compat import python_2_unicode_compatible, unicode_repr
from nltk.internals import raise_unorderable_types
from nltk.tree import Tree
import re
import sys
import codecs
from cgi import escape
from collections import defaultdict
from operator import itemgetter
from itertools import chain, islice
ANSICOLOR = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
}
@python_2_unicode_compatible
class TreePrettyPrinter(object):
"""
Pretty-print a tree in text format, either as ASCII or Unicode.
The tree can be a normal tree, or discontinuous.
``TreePrettyPrinter(tree, sentence=None, highlight=())``
creates an object from which different visualizations can be created.
:param tree: a Tree object.
:param sentence: a list of words (strings). If `sentence` is given,
`tree` must contain integers as leaves, which are taken as indices
in `sentence`. Using this you can display a discontinuous tree.
:param highlight: Optionally, a sequence of Tree objects in `tree` which
should be highlighted. Has the effect of only applying colors to nodes
in this sequence (nodes should be given as Tree objects, terminals as
indices).
>>> from nltk.tree import Tree
>>> tree = Tree.fromstring('(S (NP Mary) (VP walks))')
>>> print(TreePrettyPrinter(tree).text())
... # doctest: +NORMALIZE_WHITESPACE
S
____|____
NP VP
| |
Mary walks
"""
def __init__(self, tree, sentence=None, highlight=()):
if sentence is None:
leaves = tree.leaves()
if (leaves and not any(len(a) == 0 for a in tree.subtrees())
and all(isinstance(a, int) for a in leaves)):
sentence = [str(a) for a in leaves]
else:
# this deals with empty nodes (frontier non-terminals)
# and multiple/mixed terminals under non-terminals.
tree = tree.copy(True)
sentence = []
for a in tree.subtrees():
if len(a) == 0:
a.append(len(sentence))
sentence.append(None)
elif any(not isinstance(b, Tree) for b in a):
for n, b in enumerate(a):
if not isinstance(b, Tree):
a[n] = len(sentence)
sentence.append('%s' % b)
self.nodes, self.coords, self.edges, self.highlight = self.nodecoords(
tree, sentence, highlight)
def __str__(self):
return self.text()
def __repr__(self):
return '<TreePrettyPrinter with %d nodes>' % len(self.nodes)
@staticmethod
def nodecoords(tree, sentence, highlight):
"""
Produce coordinates of nodes on a grid.
Objective:
- Produce coordinates for a non-overlapping placement of nodes and
horizontal lines.
- Order edges so that crossing edges cross a minimal number of previous
horizontal lines (never vertical lines).
Approach:
- bottom up level order traversal (start at terminals)
- at each level, identify nodes which cannot be on the same row
- identify nodes which cannot be in the same column
- place nodes into a grid at (row, column)
- order child-parent edges with crossing edges last
Coordinates are (row, column); the origin (0, 0) is at the top left;
the root node is on row 0. Coordinates do not consider the size of a
node (which depends on font, &c), so the width of a column of the grid
should be automatically determined by the element with the greatest
width in that column. Alternatively, the integer coordinates could be
converted to coordinates in which the distances between adjacent nodes
are non-uniform.
Produces tuple (nodes, coords, edges, highlighted) where:
- nodes[id]: Tree object for the node with this integer id
- coords[id]: (n, m) coordinate where to draw node with id in the grid
- edges[id]: parent id of node with this id (ordered dictionary)
- highlighted: set of ids that should be highlighted
"""
def findcell(m, matrix, startoflevel, children):
"""
Find vacant row, column index for node ``m``.
Iterate over current rows for this level (try lowest first)
and look for cell between first and last child of this node,
add new row to level if no free row available.
"""
candidates = [a for _, a in children[m]]
minidx, maxidx = min(candidates), max(candidates)
leaves = tree[m].leaves()
center = scale * sum(leaves) // len(leaves) # center of gravity
if minidx < maxidx and not minidx < center < maxidx:
center = sum(candidates) // len(candidates)
if max(candidates) - min(candidates) > 2 * scale:
center -= center % scale # round to unscaled coordinate
if minidx < maxidx and not minidx < center < maxidx:
center += scale
if ids[m] == 0:
startoflevel = len(matrix)
for rowidx in range(startoflevel, len(matrix) + 1):
if rowidx == len(matrix): # need to add a new row
matrix.append([vertline if a not in (corner, None)
else None for a in matrix[-1]])
row = matrix[rowidx]
i = j = center
if len(children[m]) == 1: # place unaries directly above child
return rowidx, next(iter(children[m]))[1]
elif all(a is None or a == vertline for a
in row[min(candidates):max(candidates) + 1]):
# find free column
for n in range(scale):
i = j = center + n
while j > minidx or i < maxidx:
if i < maxidx and (matrix[rowidx][i] is None
or i in candidates):
return rowidx, i
elif j > minidx and (matrix[rowidx][j] is None
or j in candidates):
return rowidx, j
i += scale
j -= scale
raise ValueError('could not find a free cell for:\n%s\n%s'
'min=%d; max=%d' % (tree[m], minidx, maxidx, dumpmatrix()))
def dumpmatrix():
"""Dump matrix contents for debugging purposes."""
return '\n'.join(
'%2d: %s' % (n, ' '.join(('%2r' % i)[:2] for i in row))
for n, row in enumerate(matrix))
leaves = tree.leaves()
if not all(isinstance(n, int) for n in leaves):
raise ValueError('All leaves must be integer indices.')
if len(leaves) != len(set(leaves)):
raise ValueError('Indices must occur at most once.')
if not all(0 <= n < len(sentence) for n in leaves):
raise ValueError('All leaves must be in the interval 0..n '
'with n=len(sentence)\ntokens: %d indices: '
'%r\nsentence: %s' % (len(sentence), tree.leaves(), sentence))
vertline, corner = -1, -2 # constants
tree = tree.copy(True)
for a in tree.subtrees():
a.sort(key=lambda n: min(n.leaves()) if isinstance(n, Tree) else n)
scale = 2
crossed = set()
# internal nodes and lexical nodes (no frontiers)
positions = tree.treepositions()
maxdepth = max(map(len, positions)) + 1
childcols = defaultdict(set)
matrix = [[None] * (len(sentence) * scale)]
nodes = {}
ids = dict((a, n) for n, a in enumerate(positions))
highlighted_nodes = set(n for a, n in ids.items()
if not highlight or tree[a] in highlight)
levels = dict((n, []) for n in range(maxdepth - 1))
terminals = []
for a in positions:
node = tree[a]
if isinstance(node, Tree):
levels[maxdepth - node.height()].append(a)
else:
terminals.append(a)
for n in levels:
levels[n].sort(key=lambda n: max(tree[n].leaves())
- min(tree[n].leaves()))
terminals.sort()
positions = set(positions)
for m in terminals:
i = int(tree[m]) * scale
assert matrix[0][i] is None, (matrix[0][i], m, i)
matrix[0][i] = ids[m]
nodes[ids[m]] = sentence[tree[m]]
if nodes[ids[m]] is None:
nodes[ids[m]] = '...'
highlighted_nodes.discard(ids[m])
positions.remove(m)
childcols[m[:-1]].add((0, i))
# add other nodes centered on their children,
# if the center is already taken, back off
# to the left and right alternately, until an empty cell is found.
for n in sorted(levels, reverse=True):
nodesatdepth = levels[n]
startoflevel = len(matrix)
matrix.append([vertline if a not in (corner, None) else None
for a in matrix[-1]])
for m in nodesatdepth: # [::-1]:
if n < maxdepth - 1 and childcols[m]:
_, pivot = min(childcols[m], key=itemgetter(1))
if (set(a[:-1] for row in matrix[:-1] for a in row[:pivot]
if isinstance(a, tuple)) &
set(a[:-1] for row in matrix[:-1] for a in row[pivot:]
if isinstance(a, tuple))):
crossed.add(m)
rowidx, i = findcell(m, matrix, startoflevel, childcols)
positions.remove(m)
# block positions where children of this node branch out
for _, x in childcols[m]:
matrix[rowidx][x] = corner
# assert m == () or matrix[rowidx][i] in (None, corner), (
# matrix[rowidx][i], m, str(tree), ' '.join(sentence))
# node itself
matrix[rowidx][i] = ids[m]
nodes[ids[m]] = tree[m]
# add column to the set of children for its parent
if m != ():
childcols[m[:-1]].add((rowidx, i))
assert len(positions) == 0
# remove unused columns, right to left
for m in range(scale * len(sentence) - 1, -1, -1):
if not any(isinstance(row[m], (Tree, int))
for row in matrix):
for row in matrix:
del row[m]
# remove unused rows, reverse
matrix = [row for row in reversed(matrix)
if not all(a is None or a == vertline for a in row)]
# collect coordinates of nodes
coords = {}
for n, _ in enumerate(matrix):
for m, i in enumerate(matrix[n]):
if isinstance(i, int) and i >= 0:
coords[i] = n, m
# move crossed edges last
positions = sorted([a for level in levels.values()
for a in level], key=lambda a: a[:-1] in crossed)
# collect edges from node to node
edges = OrderedDict()
for i in reversed(positions):
for j, _ in enumerate(tree[i]):
edges[ids[i + (j, )]] = ids[i]
return nodes, coords, edges, highlighted_nodes
def text(self, nodedist=1, unicodelines=False, html=False, ansi=False,
nodecolor='blue', leafcolor='red', funccolor='green',
abbreviate=None, maxwidth=16):
"""
:return: ASCII art for a discontinuous tree.
:param unicodelines: whether to use Unicode line drawing characters
instead of plain (7-bit) ASCII.
:param html: whether to wrap output in html code (default plain text).
:param ansi: whether to produce colors with ANSI escape sequences
(only effective when html==False).
:param leafcolor, nodecolor: specify colors of leaves and phrasal
nodes; effective when either html or ansi is True.
        :param abbreviate: if True, abbreviate labels longer than 5 characters.
            If integer, abbreviate labels longer than that many characters.
:param maxwidth: maximum number of characters before a label starts to
wrap; pass None to disable.
"""
if abbreviate == True:
abbreviate = 5
if unicodelines:
horzline = '\u2500'
leftcorner = '\u250c'
rightcorner = '\u2510'
vertline = ' \u2502 '
tee = horzline + '\u252C' + horzline
bottom = horzline + '\u2534' + horzline
cross = horzline + '\u253c' + horzline
ellipsis = '\u2026'
else:
horzline = '_'
leftcorner = rightcorner = ' '
vertline = ' | '
tee = 3 * horzline
cross = bottom = '_|_'
ellipsis = '.'
def crosscell(cur, x=vertline):
"""Overwrite center of this cell with a vertical branch."""
splitl = len(cur) - len(cur) // 2 - len(x) // 2 - 1
lst = list(cur)
lst[splitl:splitl + len(x)] = list(x)
return ''.join(lst)
result = []
matrix = defaultdict(dict)
maxnodewith = defaultdict(lambda: 3)
maxnodeheight = defaultdict(lambda: 1)
maxcol = 0
minchildcol = {}
maxchildcol = {}
childcols = defaultdict(set)
labels = {}
wrapre = re.compile('(.{%d,%d}\\b\\W*|.{%d})' % (
maxwidth - 4, maxwidth, maxwidth))
# collect labels and coordinates
for a in self.nodes:
row, column = self.coords[a]
matrix[row][column] = a
maxcol = max(maxcol, column)
label = (self.nodes[a].label() if isinstance(self.nodes[a], Tree)
else self.nodes[a])
if abbreviate and len(label) > abbreviate:
label = label[:abbreviate] + ellipsis
if maxwidth and len(label) > maxwidth:
label = wrapre.sub(r'\1\n', label).strip()
label = label.split('\n')
maxnodeheight[row] = max(maxnodeheight[row], len(label))
maxnodewith[column] = max(maxnodewith[column], max(map(len, label)))
labels[a] = label
if a not in self.edges:
continue # e.g., root
parent = self.edges[a]
childcols[parent].add((row, column))
minchildcol[parent] = min(minchildcol.get(parent, column), column)
maxchildcol[parent] = max(maxchildcol.get(parent, column), column)
# bottom up level order traversal
for row in sorted(matrix, reverse=True):
noderows = [[''.center(maxnodewith[col]) for col in range(maxcol + 1)]
for _ in range(maxnodeheight[row])]
branchrow = [''.center(maxnodewith[col]) for col in range(maxcol + 1)]
for col in matrix[row]:
n = matrix[row][col]
node = self.nodes[n]
text = labels[n]
if isinstance(node, Tree):
# draw horizontal branch towards children for this node
if n in minchildcol and minchildcol[n] < maxchildcol[n]:
i, j = minchildcol[n], maxchildcol[n]
a, b = (maxnodewith[i] + 1) // 2 - 1, maxnodewith[j] // 2
branchrow[i] = ((' ' * a) + leftcorner).ljust(
maxnodewith[i], horzline)
branchrow[j] = (rightcorner + (' ' * b)).rjust(
maxnodewith[j], horzline)
for i in range(minchildcol[n] + 1, maxchildcol[n]):
if i == col and any(
a == i for _, a in childcols[n]):
line = cross
elif i == col:
line = bottom
elif any(a == i for _, a in childcols[n]):
line = tee
else:
line = horzline
branchrow[i] = line.center(maxnodewith[i], horzline)
else: # if n and n in minchildcol:
branchrow[col] = crosscell(branchrow[col])
text = [a.center(maxnodewith[col]) for a in text]
color = nodecolor if isinstance(node, Tree) else leafcolor
if isinstance(node, Tree) and node.label().startswith('-'):
color = funccolor
if html:
text = [escape(a) for a in text]
if n in self.highlight:
text = ['<font color=%s>%s</font>' % (
color, a) for a in text]
elif ansi and n in self.highlight:
text = ['\x1b[%d;1m%s\x1b[0m' % (
ANSICOLOR[color], a) for a in text]
for x in range(maxnodeheight[row]):
# draw vertical lines in partially filled multiline node
# labels, but only if it's not a frontier node.
noderows[x][col] = (text[x] if x < len(text)
else (vertline if childcols[n] else ' ').center(
maxnodewith[col], ' '))
# for each column, if there is a node below us which has a parent
# above us, draw a vertical branch in that column.
if row != max(matrix):
for n, (childrow, col) in self.coords.items():
if (n > 0 and
self.coords[self.edges[n]][0] < row < childrow):
branchrow[col] = crosscell(branchrow[col])
if col not in matrix[row]:
for noderow in noderows:
noderow[col] = crosscell(noderow[col])
branchrow = [a + ((a[-1] if a[-1] != ' ' else b[0]) * nodedist)
for a, b in zip(branchrow, branchrow[1:] + [' '])]
result.append(''.join(branchrow))
result.extend((' ' * nodedist).join(noderow)
for noderow in reversed(noderows))
return '\n'.join(reversed(result)) + '\n'
def svg(self, nodecolor='blue', leafcolor='red', funccolor='green'):
"""
:return: SVG representation of a tree.
"""
fontsize = 12
hscale = 40
vscale = 25
hstart = vstart = 20
width = max(col for _, col in self.coords.values())
height = max(row for row, _ in self.coords.values())
result = ['<svg version="1.1" xmlns="http://www.w3.org/2000/svg" '
'width="%dem" height="%dem" viewBox="%d %d %d %d">' % (
width * 3,
height * 2.5,
-hstart, -vstart,
width * hscale + 3 * hstart,
height * vscale + 3 * vstart)
]
children = defaultdict(set)
for n in self.nodes:
if n:
children[self.edges[n]].add(n)
# horizontal branches from nodes to children
for node in self.nodes:
if not children[node]:
continue
y, x = self.coords[node]
x *= hscale
y *= vscale
x += hstart
y += vstart + fontsize // 2
childx = [self.coords[c][1] for c in children[node]]
xmin = hstart + hscale * min(childx)
xmax = hstart + hscale * max(childx)
result.append(
'\t<polyline style="stroke:black; stroke-width:1; fill:none;" '
'points="%g,%g %g,%g" />' % (xmin, y, xmax, y))
result.append(
'\t<polyline style="stroke:black; stroke-width:1; fill:none;" '
'points="%g,%g %g,%g" />' % (x, y, x, y - fontsize // 3))
# vertical branches from children to parents
for child, parent in self.edges.items():
y, _ = self.coords[parent]
y *= vscale
y += vstart + fontsize // 2
childy, childx = self.coords[child]
childx *= hscale
childy *= vscale
childx += hstart
childy += vstart - fontsize
result += [
'\t<polyline style="stroke:white; stroke-width:10; fill:none;"'
' points="%g,%g %g,%g" />' % (childx, childy, childx, y + 5),
'\t<polyline style="stroke:black; stroke-width:1; fill:none;"'
' points="%g,%g %g,%g" />' % (childx, childy, childx, y),
]
# write nodes with coordinates
for n, (row, column) in self.coords.items():
node = self.nodes[n]
x = column * hscale + hstart
y = row * vscale + vstart
if n in self.highlight:
color = nodecolor if isinstance(node, Tree) else leafcolor
if isinstance(node, Tree) and node.label().startswith('-'):
color = funccolor
else:
color = 'black'
result += ['\t<text style="text-anchor: middle; fill: %s; '
'font-size: %dpx;" x="%g" y="%g">%s</text>' % (
color, fontsize, x, y,
escape(node.label() if isinstance(node, Tree)
else node))]
result += ['</svg>']
return '\n'.join(result)
def test():
"""Do some tree drawing tests."""
def print_tree(n, tree, sentence=None, ansi=True, **xargs):
print()
print('{0}: "{1}"'.format(n, ' '.join(sentence or tree.leaves())))
print(tree)
print()
drawtree = TreePrettyPrinter(tree, sentence)
try:
print(drawtree.text(unicodelines=ansi, ansi=ansi, **xargs))
except (UnicodeDecodeError, UnicodeEncodeError):
print(drawtree.text(unicodelines=False, ansi=False, **xargs))
from nltk.corpus import treebank
for n in [0, 1440, 1591, 2771, 2170]:
tree = treebank.parsed_sents()[n]
print_tree(n, tree, nodedist=2, maxwidth=8)
print()
print('ASCII version:')
print(TreePrettyPrinter(tree).text(nodedist=2))
tree = Tree.fromstring(
'(top (punct 8) (smain (noun 0) (verb 1) (inf (verb 5) (inf (verb 6) '
'(conj (inf (pp (prep 2) (np (det 3) (noun 4))) (verb 7)) (inf (verb 9)) '
'(vg 10) (inf (verb 11)))))) (punct 12))', read_leaf=int)
sentence = ('Ze had met haar moeder kunnen gaan winkelen ,'
' zwemmen of terrassen .'.split())
print_tree('Discontinuous tree', tree, sentence, nodedist=2)
__all__ = ['TreePrettyPrinter']
if __name__ == '__main__':
test()
|
[
"timclerico@gmail.com"
] |
timclerico@gmail.com
|
8cf8a773e5453f17c5d821de53ab612575e70022
|
80bce7b7cfffb29dc4464d5556476de621311945
|
/Hackerrank/maxToys.py
|
fa24460afbc0b105587abeccd8e80fac8a3609a8
|
[] |
no_license
|
dstadz/leetCode
|
7f00d8c00282519a1aaa5be5e8d26740d9bfe1a7
|
9fc41e52a17f9ab6343a30cc363a1325f0acd336
|
refs/heads/master
| 2021-03-20T01:28:58.432027
| 2020-12-03T17:58:13
| 2020-12-03T17:58:13
| 247,162,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the maximumToys function below.
def maximumToys(prices, k):
    options = []
    # keep only the toys that fit within the budget on their own
    for p in prices:
        if p <= k:
            options.append(p)
    # if every affordable toy fits at once, buy them all
    if sum(options) <= k:
        return len(options)
    tot = 0
    cnt = 0
    options.sort()
    # greedily buy the cheapest toys until the budget is exceeded;
    # the last toy added is the one that broke the budget
    while tot <= k:
        tot += options[cnt]
        cnt += 1
    return cnt - 1
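# Illustrative check (added for clarity, not part of the original submission),
# using the sample case from the problem statement as an assumption:
#     maximumToys([1, 12, 5, 111, 200, 1000, 10], 50)  ->  4
# (the four cheapest toys 1 + 5 + 10 + 12 = 28 fit within the budget of 50).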
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = input().split()
n = int(nk[0])
k = int(nk[1])
prices = list(map(int, input().rstrip().split()))
result = maximumToys(prices, k)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"danstad2012@gmail.com"
] |
danstad2012@gmail.com
|
d7aeba3928b28d5b42b67856008adb2434e7790d
|
86cfd03e0fe96246bc2ed3e6d196b1700fadb991
|
/simple_1d.py
|
99de3c50ce2896d93b374822361294d584c04bd1
|
[] |
no_license
|
ttagu99/speech_recognition
|
6911e87bb9d5d92a9451e2c213c87ce43f79a276
|
ef7626b5b3f0969367eb418caf153aa889df5e79
|
refs/heads/master
| 2020-04-25T03:48:30.636118
| 2019-02-25T11:03:23
| 2019-02-25T11:03:23
| 172,489,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,509
|
py
|
#-*- coding: utf-8 -*-
import os
import re
from glob import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
os.environ["CUDA_VISIBLE_DEVICES"]="2"
POSSIBLE_LABELS = 'yes no up down left right on off stop go silence unknown'.split()
id2name = {i: name for i, name in enumerate(POSSIBLE_LABELS)}
name2id = {name: i for i, name in id2name.items()}
root_dir = 'I:/imgfolder/voice/'
import random
import tensorflow as tf
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Input, Conv1D, AvgPool1D, MaxPooling1D, Activation, BatchNormalization, GlobalAveragePooling1D, GlobalMaxPool1D, concatenate, Dense, Dropout
from tensorflow.python.keras.optimizers import RMSprop, SGD
from tensorflow.python.keras.utils import to_categorical
from tensorflow.python.keras import layers
from tensorflow.python.keras._impl.keras import backend as K
from keras.utils.training_utils import multi_gpu_model
from keras.utils.generic_utils import get_custom_objects
from keras.models import Sequential
def identity_block_1d(input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
conv_name_base = 'res_1d' + str(stage) + block + '_branch'
bn_name_base = 'bn_1d' + str(stage) + block + '_branch'
x = Conv1D(filters1, (1), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization( name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv1D(filters2, kernel_size,
padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv1D(filters3, (1), name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
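# Illustrative sketch (not part of the original training script): how the 1-D
# identity block above could be wired into a small model. The function name,
# input length (16000 samples) and channel counts are assumptions for the
# example only; the input channel count must equal filters3 (here 64) so the
# residual addition is shape-compatible. The helper is defined but never called.
def _example_identity_block_model():
    x_in = Input(shape=(16000, 64))
    x_out = identity_block_1d(x_in, kernel_size=3,
                              filters=[16, 16, 64], stage=1, block='a')
    return Model(inputs=x_in, outputs=x_out)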
def conv_block_1d(input_tensor, kernel_size, filters, stage, block, strides=(2)):
"""A block that has a conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
Note that from stage 3, the first conv layer at main path is with strides=(2,2)
And the shortcut should have strides=(2,2) as well
"""
filters1, filters2, filters3 = filters
conv_name_base = 'res_1d' + str(stage) + block + '_branch'
bn_name_base = 'bn_1d' + str(stage) + block + '_branch'
x = Conv1D(filters1, (1), strides=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv1D(filters2, kernel_size, padding='same',
name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv1D(filters3, (1), name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
shortcut = Conv1D(filters3, (1), strides=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
def load_data(data_dir):
""" Return 2 lists of tuples:
[(class_id, user_id, path), ...] for train
[(class_id, user_id, path), ...] for validation
"""
# Just a simple regexp for paths with three groups:
# prefix, label, user_id
pattern = re.compile("(.+\/)?(\w+)\/([^_]+)_.+wav")
all_files = glob(os.path.join(data_dir, 'train/audio/*/*wav'))
for idx, file in enumerate(all_files):
all_files[idx] = file.replace('\\','/')
with open(os.path.join(data_dir, 'train/validation_list.txt'), 'r') as fin:
validation_files = fin.readlines()
valset = set()
for entry in validation_files:
r = re.match(pattern, entry)
if r:
valset.add(r.group(3))
possible = set(POSSIBLE_LABELS)
train, val = [], []
for entry in all_files:
r = re.match(pattern, entry)
if r:
label, uid = r.group(2), r.group(3)
if label == '_background_noise_':
label = 'silence'
if label not in possible:
label = 'unknown'
label_id = name2id[label]
sample = (label, label_id, uid, entry)
if uid in valset:
val.append(sample)
else:
train.append(sample)
print('There are {} train and {} val samples'.format(len(train), len(val)))
columns_list = ['label', 'label_id', 'user_id', 'wav_file']
train_df = pd.DataFrame(train, columns = columns_list)
valid_df = pd.DataFrame(val, columns = columns_list)
return train_df, valid_df
train_df, valid_df = load_data(root_dir)
silence_files = train_df[train_df.label == 'silence']
# balancing imbalance data
train_df = train_df[train_df.label != 'silence']
train_unknown = train_df[train_df.label == 'unknown']
val_unknown = valid_df[valid_df.label == 'unknown']
used_train_df = train_df[train_df.label != 'unknown']
used_valid_df = valid_df[valid_df.label != 'unknown']
train_list = []
train_list.append(train_unknown)
for idx in range(18):
train_list.append(used_train_df)
for idx in range(6000):
train_list.append(silence_files)
train_df = pd.concat(train_list, ignore_index=True)
val_list = []
val_list.append(val_unknown)
for idx in range(16):
val_list.append(used_valid_df)
for idx in range(700):
val_list.append(silence_files)
valid_df = pd.concat(val_list, ignore_index=True)
train_pivot = train_df.pivot_table(index='label',aggfunc='count')
print('Train Data Check')
print(train_pivot)
valid_pivot = valid_df.pivot_table(index='label',aggfunc='count')
print('valid Data Check')
print(valid_pivot)
from scipy.io import wavfile
def normalize_wav(wav):
wav_mean = np.mean(wav)
wav = wav - wav_mean
wav_max = max(abs(wav))
if wav_max == 0 : # zero divide error
wav_max = 0.01
wav = wav.astype(np.float32)/wav_max
return wav
def read_wav_file(fname):
_, wav = wavfile.read(fname)
wav = normalize_wav(wav)
#wav = wav.astype(np.float32) / np.iinfo(np.int16).max
return wav
def pre_emphasis(wav):
pre_emphasis = np.random.uniform(0.95,0.97)
ret_wav = np.append(wav[0], wav[1:] - pre_emphasis * wav[:-1])
wav_max = max(abs(ret_wav))
ret_wav = ret_wav/wav_max
return ret_wav
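# Illustrative note (added for clarity, not in the original script):
# pre_emphasis applies the standard first-order high-pass filter
#     y[n] = x[n] - a * x[n-1]
# with a drawn from [0.95, 0.97], then rescales by the peak so the output stays
# in [-1, 1]. For example, a constant input [1, 1, 1] with a = 0.95 becomes
# [1.0, 0.05, 0.05] before the peak rescaling.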
def swish(x):
return (K.sigmoid(x) * x)
get_custom_objects().update({'swish': Activation(swish)})
silence_data = np.concatenate([read_wav_file(x) for x in silence_files.wav_file.values])
from scipy.signal import stft
def center_align_resize(wav, length):
if len(wav) > length: #center crop
i = int((len(wav) - length)/2)
wav = wav[i:(i+length)]
elif len(wav) < length: #silence add side
rem_len = length - len(wav)
i = int((len(silence_data) - rem_len)/2)
silence_part = silence_data[i:(i+length)]
j = int(rem_len/2)
silence_part_left = silence_part[0:j]
silence_part_right = silence_part[j:rem_len]
wav = np.concatenate([silence_part_left, wav, silence_part_right])
return wav
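# Illustrative note (added for clarity, not in the original script):
# center_align_resize either center-crops a long clip or pads a short clip on
# both sides with a slice of the background-noise buffer. For example, a
# 20000-sample clip resized to length=16000 keeps samples 2000..18000, while a
# 12000-sample clip gets roughly 2000 noise samples prepended and appended.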
def process_wav_file(fname, phase, dim='1D', optlabel = None):
wav = read_wav_file(fname)
    # time stretching
if phase == 'TRAIN':
time_strech_flag = np.random.randint(2)
if time_strech_flag == 1:
ts_ratio = np.random.uniform(0.8,1.2)
wav = np.interp(np.arange(0, len(wav), ts_ratio), np.arange(0, len(wav)), wav)
    L = 19200 # 1.2 sec working buffer (assuming 16 kHz audio) before cropping
    CL = 16000 # 1 sec crop length
if phase == 'TRAIN' :
if len(wav) > L:
i = np.random.randint(0, len(wav) - L)
wav = wav[i:(i+L)]
elif len(wav) < L:
rem_len = L - len(wav)
i = np.random.randint(0, len(silence_data) - rem_len)
silence_part = silence_data[i:(i+L)]
j = np.random.randint(0, rem_len)
silence_part_left = silence_part[0:j]
silence_part_right = silence_part[j:rem_len]
wav = np.concatenate([silence_part_left, wav, silence_part_right])
    else:
        # the resized clip must be assigned back, otherwise the resize is lost
        wav = center_align_resize(wav, L)
# crop
if phase == 'TRAIN':
i = np.random.randint(0,L-CL)
wav = wav[i:(i+CL)]
else:
i = int((L-CL)/2)
wav = wav[i:(i+CL)]
    # noise add
if phase == 'TRAIN':
noise_add_flag = np.random.randint(2)
if noise_add_flag == 1:
noise_ratio = np.random.uniform(0.0,0.5)
i = np.random.randint(0, len(silence_data) - CL)
silence_part = silence_data[i:(i+CL)]
org_max = max(wav)
silence_max = max(silence_part)
silence_part = silence_part * (org_max/silence_max)
wav = wav*(1.0-noise_ratio) + silence_part * noise_ratio
if phase == 'TRAIN':
white_noise_add_flag = np.random.randint(2)
if white_noise_add_flag == 1:
wn_ratio = np.random.uniform(0.0,0.1)
wn = np.random.randn(len(wav))
wav = wav + wn_ratio*wn
#if phase == 'TRAIN':
# pre_emphasis_flag = np.random.randint(2)
# if pre_emphasis_flag == 1:
# wav = pre_emphasis(wav)
# crop
if phase == 'TRAIN' and optlabel != None and optlabel != 'silence':
add_audio_flag = np.random.randint(2)
if add_audio_flag == 1:
type = np.random.randint(3)
if type == 0 : # equal label
if optlabel == 'unknown': # unkown + unknown
add_wav_file = train_unknown.sample().wav_file.values[0]
add_wav = read_wav_file(add_wav_file)
add_wav = center_align_resize(add_wav,CL)
wav = wav + add_wav
else:
add_wav_file = used_train_df[used_train_df.label == optlabel].sample().wav_file.values[0] # used + equal used
add_wav = read_wav_file(add_wav_file)
add_wav = center_align_resize(add_wav,CL)
wav = wav + add_wav
if type ==1 : # used but un equal label -> unkown or ratio depend?
pass
if type ==2: # add unkown label -> equal label used + unkown ratio
add_wav_file = train_unknown.sample().wav_file.values[0]
add_wav = read_wav_file(add_wav_file)
add_wav = center_align_resize(add_wav,CL)
mix_ratio = np.random.uniform(0.1,0.4)
wav = wav *(1-mix_ratio) + add_wav * mix_ratio
wav = normalize_wav(wav)
#return np.stack([phase, amp], axis = 2)
if dim=='1D':
ret_wav = np.reshape(wav,(CL,1))
return ret_wav
elif dim == '2D':
specgram = stft(wav, CL, nperseg = 400, noverlap = 240, nfft = 512, padded = False, boundary = None)
phase = np.angle(specgram[2]) / np.pi
amp = np.log1p(np.abs(specgram[2]))
return np.stack([phase, amp], axis = 2)
else : # combi
ret_wav = np.reshape(wav,(CL,1))
specgram = stft(wav, CL, nperseg = 400, noverlap = 240, nfft = 512, padded = False, boundary = None)
phase = np.angle(specgram[2]) / np.pi
amp = np.log1p(np.abs(specgram[2]))
return np.stack([phase, amp], axis = 2), ret_wav
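# Illustrative note (added for clarity, not in the original script): the return
# shape of process_wav_file depends on the `dim` argument. For the 1-D path used
# by this file,
#     x = process_wav_file('clip.wav', phase='TEST', dim='1D')
# yields an array of shape (16000, 1); dim='2D' yields the stacked STFT
# phase/log-amplitude image, and dim='combi' returns both. 'clip.wav' is a
# placeholder filename.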
def train_generator(train_batch_size):
while True:
this_train = train_df.groupby('label_id').apply(lambda x: x.sample(n = 2000))
shuffled_ids = random.sample(range(this_train.shape[0]), this_train.shape[0])
for start in range(0, len(shuffled_ids), train_batch_size):
x_batch = []
y_batch = []
end = min(start + train_batch_size, len(shuffled_ids))
i_train_batch = shuffled_ids[start:end]
for i in i_train_batch:
x_batch.append(process_wav_file(this_train.wav_file.values[i],phase='TRAIN', optlabel =this_train.label.values[i]))
y_batch.append(this_train.label_id.values[i])
x_batch = np.array(x_batch)
y_batch = to_categorical(y_batch, num_classes = len(POSSIBLE_LABELS))
yield x_batch, y_batch
def valid_generator(val_batch_size):
while True:
ids = list(range(valid_df.shape[0]))
for start in range(0, len(ids), val_batch_size):
x_batch = []
y_batch = []
end = min(start + val_batch_size, len(ids))
i_val_batch = ids[start:end]
for i in i_val_batch:
x_batch.append(process_wav_file(valid_df.wav_file.values[i],phase='TRAIN'))
y_batch.append(valid_df.label_id.values[i])
x_batch = np.array(x_batch)
y_batch = to_categorical(y_batch, num_classes = len(POSSIBLE_LABELS))
yield x_batch, y_batch
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
x_in_1d = Input(shape = (16000,1))
x_1d = BatchNormalization(name = 'batchnormal_1d_in')(x_in_1d)
for i in range(9):
name = 'step'+str(i)
x_1d = Conv1D(8*(2 ** i), (3),padding = 'same', name = 'conv'+name+'_1')(x_1d)
x_1d = BatchNormalization(name = 'batch'+name+'_1')(x_1d)
x_1d = Activation('relu')(x_1d)
x_1d = Conv1D(8*(2 ** i), (3),padding = 'same', name = 'conv'+name+'_2')(x_1d)
x_1d = BatchNormalization(name = 'batch'+name+'_2')(x_1d)
x_1d = Activation('relu')(x_1d)
x_1d = MaxPooling1D((2), padding='same')(x_1d)
x_1d = Conv1D(1024, (1),name='last1024')(x_1d)
x_1d_branch_1 = GlobalAveragePooling1D()(x_1d)
x_1d_branch_2 = GlobalMaxPool1D()(x_1d)
x_1d = concatenate([x_1d_branch_1, x_1d_branch_2])
x_1d = Dense(1024, activation = 'relu', name= 'dense1024')(x_1d)
x_1d = Dropout(0.2)(x_1d)
x_1d = Dense(len(POSSIBLE_LABELS), activation = 'softmax',name='cls_1d')(x_1d)
fine_tune_weight = '1dcnn_last1024_noiseadd_ts_mul_balance_inputnormal_submean_abs_whitenadd_sgd_name.hdf5'
#weight_name = '1dcnn_last1024_noiseadd_ts_7res_allcon_balance_inputnormal_submean_abs_whitenadd_dropall_sgd.hdf5'
weight_name = 'simple_1d_mixset.hdf5'
# the results from the gradient updates on the CPU
#with tf.device("/cpu:0"):
model = Model(inputs = x_in_1d, outputs = x_1d)
# FINE TUNE
#model.load_weights(root_dir + 'weights/'+ fine_tune_weight, by_name=True)
#model = multi_gpu_model(model, gpus=2)
#opt = SGD(lr = 0.01, momentum = 0.9, decay = 0.000001)
#opt = RMSprop(lr = 0.00001)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
from keras_tqdm import TQDMCallback
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard
batch_size = 128
callbacks = [EarlyStopping(monitor='val_loss',
patience=7,
verbose=1,
min_delta=0.00001,
mode='min'),
ReduceLROnPlateau(monitor='val_loss',
factor=0.1,
patience=4,
verbose=1,
epsilon=0.0001,
mode='min'),
ModelCheckpoint(monitor='val_loss',
filepath=root_dir + 'weights/' + weight_name,
save_best_only=True,
save_weights_only=True,
mode='min') ,
TQDMCallback(),
TensorBoard(log_dir=root_dir+ weight_name.split('.')[0], histogram_freq=0, write_graph=True, write_images=True)
]
history = model.fit_generator(generator=train_generator(batch_size),
steps_per_epoch=int((train_df.shape[0]/batch_size)/18),#344,
epochs=50,
verbose=2,
callbacks=callbacks,
validation_data=valid_generator(batch_size),
validation_steps=int(np.ceil(valid_df.shape[0]/batch_size)))
model.load_weights(root_dir + 'weights/'+ weight_name)
test_paths = glob(os.path.join(root_dir , 'test/audio/*wav'))
def get_test_set_1d(path, tta=1):
if tta ==1:
x_batch = []
x_batch.append(process_wav_file(path,phase='TEST',dim='1D'))
x_batch = np.array(x_batch)
return x_batch
def get_test_set_2d(path, tta=1):
if tta ==1:
x_batch = []
x_batch.append(process_wav_file(path,phase='TEST',dim='2D'))
x_batch = np.array(x_batch)
return x_batch
def get_test_set_combi(path, tta=1):
if tta ==1:
x_batch = []
x_batch_1d = []
x2d, x1d = process_wav_file(path,phase='TEST',dim='combi')
x_batch.append(x2d)
x_batch_1d.append(x1d)
x_batch = np.array(x_batch)
x_batch_1d = np.array(x_batch_1d)
return [x_batch, x_batch_1d]
subfile = open(root_dir + weight_name +'_sub'+ '.csv', 'w')
probfile = open(root_dir + weight_name +'_prob'+ '.csv', 'w')
subfile.write('fname,label\n')
probfile.write('fname,yes,no,up,down,left,right,on,off,stop,go,silence,unknown\n')
for idx, path in enumerate(test_paths):
fname = path.split('\\')[-1]
probs = model.predict(get_test_set_1d(path),batch_size=1)
label = id2name[np.argmax(probs)]
subfile.write('{},{}\n'.format(fname,label))
probfile.write(fname+',')
print (str(idx) +'/' + str(len(test_paths)))
for p, prob in enumerate(probs[0]):
probfile.write(str(prob))
if p == 11:
probfile.write('\n')
else:
probfile.write(',')
|
[
"ttagu99@gmail.com"
] |
ttagu99@gmail.com
|
db6fd0c315542a0507e1cf52c55d26f0f0669f12
|
7f01d72976f42c0165bf4cc6dee935a31a04cdb6
|
/src/config/settings/staging.py
|
dad01fd5ec1547d9f86181147e83dc8aca4edbe8
|
[] |
no_license
|
aasilbek/vin-decode
|
3263e98e6ed8966375377050842f7f4a00acaab0
|
266ebcd0d4baeb2b13176d4c47f7c227babd93be
|
refs/heads/main
| 2023-07-26T03:41:47.182336
| 2021-09-06T06:30:40
| 2021-09-06T06:30:40
| 403,267,819
| 0
| 0
| null | 2021-09-05T20:39:39
| 2021-09-05T09:34:45
|
Python
|
UTF-8
|
Python
| false
| false
| 65
|
py
|
from .base import * # noqa
ALLOWED_HOSTS = ["*"]
DEBUG = False
|
[
"asilbek@novalab.uz"
] |
asilbek@novalab.uz
|
9dfdd3b92ece26371c870d84de248f5f7116abc9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02695/s405471285.py
|
c0ad507ae3a6ab6de07f9a4d9c26cf42ebb312ac
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
def resolve():
N, M, Q = list(map(int, input().split()))
Q = [list(map(int, input().split())) for _ in range(Q)]
import itertools
maxpoint = 0
for seq in itertools.combinations_with_replacement(range(1, M+1), N):
point = 0
for a, b, c, d in Q:
if seq[b-1] - seq[a-1] == c:
point += d
maxpoint = max(maxpoint, point)
print(maxpoint)
if '__main__' == __name__:
resolve()
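# Illustrative note (added for clarity, not part of the original solution): the
# brute force enumerates every non-decreasing sequence A of length N with values
# in 1..M via itertools.combinations_with_replacement, and for each sequence adds
# d for every query (a, b, c, d) with A[b] - A[a] == c, keeping the best total.
# For example, with N=3, M=4 and the single query (1, 3, 3, 100) the optimum is
# 100, achieved e.g. by A = (1, 1, 4).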
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
62489cf6bd1db233798d3235efa6a361b6c9fcfc
|
a02f2373e00f2d02ae3c3103467f8b0f950296e9
|
/MapReduce/mapper.py
|
1c5edab7b706d307e869bc4e4ebe986e083a9313
|
[] |
no_license
|
hayltonbernardes22/Data_analysis_study
|
3621dc6f92c7ebcd447f5f0392d81aa5e92fbb72
|
aec88f4f580dcf620736b643a93ab0d9ce32ad39
|
refs/heads/master
| 2023-08-27T19:08:50.073282
| 2021-11-05T20:07:53
| 2021-11-05T20:07:53
| 299,105,977
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
"""mapper.py"""
import sys
# input comes from STDIN (standard input)
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# split the line into words
words = line.split()
# increase counters
for word in words:
# write the results to STDOUT (standard output);
# what we output here will be the input for the
# Reduce step, i.e. the input for reducer.py
#
# tab-delimited; the trivial word count is 1
        print('%s\t%s' % (word, 1))
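# Illustrative sketch (added for clarity, not part of the original mapper): under
# Hadoop Streaming this script reads lines from stdin and emits one tab-separated
# (word, 1) pair per token. For example, the input line
#     the quick brown fox the
# produces the pairs 'the\t1', 'quick\t1', 'brown\t1', 'fox\t1', 'the\t1',
# which the shuffle phase then groups by key before handing them to reducer.py.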
|
[
"noreply@github.com"
] |
hayltonbernardes22.noreply@github.com
|
633e7c045409b3b10cab965303d655ecbd4338e7
|
f47cd722e457c5ace3ae11717c6535b06e91a2b8
|
/Chaper02/U02_Ex09_ConvertFtoC.py
|
fffaab6b5306c5d5d545ddd35a75c837aee589d2
|
[] |
no_license
|
sebastians22/COOP2018
|
659cd1113cd5e2062866c1833768811dbd413c3b
|
c8ce2a0c24b8065e2ac4a07148b8dadca7d5d173
|
refs/heads/master
| 2020-03-28T06:36:50.829082
| 2019-03-01T05:27:16
| 2019-03-01T05:27:16
| 147,847,342
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
# U02_Ex09_ConvertFtoC.py
#
# Author: Sebastian Schaefer
# Course: Coding for OOP
# Section: A2
# Date: 04 Sep 2018
# IDE: PyCharm
#
# Assignment Info
# Exercise: 08
# Source: Python Programming
# Chapter: 02
#
# Program Description
#
# This program converts F to C
#
#
#
# Algorithm (pseudocode)
# Print program introduction
# Get °F from user and assign to fahrenheit
# Calculate °C using (°F - 32) * 5/9
# Print the result
#
def main():
print("Click on the bottom box, and input a fahrenheit degrees. After you input a number press enter, and the celsius degress will appear")
# Print program introduction
print("This program converts temperature from fahrenheit to celsius")
    # Get °F from user and assign to fahrenheit (float() avoids eval() on raw input)
    fahrenheit = float(input("Enter °F to convert: "))
celsius = (fahrenheit - 32) * 5/9
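    # Worked example (added for clarity): 212 °F -> (212 - 32) * 5/9 = 100 °C, and 32 °F -> 0 °C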
    # Calculate °C using (°F - 32) * 5/9 and assign to celsius
# Print °F
print(fahrenheit, "°F is equivalent to ", celsius, "°C")
print("re run the program do use it again")
main()
|
[
"sebastian.s22@student.parishepiscopal.org"
] |
sebastian.s22@student.parishepiscopal.org
|
2c8d42fdfbbb3bb1764d33bc816b9153cb402755
|
a2d38faaadf89873271fc43c9f44e4e4299a9844
|
/BE/app/audio_prediction_app.py
|
cc07f09baffa0a02a7ab996823031a72dbba8892
|
[] |
no_license
|
fsoft-ailab/aicovidvn-web
|
6bc85c378ed984edfb928a33ad23aa4e229cec13
|
f626eb168ef61cf1bfdf986b5c16937015b555ef
|
refs/heads/main
| 2023-06-25T09:35:48.155538
| 2021-07-31T10:07:02
| 2021-07-31T10:07:02
| 387,201,530
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,622
|
py
|
import json
import time
import base64
from flask import Blueprint, request, jsonify
from common import url_constant, param_constant, constant
from werkzeug.exceptions import BadRequest, InternalServerError
from utils import log_service, utils, postgres_util, s3_util
from services.audio_collection_services import AudioCollectionServices
from services.audio_prediction_services import AudioPredCNNService
from services.audio_checking_services import AudioCheckServices
from instance import environment
from multiprocessing import Queue
mod = Blueprint('audio_prediction_app', __name__)
# Store flag
q = Queue()
@mod.route(url_constant.CHECK_EMAIL, methods=['POST', 'GET'])
def check_email():
try:
is_base64_email = False
data = request.get_data()
my_json = data.decode('utf8')
json_data = json.loads(my_json)
email = json_data["email"]
if '@' not in email:
is_base64_email = True
if not is_base64_email:
email = email.lower()
db = postgres_util.PostgresDB()
query = "SELECT * FROM collections WHERE email= %s"
cursor = db.execute_query_with_data(query, data=(email,))
data = cursor.fetchone()
if data is None:
response = {
'email': email,
'status_check': 'not exist',
'health_status': 'None'
}
else:
response = {
'email': email,
'status_check': 'exist',
'health_status': data[7],
'created_time': data[8].timestamp() * 1000,
'updated_time': data[9].timestamp() * 1000
}
return jsonify(response)
except:
return jsonify({
'check status': {
'status_code': 500, 'message': 'Server internal error'
}})
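# Illustrative note (added for clarity, not part of the original handler):
# check_email expects a JSON body such as {"email": "user@example.com"} and
# answers either with
#     {"email": ..., "status_check": "not exist", "health_status": "None"}
# or with the "exist" variant that also carries health_status and the
# created_time/updated_time timestamps. The address user@example.com is a
# placeholder.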
@mod.route(url_constant.AUDIO_PREDICTION_VGG16_V1, methods=['POST'])
def audio_prediction_vgg16_v1():
submit_token = request.args.get(param_constant.PARAM_SUBMIT_ID)
submit_time = request.args.get(param_constant.PARAM_SUBMIT_TIME)
cough_sound = request.files.get(param_constant.PARAM_COUGH_SOUND)
mouth_sound = request.files.get(param_constant.PARAM_MOUTH_SOUND)
nose_sound = request.files.get(param_constant.PARAM_NOSE_SOUND)
email = request.form.get(param_constant.PARAM_EMAIL)
info = request.form.get(param_constant.PARAM_INFO)
if cough_sound is None and mouth_sound is None and nose_sound is None:
raise BadRequest()
# Collect data
collect_ser = AudioCollectionServices()
submit_id = collect_ser.collect(info, cough_sound, mouth_sound, nose_sound)
base_dir = "{}/{}".format(constant.TMP_DIR, submit_id)
base_token_dir = "{}/{}".format(constant.RESULT_DIR, submit_token)
try:
# Create directory if not exist
utils.create_directory(base_dir)
audio_service = AudioPredCNNService(max_period=10, submit_id=submit_id, submit_time=submit_time)
s3_cough_dir = None
s3_mouth_dir = None
s3_nose_dir = None
cough_predict_result = ''
mouth_predict_result = ''
nose_predict_result = ''
if cough_sound is not None:
cough_sound_dir = "{}/{}_original.wav".format(base_dir, "cough")
cough_save_dir = f"{base_token_dir}/{constant.COUGH}_original.wav"
cough_predict_result = audio_service.predict(cough_sound_dir, type="cough")
s3_util.upload_file(cough_sound_dir, cough_save_dir)
s3_cough_dir = s3_util.generate_url(cough_save_dir)
if mouth_sound is not None:
mouth_sound_dir = "{}/{}_original.wav".format(base_dir, "mouth")
mouth_save_dir = f"{base_token_dir}/{constant.MOUTH}_original.wav"
mouth_predict_result = audio_service.predict(mouth_sound_dir, type="mouth")
s3_util.upload_file(mouth_sound_dir, mouth_save_dir)
s3_mouth_dir = s3_util.generate_url(mouth_save_dir)
if nose_sound is not None:
nose_sound_dir = "{}/{}_original.wav".format(base_dir, "nose")
nose_save_dir = f"{base_token_dir}/{constant.NOSE}_original.wav"
nose_predict_result = audio_service.predict(nose_sound_dir, type="nose")
s3_util.upload_file(nose_sound_dir, nose_save_dir)
s3_nose_dir = s3_util.generate_url(nose_save_dir)
result = json.dumps({'cough_result': cough_predict_result,
'mouth_result': mouth_predict_result,
'nose_result': nose_predict_result})
db = postgres_util.PostgresDB()
query = "INSERT INTO results(id,cough,breathe_nose,breathe_mouth,results,email,info) VALUES (%s,%s,%s,%s,%s,%s,%s)"
db.execute_query_with_data(query,
data=(
submit_token, s3_cough_dir, s3_nose_dir, s3_mouth_dir, str(result), email, info))
db.close_connection()
utils.remove_folder(base_dir)
return result
except:
utils.remove_folder(base_dir)
return jsonify({
'check status': {
'status_code': 500, 'message': 'Server internal error'
}})
@mod.route(url_constant.AUDIO_VISUALIZATION_VGG16_V1, methods=['GET'])
def audio_visualization_vgg16_v1():
try:
# Handle multi request
if q.qsize() > 10:
return json.dumps({'server_busy': True})
q.put(constant.LOCK)
submit_id = request.args.get(param_constant.PARAM_SUBMIT_ID)
submit_time = request.args.get(param_constant.PARAM_SUBMIT_TIME)
base_dir = "{}/{}".format(constant.TMP_DIR, submit_id)
# Create directory if not exist
utils.create_directory(base_dir)
cough_sound_dir = "{}/{}_original.wav".format(base_dir, "cough")
cough_save_dir = cough_sound_dir.replace(constant.TMP_DIR, constant.RESULT_DIR)
mouth_sound_dir = "{}/{}_original.wav".format(base_dir, "mouth")
mouth_save_dir = mouth_sound_dir.replace(constant.TMP_DIR, constant.RESULT_DIR)
nose_sound_dir = "{}/{}_original.wav".format(base_dir, "nose")
nose_save_dir = nose_sound_dir.replace(constant.TMP_DIR, constant.RESULT_DIR)
if submit_id is None:
raise BadRequest()
is_cough_existed = s3_util.download_file(cough_save_dir, cough_sound_dir)
is_mouth_existed = s3_util.download_file(mouth_save_dir, mouth_sound_dir)
is_nose_existed = s3_util.download_file(nose_save_dir, nose_sound_dir)
audio_service = AudioPredCNNService(max_period=10, submit_id=submit_id, submit_time=submit_time)
feature_cough_url = None
feature_nose_url = None
feature_mouth_url = None
if is_cough_existed:
feature_cough_image_dir = audio_service.visualize(cough_sound_dir, dest="{}/{}.jpg".format(base_dir, "cough"))
s3_util.upload_file(feature_cough_image_dir, feature_cough_image_dir,
extra_args=constant.S3_IMAGE_EXTRA_PARAM)
feature_cough_url = s3_util.generate_url(feature_cough_image_dir)
if is_mouth_existed:
feature_mouth_image_dir = audio_service.visualize(mouth_sound_dir, dest="{}/{}.jpg".format(base_dir, "mouth"))
s3_util.upload_file(feature_mouth_image_dir, feature_mouth_image_dir,
extra_args=constant.S3_IMAGE_EXTRA_PARAM)
feature_mouth_url = s3_util.generate_url(feature_mouth_image_dir)
if is_nose_existed:
feature_nose_image_dir = audio_service.visualize(nose_sound_dir, dest="{}/{}.jpg".format(base_dir, "nose"))
s3_util.upload_file(feature_nose_image_dir, feature_nose_image_dir,
extra_args=constant.S3_IMAGE_EXTRA_PARAM)
feature_nose_url = s3_util.generate_url(feature_nose_image_dir)
db = postgres_util.PostgresDB()
query = "UPDATE results SET cough_img= %s, breathe_nose_img= %s, breathe_mouth_img= %s WHERE id = %s"
db.execute_query_with_data(query, data=(feature_cough_url, feature_nose_url, feature_mouth_url, submit_id))
db.close_connection()
utils.remove_folder(base_dir)
q.get()
feature_cough_url_presined = s3_util.get_presigned_url_from_original_url(feature_cough_url)
feature_mouth_url_presined = s3_util.get_presigned_url_from_original_url(feature_mouth_url)
feature_nose_url_presined = s3_util.get_presigned_url_from_original_url(feature_nose_url)
return json.dumps({'cough_feature_url': feature_cough_url_presined,
'mouth_feature_url': feature_mouth_url_presined,
'nose_feature_url': feature_nose_url_presined})
except Exception as e:
utils.remove_folder(base_dir)
q.get()
return jsonify({
'check status': {
'status_code': 500, 'message': 'Server internal error'
}})
@mod.route(url_constant.AUDIO_GET_RESULT, methods=['GET'])
def get_results():
submit_id = request.args.get(param_constant.PARAM_SUBMIT_ID)
if submit_id is None:
raise BadRequest()
db = postgres_util.PostgresDB()
query = "SELECT id, breathe_nose, breathe_mouth, cough, results, cough_img, breathe_nose_img, breathe_mouth_img " \
"FROM results " \
"WHERE id= %s"
cursor = db.execute_query(query, (submit_id,))
data = cursor.fetchone()
db.close_connection()
if data is None:
raise BadRequest()
result = {}
result["id"] = data[0]
result["nose"] = s3_util.get_presigned_url_from_original_url(data[1])
result["mouth"] = s3_util.get_presigned_url_from_original_url(data[2])
result["cough"] = s3_util.get_presigned_url_from_original_url(data[3])
result["results"] = json.loads(data[4])
if data[5] is not None:
result['cough_feature_url'] = s3_util.get_presigned_url_from_original_url(data[5])
if data[6] is not None:
result['nose_feature_url'] = s3_util.get_presigned_url_from_original_url(data[6])
if data[7] is not None:
result['mouth_feature_url'] = s3_util.get_presigned_url_from_original_url(data[7])
return json.dumps(result)
@mod.route(url_constant.ADD_FEEDBACK, methods=['GET'])
def get_feedback():
submit_id = request.args.get(param_constant.PARAM_SUSBMIT_ID)
if submit_id is None:
raise BadRequest()
db = postgres_util.PostgresDB()
query = "SELECT id, type FROM feedbacks WHERE id= %s"
    cursor = db.execute_query(query, data=(submit_id,))
data = cursor.fetchone()
db.close_connection()
return ""
@mod.route(url_constant.ADD_FEEDBACK, methods=['POST'])
def add_feedback():
submit_id = request.form.get(param_constant.PARAM_SUBMIT_ID)
type = request.form.get(param_constant.PARAM_TYPE)
if submit_id is None or type is None:
raise BadRequest()
db = postgres_util.PostgresDB()
query = "SELECT id, type FROM feedbacks WHERE id= %s"
cursor = db.execute_query_with_data(query, data=(submit_id,))
data = cursor.fetchone()
if data is None or data[0] is None:
query = "INSERT INTO feedbacks(id, type) VALUES (%s,%s)"
db.execute_query_with_data(query, data=(submit_id, type,))
else:
query = "UPDATE feedbacks SET type= %s WHERE id = %s"
db.execute_query_with_data(query, data=(type, submit_id,))
db.close_connection()
return ""
@mod.route(url_constant.CHECK_AUDIO, methods=['POST'])
def check_audio():
audio = request.files.get(param_constant.PARAM_AUDIO)
millis = int(round(time.time() * 1000))
base_dir = "{}/{}".format(constant.TMP_DIR, f'sound-checking-{millis}')
if audio is None:
raise BadRequest()
try:
# Create directory if not exist
utils.create_directory(base_dir)
sound_dir = "{}/{}_original.wav".format(base_dir, "audio")
audio.save(sound_dir)
check_services = AudioCheckServices(max_noise_period_weight=environment.NOISE_DURATION_WEIGHT_FILTER,
max_period=environment.LENGTH_FILTER)
result = check_services.check(sound_dir, fix_length=False)
utils.remove_folder(base_dir)
return result
except Exception as e:
utils.remove_folder(base_dir)
raise InternalServerError(description=str(e))
@mod.route(url_constant.HEALTH_CHECK, methods=['GET'])
def health_check():
log_service.info("health_check() Start")
return "ok"
|
[
"noreply@github.com"
] |
fsoft-ailab.noreply@github.com
|
becedd2878f6e3858bad94afb54decbd5501c9df
|
4360f645e23c0d4077769b2085a197751b509804
|
/Python/Card_Deck_Player_old.py
|
cd21aa9067cbcd0a34b20cf2a784f2f6e87194a0
|
[] |
no_license
|
TeresaRem/CodingDojo
|
4d003916f0040d9b6bcb1c52759611536f2974e5
|
6be2f6ddb32a6c877cb6ce2a629b1a46708bb6a6
|
refs/heads/master
| 2021-06-12T00:05:18.062462
| 2017-02-06T23:56:58
| 2017-02-06T23:56:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
import random
class Card(object):
def __init__(self, suit, number):
self.suit=suit
self.number=number
self.visib=False
def flip(self):
if self.visib==True:
self.visib=False
else:
self.visib=True
return self
def displayCard(self):
print self.suit, str(self.number)
return ""
class Deck(object):
def __init__(self):
self.cards = []
self.build()
def build(self):
suits = ['s','h','d','c']
types = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
for card_suit in suits:
for card_value in types:
self.cards.append(Card(card_suit, card_value))
# print card_suit, card_value
return self
def shuffle(self):
random.shuffle(self.cards)
return self
    def deal(self, player):
        # move the top card from the deck into the player's hand
        player.hand.append(self.cards[-1])
        self.cards.pop()
        return self
class Player(object):
def __init__(self,name):
self.name = name
self.hand = []
    def draw(self, deck):
        # deal() already appends the card to this player's hand
        deck.deal(self)
        return self
    def discard(self):
        # drop the most recently drawn card
        self.hand.pop()
        return self
Deck1 = Deck()
Deck1.shuffle() #Deck.shuffle()
ricky = Player('ricky')
Deck1.deal(ricky).deal(ricky)
# ricky.draw(Deck1)
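# Illustrative sketch (added for clarity, not part of the original exercise):
# assuming the corrected deal()/draw() behaviour above, after the two deals
# ricky.hand holds 2 Card objects and Deck1.cards holds 50 cards, and a
# subsequent ricky.discard() would drop the hand back to 1 card.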
|
[
"wapniarski@gmail.com"
] |
wapniarski@gmail.com
|
46f00f42a71b5e55e83bda0564202507b68eb03d
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayBossFncGfaccenterConsolidationAcceptResponse.py
|
dc10dbd7e88c59b7deaaec1c29288feca42e302d
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayBossFncGfaccenterConsolidationAcceptResponse(AlipayResponse):
def __init__(self):
super(AlipayBossFncGfaccenterConsolidationAcceptResponse, self).__init__()
self._consolidation_success = None
self._need_retry = None
self._result_msg = None
@property
def consolidation_success(self):
return self._consolidation_success
@consolidation_success.setter
def consolidation_success(self, value):
self._consolidation_success = value
@property
def need_retry(self):
return self._need_retry
@need_retry.setter
def need_retry(self, value):
self._need_retry = value
@property
def result_msg(self):
return self._result_msg
@result_msg.setter
def result_msg(self, value):
self._result_msg = value
def parse_response_content(self, response_content):
response = super(AlipayBossFncGfaccenterConsolidationAcceptResponse, self).parse_response_content(response_content)
if 'consolidation_success' in response:
self.consolidation_success = response['consolidation_success']
if 'need_retry' in response:
self.need_retry = response['need_retry']
if 'result_msg' in response:
self.result_msg = response['result_msg']
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
5d835d442c8cea1929273cb83ab37aed722fd719
|
a8b37bd399dd0bad27d3abd386ace85a6b70ef28
|
/airbyte-cdk/python/airbyte_cdk/sources/file_based/discovery_policy/default_discovery_policy.py
|
56bd19d01f16e48e289dd511f1b8cb00beda50fd
|
[
"LicenseRef-scancode-free-unknown",
"MIT",
"Elastic-2.0"
] |
permissive
|
thomas-vl/airbyte
|
5da2ba9d189ba0b202feb952cadfb550c5050871
|
258a8eb683634a9f9b7821c9a92d1b70c5389a10
|
refs/heads/master
| 2023-09-01T17:49:23.761569
| 2023-08-25T13:13:11
| 2023-08-25T13:13:11
| 327,604,451
| 1
| 0
|
MIT
| 2021-01-07T12:24:20
| 2021-01-07T12:24:19
| null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from airbyte_cdk.sources.file_based.discovery_policy.abstract_discovery_policy import AbstractDiscoveryPolicy
DEFAULT_N_CONCURRENT_REQUESTS = 10
DEFAULT_MAX_N_FILES_FOR_STREAM_SCHEMA_INFERENCE = 10
class DefaultDiscoveryPolicy(AbstractDiscoveryPolicy):
"""
Default number of concurrent requests to send to the source on discover, and number
of files to use for schema inference.
"""
@property
def n_concurrent_requests(self) -> int:
return DEFAULT_N_CONCURRENT_REQUESTS
@property
def max_n_files_for_schema_inference(self) -> int:
return DEFAULT_MAX_N_FILES_FOR_STREAM_SCHEMA_INFERENCE
|
[
"noreply@github.com"
] |
thomas-vl.noreply@github.com
|
b30e7d624446c7260f0f799485ef0ad8f553a8ee
|
e8c3e7964f4b448e94481704d29508e9d6bd1798
|
/CommonTools/test/buildWZworkspace_forZZ_1sigma_allErrOK_withEWKSyst_signalShapeFix_f5z_ifLessThen0SetTo0_2604Files_SMaTGCfit_QCDscaleFix.py
|
64f483f5c7da078f04405ce67dfa038144d252d5
|
[] |
no_license
|
senka/ZZ_2l2nu_4l_CMS_combination
|
1401f81dc255ea0ae4a0a5c73b022670849a1152
|
197655fa2143ffe1665cd7a1c6e5af2a2f48e57a
|
refs/heads/master
| 2021-01-13T02:06:27.885996
| 2014-08-09T16:15:14
| 2014-08-09T16:15:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,308
|
py
|
import pyroot_logon
import limits
import os
import sys
from array import *
from ROOT import *
from optparse import OptionParser
from ConfigParser import SafeConfigParser
def isItCorrelated(name):
print '\t ----> isItCorrelated: testing ',name
if ('_eff_b' in name or '_les' in name or '_pu' in name or '_umet' in name or '_res_j' in name or '_scale_j' in name or '_ewk' in name or '_QCD_ACC_JVeto' in name ):
print '-> true'
return True
else:
print '-> false'
return False
parser = OptionParser(description="%prog : A RooStats Implementation of Anomalous Triple Gauge Coupling Analysis.",
usage="buildWZworkspace --config=example_config.cfg")
cfgparse = SafeConfigParser()
parser.add_option("--config",dest="config",help="The name of the input configuration file.")
(options,args) = parser.parse_args()
miss_options = False
if options.config is None:
print 'Need to specify --config'
miss_options=True
if miss_options:
exit(1)
cfgparse.read(options.config)
options.config = cfgparse # put the parsed config file into our options
cfg = options.config
#lType = sys.argv[1]
#codename = ""
#planeID = sys.argv[2]
norm_sig_sm = -1
norm_sig_sm_up = -1
norm_sig_sm_down = -1
norm_bkg = -1
norm_obs = -1
fit_sections = cfg.sections()
fit_sections.remove('Global') #don't need to iterate over the global configuration
basepath = '%s/src/CombinedEWKAnalysis/CommonTools/data/WV_semileptonic'%os.environ['CMSSW_BASE']
for section in fit_sections:
codename = section
lType = codename
print '\n\tlType=',lType
f = TFile('%s/%s_boosted.root'%(basepath,codename))
Nbkg = cfg.get(codename,'Nbkg')
print "Nbkg= ",Nbkg
Nbkg_int=int(Nbkg)
bkg_name = []
for i in range(1,Nbkg_int+1):
bkg_name.append(cfg.get(codename,'bkg%i_name'%i))
background = []
for i in range(0,Nbkg_int):
background.append(f.Get(bkg_name[i]))
print 'backgrounds= ',background
background_shapeSyst = []
for i in range(0,Nbkg_int):
background_shapeSyst.append([])
for name in cfg.get(codename,'bkg%i_shape_syst'%(i+1)).split(','):
background_shapeSyst[i].append(name)
background_backshapeUp = []
background_backshapeDown = []
for j in range(0,Nbkg_int):
background_backshapeUp.append([])
background_backshapeDown.append([])
for i in range(0,len(background_shapeSyst[j])):
print ' bkg shape syst: ',background_shapeSyst[j]
print ' getting bkgUp ','%sUp'%background_shapeSyst[j][i]
background_backshapeUp[j].append(f.Get('%sUp'%background_shapeSyst[j][i]))
background_backshapeDown[j].append(f.Get('%sDown'%background_shapeSyst[j][i]))
data_obs = f.Get('data_obs')
# diboson = f.Get('diboson')
diboson = f.Get('zz2l2nu')
doSignalShape_unc=False
cfg_items=cfg.items(codename)
for cfg_item in cfg_items:
if 'signal_shape_syst' in cfg_item:
doSignalShape_unc = True
print 'doSignalShape_unc=',doSignalShape_unc
if (doSignalShape_unc):
diboson_up = {}
diboson_down = {}
norm_sig_sm_up = {}
norm_sig_sm_down = {}
signal_shapeSyst = [string(i) for i in cfg.get(codename,'signal_shape_syst').split(',')]
for i in range(0,len(signal_shapeSyst)):
print ' signal shape syst: ',signal_shapeSyst[i]
diboson_up[i] = f.Get('%sUp'%signal_shapeSyst[i])
diboson_down[i] = f.Get('%sDown'%signal_shapeSyst[i])
norm_sig_sm_up[i] = diboson_up[i].Integral()
norm_sig_sm_down[i] = diboson_down[i].Integral()
norm_sig_sm = diboson.Integral()
norm_bkg = []
for i in range(0,Nbkg_int):
norm_bkg.append(background[i].Integral())
norm_obs = data_obs.Integral()
print 'bkg integral: ',norm_bkg
if (doSignalShape_unc):
print 'signal shape unc: ',norm_sig_sm_down,' ',norm_sig_sm,' ',norm_sig_sm_up
theWS = RooWorkspace('WV_%sboosted'%codename, 'WV_%sboosted'%codename)
wpt = theWS.factory('W_pt_%s[%f,%f]' % (codename,data_obs.GetBinLowEdge(1),
data_obs.GetBinLowEdge(data_obs.GetNbinsX())+data_obs.GetBinWidth(data_obs.GetNbinsX())))
binning=array('d',[])
for i in range(1, data_obs.GetNbinsX()+1):
binning.append(data_obs.GetBinLowEdge(i))
binning.append(data_obs.GetBinLowEdge(data_obs.GetNbinsX()+1))
print "bining: "
for i in range(0, len(binning)):
print binning[i]
bins=RooBinning(len(binning)-1, binning)
wpt.setBinning(bins)
lz = theWS.factory('lZ[0., -0.006, 0.006]')
lz.setConstant(False)
dkg = theWS.factory('dkg[0.,-0.006, 0.006]')
dg1 = theWS.factory('dg1[0.,-0.006,0.006]')
vars = RooArgList(wpt)
varSet = RooArgSet(wpt)
data = RooDataHist('data_obs', 'data_obs_WV_%s'%codename, vars, data_obs)
bkgHist = {}
for i in range(0,Nbkg_int):
bkgHist[i] = RooDataHist('WV_semileptonic_bkg%i_%s'%(i+1,codename),
'WV_semileptonic_bkg%i_%s'%(i+1,codename),
vars,
background[i])
bkgHist_systUp = []
bkgHist_systDown = []
for j in range(0,Nbkg_int):
bkgHist_systUp.append([])
bkgHist_systDown.append([])
for i in range(0,len(background_shapeSyst[j])):
print j," ",i
#change name here:
print '\n\t\t==========> testing in function= ', isItCorrelated('testing')
print '\t\t==========> wz3lnu_CMS_eff_b in function= ', isItCorrelated('wz3lnu_CMS_eff_b')
print '\t\t==========> %s in function= '%background_shapeSyst[j][i], isItCorrelated(background_shapeSyst[j][i])
print '\n'
if (isItCorrelated(background_shapeSyst[j][i])):
print ' \n\t\t ==================================> <=========================== '
name_forCorr=background_shapeSyst[j][i]
print ' name_forCorr= ',name_forCorr
name_forCorr=name_forCorr.replace('zll_','')
name_forCorr=name_forCorr.replace('wz3lnu_','')
print ' -> name_forCorr= ',name_forCorr
bkgHist_systUp[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,name_forCorr),
'WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,name_forCorr),
vars,
background_backshapeUp[j][i]))
bkgHist_systDown[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,name_forCorr),
'WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,name_forCorr),
vars,
background_backshapeDown[j][i]))
else:
bkgHist_systUp[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,background_shapeSyst[j][i]),
'WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,background_shapeSyst[j][i]),
vars,
background_backshapeUp[j][i]))
bkgHist_systDown[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,background_shapeSyst[j][i]),
'WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,background_shapeSyst[j][i]),
vars,
background_backshapeDown[j][i]))
# bkgHist_systUp[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,background_shapeSyst[j][i]),
# 'WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,background_shapeSyst[j][i]),
# vars,
# background_backshapeUp[j][i]))
# bkgHist_systDown[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,background_shapeSyst[j][i]),
# 'WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,background_shapeSyst[j][i]),
# vars,
# background_backshapeDown[j][i]))
dibosonHist = RooDataHist('WV_semileptonic_SM_%s_rawshape'%codename,
'WV_semileptonic_SM_%s_rawshape'%codename,
vars,
diboson)
if (doSignalShape_unc):
dibosonHist_up = {}
dibosonHist_down = {}
for i in range(0,len(signal_shapeSyst)):
print ' \n\t\t ==================================> SIGNAL %s <=========================== '%signal_shapeSyst[i],isItCorrelated(str(signal_shapeSyst[i]))
print ' \n\t\t ==================================> SIGNAL zz2l2nu_CMS_scale_j <=========================== ',isItCorrelated('zz2l2nu_CMS_scale_j')
#change name here
# dibosonHist_up[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,signal_shapeSyst[i]),
# 'WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,signal_shapeSyst[i]),
# vars,
# diboson_up[i])
# dibosonHist_down[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,signal_shapeSyst[i]),
# 'WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,signal_shapeSyst[i]),
# vars,
# diboson_down[i])
if (isItCorrelated(str(signal_shapeSyst[i]))):
print ' \n\t\t ==================================> <=========================== '
name_forCorr=str(signal_shapeSyst[i])
print ' name_forCorr= ',name_forCorr
name_forCorr=name_forCorr.replace('zz2l2nu_','')
print ' -> name_forCorr= ',name_forCorr
dibosonHist_up[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,name_forCorr),
'WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,name_forCorr),
vars,
diboson_up[i])
dibosonHist_down[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,name_forCorr),
'WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,name_forCorr),
vars,
diboson_down[i])
else:
dibosonHist_up[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,signal_shapeSyst[i]),
'WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,signal_shapeSyst[i]),
vars,
diboson_up[i])
dibosonHist_down[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,signal_shapeSyst[i]),
'WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,signal_shapeSyst[i]),
vars,
diboson_down[i])
dibosonPdf = RooHistFunc('WV_semileptonic_SM_%s_shape'%codename,
'WV_semileptonic_SM_%s_shape'%codename,
varSet,
dibosonHist)
if (doSignalShape_unc):
dibosonPdf_up = {}
dibosonPdf_down = {}
for i in range(0,len(signal_shapeSyst)):
# change name here
if (isItCorrelated(str(signal_shapeSyst[i]))):
name_forCorr=str(signal_shapeSyst[i])
name_forCorr=name_forCorr.replace('zz2l2nu_','')
dibosonPdf_up[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sUp'%(codename,name_forCorr),
'WV_semileptonic_SM_%s_shape_%sUp'%(codename,name_forCorr),
varSet,
dibosonHist_up[i])
dibosonPdf_down[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sDown'%(codename,name_forCorr),
'WV_semileptonic_SM_%s_shape_%sDown'%(codename,name_forCorr),
varSet,
dibosonHist_down[i])
else:
dibosonPdf_up[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sUp'%(codename,signal_shapeSyst[i]),
'WV_semileptonic_SM_%s_shape_%sUp'%(codename,signal_shapeSyst[i]),
varSet,
dibosonHist_up[i])
dibosonPdf_down[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sDown'%(codename,signal_shapeSyst[i]),
'WV_semileptonic_SM_%s_shape_%sDown'%(codename,signal_shapeSyst[i]),
varSet,
dibosonHist_down[i])
# print '\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ reading RooATGCFunction\n'
# aTGC = RooATGCFunction_wz('ATGC_shapescale_WWgammaZ_WV_atgc_semileptonic_%s'%codename,
# 'ATGC_shapescale_%s'%codename,
# wpt,
# lz,
# dkg,
# dg1,
# '%s/signal_WV_%s.root'%(basepath,codename))
print '\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ read RooATGCFunction\n'
limtype = -1
planeID = 'dkglZ'
print 'setting up for %s plane!'%planeID
if ( planeID == 'dkglZ' ):
limtype = 0
elif ( planeID == 'dg1lZ' ):
limtype = 1
elif ( planeID == 'dkgdg1'):
limtype = 2
else:
raise RuntimeError('InvalidCouplingChoice',
'We can only use [dkg,lZ], [dg1,lZ], and [dkg,dg1]'\
' as POIs right now!')
print limtype
print '\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ reading RooATGCSemi\n'
if (doSignalShape_unc):
kappaLow = {}
kappaHigh = {}
aTGCPdf_norm = {}
theta = {}
kappaLow_sum_d = 1.
kappaHigh_sum_d = 1.
for i in range(0,len(signal_shapeSyst)):
kappaLow[i] = RooRealVar("kappaL_%s_%s"%(i+1,codename),"kappaL_%s_%s"%(i+1,codename),norm_sig_sm_down[i]/norm_sig_sm)
kappaLow[i].setConstant(True)
kappaHigh[i] = RooRealVar("kappaH_%s_%s"%(i+1,codename),"kappaH_%s_%s"%(i+1,codename),norm_sig_sm_up[i]/norm_sig_sm)
kappaHigh[i].setConstant(True)
kappaLow_sum_d = kappaLow_sum_d*norm_sig_sm_down[i]/norm_sig_sm
kappaHigh_sum_d = kappaHigh_sum_d*norm_sig_sm_up[i]/norm_sig_sm
# theWS.factory("%s[-7,7]"%signal_shapeSyst[i])
# theta[i] = theWS.var("%s"%signal_shapeSyst[i])
if (isItCorrelated(str(signal_shapeSyst[i]))):
name_forCorr=str(signal_shapeSyst[i])
name_forCorr=name_forCorr.replace('zz2l2nu_','')
theWS.factory("%s[-7,7]"%name_forCorr)
theta[i] = theWS.var("%s"%name_forCorr)
else:
theWS.factory("%s[-7,7]"%signal_shapeSyst[i])
theta[i] = theWS.var("%s"%signal_shapeSyst[i])
aTGCPdf_norm[i] = AsymPow('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_integral%s'%(codename,i+1),
'ATGCPdf_WV_%s_integral%s'%(codename,i+1),
kappaLow[i],
kappaHigh[i],
theta[i])
if (len(signal_shapeSyst)==1):
aTGCPdf_norm_sum = aTGCPdf_norm[0]
else:
for i in range(0,len(signal_shapeSyst)):
if (i==0): prodset=RooArgList(aTGCPdf_norm[i])
else: prodset.add(RooArgList(aTGCPdf_norm[i]))
aTGCPdf_norm_sum = RooProduct("aTGCPdf_norm_sum","aTGCPdf_norm_sum",prodset)
kappaLow_sum = RooRealVar("kappaLow_sum","kappaLow_sum",kappaLow_sum_d)
kappaHigh_sum = RooRealVar("kappaHigh_sum","kappaHigh_sum",kappaHigh_sum_d)
aTGCPdf_norm_sum.SetNameTitle('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_norm'%codename,
'ATGCPdf_WV_%s_norm'%codename)
aTGCPdf = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s'%codename,
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf,
'%s/signal_WV_%s_f5z_ifLessThen0SetTo0_2604Files_SMaTGCfit.root'%(basepath,codename),
limtype
)
if (doSignalShape_unc):
aTGCPdf_up = {}
aTGCPdf_down = {}
for i in range(0,len(signal_shapeSyst)):
# change name here
if (isItCorrelated(str(signal_shapeSyst[i]))):
name_forCorr=str(signal_shapeSyst[i])
name_forCorr=name_forCorr.replace('zz2l2nu_','')
aTGCPdf_up[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sUp'%(codename,name_forCorr),
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf_up[i],
'%s/signal_WV_%s_f5z_ifLessThen0SetTo0_2604Files_SMaTGCfit.root'%(basepath,codename),
limtype
)
aTGCPdf_down[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sDown'%(codename,name_forCorr),
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf_down[i],
'%s/signal_WV_%s_f5z_ifLessThen0SetTo0_2604Files_SMaTGCfit.root'%(basepath,codename),
limtype
)
else:
aTGCPdf_up[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sUp'%(codename,signal_shapeSyst[i]),
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf_up[i],
'%s/signal_WV_%s_f5z_ifLessThen0SetTo0_2604Files_SMaTGCfit.root'%(basepath,codename),
limtype
)
aTGCPdf_down[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sDown'%(codename,signal_shapeSyst[i]),
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf_down[i],
'%s/signal_WV_%s_f5z_ifLessThen0SetTo0_2604Files_SMaTGCfit.root'%(basepath,codename),
limtype
)
print '\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ read RooATGCSemi\n'
getattr(theWS, 'import')(data)
for i in range(0,Nbkg_int):
getattr(theWS, 'import')(bkgHist[i])
for j in range(0,Nbkg_int):
for i in range(0,len(background_shapeSyst[j])):
getattr(theWS, 'import')(bkgHist_systUp[j][i])
getattr(theWS, 'import')(bkgHist_systDown[j][i])
getattr(theWS, 'import')(aTGCPdf)
if (doSignalShape_unc):
for i in range(0,len(signal_shapeSyst)):
getattr(theWS, 'import')(aTGCPdf_up[i])
getattr(theWS, 'import')(aTGCPdf_down[i])
# getattr(theWS, 'import')(aTGCPdf_norm[i])
getattr(theWS, 'import')(aTGCPdf_norm_sum)
theWS.Print()
fout = TFile('%s_boosted_ws.root'%(codename), 'recreate')
theWS.Write()
fout.Close()
### make the card for this channel and plane ID
card = """
# Simple counting experiment, with one signal and a few background processes
imax 1 number of channels
jmax {Nbkg_int} number of backgrounds
kmax * number of nuisance parameters (sources of systematical uncertainties)
------------""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,Nbkg_int=Nbkg_int)
for i in range(0,Nbkg_int):
card += """
shapes WV_semileptonic_bkg{Nbkg_int}_{codename} {codename}boosted ./{codename}_boosted_ws.root WV_{codename}boosted:$PROCESS WV_{codename}boosted:$PROCESS_$SYSTEMATIC""".format(Nbkg_int=i+1,codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
card += """
shapes data_obs {codename}boosted ./{codename}_boosted_ws.root WV_{codename}boosted:$PROCESS """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,Nbkg_int=Nbkg_int)
if (doSignalShape_unc):
card += """
shapes WWgammaZ_WV_atgc_semileptonic_{codename} {codename}boosted ./{codename}_boosted_ws.root WV_{codename}boosted:ATGCPdf_$PROCESS WV_{codename}boosted:ATGCPdf_$PROCESS_$SYSTEMATIC """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
else:
card += """
shapes WWgammaZ_WV_atgc_semileptonic_{codename} {codename}boosted ./{codename}_boosted_ws.root WV_{codename}boosted:ATGCPdf_$PROCESS
""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
card += """
------------
bin {codename}boosted
observation {norm_obs}
------------
bin {codename}boosted\t\t""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
for i in range(0,Nbkg_int):
card += """\t\t\t{codename}boosted""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
card += """
process\t\t\t WWgammaZ_WV_atgc_semileptonic_{codename} """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
for i in range(0,Nbkg_int):
card += """\tWV_semileptonic_bkg{Nbkg_int}_{codename}""".format(Nbkg_int=i+1,codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
card += """
process 0 """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
for i in range(0,Nbkg_int):
card += """ \t\t\t\t{i}""".format(i=i+1,codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
card += """
rate {norm_sig_sm}\t""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
for i in range(0,Nbkg_int):
card += """ \t\t\t{norm_bkg}""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
card += """
------------
lumi_8TeV \t lnN \t 1.026 """
for i in range(0,Nbkg_int):
if (i==2): card += """\t\t\t\t1.026""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
else: card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
card += """
CMS_eff_{codename[0]} lnN 1.03 """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
for i in range(0,Nbkg_int):
if (i==2): card += """\t\t\t\t1.03""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
else: card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
card += """
CMS_hzz2l2v_sys_topwwwjetsdata_8TeV_{codename[0]} lnN - """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
for i in range(0,Nbkg_int):
if (i==1): card += """\t\t\t\t1.2""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
else: card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
for j in range(0,Nbkg_int):
for i in range(0,len(background_shapeSyst[j])):
#change name here:
# if (isItCorrelated(background_shapeSyst[j][i])):
# name_forCorr=background_shapeSyst[j][i]
# name_forCorr=name_forCorr.replace('zll_','')
# name_forCorr=name_forCorr.replace('wz3lnu_','')
# card += """
#{background_shapeSyst} shape """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,i=i,background_shapeSyst=name_forCorr)
if not(isItCorrelated(background_shapeSyst[j][i])):
card += """
{background_shapeSyst} shape1 """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,i=i,background_shapeSyst=background_shapeSyst[j][i])
# card += """
#{background_shapeSyst} shape """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,i=i,background_shapeSyst=background_shapeSyst[j][i])
for k in range(0,j+1):
card += """-\t\t\t\t""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,i=i,background_shapeSyst=background_shapeSyst[j][i])
card += """1.0 """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,i=i,background_shapeSyst=background_shapeSyst[j][i])
for k in range(1,Nbkg_int-j):
card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,i=i,background_shapeSyst=background_shapeSyst[j][i])
# card += """
#QCDJeT_aTG lnN 1.12 """
# for i in range(0,Nbkg_int):
# card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,i=i)
# card += """
#QCDscale_VV lnN - """
# for i in range(0,Nbkg_int):
# if (i==2): card += """\t\t\t\t1.054""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
# else: card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
card += """
Zlldata_syst lnN - """
for i in range(0,Nbkg_int):
if (i==0): card += """\t\t\t\t1.4""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
else: card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
card += """
pdf_VV lnN 1.058 """
for i in range(0,Nbkg_int):
if (i==2): card += """\t\t\t\t1.042""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
else: card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
if (doSignalShape_unc):
for i in range(0,len(signal_shapeSyst)):
#change name here:
if (isItCorrelated(str(signal_shapeSyst[i]))):
name_forCorr=str(signal_shapeSyst[i])
name_forCorr=name_forCorr.replace('zz2l2nu_','')
card += """
{signal_shapeSyst} shape1 1.0 """.format(signal_shapeSyst=name_forCorr)
for j in range(0,Nbkg_int):
isItCorr=False
for k in range(0,len(background_shapeSyst[j])):
if (name_forCorr in background_shapeSyst[j][k]):
isItCorr=True
if (isItCorr):
card += """\t\t\t\t1.0""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
else:
card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
else:
card += """
{signal_shapeSyst} shape1 1.0 """.format(signal_shapeSyst=signal_shapeSyst[i])
for i in range(0,Nbkg_int):
card += """\t\t\t\t-""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
print card
cardfile = open('wv_semil_%sboosted.txt'%(codename),'w')
cardfile.write(card)
cardfile.close()
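# Note (added, not part of the original script): the datacard text written above and the
# '<codename>_boosted_ws.root' workspace are the inputs that the subsequent limit-setting
# step (e.g. the CMS 'combine' tool) would read; this script itself ends here.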
|
[
"senka.duric@cern.ch"
] |
senka.duric@cern.ch
|
8a2ae10b22c4b3a967896e84b577bda9040e3527
|
eb781c723da986b8230f869bc42a60a0f28eb257
|
/number_of_ways_treverse_grid.py
|
1af3ed67d61aca507cef4a0ddc63597b0d205252
|
[] |
no_license
|
sandy836/Interviewpro
|
9dc49a4a6903559067fd6c63867d99562f607774
|
f50efab83d6ff84bef132e841cd642a8511c013d
|
refs/heads/master
| 2022-09-29T23:02:08.201229
| 2020-06-07T13:48:41
| 2020-06-07T13:48:41
| 254,679,704
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
def num_ways(n, m):
    # dp[i][j] = number of monotone (right/down) paths from the top-left cell to cell (i, j)
    dp = [[0]*n for _ in range(m)]
    # cells in the first row or first column can be reached in exactly one way
    for i in range(n):
        dp[0][i] = 1
    for j in range(m):
        dp[j][0] = 1
    # every other cell is entered either from the left or from above
    for i in range(1, m):
        for j in range(1, n):
            dp[i][j] = dp[i][j-1]+dp[i-1][j]
    return dp[-1][-1]
print(num_ways(3,7))
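# Cross-check (added illustration, not part of the original solution): the number of
# monotone lattice paths in an m x n grid has the closed form C(m+n-2, n-1), so the DP
# result can be verified against math.comb (Python 3.8+).
import math
assert num_ways(3, 7) == math.comb(3 + 7 - 2, 3 - 1) == 28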
|
[
"sandeepshrivastava518@gmail.com"
] |
sandeepshrivastava518@gmail.com
|
bf9b56c08bf4cf1b9e5b81624139200b57d166bc
|
9c7f47b2f31ea4ae55e33c706efe524eb62ff177
|
/HT_1/HT_1_13.py
|
102b0b529a7af654ee6b096b686edbd4e13fa6a1
|
[] |
no_license
|
Kantarian/GITHUB
|
05b6d5425b345667a4188ced23da76ed337b910a
|
fa047cbb2beb9bf372b22596bea8aaef80423872
|
refs/heads/main
| 2023-02-14T16:57:50.229446
| 2021-01-13T15:43:48
| 2021-01-13T15:43:48
| 311,783,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
#13. Write a script to get the maximum and minimum value in a dictionary.
my_dict = {'x':500, 'y':5874, 'z': 560}
key_max = max(my_dict.keys(), key=(lambda k: my_dict[k]))
key_min = min(my_dict.keys(), key=(lambda k: my_dict[k]))
print('Maximum Value: ',my_dict[key_max])
print('Minimum Value: ',my_dict[key_min])
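# Equivalent, more compact form (added illustration): max()/min() can work on the values
# directly, without first locating the corresponding keys.
print('Maximum Value: ', max(my_dict.values()))
print('Minimum Value: ', min(my_dict.values()))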
|
[
"noreply@github.com"
] |
Kantarian.noreply@github.com
|
59e922e5a23e26a9a7e7f22c7006d5d0cdecf0da
|
99912297cd307c87aab1c4f3a3959858fd054340
|
/ssc/cuboid_fitting.py
|
779887b71876627cfaf1ca5831b4a445b93c8ed1
|
[
"MIT"
] |
permissive
|
zhigangjiang/SingleShotCuboids
|
2d07a3aa4138f8714a0b1bb0c8b94957c42257f2
|
ff2c06fb8cba8fae3be2e1293546b6e558c8f757
|
refs/heads/master
| 2023-03-07T19:16:58.609393
| 2021-02-20T15:39:07
| 2021-02-20T15:39:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,935
|
py
|
import torch
import numpy as np
import functools
import kornia
class CuboidFitting(torch.nn.Module):
def __init__(self,
mode: str='joint', # one of ['joint', 'floor', 'ceil', 'avg']
floor_distance: float=-1.6,
):
super(CuboidFitting, self).__init__()
self.homography_func = functools.partial(
self._homography_floor_svd,
floor_z=floor_distance)\
if mode == 'floor' else (
functools.partial(
self._homography_ceil_svd,
ceil_z=-floor_distance
) if mode == 'ceil' else (functools.partial(
self._homography_avg_svd,
floor_z=floor_distance,
ceil_z=-floor_distance
) if mode == 'avg' else functools.partial(
self._homography_joint_svd,
floor_z=floor_distance,
ceil_z=-floor_distance
)
)
)
cuboid_axes = torch.Tensor([[
[-1, 1],
[-1, -1],
[1, -1],
[1, 1],
]]).float()
self.register_buffer("cuboid_axes", cuboid_axes)
def _get_scale_all(self, coords: torch.Tensor, eps: float=1e-12) -> torch.Tensor:
a_x1 = torch.linalg.norm(coords[:, 0, :] - coords[:, 1, :], ord=2, dim=1)
a_y1 = torch.linalg.norm(coords[:, 1, :] - coords[:, 2, :], ord=2, dim=1)
a_x2 = torch.linalg.norm(coords[:, 2, :] - coords[:, 3, :], ord=2, dim=1)
a_y2 = torch.linalg.norm(coords[:, 3, :] - coords[:, 0, :], ord=2, dim=1)
a_x = 0.5 * (a_x1 + a_x2)
a_y = 0.5 * (a_y1 + a_y2)
return torch.stack([a_y, a_x], dim=1)
def _svd(self,
points1: torch.Tensor,
points2: torch.Tensor
) -> torch.Tensor:
'''
        Computes a similarity transform (sR, t) that maps
        a set of 3D points S1 (3 x N) as closely as possible onto a set of 3D points S2,
        where R is a 3x3 rotation matrix, t a 3x1 translation, and s a scale factor,
        i.e. solves the orthogonal Procrustes problem.
'''
b, _, c = points1.shape
# 1. Remove mean.
points1 = torch.transpose(points1, -2, -1)
points2 = torch.transpose(points2, -2, -1)
centroid1 = points1.mean(dim=-1, keepdims=True)
        centroid2 = points2.mean(dim=-1, keepdims=True)
centered1 = points1 - centroid1
centered2 = points2 - centroid2
# 2. Compute variance of X1 used for scale.
variance = torch.sum(centered1 ** 2, dim=[1, 2])
# 3. The outer product of X1 and X2.
K = centered1 @ torch.transpose(centered2, -2, -1)
# 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are singular vectors of K.
U, s, V = torch.svd(K)
# Construct Z that fixes the orientation of R to get det(R)=1.
Z = torch.eye(c).to(U).unsqueeze(0).repeat(b, 1, 1)
Z[:,-1, -1] *= torch.sign(torch.det(U @ torch.transpose(V, -2, -1)))
# Construct R.
rotation = V @ (Z @ torch.transpose(U, -2, -1))
# 5. Recover scale.
scale = torch.cat([torch.trace(x).unsqueeze(0) for x in (rotation @ K)]) / variance
# 6. Recover translation.
scale = scale.unsqueeze(-1).unsqueeze(-1)
translation = centroid2 - (scale * (rotation @ centroid1))
return rotation, translation, scale
def _transform_points(self,
points: torch.Tensor,
rotation: torch.Tensor,
translation: torch.Tensor,
scale: torch.Tensor,
) -> torch.Tensor:
xformed = scale * (rotation @ torch.transpose(points, -2, -1)) + translation
return torch.transpose(xformed, -2, -1)
def _homography_floor_svd(self,
top_corners: torch.Tensor, # in [-1, 1]
bottom_corners: torch.Tensor, # in [-1, 1]
floor_z: float=-1.6,
):
b, N, _ = top_corners.size()
u = bottom_corners[:, :, 0] * np.pi
v = bottom_corners[:, :, 1] * (-0.5 * np.pi)
c = floor_z / torch.tan(v)
x = c * torch.sin(u)
y = -c * torch.cos(u)
floor_xy = torch.stack([x, y], dim=-1)
scale = self._get_scale_all(floor_xy)
scale = scale / 2.0
centroid = floor_xy.mean(dim=1)
c = torch.linalg.norm(floor_xy, ord=2, dim=-1)
v = top_corners[:, :, 1] * (-0.5 * np.pi)
ceil_z = (c * torch.tan(v)).mean(dim=1, keepdim=True)
ceil_z = ceil_z.unsqueeze(1).expand(b, 4, 1).contiguous()
floor_xy = floor_xy - centroid.unsqueeze(1)
inds = torch.sort(torch.atan2(floor_xy[..., 0], floor_xy[..., 1] + 1e-12))[1]
axes = self.cuboid_axes[:, inds.squeeze(), :]
homography = kornia.get_perspective_transform(floor_xy, axes)
homogeneous = torch.cat([floor_xy, torch.ones_like(floor_xy[..., -1:])], dim=2)
xformed = (homography @ homogeneous.transpose(1, 2)).transpose(1, 2)
xformed = xformed[:, :, :2] / xformed[:, :, 2].unsqueeze(-1)
rect_floor_xy = xformed * scale.unsqueeze(1) + centroid.unsqueeze(1)
original_xy = floor_xy + centroid.unsqueeze(1)
R, t, s = self._svd(rect_floor_xy, original_xy[:, inds.squeeze(), :])
rect_floor_xy = self._transform_points(rect_floor_xy, R, t, s)
bottom_points = torch.cat([rect_floor_xy, floor_z * torch.ones_like(c.unsqueeze(-1))], dim=-1)
top_points = torch.cat([rect_floor_xy, ceil_z], dim=-1)
return top_points, bottom_points
def _homography_joint_svd(self,
top_corners: torch.Tensor, # in [-1, 1]
bottom_corners: torch.Tensor, # in [-1, 1]
floor_z: float=-1.6,
ceil_z: float=1.6,
):
b, N, _ = top_corners.size()
floor_u = bottom_corners[:, :, 0] * np.pi
floor_v = bottom_corners[:, :, 1] * (-0.5 * np.pi)
floor_c = floor_z / torch.tan(floor_v)
floor_x = floor_c * torch.sin(floor_u)
floor_y = -floor_c * torch.cos(floor_u)
floor_xy = torch.stack([floor_x, floor_y], dim=-1)
floor_scale = self._get_scale_all(floor_xy)
floor_scale = floor_scale / 2.0
floor_ceil_c = torch.linalg.norm(floor_xy, ord=2, dim=-1)
floor_ceil_v = top_corners[:, :, 1] * (-0.5 * np.pi)
floor_ceil_z = (floor_ceil_c * torch.tan(floor_ceil_v)).mean(dim=1, keepdim=True)
floor_ceil_z = floor_ceil_z.unsqueeze(1).expand(b, 4, 1).contiguous()
ceil_u_t = top_corners[:, :, 0] * np.pi
ceil_v_t = top_corners[:, :, 1] * (-0.5 * np.pi)
ceil_c = ceil_z / torch.tan(ceil_v_t)
ceil_x = ceil_c * torch.sin(ceil_u_t)
ceil_y = -ceil_c * torch.cos(ceil_u_t)
ceil_xy = torch.stack([ceil_x, ceil_y], dim=-1)
ceil_floor_c = torch.linalg.norm(ceil_xy, ord=2, dim=-1)
ceil_v_b = bottom_corners[:, :, 1] * (-0.5 * np.pi)
ceil_floor_z = (ceil_floor_c * torch.tan(ceil_v_b)).mean(dim=1, keepdim=True)
fix_ceil = -ceil_z / ceil_floor_z
ceil_z_fix = ceil_z * fix_ceil
ceil_z_fix = ceil_z_fix.unsqueeze(1).expand(b, 4, 1).contiguous()
ceil_floor_fixed_c = ceil_z_fix.squeeze(-1) / torch.tan(ceil_v_t)
ceil_x = ceil_floor_fixed_c * torch.sin(ceil_u_t)
ceil_y = -ceil_floor_fixed_c * torch.cos(ceil_u_t)
ceil_xy = torch.stack([ceil_x, ceil_y], dim=-1)
ceil_scale = self._get_scale_all(ceil_xy)
ceil_scale = ceil_scale / 2.0
joint_xy = 0.5 * (floor_xy + ceil_xy)
joint_scale = 0.5 * (floor_scale + ceil_scale)
joint_centroid = joint_xy.mean(dim=1)
joint_xy = joint_xy - joint_centroid.unsqueeze(1)
inds = torch.sort(torch.atan2(joint_xy[..., 0], joint_xy[..., 1] + 1e-12))[1]
axes = self.cuboid_axes[:, inds.squeeze(), :]
homography = kornia.get_perspective_transform(joint_xy, axes)
homogeneous = torch.cat([joint_xy, torch.ones_like(joint_xy[..., -1:])], dim=2)
xformed = (homography @ homogeneous.transpose(1, 2)).transpose(1, 2)
xformed = xformed[:, :, :2] / xformed[:, :, 2].unsqueeze(-1)
rect_joint_xy = xformed * joint_scale.unsqueeze(1) + joint_centroid.unsqueeze(1)
original_xy = joint_xy + joint_centroid.unsqueeze(1)
R, t, s = self._svd(rect_joint_xy, original_xy[:, inds.squeeze(), :])
rect_joint_xy = self._transform_points(rect_joint_xy, R, t, s)
bottom_points = torch.cat([rect_joint_xy, floor_z * torch.ones_like(floor_c.unsqueeze(-1))], dim=-1)
top_points = torch.cat([rect_joint_xy, ceil_z_fix], dim=-1)
return top_points, bottom_points
def _homography_ceil_svd(self,
top_corners: torch.Tensor, # in [-1, 1]
bottom_corners: torch.Tensor, # in [-1, 1]
ceil_z: float=1.6,
):
b, N, _ = top_corners.size()
u_t = top_corners[:, :, 0] * np.pi
v_t = top_corners[:, :, 1] * (-0.5 * np.pi)
c = ceil_z / torch.tan(v_t)
x = c * torch.sin(u_t)
y = -c * torch.cos(u_t)
ceil_xy = torch.stack([x, y], dim=-1)
c = torch.linalg.norm(ceil_xy, ord=2, dim=-1)
v_b = bottom_corners[:, :, 1] * (-0.5 * np.pi)
floor_z = (c * torch.tan(v_b)).mean(dim=1, keepdim=True)
fix_ceil = -ceil_z / floor_z
floor_z = -ceil_z
ceil_z_fix = ceil_z * fix_ceil
ceil_z_fix = ceil_z_fix.unsqueeze(1).expand(b, 4, 1).contiguous()
c = ceil_z_fix.squeeze(-1) / torch.tan(v_t)
x = c * torch.sin(u_t)
y = -c * torch.cos(u_t)
ceil_xy = torch.stack([x, y], dim=-1)
scale = self._get_scale_all(ceil_xy)
scale = scale / 2.0
centroid = ceil_xy.mean(dim=1)
ceil_xy = ceil_xy - centroid.unsqueeze(1)
inds = torch.sort(torch.atan2(ceil_xy[..., 0], ceil_xy[..., 1] + 1e-12))[1]
axes = self.cuboid_axes[:, inds.squeeze(), :]
homography = kornia.get_perspective_transform(ceil_xy, axes)
homogeneous = torch.cat([ceil_xy, torch.ones_like(ceil_xy[..., -1:])], dim=2)
xformed = (homography @ homogeneous.transpose(1, 2)).transpose(1, 2)
xformed = xformed[:, :, :2] / xformed[:, :, 2].unsqueeze(-1)
rect_ceil_xy = xformed * scale.unsqueeze(1) + centroid.unsqueeze(1)
original_xy = ceil_xy + centroid.unsqueeze(1)
R, t, s = self._svd(rect_ceil_xy, original_xy[:, inds.squeeze(), :])
rect_ceil_xy = self._transform_points(rect_ceil_xy, R, t, s)
bottom_points = torch.cat([rect_ceil_xy, floor_z * torch.ones_like(c.unsqueeze(-1))], dim=-1)
top_points = torch.cat([rect_ceil_xy, ceil_z_fix], dim=-1)
return top_points, bottom_points
def _homography_avg_svd(self,
top_corners: torch.Tensor, # in [-1, 1]
bottom_corners: torch.Tensor, # in [-1, 1]
floor_z: float=-1.6,
ceil_z: float=1.6,
):
top_ceil, bottom_ceil = self._homography_ceil_svd(top_corners, bottom_corners, ceil_z)
top_floor, bottom_floor = self._homography_floor_svd(top_corners, bottom_corners, floor_z)
return (top_ceil + top_floor) * 0.5, (bottom_ceil + bottom_floor) * 0.5
def _project_points(self,
points3d: torch.Tensor,
epsilon: float=1e-12,
):
phi = torch.atan2(points3d[:, :, 0], -1.0 * points3d[:, :, 1] + epsilon) # [-pi, pi]
xy_dist = torch.linalg.norm(points3d[:, :, :2], ord=2, dim=-1)
theta = -1.0 * torch.atan2(points3d[:, :, 2], xy_dist + epsilon) # [-pi / 2.0, pi / 2.0]
u = phi / np.pi
v = theta / (0.5 * np.pi)
return torch.stack([u, v], dim=-1)
def forward(self, corners: torch.Tensor) -> torch.Tensor:
top, bottom = torch.chunk(corners, 2, dim=1)
b = top.shape[0]
aligned = []
for i in range(b):
t = top[i, ...].unsqueeze(0)
b = bottom[i, ...].unsqueeze(0)
try:
t_xyz, b_xyz = self.homography_func(t, b)
t_uv, b_uv = self._project_points(t_xyz), self._project_points(b_xyz)
t_uv = t_uv[:, torch.argsort(t_uv[0, :, 0]), :]
b_uv = b_uv[:, torch.argsort(b_uv[0, :, 0]), :]
aligned_corners = torch.cat([t_uv, b_uv], dim=1).squeeze(0)
aligned.append(aligned_corners)
except RuntimeError as ex:
aligned.append(corners[i, ...])
return torch.stack(aligned, dim=0)
if __name__ == "__main__":
from cuboid_test_utils import *
from cuboid_tests import *
import sys
selected_test ='15' if len(sys.argv) < 2 else str(sys.argv[1])
selected_mode ='floor' if len(sys.argv) < 3 else str(sys.argv[2])
modes = ['floor', 'ceil', 'joint', 'avg']
for name, test in get_tests():
if selected_test not in name:
continue
for mode in modes:
if selected_mode not in mode:
continue
alignment = CuboidFitting(mode=mode)
top, bottom = test()
if torch.cuda.is_available():
top = top.cuda()
bottom = bottom.cuda()
alignment = alignment.cuda()
corners = torch.cat([top, bottom], dim=1)
aligned = alignment.forward(corners)
images = np.zeros([1, 256, 512, 3], dtype=np.uint8)
top_pts2d, bottom_pts2d = torch.chunk(aligned, 2, dim=-2)
draw_points(top_pts2d, images, [255, 0, 0])
draw_points(bottom_pts2d, images, [255, 0, 0])
top_pts2d, bottom_pts2d = torch.chunk(corners, 2, dim=-2)
draw_points(top_pts2d, images, [0, 255, 0])
draw_points(bottom_pts2d, images, [0, 255, 0])
show_frozen(f"{mode} {name}", images[0])
# show_playback(f"{mode} {name}", images[0])
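# Minimal usage sketch (added illustration; the corner values below are made up): the
# forward() pass expects 8 corners per sample in normalized equirectangular coordinates,
# the first 4 rows being the top corners and the last 4 the bottom corners, each in [-1, 1].
#
#   fitter = CuboidFitting(mode='floor')
#   corners = torch.tensor([[[-0.8, -0.40], [-0.3, -0.50], [0.2, -0.45], [0.7, -0.40],
#                            [-0.8,  0.40], [-0.3,  0.50], [0.2,  0.45], [0.7,  0.40]]])
#   aligned = fitter(corners)  # same shape, corners snapped to a cuboid-consistent layout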
|
[
"nzioulis@iti.gr"
] |
nzioulis@iti.gr
|
8ad9bb30b3e0dce0d9be4144394ea10060e7df85
|
e52afdf311d9b682fd2edfa2ac131bd83bbe63eb
|
/Week 2/4/Polynomial_LR.py
|
5446a78c823704506e21c48f5ec0f3701b8fd318
|
[] |
no_license
|
YashwanthMN1/MLEdyoda
|
cc1185e4618e896764a0b01773a886e49ba9b8e7
|
36a9470729c57c7b6b742bac239e9352f8b2a133
|
refs/heads/main
| 2023-05-02T11:51:11.801693
| 2021-05-25T13:52:24
| 2021-05-25T13:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 9 15:10:23 2021
@author: RISHBANS
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Company_Performance.csv')
X = dataset.iloc[:, [0]].values
y = dataset.iloc[:, 1].values
# Fitting Linear Regression
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
# Fitting Polynomial Regression
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 4)
X_poly = poly_reg.fit_transform(X)
#poly_reg.fit(X_poly, y)
lin_reg_poly = LinearRegression()
lin_reg_poly.fit(X_poly, y)
y_pred = lin_reg_poly.predict(X_poly)
# Visualising -> Linear Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, lin_reg.predict(X), color = 'blue')
plt.title('Size of Company (Linear Regression)')
plt.xlabel('No. of Year in Operation ')
plt.ylabel('No. of Emp')
plt.show()
# Visualising -> Polynomial Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, y_pred, color = 'blue')
plt.title('Size of Company (Polynomial Regression)')
plt.xlabel('No. of Year in Operation')
plt.ylabel('No. of Emp')
plt.show()
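# Optional fit-quality check (added illustration): compare the two models with R^2.
from sklearn.metrics import r2_score
print('Linear R^2:     ', r2_score(y, lin_reg.predict(X)))
print('Polynomial R^2: ', r2_score(y, y_pred))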
|
[
"rishibansal02@gmail.com"
] |
rishibansal02@gmail.com
|
5a816edcf9b5572793ef4934aa937c7b939a75a5
|
7102f5d667da9b68622e96c17d22ae5e48c68878
|
/Problems' Solution/DesignerDoorMat.py
|
d72174d6a27b563513dcb1fec7423ae715333edc
|
[] |
no_license
|
shahnawaz-pabon/Python-with-HackerRank
|
0032ff5c44fe9015f637131ae4d15d1bdc1812be
|
53c68e82089f263f52b2c1431faccc979ccad5a3
|
refs/heads/master
| 2021-07-15T14:46:03.514436
| 2017-05-30T16:20:54
| 2017-05-30T16:20:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
N, M = map(int, input().split())
for i in range(1, N, 2):
print((".|." * i).center(M, "-"))
print("WELCOME".center(M, "-"))
for i in range(N - 2, 0, -2):
print((".|." * i).center(M, "-"))
'''
N, M = map(int, input().split())
s = '.|.'
wlcm = 'WELCOME'
upper = int(N / 2)
lower = upper
koyta = 1
koyta_hypen = int(M / 2)-1
koyta_str = 1
for i in range(upper):
for j in range(koyta_hypen):
print('-', end='')
for j in range(koyta_str):
print(s, end='')
for j in range(koyta_hypen):
print('-', end='')
koyta_str += 2
koyta_hypen = M - (koyta_str * 3)
koyta_hypen = int(koyta_hypen / 2)
print()
print(wlcm.center(M, '-'))
for i in range(lower):
koyta_str -= 2
koyta_hypen = M - (koyta_str * 3)
koyta_hypen = int(koyta_hypen / 2)
for j in range(koyta_hypen):
print('-', end='')
for j in range(koyta_str):
print(s, end='')
for j in range(koyta_hypen):
print('-', end='')
print()
'''
|
[
"noreply@github.com"
] |
shahnawaz-pabon.noreply@github.com
|
ff2d8f2ab7ae00026e60b344129a01111c5b7aad
|
c7d5c327e673fb11fe60afc0defbbfc4cd50f45a
|
/Free/proyecto/python/5.py
|
18609a4b7fa2e3e1d702e8376ac3bf8a36c74cf8
|
[] |
no_license
|
juandaar/IoT-Platform-php-2016
|
3ebc5ffe62195a59ec40ac6b25faaec550a1ffd9
|
6a1334d749c9351d47a1c915f908c5d6d3f5f333
|
refs/heads/master
| 2020-12-04T16:57:23.586731
| 2020-01-05T00:22:41
| 2020-01-05T00:22:41
| 231,845,085
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
import argparse
import numpy as np
import json
from sklearn import linear_model
# hi
parser = argparse.ArgumentParser('informacion')
parser.add_argument('numero_inicial', metavar='N', type=int)
parser.add_argument('valores')
args = parser.parse_args()
arreglo_json=args.valores
numero_inicial=args.numero_inicial;
datos=json.loads(arreglo_json)
dato_analizada=[]
fecha_analizada=[]
dato_modelo=[]
fecha_modelo=[]
contador=0
control=[]
control_analisis=[]
cantidad=len(datos)
contador_analisis=0;
if(numero_inicial==0 ):
separacion=int(cantidad*0.9)
while (contador<cantidad):
if (contador<separacion):
dato_modelo.append(datos[contador]['valor'])
fecha_modelo.append(datos[contador]['fecha'])
control.append(contador+1)
else:
dato_analizada.append(datos[contador]['valor'])
fecha_analizada.append(datos[contador]['fecha'])
control_analisis.append(contador+1)
contador_analisis=contador_analisis+1;
contador=contador+1;
else:
separacion=numero_inicial;
while (contador<cantidad):
if (contador<separacion):
dato_modelo.append(datos[contador]['valor'])
fecha_modelo.append(datos[contador]['fecha'])
control.append(contador+1)
else:
dato_analizada.append(datos[contador]['valor'])
fecha_analizada.append(datos[contador]['fecha'])
control_analisis.append(contador+1)
contador_analisis=contador_analisis+1;
contador=contador+1;
dato_modelo = np.array(dato_modelo, np.float64)
control = np.array([control], np.float64)
control=np.reshape(control,(separacion,1))
control_analisis = np.array([control_analisis], np.float64)
control_analisis=np.reshape(control_analisis,(cantidad-separacion,1))
fecha_analizada=np.array(control,np.dtype(str))
# create the linear regression model
regr = linear_model.LinearRegression()
# train the model
regr.fit(control,dato_modelo)
regresion=regr.predict(control_analisis)
arreglo=np.array(regresion).tolist()
php=json.dumps(arreglo)
print(separacion)
print (php)
print (json.dumps(dato_analizada))
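# Example invocation (added illustration; values are made up, the JSON array must carry
# the 'valor' and 'fecha' fields read above):
#   python 5.py 0 '[{"valor": 1.0, "fecha": "2016-01-01"}, {"valor": 2.1, "fecha": "2016-01-02"}, {"valor": 2.9, "fecha": "2016-01-03"}]'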
|
[
"juandaar@gmail.com"
] |
juandaar@gmail.com
|
a90e66c06f0382b100105aa3e048d08ef5a258e6
|
5752f1ddb8df26568777cc207d75a247e485af6b
|
/CODES/spot_hot_n_cold_omni_avg_maps.py
|
cf0f3cb84f6f3efb7c8cab3a6dd10c4afe75741b
|
[
"MIT"
] |
permissive
|
manu-script/recomb_landscape
|
240ba83195f336eb1841443a201b9871ddc29d42
|
cf01d7d0fc8a57a3ca2a105498a3acdaeb9ac6cc
|
refs/heads/master
| 2021-05-13T15:33:00.190293
| 2019-10-12T09:22:09
| 2019-10-12T09:22:09
| 116,767,671
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,588
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 22 09:55:00 2017
@author: manu
"""
import os
import numpy as np
import pandas as pd
import peakutils as pu
import warnings
from scipy.optimize import OptimizeWarning
from joblib import Parallel, delayed
from multiprocessing import cpu_count
from scipy.interpolate import UnivariateSpline, interp1d
warnings.simplefilter("error", OptimizeWarning)
warnings.simplefilter("error", RuntimeWarning)
POPS={"COS":["CEU","TSI","FIN","GBR","IBS","YRI","LWK","ASW","ACB","MKK","CHB","JPT","CHS","CDX","KHV","MXL","PUR","CLM","PEL","GIH"]}
fmaps={}
for pop in os.listdir("../OMNI_INTERPOLATED/"):
print("Reading interpolated map:",pop)
if pop not in fmaps.keys():
fmaps[pop]={}
for chrom in os.listdir("../OMNI_INTERPOLATED/"+pop):
gen_map=pd.read_table("../OMNI_INTERPOLATED/"+pop+"/"+chrom,header=0)
fmaps[pop][chrom]=interp1d(gen_map["pos"],gen_map["map"]) #Generating interpolation functions
def refine_hot(start,end,df):
data=df.loc[(df.pos >= start) & (df.pos<=end),:].copy() #slicing dataframe for block values
try:
params=pu.gaussian_fit(np.array(data.pos),np.array(data.rate),center_only=False) #estimate gaussian parameters
data["gauss"]=pu.gaussian(data.pos,*params) #fit a gaussian curve
try:
spline = UnivariateSpline(data.pos, data.gauss-(data.gauss.max()/2), s=0) #Full width half maximum
r1, r2 = spline.roots()
return (r1,r2)
except:
            d=data.loc[data.gauss>data.gauss.max()/2,:] #if FWHM fails, keep datapoints above half of the fitted gaussian's maximum
if not d.empty:
if not d.pos.iloc[0]==d.pos.iloc[-1]:
return (d.pos.iloc[0],d.pos.iloc[-1])
except:
d=data[data.rate>(data.rate.max()/2)] #if gaussian fitting fails, return datapoints > half of max rate
if not d.empty:
if not d.pos.iloc[0]==d.pos.iloc[-1]:
return (d.pos.iloc[0],d.pos.iloc[-1])
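# Note on the width estimate above (added comment): for a Gaussian with standard
# deviation sigma, the full width at half maximum is FWHM = 2*sqrt(2*ln 2)*sigma
# (about 2.355*sigma); the spline-root trick recovers that interval numerically from
# the fitted curve, and the fallbacks keep the datapoints above half maximum instead.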
def spotify(inp):
fname,h_out,c_out=inp
items=fname.split("/")
chrom=items[-1]
pop=items[-2]
print("Computing Hot & Cold spots:",fname)
df=pd.read_table(fname,header=0) #Reading map into dataframe
fmap=interp1d(df.pos,df.map)
df["dif"]=[0]+list(np.diff(df.rate)) #calculating consecutive differences in rate
df["shif"]=list(np.diff(df.rate))+[0]
h_ind=df[(df.dif<0)&(df.shif>0)].index+1
h_spots=[(h_ind[i],h_ind[i+1]) for i in range(len(h_ind)-1)] #computing hot blocks based on rise and fall of rates
hotspots=[]
for start,end in h_spots:
hot=refine_hot(df.loc[start].pos,df.loc[end].pos,df) #refining the peak
if hot:
hotspots.append([int(hot[0]),int(hot[1])])
hspots=pd.DataFrame(hotspots,columns=["start","end"]).round(6)
hspots["n_start"]=list(hspots.start[1:])+[0]
hspots["cont"]=hspots.n_start-hspots.end #finding adjacent continuos peaks
merged=[]
tmp=pd.DataFrame(columns=["start","end","n_start","cont"])
flag=False
for ind,row in hspots.iterrows():
if row.cont==0:
tmp=tmp.append(row)
flag=False
elif row.cont!=0 and not tmp.empty and not flag:
tmp=tmp.append(row)
merged.append((tmp.start.iloc[0],tmp.end.iloc[-1])) #Merging the adjacent peaks
flag=True
tmp=pd.DataFrame(columns=["start","end","n_start","cont"])
ind=hspots[hspots.cont==0].index
hspots=hspots[["start","end"]][~hspots.index.isin(set(list(ind)+list(ind+1)))].append(pd.DataFrame(merged,columns=["start","end"]),ignore_index=True).sort_values(by="start")
hspots["avg_rate"]=(fmap(hspots["end"]) - fmap(hspots["start"])) / ((hspots["end"]-hspots["start"]) / 1e6) #Computing average recombination rate
hspots["chr"]=chrom[:-4]
hspots["flag"]=hspots.avg_rate>1
for p in POPS[pop]:
f=fmaps[p][chrom]
hspots[p+"_rate"]=(f(hspots["end"]) - f(hspots["start"])) / ((hspots["end"]-hspots["start"]) / 1e6)
hspots.loc[(hspots["flag"]==False) & (hspots[p+"_rate"]>1),"flag"]=True #Interpolating and computing rate in each population
hspots=hspots[hspots.flag==True]
hspots.round(6).to_csv(h_out,sep="\t",columns=["chr","start","end"]+[i for i in hspots.columns if i.endswith("_rate")],index=False) #Writing hotspots
hspots["n_start"]=list(hspots.start[1:])+[0]
c_spots=hspots[["end","n_start"]][:-1] #Computing the locations of coldspots between hotspots
coldspots=[]
for start,end in zip(c_spots.end,c_spots.n_start):
rate=(fmap(end)-fmap(start))/((end-start)/1e6) #Calculating the average recombination rate in coldspots
coldspots.append([df.chr.iloc[0],int(start),int(end),rate])
cspots=pd.DataFrame(coldspots,columns=["chr","start","end","avg_rate"])
cspots["chr"]=chrom[:-4]
cspots["flag"]=cspots.avg_rate<=1
for p in POPS[pop]:
f=fmaps[p][chrom]
cspots[p+"_rate"]=(f(cspots["end"]) - f(cspots["start"])) / ((cspots["end"]-cspots["start"]) / 1e6) #computing the recombination rate in each population
cspots.loc[(cspots["flag"]==False) & (cspots[p+"_rate"]<=1),"flag"]=True
cspots=cspots[cspots.flag==True]
cspots.round(6).to_csv(c_out,sep="\t",columns=["chr","start","end"]+[i for i in cspots.columns if i.endswith("_rate")],index=False) #Writing coldspots
if not os.path.exists("../HOTSPOTS"):
os.makedirs("../HOTSPOTS")
if not os.path.exists("../COLDSPOTS"):
os.makedirs("../COLDSPOTS")
for sup in POPS.keys():
if not os.path.exists("../HOTSPOTS/"+sup):
os.makedirs("../HOTSPOTS/"+sup)
for sup in POPS.keys():
if not os.path.exists("../COLDSPOTS/"+sup):
os.makedirs("../COLDSPOTS/"+sup)
inp=[("../OMNI_POP_AVG/"+pop+"/"+chrom,"../HOTSPOTS/"+pop+"/"+chrom,"../COLDSPOTS/"+pop+"/"+chrom) for pop in os.listdir("../OMNI_POP_AVG/") for chrom in os.listdir("../OMNI_POP_AVG/"+pop)]
Parallel(n_jobs=cpu_count(), verbose=25)(delayed(spotify)(i)for i in inp)
|
[
"manuvaivasvata7@gmail.com"
] |
manuvaivasvata7@gmail.com
|
fdb01d318303a533b9e7b88a3aa633b89b8ea312
|
629606ef6e0ce252f74729ac60f57ca8805c3c78
|
/hw_002_LargeData/hw_001_matplotlib/venv/lib/python3.7/site-packages/matplotlib/cbook/deprecation.py
|
3f8639e631b3b73b837fda3c724ad481f739df3d
|
[] |
no_license
|
LeeXyan/lxgzhw006
|
cc31024874725f60b766c9d5d24c2dafc66b8de3
|
621a73544262df7e104806579242deeaa8dbe2c2
|
refs/heads/master
| 2021-10-10T17:41:52.381843
| 2019-01-15T00:25:08
| 2019-01-15T00:25:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,416
|
py
|
import functools
import textwrap
import warnings
class MatplotlibDeprecationWarning(UserWarning):
"""
A class for issuing deprecation warnings for Matplotlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
mplDeprecation = MatplotlibDeprecationWarning
"""mplDeprecation is deprecated. Use MatplotlibDeprecationWarning instead."""
def _generate_deprecation_message(
since, message='', name='', alternative='', pending=False,
obj_type='attribute', addendum='', *, removal=''):
if removal == "":
removal = {"2.2": "in 3.1", "3.0": "in 3.2"}.get(
since, "two minor releases later")
elif removal:
if pending:
raise ValueError(
"A pending deprecation cannot have a scheduled removal")
removal = "in {}".format(removal)
if not message:
message = (
"The %(name)s %(obj_type)s"
+ (" will be deprecated in a future version"
if pending else
(" was deprecated in Matplotlib %(since)s"
+ (" and will be removed %(removal)s"
if removal else
"")))
+ "."
+ (" Use %(alternative)s instead." if alternative else "")
+ (" %(addendum)s" if addendum else ""))
return message % dict(
func=name, name=name, obj_type=obj_type, since=since, removal=removal,
alternative=alternative, addendum=addendum)
def warn_deprecated(
since, message='', name='', alternative='', pending=False,
obj_type='attribute', addendum='', *, removal=''):
"""
Used to display deprecation in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative API that the user may use in place of the deprecated
API. The deprecation warning will tell the user about this alternative
if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning. Cannot be used together with *removal*.
removal : str, optional
The expected removal version. With the default (an empty string), a
removal version is automatically computed from *since*. Set to other
Falsy values to not schedule a removal date. Cannot be used together
with *pending*.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
# To warn of the deprecation of "hw_001_matplotlib.name_of_module"
warn_deprecated('1.4.0', name='hw_001_matplotlib.name_of_module',
obj_type='module')
"""
message = '\n' + _generate_deprecation_message(
since, message, name, alternative, pending, obj_type, addendum,
removal=removal)
category = (PendingDeprecationWarning if pending
else MatplotlibDeprecationWarning)
warnings.warn(message, category, stacklevel=2)
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type=None, addendum='', *, removal=''):
"""
Decorator to mark a function or a class as deprecated.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the object,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
object.
name : str, optional
The name of the deprecated object; if not provided the name
is automatically determined from the passed in object,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative API that the user may use in place of the deprecated
API. The deprecation warning will tell the user about this alternative
if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning. Cannot be used together with *removal*.
removal : str, optional
The expected removal version. With the default (an empty string), a
removal version is automatically computed from *since*. Set to other
Falsy values to not schedule a removal date. Cannot be used together
with *pending*.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
if obj_type is not None:
warn_deprecated(
"3.0", "Passing 'obj_type' to the 'deprecated' decorator has no "
"effect, and is deprecated since Matplotlib %(since)s; support "
"for it will be removed %(removal)s.")
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, addendum=addendum):
if not name:
name = obj.__name__
if isinstance(obj, type):
obj_type = "class"
old_doc = obj.__doc__
func = obj.__init__
def finalize(wrapper, new_doc):
obj.__doc__ = new_doc
obj.__init__ = wrapper
return obj
elif isinstance(obj, property):
obj_type = "attribute"
func = None
name = name or obj.fget.__name__
old_doc = obj.__doc__
class _deprecated_property(property):
def __get__(self, instance, owner):
if instance is not None:
from . import _warn_external
_warn_external(message, category)
return super().__get__(instance, owner)
def __set__(self, instance, value):
if instance is not None:
from . import _warn_external
_warn_external(message, category)
return super().__set__(instance, value)
def __delete__(self, instance):
if instance is not None:
from . import _warn_external
_warn_external(message, category)
return super().__delete__(instance)
def finalize(_, new_doc):
return _deprecated_property(
fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc)
else:
obj_type = "function"
if isinstance(obj, classmethod):
func = obj.__func__
old_doc = func.__doc__
def finalize(wrapper, new_doc):
wrapper = functools.wraps(func)(wrapper)
wrapper.__doc__ = new_doc
return classmethod(wrapper)
else:
func = obj
old_doc = func.__doc__
def finalize(wrapper, new_doc):
wrapper = functools.wraps(func)(wrapper)
wrapper.__doc__ = new_doc
return wrapper
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type, addendum,
removal=removal)
category = (PendingDeprecationWarning if pending
else MatplotlibDeprecationWarning)
def wrapper(*args, **kwargs):
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
old_doc = textwrap.dedent(old_doc or '').strip('\n')
message = message.strip()
new_doc = (('\n.. deprecated:: %(since)s'
'\n %(message)s\n\n' %
{'since': since, 'message': message}) + old_doc)
if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return finalize(wrapper, new_doc)
return deprecate
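# Usage note (added illustration, not part of the module): because these warnings are
# UserWarning subclasses they are shown by default; downstream code can still silence
# them with the standard warnings machinery, e.g.
#   import warnings
#   warnings.simplefilter("ignore", MatplotlibDeprecationWarning)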
|
[
"1156956636@qq.com"
] |
1156956636@qq.com
|