Dataset columns (name: dtype, value stats):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2 to 616)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 69)
- license_type: string (2 classes)
- repo_name: string (length 5 to 118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4 to 63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k to 686M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2 to 10.3M)
- extension: string (246 classes)
- content: string (length 2 to 10.3M)
- authors: list (length 1)
- author_id: string (length 0 to 212)
3a40a5e4a05246938f3827a9d2f4804b32cb1e50
|
eab0cc7ca62bd97cb75a13e4e1ec24391d3253c6
|
/NAMS/download.py
|
0d25eadfa833f8e99a42f6de2758afcd23242fe4
|
[] |
no_license
|
MiCurry/easy_plots
|
f0ee07d11998d5661e15f5c32daab8cfcfc1f5ad
|
e03e9906e43c49cc21b02293dcc773defffd5e9d
|
refs/heads/master
| 2021-01-19T13:42:02.378373
| 2017-05-31T17:59:13
| 2017-05-31T17:59:13
| 88,103,099
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
import os
import sys
import urllib.request
from datetime import datetime, timedelta
# TODO: Change arguments to a float??
""" download(north, south, east, west, height)
north - north latitude value (string)
south - south latitude value (string)
east - east longitude value (string)
west - west longitude value (string)
height - height-above-ground (meters)
returns filename as WIND_date_height.nc
"""
def download(north, south, east, west, height):
#list of file ids created
new_file_ids = []
current_time = datetime.now().date()
end_time = datetime.now().date()+timedelta(days=1)
begin = str(current_time) + 'T00%3A00%3A00Z'
end = str(end_time) + 'T00%3A00%3A00Z'
print "Downloading...."
url = 'http://thredds.ucar.edu/thredds/ncss/grib/NCEP/NAM/CONUS_12km/conduit/Best?var=u-component_of_wind_height_above_ground&var=v-component_of_wind_height_above_ground&north='+north+'&west='+west+'&east='+east+'&south='+south+'&horizStride=1&time_start='+begin+'&time_end='+end+'&timeStride=1&addLatLon=true&accept=netcdf'
fileName = "/home/data/{0}-{1}.nc".format("WIND", current_time)
    urllib.request.urlretrieve(url, fileName)
print "Downloaded File: ", fileName
    return fileName  # the docstring promises the generated filename
north = "46.5"; south = "41.7"; east = "-116"; west = "-125"; height = "0";
download(north, south, east, west, height)
|
[
"currymiles@gmail.com"
] |
currymiles@gmail.com
|
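The `download` helper above splices its query string together by hand, which is why the timestamps carry pre-escaped `%3A` colons. A minimal sketch of the same request URL built with `urllib.parse.urlencode` instead (parameter names copied from the URL in the snippet; the function name here is ours, not the file's):

from datetime import datetime, timedelta
from urllib.parse import urlencode

def build_ncss_url(north, south, east, west):
    # Same endpoint and parameters as the snippet; urlencode percent-escapes
    # the timestamp colons, so no hand-written %3A is needed.
    base = "http://thredds.ucar.edu/thredds/ncss/grib/NCEP/NAM/CONUS_12km/conduit/Best"
    begin = datetime.now().date()
    end = begin + timedelta(days=1)
    params = [
        ("var", "u-component_of_wind_height_above_ground"),
        ("var", "v-component_of_wind_height_above_ground"),
        ("north", north), ("west", west), ("east", east), ("south", south),
        ("horizStride", "1"),
        ("time_start", f"{begin}T00:00:00Z"),
        ("time_end", f"{end}T00:00:00Z"),
        ("timeStride", "1"), ("addLatLon", "true"), ("accept", "netcdf"),
    ]
    return base + "?" + urlencode(params)

print(build_ncss_url("46.5", "41.7", "-116", "-125"))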
eb3b66b1e699ae4e0f838ee32d7a5106b449befc
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/120_design_patterns/016_iterator/_exercises/templates/6-python-design-patterns-building-more-m6-exercise-files/BeforeIterator/__main1__.py
|
2eb5be6885205db5a459393ddd9c9113541e58c8
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 347
|
py
|
from testdata import employees
def main():
print("Employees:")
for i in range(1, employees.headcount+1):
employee = employees.get_employee(i)
print('Employee Id: {}; Name: {}; Date of Hire: {}'
.format(employee.empid, employee.name, employee.hiredate)
)
if __name__ == '__main__':
main()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
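The snippet above is the "before" half of an iterator-pattern exercise: the caller walks indices 1..headcount and fetches each record by id. A hedged sketch of the "after" version, with hypothetical Employee/Employees stand-ins for the testdata module, exposes the collection through the iterator protocol instead:

from dataclasses import dataclass

@dataclass
class Employee:
    empid: int
    name: str
    hiredate: str

class Employees:
    def __init__(self, records):
        self._records = list(records)

    def __iter__(self):          # iterator protocol entry point
        return iter(self._records)

employees = Employees([Employee(1, "Ada", "2020-01-06")])
for employee in employees:       # no index bookkeeping needed
    print(employee.empid, employee.name, employee.hiredate)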
0d94bb112cca7dde3d728ef42f2d1cd0bb36b5c2
|
eee19e7aace0ee38039a6d829b4511e3a981334a
|
/AISProject/ais_conv.py
|
4200ec8ccf95240b550f78a3280100fb5df7c2f4
|
[
"MIT"
] |
permissive
|
lzz5235/Code-Segment
|
fb47c67c3a019c6ed30b80ab70da31276558ab58
|
e10a172972ea75151f77929dfe105729600c854e
|
refs/heads/master
| 2020-12-23T03:24:50.918907
| 2019-11-17T15:59:30
| 2019-11-17T15:59:30
| 41,672,861
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,300
|
py
|
import os
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import numpy as np
NationDict = {307: 'Aruba', 401: 'Afghanistan', 601: 'South Africa (Rep. of)', 603: 'Angola (Rep. of)', 301: 'Anguilla',
201: 'Albania (Rep. of)', 605: 'Algeria (People\'s Democratic Rep. of)', 303: 'Alaska (State of)',
607: 'Saint Paul and Amsterdam Islands', 202: 'Andorra (Principality of)', 701: 'Argentine Rep.',
216: 'Armenia (Rep. of)', 403: 'Saudi Arabia (Kingdom of)', 608: 'Ascension Island',
304: 'Antigua and Barbuda',
305: 'Antigua and Barbuda', 306: 'Netherlands Caribbean', 503: 'Australia', 203: 'Austria',
423: 'Azerbaijani Rep.',
204: 'Azores', 710: 'Brazil (Federative Rep. of)', 308: 'Bahamas (Commonwealth of the)', 309: 'Bahamas ('
'Commonwealth of the)',
311: 'Bahamas (Commonwealth of the)', 609: 'Burundi (Rep. of)', 205: 'Belgium', 610: 'Benin (Rep. of)',
310: 'Bermuda', 633: 'Burkina Faso', 405: 'Bangladesh (People\'s Rep. of)', 408: 'Bahrain (Kingdom of)',
478: 'Bosnia and Herzegovina', 206: 'Belarus (Rep. of)', 312: 'Belize',
720: 'Bolivia (Plurinational State of)',
611: 'Botswana (Rep. of)', 314: 'Barbados', 506: 'Myanmar (Union of)', 508: 'Brunei Darussalam',
410: 'Bhutan ('
'Kingdom of)',
207: 'Bulgaria (Rep. of)', 612: 'Central African Rep', 316: 'Canada', 514: 'Cambodia (Kingdom of)',
515: 'Cambodia '
'(Kingdom of)',
725: 'Chile', 412: 'China (People\'s Rep. of)', 413: 'China (People\'s Rep. of)',
414: 'China (People\'s Rep. of)',
516: 'Christmas Island (Indian Ocean)', 518: 'Cook Islands', 730: 'Colombia (Rep. of)', 417: 'Sri Lanka ('
'Democratic '
'Socialist Rep. of)',
613: 'Cameroon (Rep. of)', 676: 'Democratic Rep. of the Congo', 615: 'Congo (Rep. of the)',
616: 'Comoros (Union '
'of the)',
617: 'Cape Verde (Rep. of)', 618: 'Crozet Archipelago', 619: 'Cote d\'Ivoire (Rep. of)',
321: 'Costa Rica',
323: 'Cuba', 208: 'Vatican City State', 319: 'Cayman Islands', 209: 'Cyprus (Rep. of)',
210: 'Cyprus (Rep. of)',
212: 'Cyprus (Rep. of)', 270: 'Czech Rep.', 211: 'Germany (Federal Rep. of)',
218: 'Germany (Federal Rep. of)',
621: 'Djibouti (Rep. of)', 325: 'Dominica (Commonwealth of)', 219: 'Denmark', 220: 'Denmark',
327: 'Dominican Rep.',
224: 'Spain', 225: 'Spain', 622: 'Egypt (Arab Rep. of)', 735: 'Ecuador', 625: 'Eritrea',
276: 'Estonia (Rep. of)',
624: 'Ethiopia (Federal Democratic Rep. of)', 226: 'France', 227: 'France', 228: 'France', 230: 'Finland',
520: 'Fiji (Rep. of)', 740: 'Falkland Islands (Malvinas)', 231: 'Faroe Islands',
510: 'Micronesia (Federated '
'States of)',
232: 'United Kingdom of Great Britain and Northern Ireland', 233: 'United Kingdom of Great Britain and '
'Northern Ireland',
234: 'United Kingdom of Great Britain and Northern Ireland', 235: 'United Kingdom of Great Britain and '
'Northern Ireland',
626: 'Gabonese Rep.', 213: 'Georgia', 627: 'Ghana', 236: 'Gibraltar', 329: 'Gambia (Rep. of the)',
629: 'Guadeloupe ('
'French Department of)',
630: 'Guinea-Bissau (Rep. of)', 631: 'Equatorial Guinea (Rep. of)', 237: 'Greece', 239: 'Greece',
240: 'Greece',
241: 'Greece', 330: 'Grenada', 331: 'Greenland', 332: 'Guatemala (Rep. of)',
745: 'Guiana (French Department of)',
632: 'Guinea (Rep. of)', 750: 'Guyana', 477: 'Hong Kong (Special Administrative Region of China)',
334: 'Honduras (Rep. of)',
243: 'Hungary', 244: 'Netherlands (Kingdom of the)', 245: 'Netherlands (Kingdom of the)',
246: 'Netherlands ('
'Kingdom of the)',
238: 'Croatia (Rep. of)', 336: 'Haiti (Rep. of)', 247: 'Italy', 523: 'Cocos (Keeling) Islands',
419: 'India (Rep. '
'of)',
525: 'Indonesia (Rep. of)', 250: 'Ireland', 422: 'Iran (Islamic Rep. of)', 425: 'Iraq (Rep. of)',
251: 'Iceland',
428: 'Israel (State of)', 431: 'Japan', 432: 'Japan', 339: 'Jamaica',
438: 'Jordan (Hashemite Kingdom of)',
436: 'Kazakhstan (Rep. of)', 634: 'Kenya (Rep. of)', 635: 'Kerguelen Islands', 451: 'Kyrgyz Rep.',
529: 'Kiribati ('
'Rep. of)',
341: 'Saint Kitts and Nevis (Federation of)', 440: 'Korea (Rep. of)', 441: 'Korea (Rep. of)',
445: 'Democratic '
'People\'s '
'Rep. of Korea',
447: 'Kuwait (State of)', 531: 'Lao People\'s Democratic Rep.', 450: 'Lebanon', 636: 'Liberia (Rep. of)',
637: 'Liberia (Rep. of)', 642: 'Libya', 343: 'Saint Lucia', 252: 'Liechtenstein (Principality of)',
644: 'Lesotho ('
'Kingdom of)',
277: 'Lithuania (Rep. of)', 253: 'Luxembourg', 275: 'Latvia (Rep. of)',
453: 'Macao (Special Administrative '
'Region of China)',
645: 'Mauritius (Rep. of)', 254: 'Monaco (Principality of)', 214: 'Moldova (Rep. of)',
647: 'Madagascar (Rep. '
'of)', 255: 'Madeira',
345: 'Mexico', 538: 'Marshall Islands (Rep. of the)', 274: 'The Former Yugoslav Rep. of Macedonia',
533: 'Malaysia',
455: 'Maldives (Rep. of)', 649: 'Mali (Rep. of)', 215: 'Malta', 229: 'Malta', 248: 'Malta', 249: 'Malta',
256: 'Malta',
262: 'Montenegro', 457: 'Mongolia', 650: 'Mozambique (Rep. of)',
536: 'Northern Mariana Islands (Commonwealth of '
'the)',
                  242: 'Morocco (Kingdom of)', 347: 'Martinique (French Department of)', 348: 'Montserrat',  # ITU MID 348; 248 belongs to Malta above
654: 'Mauritania ('
'Islamic Rep. of)',
655: 'Malawi', 350: 'Nicaragua', 540: 'New Caledonia', 656: 'Niger (Rep. of the)',
657: 'Nigeria (Federal Rep. of)',
542: 'Niue', 659: 'Namibia (Rep. of)', 257: 'Norway', 258: 'Norway', 259: 'Norway',
459: 'Nepal (Federal Democratic '
'Rep. of)',
544: 'Nauru (Rep. of)', 512: 'New Zealand', 546: 'French Polynesia', 461: 'Oman (Sultanate of)',
463: 'Pakistan ('
'Islamic Rep. of)',
548: 'Philippines (Rep. of the)', 511: 'Palau (Rep. of)', 553: 'Papua New Guinea',
351: 'Panama (Rep. of)',
352: 'Panama (Rep. of)', 353: 'Panama (Rep. of)', 354: 'Panama (Rep. of)', 355: 'Panama (Rep. of)',
356: 'Panama (Rep. of)',
357: 'Panama (Rep. of)', 370: 'Panama (Rep. of)', 371: 'Panama (Rep. of)', 372: 'Panama (Rep. of)',
373: 'Panama ('
'Rep. of)',
261: 'Poland (Rep. of)', 263: 'Portugal', 755: 'Paraguay (Rep. of)', 760: 'Peru', 443: 'Palestine',
555: 'Pitcairn '
'Island',
358: 'Puerto Rico', 466: 'Qatar (State of)', 660: 'Reunion (French Department of)', 264: 'Romania',
661: 'Rwanda ('
'Rep. of)',
273: 'Russian Federation', 265: 'Sweden', 266: 'Sweden', 662: 'Sudan (Rep. of the)',
663: 'Senegal (Rep. of)',
664: 'Seychelles (Rep. of)', 665: 'Saint Helena', 557: 'Solomon Islands', 359: 'El Salvador (Rep. of)',
559: 'American Samoa', 561: 'Samoa (Independent State of)', 268: 'San Marino (Rep. of)',
563: 'Singapore (Rep. of)',
564: 'Singapore (Rep. of)', 565: 'Singapore (Rep. of)', 566: 'Singapore (Rep. of)',
666: 'Somali Democratic Rep.',
361: 'Saint Pierre and Miquelon (Territorial Collectivity of)', 279: 'Serbia (Rep. of)',
667: 'Sierra Leone',
668: 'Sao Tome and Principe (Democratic Rep. of)', 269: 'Switzerland (Confederation of)',
765: 'Suriname (Rep. '
'of)',
267: 'Slovak Rep.', 278: 'Slovenia (Rep. of)', 669: 'Swaziland (Kingdom of)', 468: 'Syrian Arab Rep.',
364: 'Turks '
'and Caicos Islands',
670: 'Chad (Rep. of)', 671: 'Togolese Rep.', 567: 'Thailand', 472: 'Tajikistan (Rep. of)',
434: 'Turkmenistan',
570: 'Tonga (Kingdom of)', 362: 'Trinidad and Tobago', 672: 'Tunisia', 271: 'Turkey', 572: 'Tuvalu',
674: 'Tanzania ('
'United Rep. of)',
677: 'Tanzania (United Rep. of)', 470: 'United Arab Emirates', 675: 'Uganda (Rep. of)', 272: 'Ukraine',
770: 'Uruguay (Eastern Rep. of)', 338: 'United States of America', 366: 'United States of America',
367: 'United '
'States of America',
368: 'United States of America', 369: 'United States of America', 437: 'Uzbekistan (Rep. of)',
375: 'Saint '
'Vincent and the Grenadines',
376: 'Saint Vincent and the Grenadines', 377: 'Saint Vincent and the Grenadines',
775: 'Venezuela (Bolivarian '
'Rep. of)',
379: 'United States Virgin Islands', 378: 'British Virgin Islands', 574: 'Viet Nam (Socialist Rep. of)',
576: 'Vanuatu (Rep. of)', 577: 'Vanuatu (Rep. of)', 578: 'Wallis and Futuna Islands',
416: 'Taiwan (Province of '
'China)',
501: 'Adelie Land', 473: 'Yemen (Rep. of)', 475: 'Yemen (Rep. of)', 678: 'Zambia (Rep. of)',
679: 'Zimbabwe (Rep. of)'
}
ShipTypeDict = {5:'Navy',6:'Carrier',7:'Cargo',8:'Tanker'}
def getNationFlag(MMSI):
    num = int(MMSI)
    num //= 1000000  # integer division: the first 3 of 9 MMSI digits are the country MID
if num not in NationDict:
return 'unknown'
return NationDict[num]
def getShipType(type):
num = int(type)
    num //= 10  # integer division: first digit of the AIS ship type
if num not in ShipTypeDict:
return 'unknown'
return ShipTypeDict[num]
def get_data_DY(input_path, all_MMSI):
    print(input_path)
if 0 == int(os.path.getsize(input_path)):
return
et = ET.parse(input_path)
element = et.getroot()
element_Ships = element.findall('Ship')
for ship in element_Ships:
        mmsi = int(ship.find("MMSI").text)
DynamicInfo = ship.find("DynamicInfo")
LastTime = DynamicInfo.find("LastTime").text
Latitude = float(DynamicInfo.find("Latitude").text)
Longitude = float(DynamicInfo.find("Longitude").text)
Speed = float(DynamicInfo.find("Speed").text)
course = float(DynamicInfo.find("course").text)
HeadCourse = float(DynamicInfo.find("HeadCourse").text)
AngularRate = float(DynamicInfo.find("AngularRate").text)
NaviStatus = float(DynamicInfo.find("NaviStatus").text)
ShipData = {'MMSI':mmsi, 'DynamicInfo':[]}
ShipData['DynamicInfo'].append({'LastTime':str(LastTime),'Latitude':Latitude,'Longitude':Longitude,
'Speed':Speed,
'course':course,'HeadCourse':HeadCourse,'AngularRate':AngularRate,
'NaviStatus':NaviStatus})
if mmsi < 100000000:
continue
write_data_DY(ShipData)
# all_MMSI.append(ShipData)
def get_data_ST(input_path,all_MMSI):
    print(input_path)
if 0 == int(os.path.getsize(input_path)):
return
et = ET.parse(input_path)
element = et.getroot()
element_Ships = element.findall('Ship')
for ship in element_Ships:
        mmsi = int(ship.find("MMSI").text)
StaticInfo = ship.find("StaticInfo")
LastTime = StaticInfo.find("LastTime").text
ShipType = int(StaticInfo.find("ShipType").text)
Length = float(StaticInfo.find("Length").text)
Width = float(StaticInfo.find("Width").text)
Left = float(StaticInfo.find("Left").text)
Trail = float(StaticInfo.find("Trail").text)
Draught = float(StaticInfo.find("Draught").text)
        IMO = int(StaticInfo.find("IMO").text)
CallSign = StaticInfo.find("CallSign").text
ETA = StaticInfo.find("ETA").text
Name = StaticInfo.find("Name").text
Dest = StaticInfo.find("Dest").text
ShipData = {'MMSI': mmsi, 'StaticInfo': []}
ShipData['StaticInfo'].append({'LastTime': str(LastTime), 'ShipType': ShipType, 'Length': Length,
'Width': Width,
'Left': Left, 'Trail': Trail, 'Draught': Draught,
'IMO': IMO,
'CallSign': str(CallSign),'ETA':str(ETA),'Name':str(Name),'Dest':str(Dest)})
if mmsi < 100000000:
continue
write_data_ST(ShipData)
# all_MMSI.append(ShipData)
def write_data_DY(ShipData):
MMSI = ShipData['MMSI']
string = str(MMSI) + ',' + ShipData['DynamicInfo'][0]['LastTime'] + ',' + str(ShipData['DynamicInfo'][0]['Latitude'])\
+ ',' + str(ShipData['DynamicInfo'][0]['Longitude']) + ',' + str(ShipData['DynamicInfo'][0]['Speed']) + \
',' + str(ShipData['DynamicInfo'][0]['course']) + ',' + str(ShipData['DynamicInfo'][0]['NaviStatus'])
with open('./ShipLineTest/ais_dy.txt', 'a') as f: # DY
f.write(string + '\n')
def write_data_ST(ShipData):
MMSI = ShipData['MMSI']
tmp = str(ShipData['StaticInfo'][0]['Length']) + ' x ' + str(ShipData['StaticInfo'][0]['Width']) + ' m|'
string = str(MMSI) + '|' + ShipData['StaticInfo'][0]['Name'] + '|' + getNationFlag(MMSI) + '|' + getShipType(
ShipData['StaticInfo'][0]['ShipType']) + '|N/A|N/A|N/A|' + str(MMSI) + '|' + ShipData['StaticInfo'][0][
'CallSign'] + '|' + tmp + str(ShipData['StaticInfo'][0]['Draught']) + ' m|'+ str(ShipData['StaticInfo'][0][
'IMO'])
with open('./ShipLineTest/ais_st.txt', 'a') as f: # ST
f.write(string + '\n')
def Classfication_By_Nation(input_path,Nations):
import shutil
    print(input_path)
if 0 == int(os.path.getsize(input_path)):
return
et = ET.parse(input_path)
element = et.getroot()
element_Ships = element.findall('Ship')
for ship in element_Ships:
        mmsi = int(ship.find("MMSI").text)
if getNationFlag(mmsi) != Nations:
break
shutil.copyfile(input_path, './Japan_Tanker/' + os.path.split(input_path)[1])
def Classfication_By_Draught(input_path,draught):
import shutil
    print(input_path)
if 0 == int(os.path.getsize(input_path)):
return
et = ET.parse(input_path)
element = et.getroot()
element_Ships = element.findall('Ship')
ship = element_Ships[0]
    mmsi = int(ship.find("MMSI").text)
StaticInfo = ship.find("StaticInfo")
Draught = float(StaticInfo.find("Draught").text)
    if int(Draught) in draught:
draught[int(Draught)] += 1
else:
draught[int(Draught)] = 1
def Classfication_By_Weight(input_path,weight):
    print(input_path)
if 0 == int(os.path.getsize(input_path)):
return
et = ET.parse(input_path)
element = et.getroot()
element_Ships = element.findall('Ship')
ship = element_Ships[0]
    mmsi = int(ship.find("MMSI").text)
StaticInfo = ship.find("StaticInfo")
Draught = float(StaticInfo.find("Draught").text)
    if int(Draught) in weight:
weight[int(Draught)] += 1
else:
weight[int(Draught)] = 1
def Classfication_By_WS(input_path):
    print(input_path)
if 0 == int(os.path.getsize(input_path)):
return
et = ET.parse(input_path)
element = et.getroot()
element_Ships = element.findall('Ship')
ship = element_Ships[0]
StaticInfo = ship.find("StaticInfo")
Length = float(StaticInfo.find("Length").text)
Width = float(StaticInfo.find("Width").text)
Draught = float(StaticInfo.find("Draught").text)
slist = []
for ship in element_Ships:
DynamicInfo = ship.find("DynamicInfo")
Speed = float(DynamicInfo.find("Speed").text)
if int(Speed) != 0 :
slist.append(Speed)
slist = np.array(slist)
Y = np.median(slist)
X = int(Length*Width*Draught)
return X,Y
if __name__ == "__main__":
all_MMSI_DY=[]
all_MMSI_ST=[]
data_paths_dy = []
data_paths_st = []
draught = {}
    from matplotlib import pyplot as plt
    from matplotlib.ticker import MultipleLocator
speed_list = []
draught_list = []
for file in [os.path.join('/media/xxxx/xx/AISProject/Japan_Tanker', s) for s in os.listdir(
'/media/xxxx/xx/AISProject/Japan_Tanker')]:
X,Y = Classfication_By_WS(file)
speed_list.append(X)
draught_list.append(Y)
speed_list = np.array(speed_list)
draught_list = np.array(draught_list)
plt.scatter(speed_list,draught_list,25,cmap=plt.cm.jet,marker='o',edgecolors='k',zorder=10,alpha=0.7)
plt.xticks(np.arange(0,400000,20000))
plt.yticks(np.arange(6,25,2))
plt.xlabel("Ship Tanker Tonnage")
plt.ylabel("Speed")
plt.title("Japanese Ship Tanker Tonnage/Speed Scatter")
plt.grid()
plt.show()
|
[
"lzz5235@vip.qq.com"
] |
lzz5235@vip.qq.com
|
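For reference, `getNationFlag` works because the first three digits of a nine-digit MMSI are the ITU Maritime Identification Digits (MID), which the integer division by 1,000,000 extracts, and `getShipType` keys on the first digit of the two-digit AIS ship type. A short usage sketch (the example MMSI values are made up):

print(getNationFlag("412123456"))  # MID 412 -> "China (People's Rep. of)"
print(getShipType("70"))           # 70 // 10 = 7 -> "Cargo"
print(getNationFlag("999999999"))  # MID 999 is not in NationDict -> "unknown"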
6d69b85e4acd9fec219dae89e6dc3f1d19170dbe
|
4aadd2bca50408f14414de12caf5ce01e4dfbcf0
|
/main_app/migrations/0012_routine_activity_name.py
|
99d81918261781388b087123ebf373d4ac115743
|
[] |
no_license
|
daronefrancis/Tiff-and-The-Lads
|
3868161394faaaadde491402b93ae3162ba01bdd
|
1f5fd914ea8ccef84fccf858bcc634cd706f02bb
|
refs/heads/master
| 2022-11-17T10:18:36.270781
| 2020-07-10T14:15:26
| 2020-07-10T14:15:26
| 277,565,184
| 0
| 0
| null | 2020-07-06T14:31:33
| 2020-07-06T14:31:32
| null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# Generated by Django 3.0.6 on 2020-07-09 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0011_auto_20200709_1635'),
]
operations = [
migrations.AddField(
model_name='routine',
name='activity_name',
field=models.CharField(default='temp', max_length=50),
preserve_default=False,
),
]
|
[
"daronefrancis@gmail.com"
] |
daronefrancis@gmail.com
|
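After this migration runs, the Routine model carries the new field; a sketch of the resulting model fragment (the rest of the model is not shown in the snippet, and since preserve_default=False the 'temp' value was only a one-off fill for existing rows):

from django.db import models

class Routine(models.Model):
    activity_name = models.CharField(max_length=50)  # added by migration 0012

Applying it is the usual `python manage.py migrate main_app`.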
a74b2822c5671132bdfc8acb516eb7d19f9613ca
|
cc52ae6cf0fd6b66de5b2e36ec6d755749b96850
|
/3_pattern1.py
|
6b91dea9b372992dee5375ab7fc51b0cf7bd4ae8
|
[] |
no_license
|
edagotti689/PYTHON-7-REGULAR-EXPRESSIONS
|
5a48a3e381d7be476741c87c253956f0b65697be
|
f935b6e9c8f1d4ce49c8bc0923b072c6d0680d6e
|
refs/heads/master
| 2020-11-24T01:13:26.544872
| 2019-12-13T18:24:06
| 2019-12-13T18:24:06
| 227,898,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
import re
# Matches 1 or more occurrences of the preceding expression.
# name = 'sriram'
# mo = re.match(r'\w+', name)
# print(mo.group())
# Matches 0 or more occurrences of the preceding expression.
name = 'sriram123'
mo = re.match(r'\d*', name)
print(mo.group())
# Matches 0 or 1 occurrence of the preceding expression.
# name = 'sriram123'
# mo = re.match(r'\d?', name)
# print(mo.group())
|
[
"noreply@github.com"
] |
edagotti689.noreply@github.com
|
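Since two of the three quantifier experiments in the file are commented out, here is the output of all three against the same inputs, with raw-string patterns:

import re

print(re.match(r"\w+", "sriram").group())     # 'sriram' - one or more word characters
print(re.match(r"\d*", "sriram123").group())  # ''       - zero digits at the start still match
print(re.match(r"\d?", "sriram123").group())  # ''       - zero or one digit, here zero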
51d195e1ea2a4b102ddff7f8544416bdc5bf2851
|
57c4e419d696621fad7d0ac28e78743ea1c0296e
|
/.ipynb_checkpoints/app-checkpoint.py
|
5fe60243ea7af364e294cd2f7b260cb12159ecb9
|
[] |
no_license
|
geadalfa/depokSehat
|
18c996fa9c061663db31d07a1cb99e74d5b5b30f
|
c6f11fdddbcbd7c52be2eb08c8798ccfc4b11225
|
refs/heads/main
| 2023-07-08T12:45:33.999830
| 2021-08-12T12:15:14
| 2021-08-12T12:15:14
| 346,625,734
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,046
|
py
|
from flask import Flask,render_template,url_for,request, redirect, Response
import numpy as np
#import pickle
import pickle5 as pickle
import pandas as pd
#import tensorflow as tf
from tensorflow.keras.models import load_model
#from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
#from tensorflow.keras.models import model_from_json
from numpy import array
app=Flask(__name__)
#model = tf.create_model()
model = load_model("lstmModel.h5")
model.load_weights("geaNlp_weight_model.h5")
# with open(path_to_protocol5, "rb") as fh:
# data = pickle.load(fh)
with open('tokenizer2.pickle', 'rb') as handle:
tokenizer = pickle.load(handle)
@app.route('/')
def table():
df = pd.read_csv('hasil_label.csv', index_col=0)
positif1 = df.loc[df['nilai'] > 15].head()
negatif1 = df.loc[df['nilai'] < -25].head()
netral1 = df.loc[df['nilai'] == -1].head()
headings = ("Tweet", "Nilai", "Sentimen")
tuples1 = [tuple(x) for x in positif1.values]
tuples2 = [tuple(x) for x in negatif1.values]
tuples3 = [tuple(x) for x in netral1.values]
senti_count = df['sentimen'].value_counts()
    senti_count2 = tuple(zip(senti_count, senti_count.index))
senti_count2 = [tuple(str(x) for x in tup) for tup in senti_count2]
senti_count2 = [(sub[1], sub[0]) for sub in senti_count2]
return render_template('home.html', sentimen=senti_count, tabel=df, headings = headings,
positif=tuples1, negatif=tuples2, netral=tuples3, sentimen2=senti_count2)
def default():
return redirect('/home.html')
@app.route('/home.html')
def home():
df = pd.read_csv('hasil_label.csv', index_col=0)
positif1 = df.loc[df['nilai'] > 15].head()
negatif1 = df.loc[df['nilai'] < -25].head()
netral1 = df.loc[df['nilai'] == -1].head()
headings = ("Tweet", "Nilai", "Sentimen")
tuples1 = [tuple(x) for x in positif1.values]
tuples2 = [tuple(x) for x in negatif1.values]
tuples3 = [tuple(x) for x in netral1.values]
senti_count = df['sentimen'].value_counts()
    senti_count2 = tuple(zip(senti_count, senti_count.index))
senti_count2 = [tuple(str(x) for x in tup) for tup in senti_count2]
senti_count2 = [(sub[1], sub[0]) for sub in senti_count2]
return render_template('home.html', sentimen=senti_count, tabel=df, headings = headings,
positif=tuples1, negatif=tuples2, netral=tuples3, sentimen2=senti_count2)
@app.route('/predict',methods=['POST'])
def predict():
max_length = 200
if request.method == 'POST':
review = request.form['review']
data = [review]
#tokenizer.fit_on_texts(data)
enc = tokenizer.texts_to_sequences(data)
enc = pad_sequences(enc, maxlen=max_length, dtype='int32', value=0)
my_prediction = model.predict(array([enc][0]))[0]
#class1 = model.predict_classes(array([enc][0]))[0]
sentiment = model.predict(enc)[0]
#print(my_prediction)
#print(review)
#print(data)
#neg = np.argmax(sentiment)
print(sentiment)
if (np.argmax(sentiment) == 0):
sentimennya = 0
# neg = sentiment
# sentiment = neg
#print('Sentimen: Negatif')
elif (np.argmax(sentiment) == 1):
sentimennya = 1
# net = sentiment
# sentiment = net
#print('Sentimen: Netral')
else:
sentimennya = 2
# pos = sentiment
# sentiment = pos
#print('Sentimen: Positif')
return render_template('result.html',prediction = sentimennya, teks=review)
@app.route('/style.css',methods=['GET'])
def stylecss():
read_file = open("static/style.css", "r")
opens = read_file.read()
return Response(opens, mimetype='text/css')
if __name__ == '__main__':
app.run(debug=True)
|
[
"ggeadalfa@gmail.com"
] |
ggeadalfa@gmail.com
|
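A hedged sketch of exercising the /predict route with Flask's built-in test client, assuming the file above is importable as `app` and the model/tokenizer files are present (the sample review text is made up):

from app import app  # hypothetical module name for the file above

with app.test_client() as client:
    resp = client.post("/predict", data={"review": "pelayanan cepat dan ramah"})
    print(resp.status_code)  # 200 if result.html rendered with the 0/1/2 label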
30a55bb17173a3f4680ecf975a40f336f9e04562
|
bc120a459a66291d18172b123978076160d731f7
|
/run_codegen.py
|
0d06b8f4f85ea22ebf5d0393e9ff560e47bccb0f
|
[] |
no_license
|
takp/grpc-sample
|
9c4c79e5bf0269125de87ddf6606c4c6a61c6c18
|
d03c171faa7a69d933f9eaaaa0558ff27bc39bcf
|
refs/heads/master
| 2020-04-04T00:57:44.023282
| 2018-11-04T05:24:11
| 2018-11-04T05:24:11
| 155,663,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs protoc with the gRPC plugin to generate messages and gRPC stubs."""
from grpc_tools import protoc
protoc.main((
'',
'-I./protos',
'--python_out=.',
'--grpc_python_out=.',
'./protos/helloworld.proto',
))
|
[
"takayoshi.nishida@gmail.com"
] |
takayoshi.nishida@gmail.com
|
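The protoc.main(...) call above is the in-process equivalent of `python -m grpc_tools.protoc -I./protos --python_out=. --grpc_python_out=. ./protos/helloworld.proto`, and it emits helloworld_pb2.py (messages) plus helloworld_pb2_grpc.py (stubs). A sketch of using the generated modules, assuming the proto follows the canonical gRPC helloworld Greeter service (the proto itself is not shown in the snippet):

import grpc
import helloworld_pb2
import helloworld_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = helloworld_pb2_grpc.GreeterStub(channel)
    reply = stub.SayHello(helloworld_pb2.HelloRequest(name="world"))
    print(reply.message)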
738181729eb2d9a6b0e3fd73dfae9eb50c4cf98c
|
d1bcd0150979532a9b6caa37a9b119b4203c1091
|
/PyBootCamp/print_and_string.py
|
e09c933bae56bcb79c6eaad620da759e70579e32
|
[] |
no_license
|
svfarande/Python-Bootcamp
|
5b626bbb6f7047eb3c3cd5490ac58f42ff4771d6
|
0d7eb14b50fbe9bd8709422a51f34f1d6202c8e6
|
refs/heads/main
| 2023-03-11T19:29:37.574840
| 2021-03-07T18:12:19
| 2021-03-07T18:12:19
| 339,967,567
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
mystr = "Shubham"
print(len(mystr))
print(mystr[::-1])
print("Shubham"[::-1])
mystr = mystr[:3] + "B" + mystr[4:]
print(mystr)
letter = "S"
print(letter * 5)
mystr = "Shubham Farande"
print(mystr.split("a"))
print('%f' % (0.1 + 0.2 - 0.3))
print("Answer is {s:}".format(s=100 / 777)) # Answer is 0.1287001287001287
print("Answer is {s:.2}".format(s=100 / 777)) # Answer is 0.13
print("Answer is {s:10}".format(s=100 / 777)) # Answer is 0.1287001287001287
print("Answer is {s:10.0}".format(s=100 / 777)) # Answer is 0.1
print("Answer is {s:10.1}".format(s=100 / 777)) # Answer is 0.1
print("Answer is {s:10.2}".format(s=100 / 777)) # Answer is 0.13
print("Answer is {s:10.3}".format(s=100 / 777)) # Answer is 0.129
mystr = "aa"
print(f"My name is {mystr}.")
|
[
"svfarande@gmail.com"
] |
svfarande@gmail.com
|
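The `{s:10.2}`-style specs above use Python's general format, where the precision counts significant digits rather than decimal places. The same experiments as f-strings, plus the fixed-point variant for contrast:

x = 100 / 777
print(f"Answer is {x:10.2}")   # general format: width 10, 2 significant digits -> 0.13
print(f"Answer is {x:10.3f}")  # fixed-point: width 10, 3 digits after the point -> 0.129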
12fcdca2eabebc85e0a958a99edb385746718a0d
|
2c95e0f7bb3f977306f479d5c99601ab1d5c61f2
|
/tests/wallet/cc_wallet/test_cc_wallet.py
|
71b397292145e285fe313b58e34463afcb6b990c
|
[
"Apache-2.0"
] |
permissive
|
Olive-blockchain/Olive-blockchain-CLI
|
d62444f8456467f8105531178d2ae53d6e92087d
|
8c4a9a382d68fc1d71c5b6c1da858922a8bb8808
|
refs/heads/main
| 2023-07-19T03:51:08.700834
| 2021-09-19T16:05:10
| 2021-09-19T16:05:10
| 406,045,499
| 0
| 0
|
Apache-2.0
| 2021-09-19T16:05:10
| 2021-09-13T16:20:38
|
Python
|
UTF-8
|
Python
| false
| false
| 23,670
|
py
|
import asyncio
from typing import List
import pytest
from olive.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from olive.full_node.mempool_manager import MempoolManager
from olive.simulator.simulator_protocol import FarmNewBlockProtocol
from olive.types.blockchain_format.coin import Coin
from olive.types.blockchain_format.sized_bytes import bytes32
from olive.types.peer_info import PeerInfo
from olive.util.ints import uint16, uint32, uint64
from olive.wallet.cc_wallet.cc_utils import cc_puzzle_hash_for_inner_puzzle_hash
from olive.wallet.cc_wallet.cc_wallet import CCWallet
from olive.wallet.puzzles.cc_loader import CC_MOD
from olive.wallet.transaction_record import TransactionRecord
from olive.wallet.wallet_coin_record import WalletCoinRecord
from tests.setup_nodes import setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
async def tx_in_pool(mempool: MempoolManager, tx_id: bytes32):
tx = mempool.get_spendbundle(tx_id)
if tx is None:
return False
return True
class TestCCWallet:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def three_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 3, {}):
yield _
@pytest.mark.asyncio
async def test_colour_creation(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.get_send_queue()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
@pytest.mark.asyncio
async def test_cc_spend(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.get_send_queue()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
assert cc_wallet.cc_info.my_genesis_checker is not None
colour = cc_wallet.get_colour()
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_2.wallet_state_manager, wallet2, colour)
assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
cc_2_hash = await cc_wallet_2.get_new_inner_hash()
tx_record = await cc_wallet.generate_signed_transaction([uint64(60)], [cc_2_hash])
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 40)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 40)
await time_out_assert(30, cc_wallet_2.get_confirmed_balance, 60)
await time_out_assert(30, cc_wallet_2.get_unconfirmed_balance, 60)
cc_hash = await cc_wallet.get_new_inner_hash()
tx_record = await cc_wallet_2.generate_signed_transaction([uint64(15)], [cc_hash])
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 55)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 55)
@pytest.mark.asyncio
async def test_get_wallet_for_colour(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
colour = cc_wallet.get_colour()
assert await wallet_node.wallet_state_manager.get_wallet_for_colour(colour) == cc_wallet
@pytest.mark.asyncio
async def test_generate_zero_val(self, two_wallet_nodes):
num_blocks = 4
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
ph = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
assert cc_wallet.cc_info.my_genesis_checker is not None
colour = cc_wallet.get_colour()
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_2.wallet_state_manager, wallet2, colour)
assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
spend_bundle = await cc_wallet_2.generate_zero_val_coin()
await time_out_assert(15, tx_in_pool, True, full_node_api.full_node.mempool_manager, spend_bundle.name())
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
async def unspent_count():
unspent: List[WalletCoinRecord] = list(
await cc_wallet_2.wallet_state_manager.get_spendable_coins_for_wallet(cc_wallet_2.id())
)
return len(unspent)
await time_out_assert(15, unspent_count, 1)
unspent: List[WalletCoinRecord] = list(
await cc_wallet_2.wallet_state_manager.get_spendable_coins_for_wallet(cc_wallet_2.id())
)
assert unspent.pop().coin.amount == 0
@pytest.mark.asyncio
async def test_cc_spend_uncoloured(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.get_send_queue()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
assert cc_wallet.cc_info.my_genesis_checker is not None
colour = cc_wallet.get_colour()
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_2.wallet_state_manager, wallet2, colour)
assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
cc_2_hash = await cc_wallet_2.get_new_inner_hash()
tx_record = await cc_wallet.generate_signed_transaction([uint64(60)], [cc_2_hash])
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 40)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 40)
await time_out_assert(15, cc_wallet_2.get_confirmed_balance, 60)
await time_out_assert(15, cc_wallet_2.get_unconfirmed_balance, 60)
cc2_ph = await cc_wallet_2.get_new_cc_puzzle_hash()
tx_record = await wallet.wallet_state_manager.main_wallet.generate_signed_transaction(10, cc2_ph, 0)
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
id = cc_wallet_2.id()
wsm = cc_wallet_2.wallet_state_manager
await time_out_assert(15, wsm.get_confirmed_balance_for_wallet, 70, id)
await time_out_assert(15, cc_wallet_2.get_confirmed_balance, 60)
await time_out_assert(15, cc_wallet_2.get_unconfirmed_balance, 60)
@pytest.mark.asyncio
async def test_cc_spend_multiple(self, three_wallet_nodes):
num_blocks = 3
full_nodes, wallets = three_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node_0, wallet_server_0 = wallets[0]
wallet_node_1, wallet_server_1 = wallets[1]
wallet_node_2, wallet_server_2 = wallets[2]
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet_0.get_new_puzzlehash()
await wallet_server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await wallet_server_1.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await wallet_server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet_0.get_confirmed_balance, funds)
cc_wallet_0: CCWallet = await CCWallet.create_new_cc(wallet_node_0.wallet_state_manager, wallet_0, uint64(100))
tx_queue: List[TransactionRecord] = await wallet_node_0.wallet_state_manager.get_send_queue()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet_0.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet_0.get_unconfirmed_balance, 100)
assert cc_wallet_0.cc_info.my_genesis_checker is not None
colour = cc_wallet_0.get_colour()
cc_wallet_1: CCWallet = await CCWallet.create_wallet_for_cc(
wallet_node_1.wallet_state_manager, wallet_1, colour
)
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(
wallet_node_2.wallet_state_manager, wallet_2, colour
)
assert cc_wallet_0.cc_info.my_genesis_checker == cc_wallet_1.cc_info.my_genesis_checker
assert cc_wallet_0.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
cc_1_hash = await cc_wallet_1.get_new_inner_hash()
cc_2_hash = await cc_wallet_2.get_new_inner_hash()
tx_record = await cc_wallet_0.generate_signed_transaction([uint64(60), uint64(20)], [cc_1_hash, cc_2_hash])
await wallet_0.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet_0.get_confirmed_balance, 20)
await time_out_assert(15, cc_wallet_0.get_unconfirmed_balance, 20)
await time_out_assert(30, cc_wallet_1.get_confirmed_balance, 60)
await time_out_assert(30, cc_wallet_1.get_unconfirmed_balance, 60)
await time_out_assert(30, cc_wallet_2.get_confirmed_balance, 20)
await time_out_assert(30, cc_wallet_2.get_unconfirmed_balance, 20)
cc_hash = await cc_wallet_0.get_new_inner_hash()
tx_record = await cc_wallet_1.generate_signed_transaction([uint64(15)], [cc_hash])
await wallet_1.wallet_state_manager.add_pending_transaction(tx_record)
tx_record_2 = await cc_wallet_2.generate_signed_transaction([uint64(20)], [cc_hash])
await wallet_2.wallet_state_manager.add_pending_transaction(tx_record_2)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record_2.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet_0.get_confirmed_balance, 55)
await time_out_assert(15, cc_wallet_0.get_unconfirmed_balance, 55)
await time_out_assert(30, cc_wallet_1.get_confirmed_balance, 45)
await time_out_assert(30, cc_wallet_1.get_unconfirmed_balance, 45)
await time_out_assert(30, cc_wallet_2.get_confirmed_balance, 0)
await time_out_assert(30, cc_wallet_2.get_unconfirmed_balance, 0)
@pytest.mark.asyncio
async def test_cc_max_amount_send(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100000))
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.get_send_queue()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100000)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100000)
assert cc_wallet.cc_info.my_genesis_checker is not None
cc_2_hash = await cc_wallet.get_new_inner_hash()
amounts = []
puzzle_hashes = []
for i in range(1, 50):
amounts.append(uint64(i))
puzzle_hashes.append(cc_2_hash)
        spent_coin = (await cc_wallet.get_cc_spendable_coins())[0].coin
        tx_record = await cc_wallet.generate_signed_transaction(amounts, puzzle_hashes, coins={spent_coin})
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await asyncio.sleep(2)
async def check_all_there():
spendable = await cc_wallet.get_cc_spendable_coins()
spendable_name_set = set()
for record in spendable:
spendable_name_set.add(record.coin.name())
puzzle_hash = cc_puzzle_hash_for_inner_puzzle_hash(CC_MOD, cc_wallet.cc_info.my_genesis_checker, cc_2_hash)
for i in range(1, 50):
                coin = Coin(spent_coin.name(), puzzle_hash, i)
if coin.name() not in spendable_name_set:
return False
return True
await time_out_assert(15, check_all_there, True)
await asyncio.sleep(5)
max_sent_amount = await cc_wallet.get_max_send_amount()
# 1) Generate transaction that is under the limit
under_limit_tx = None
try:
under_limit_tx = await cc_wallet.generate_signed_transaction(
[max_sent_amount - 1],
[ph],
)
except ValueError:
            pass  # no ValueError expected for an under-limit amount; the assert below catches a miss
assert under_limit_tx is not None
# 2) Generate transaction that is equal to limit
at_limit_tx = None
try:
at_limit_tx = await cc_wallet.generate_signed_transaction(
[max_sent_amount],
[ph],
)
except ValueError:
            pass  # no ValueError expected at exactly the limit
assert at_limit_tx is not None
# 3) Generate transaction that is greater than limit
above_limit_tx = None
try:
above_limit_tx = await cc_wallet.generate_signed_transaction(
[max_sent_amount + 1],
[ph],
)
except ValueError:
pass
assert above_limit_tx is None
|
[
"87711356+Olive-blockchain@users.noreply.github.com"
] |
87711356+Olive-blockchain@users.noreply.github.com
|
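Most of the assertions above go through time_out_assert, which polls a callable until it returns the expected value or a deadline passes. A hedged, self-contained sketch of that retry pattern (an illustration, not the project's own implementation):

import asyncio

async def poll_until(timeout, fn, expected, *args, interval=0.5):
    # Retry fn(*args), awaiting it when it is a coroutine function, until it
    # returns `expected` or `timeout` seconds elapse.
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    while True:
        value = await fn(*args) if asyncio.iscoroutinefunction(fn) else fn(*args)
        if value == expected:
            return
        if loop.time() >= deadline:
            raise AssertionError(f"{fn!r} never returned {expected!r} within {timeout}s")
        await asyncio.sleep(interval)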
43470fa1896c46eb2705c59d8033d19d37490dc1
|
466e62d8226af6b2277c9c927bfbf55cfe02158e
|
/music/apps.py
|
442abb81a6ed596a98636e0238c15c0259d87fcf
|
[] |
no_license
|
gbip/djRDO
|
f9cae89a29560ed3f7f02b17824579cf67123c56
|
e2d6a0336c7934cae71f833cb34a1f5f21db2d02
|
refs/heads/master
| 2023-08-27T15:05:23.601486
| 2021-11-04T17:53:20
| 2021-11-04T17:53:20
| 355,814,216
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
from django.apps import AppConfig
class MusicImporterConfig(AppConfig):
name = "music"
|
[
"perso@florencepaul.com"
] |
perso@florencepaul.com
|
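For Django to pick up MusicImporterConfig, the settings must reference either the app label or the dotted config path; a one-line sketch of the relevant INSTALLED_APPS entry (the settings module is not shown in the snippet):

INSTALLED_APPS = [
    # ...
    "music.apps.MusicImporterConfig",
]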
8a3ea212efe2d9f38ce16796b86ef36e7e2d6b41
|
4bbfe889885084e33767020124d17607c120a5b2
|
/demisto_sdk/commands/common/content/objects/pack_objects/pack.py
|
3db84be477490e031a0caa6dfbc5dbaf69561abf
|
[
"MIT"
] |
permissive
|
shubgwal-gif/demisto-sdk
|
fc80163b7a2c2fc00d5b664a47326a9dbebf6dd6
|
35b15113ff8488c7c9051da2c2cbcbc7c6915e8d
|
refs/heads/master
| 2023-01-29T11:31:36.928267
| 2020-12-08T16:37:20
| 2020-12-08T16:37:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,444
|
py
|
from typing import Any, Iterator, Optional, Union
from demisto_sdk.commands.common.constants import (CLASSIFIERS_DIR,
CONNECTIONS_DIR,
DASHBOARDS_DIR,
DOC_FILES_DIR,
INCIDENT_FIELDS_DIR,
INCIDENT_TYPES_DIR,
INDICATOR_FIELDS_DIR,
INDICATOR_TYPES_DIR,
INTEGRATIONS_DIR,
LAYOUTS_DIR, PLAYBOOKS_DIR,
RELEASE_NOTES_DIR,
REPORTS_DIR, SCRIPTS_DIR,
TEST_PLAYBOOKS_DIR,
TOOLS_DIR, WIDGETS_DIR)
from demisto_sdk.commands.common.content.objects.pack_objects import (
AgentTool, Classifier, ClassifierMapper, Connection, Dashboard, DocFile,
IncidentField, IncidentType, IndicatorField, IndicatorType, Integration,
Layout, OldClassifier, PackIgnore, PackMetaData, Playbook, Readme,
ReleaseNote, Report, Script, SecretIgnore, Widget)
from demisto_sdk.commands.common.content.objects_factory import \
path_to_pack_object
from wcmatch.pathlib import Path
class Pack:
def __init__(self, path: Union[str, Path]):
self._path = Path(path)
def _content_files_list_generator_factory(self, dir_name: str, suffix: str) -> Iterator[Any]:
"""Generic content objcets iterable generator
Args:
dir_name: Directory name, for example: Integrations, Documentations etc.
suffix: file suffix to search for, if not supplied then any suffix.
Returns:
object: Any valid content object found in the given directory.
"""
objects_path = (self._path / dir_name).glob(patterns=[f"*.{suffix}", f"*/*.{suffix}"])
for object_path in objects_path:
yield path_to_pack_object(object_path)
def _content_dirs_list_generator_factory(self, dir_name) -> Iterator[Any]:
"""Generic content objcets iterable generator
Args:
dir_name: Directory name, for example: Tools.
Returns:
object: Any valid content object found in the given directory.
"""
objects_path = (self._path / dir_name).glob(patterns=["*/"])
for object_path in objects_path:
yield path_to_pack_object(object_path)
@property
def id(self) -> str:
return self._path.parts[-1]
@property
def path(self) -> Path:
return self._path
@property
def integrations(self) -> Iterator[Integration]:
return self._content_files_list_generator_factory(dir_name=INTEGRATIONS_DIR,
suffix="yml")
@property
def scripts(self) -> Iterator[Script]:
return self._content_files_list_generator_factory(dir_name=SCRIPTS_DIR,
suffix="yml")
@property
def playbooks(self) -> Iterator[Playbook]:
return self._content_files_list_generator_factory(dir_name=PLAYBOOKS_DIR,
suffix="yml")
@property
def reports(self) -> Iterator[Report]:
return self._content_files_list_generator_factory(dir_name=REPORTS_DIR,
suffix="json")
@property
def dashboards(self) -> Iterator[Dashboard]:
return self._content_files_list_generator_factory(dir_name=DASHBOARDS_DIR,
suffix="json")
@property
def incident_types(self) -> Iterator[IncidentType]:
return self._content_files_list_generator_factory(dir_name=INCIDENT_TYPES_DIR,
suffix="json")
@property
def incident_fields(self) -> Iterator[IncidentField]:
return self._content_files_list_generator_factory(dir_name=INCIDENT_FIELDS_DIR,
suffix="json")
@property
def layouts(self) -> Iterator[Layout]:
return self._content_files_list_generator_factory(dir_name=LAYOUTS_DIR,
suffix="json")
@property
def classifiers(self) -> Iterator[Union[Classifier, OldClassifier, ClassifierMapper]]:
return self._content_files_list_generator_factory(dir_name=CLASSIFIERS_DIR,
suffix="json")
@property
def indicator_types(self) -> Iterator[IndicatorType]:
return self._content_files_list_generator_factory(dir_name=INDICATOR_TYPES_DIR,
suffix="json")
@property
def indicator_fields(self) -> Iterator[IndicatorField]:
return self._content_files_list_generator_factory(dir_name=INDICATOR_FIELDS_DIR,
suffix="json")
@property
def connections(self) -> Iterator[Connection]:
return self._content_files_list_generator_factory(dir_name=CONNECTIONS_DIR,
suffix="json")
@property
def test_playbooks(self) -> Iterator[Union[Playbook, Script]]:
return self._content_files_list_generator_factory(dir_name=TEST_PLAYBOOKS_DIR,
suffix="yml")
@property
def widgets(self) -> Iterator[Widget]:
return self._content_files_list_generator_factory(dir_name=WIDGETS_DIR,
suffix="json")
@property
def release_notes(self) -> Iterator[ReleaseNote]:
return self._content_files_list_generator_factory(dir_name=RELEASE_NOTES_DIR,
suffix="md")
@property
def tools(self) -> Iterator[AgentTool]:
return self._content_dirs_list_generator_factory(dir_name=TOOLS_DIR)
@property
def doc_files(self) -> Iterator[DocFile]:
return self._content_files_list_generator_factory(dir_name=DOC_FILES_DIR,
suffix="*")
@property
def pack_metadata(self) -> Optional[PackMetaData]:
obj = None
file = self._path / "pack_metadata.json"
if file.exists():
obj = PackMetaData(file)
return obj
@property
def secrets_ignore(self) -> Optional[SecretIgnore]:
obj = None
file = self._path / ".secrets-ignore"
if file.exists():
obj = SecretIgnore(file)
return obj
@property
def pack_ignore(self) -> Optional[PackIgnore]:
obj = None
file = self._path / ".pack-ignore"
if file.exists():
obj = PackIgnore(file)
return obj
@property
def readme(self) -> Optional[Readme]:
obj = None
file = self._path / "README.md"
if file.exists():
obj = Readme(file)
return obj
|
[
"noreply@github.com"
] |
shubgwal-gif.noreply@github.com
|
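Each property on Pack returns a lazy generator, so nothing under the pack directory is parsed until iteration. A usage sketch (the pack path is hypothetical):

pack = Pack("Packs/HelloWorld")
for integration in pack.integrations:  # matches *.yml and */*.yml under Integrations/
    print(integration.path)
if pack.readme is not None:
    print("pack ships a README.md")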
266aa42a7d13fb27c4027899a7e959bbff0a81b6
|
935a3f949041bb43433bd02bb0988519f0fd8c4e
|
/ex2.py
|
0690e9624d92f0461f54cda7f1aff4db3d6a16f9
|
[] |
no_license
|
damiankoper/iobLab
|
c5acae472be66b27f6c58d2453a8419129e33eb6
|
819f02e3aa8a8bbfbfab27362f2982fed12fb03a
|
refs/heads/master
| 2021-03-21T22:12:26.707722
| 2020-05-08T19:34:39
| 2020-05-08T19:34:39
| 247,330,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,584
|
py
|
#!/usr/bin/env python3
import numpy as np
import urllib.request
import cv2
import binascii
import lorem
import math
import matplotlib.pyplot as plt
def encode_as_binary_array(msg):
"""Encode a message as a binary string."""
msg = msg.encode("utf-8")
msg = msg.hex()
msg = [msg[i:i + 2] for i in range(0, len(msg), 2)]
msg = [bin(int(el, base=16))[2:] for el in msg]
msg = ["0" * (8 - len(el)) + el for el in msg]
return "".join(msg)
def decode_from_binary_array(array):
"""Decode a binary string to utf8."""
array = [array[i:i+8] for i in range(0, len(array), 8)]
if len(array[-1]) != 8:
array[-1] = array[-1] + "0" * (8 - len(array[-1]))
array = [hex(int(el, 2))[2:].zfill(2) for el in array]
array = "".join(array)
result = binascii.unhexlify(array)
return result.decode("utf-8", errors="replace")
def hide_message(image, message, nbits=1):
"""Hide a message in an image (LSB).
nbits: number of least significant bits
"""
nbits = clamp(nbits, 1, 8)
shape = image.shape
image = np.copy(image).flatten()
if len(message) > len(image) * nbits:
raise ValueError("Message is to long :(")
chunks = [message[i:i + nbits] for i in range(0, len(message), nbits)]
for i, chunk in enumerate(chunks):
byte = str(bin(image[i]))[2:].zfill(8)
new_byte = byte[:-nbits] + chunk
image[i] = int(new_byte, 2)
return image.reshape(shape)
def clamp(n, minn, maxn):
"""Clamp the n value to be in range (minn, maxn)."""
return max(min(maxn, n), minn)
def reveal_message(image, nbits=1, length=0):
"""Reveal the hidden message.
nbits: number of least significant bits
length: length of the message in bits.
"""
nbits = clamp(nbits, 1, 8)
shape = image.shape
image = np.copy(image).flatten()
length_in_pixels = math.ceil(length/nbits)
if len(image) < length_in_pixels or length_in_pixels <= 0:
length_in_pixels = len(image)
message = ""
i = 0
while i < length_in_pixels:
byte = str(bin(image[i]))[2:].zfill(8)
message += byte[-nbits:]
i += 1
mod = length % -nbits
if mod != 0:
message = message[:mod]
return message
print("Downloading image!")
path = 'https://picsum.photos/500/500'
resp = urllib.request.urlopen(path)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
print("Image downloaded!")
message = lorem.text()*1000
secret = encode_as_binary_array(message)
resultImageRow1 = None
resultImageRow2 = None
nbitsList = range(1, 9)
nbitsMSE = []
for nbits in nbitsList:
print(nbits)
imageSecret = hide_message(image, secret[:int(image.size*0.8)], nbits)
    mse = ((imageSecret.astype(float) - image.astype(float))**2).mean()  # cast first: uint8 subtraction would wrap around
nbitsMSE.append(mse)
if nbits <= 4:
resultImageRow1 = imageSecret if resultImageRow1 is None else np.hstack(
[resultImageRow1, imageSecret])
else:
resultImageRow2 = imageSecret if resultImageRow2 is None else np.hstack(
[resultImageRow2, imageSecret])
plt.plot(nbitsList, nbitsMSE)
plt.xlabel('nbits')
plt.ylabel('MSE')
cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
cv2.imshow('Result', np.vstack([resultImageRow1, resultImageRow2]))
cv2.imwrite('ex2_encoded.png', np.vstack([resultImageRow1, resultImageRow2]))
cv2.waitKey(1)
plt.savefig('ex2_plot.png')
plt.show()
cv2.waitKey()
# For nbits=7,8 the MSE decreased, because a larger part of the base image stays
# visible - the message was written on a smaller number of pixels
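# Minimal round-trip sketch (not in the original script): hide a short message
# with nbits=1, then reveal and decode it again.
short_message = "stego round trip"
short_secret = encode_as_binary_array(short_message)
stego = hide_message(image, short_secret, 1)
recovered = decode_from_binary_array(
    reveal_message(stego, nbits=1, length=len(short_secret)))
assert recovered == short_message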
|
[
"kopernickk@gmail.com"
] |
kopernickk@gmail.com
|
1e89fbf40a207c845671b836a424fd324fc46b79
|
30834d127caf5044959ae7e17fdf0cf03b1db4d0
|
/41. First Missing Positive.py
|
cce102c99c3c7dc206eb0ff957e525b0b7a3676a
|
[] |
no_license
|
xigaoli/lc-challenges
|
9c4dfe866340b42a2780994719e12c3a5a513af2
|
eb06aab20129fca8239717842582c0dc2bb69d53
|
refs/heads/main
| 2023-02-11T09:00:51.777126
| 2021-01-13T23:27:46
| 2021-01-13T23:27:46
| 308,232,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
from typing import List
from collections import defaultdict
class Solution:
def firstMissingPositive(self, nums: List[int]) -> int:
numdict = defaultdict(int)
for n in nums:
#print(n)
numdict[n]+=1
for i in range(1,len(nums)+1):
if(numdict[i]==0):
return i
#if nums is [1,2,...k] then len(nums)+1==k+1 is answer
return len(nums)+1
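if __name__ == "__main__":
    # Illustrative checks (not part of the LeetCode submission):
    assert Solution().firstMissingPositive([3, 4, -1, 1]) == 2
    assert Solution().firstMissingPositive([1, 2, 0]) == 3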
|
[
"lxgfrom2009@gmail.com"
] |
lxgfrom2009@gmail.com
|
38a16b4a5bc7e81a08051d387e1db8cf70226add
|
8a07250aaa7aaa0756e1abc36579f6dcc8f2db1d
|
/solutions/143. Reorder List.py
|
60bf5e8ba6870141ce8613dd85cb08f74571864b
|
[] |
no_license
|
atriekak/LeetCode
|
f92db0f5f2efb3ba97393b496374f7adc8b92545
|
8438f8a53fbc44d9d10a1e0b96f7ba78dc38eb88
|
refs/heads/main
| 2023-07-17T08:29:45.625297
| 2021-07-04T04:22:43
| 2021-07-04T04:22:43
| 324,005,296
| 0
| 0
| null | 2021-09-02T21:23:06
| 2020-12-23T21:29:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,622
|
py
|
# Definition for singly-linked list (uncommented so the file runs standalone;
# LeetCode normally provides this class).
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
def reorderList(self, head: ListNode) -> None:
"""
Do not return anything, modify head in-place instead.
"""
#Time Complexity: O(n)
#Space Complexity: O(1)
if not head:
return None
slow_pointer = head
fast_pointer = head
while fast_pointer.next and fast_pointer.next.next: #finding mid-point
slow_pointer = slow_pointer.next
fast_pointer = fast_pointer.next.next
fast_pointer = self.reverse(slow_pointer.next) #reversing second half
slow_pointer.next = None #severing two lists
slow_pointer = head
while fast_pointer: #merging two lists
temp = slow_pointer.next
slow_pointer.next = fast_pointer
fast_pointer = fast_pointer.next
slow_pointer.next.next = temp
slow_pointer = temp
def reverse(self, root):
prev = None
curr = root
while curr:
temp = curr.next
curr.next = prev
prev = curr
curr = temp
return prev
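if __name__ == "__main__":
    # Illustrative driver (not part of the LeetCode submission): reorder
    # 1->2->3->4->5 in place and print the result.
    nodes = [ListNode(v) for v in range(1, 6)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    Solution().reorderList(nodes[0])
    cur, out = nodes[0], []
    while cur:
        out.append(cur.val)
        cur = cur.next
    print(out)  # [1, 5, 2, 4, 3]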
|
[
"atriekak@gmail.com"
] |
atriekak@gmail.com
|
3468f110d4766d5a44d5adf35a319949bf68ada8
|
03b8cd4f58fd7e6c2d40bc8b29c5bfacf3fe9424
|
/server/src/posts/migrations/0009_comment.py
|
283b594542c8fddc470592dff6a8bc73650e0205
|
[
"MIT"
] |
permissive
|
AlecLangford/SnapShare
|
545667d25ae94b3dcd04e3f9aad005a426598c5f
|
c6883ca21d6f86aad025829a49a5d845893cdd9b
|
refs/heads/master
| 2023-07-10T00:43:23.850214
| 2018-08-29T12:06:12
| 2018-08-29T12:06:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
# Generated by Django 2.0.1 on 2018-03-19 13:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0008_auto_20180316_1109'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now=True)),
('text', models.CharField(max_length=500)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"gupta.chetan1997@gmail.com"
] |
gupta.chetan1997@gmail.com
|
bf18367780339e77ed828796cf86505757387a00
|
e6a1c4389d1ea3582b3b5e18228c2e79eb0cece5
|
/0x0A-python-inheritance/11-square.py
|
9c04159e897287b30f41171d87bc7659e4ddfc96
|
[] |
no_license
|
TMcMac/holbertonschool-higher_level_programming
|
4cd5dcf25163306c1bf4f5434cb9e5d07118f750
|
7449dc4642a8d5a04c731aea8c5fb176ea31e4ce
|
refs/heads/master
| 2023-03-02T19:00:56.483666
| 2021-02-12T21:41:37
| 2021-02-12T21:41:37
| 290,367,732
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
#!/usr/bin/python3
"""Docstring because reasons"""
class BaseGeometry:
"""
A mostly empty class with a few error conditions
"""
def area(self):
"""
Just an error for this one
"""
raise Exception("area() is not implemented")
def integer_validator(self, name, value):
"""
Just meant to raise errors for now
parameters - name: a string much like key in dict
value: (int) much like value in a dict
"""
if type(value) is not int:
raise TypeError("{} must be an integer".format(name))
elif value <= 0:
raise ValueError("{} must be greater than 0".format(name))
class Rectangle(BaseGeometry):
"""
    Inherits from the class BaseGeometry, which is mostly full of errors
"""
def __init__(self, width, height):
"""
initializes with a validity check for ints
parameters - width (int): the width
height (int): the height
"""
self.integer_validator("width", width)
self.__width = width
self.integer_validator("height", height)
self.__height = height
def area(self):
"""
        calculates the area of our rectangle
"""
return (self.__height * self.__width)
def __str__(self):
"""
returns a string representation of the object
"""
return("[Rectangle] {}/{}".format(self.__width, self.__height))
class Square(Rectangle):
"""
    Inherits from the class Rectangle
"""
def __init__(self, size):
"""
Initializes a square of size size after
validating size to be an int
parameter - size (int): the size
"""
self.integer_validator("size", size)
self.__size = size
def area(self):
"""
Returns the area of the square
"""
return (self.__size ** 2)
def __str__(self):
"""
returns a string representation of the object
"""
return("[Square] {}/{}".format(self.__size, self.__size))
|
[
"timmcmackenjr@gmail.com"
] |
timmcmackenjr@gmail.com
|
0bdc8c06e4802b8ff2829a1cf4c8094fc2f85d2d
|
c83ee3848bbc0bceea5f93eaa98660381e226efa
|
/Exercise-3/Sap_Router/Sap_app/apps.py
|
1b29525f90cd7480e9a102029dc1fa4cc4bf2c71
|
[] |
no_license
|
malikdipti/DIPTIRANJAN-MALIK_PYTHON
|
2f59ef72c1df1078fcf2c38f4bd16eec08d9a08a
|
17559ff4440225a5c386cc80e1e65d394e4f9fe0
|
refs/heads/master
| 2022-12-26T13:14:41.127720
| 2020-10-03T13:28:52
| 2020-10-03T13:28:52
| 300,805,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
from django.apps import AppConfig
class SapAppConfig(AppConfig):
name = 'Sap_app'
|
[
"44806816+malikdipti@users.noreply.github.com"
] |
44806816+malikdipti@users.noreply.github.com
|
d8d88d318e27c532dc2b61b297b8f211d573a61d
|
92e26b93057723148ecb8ca88cd6ad755f2e70f1
|
/cov_exp/plain30_dfconv/network.py
|
8f490c139b1dc143c53b076304e5055ea7a21755
|
[] |
no_license
|
lyuyanyii/CIFAR
|
5906ad9fbe1377edf5b055098709528e06b5ace2
|
d798834942d6a9d4e3295cda77488083c1763962
|
refs/heads/master
| 2021-08-30T20:09:52.819883
| 2017-12-19T08:37:37
| 2017-12-19T08:37:37
| 112,701,370
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,308
|
py
|
import numpy as np
from megskull.network import Network
from megskull.opr.all import (
Conv2D, Pooling2D, FullyConnected, Softmax,
CrossEntropyLoss, Dropout, ElementwiseAffine, Concat,
Floor, Ceil, ones, Cumsum, Min, Max,
AdvancedIndexing, Astype, Linspace, IndexingRemap,
Equal, ZeroGrad,
)
from megskull.opr.helper.elemwise_trans import ReLU, Identity
from megskull.graph.query import GroupNode
from megskull.opr.netsrc import DataProvider, ConstProvider
import megskull.opr.helper.param_init as pinit
from megskull.opr.helper.param_init import AutoGaussianParamInitializer as G
from megskull.opr.helper.param_init import ConstantParamInitializer as C
from megskull.opr.regularizer import BatchNormalization as BN
import megskull.opr.arith as arith
global idx
idx = 0
def conv_bn(inp, ker_shape, stride, padding, out_chl, isrelu):
global idx
idx += 1
l1 = Conv2D(
"conv{}".format(idx), inp, kernel_shape = ker_shape, stride = stride, padding = padding,
output_nr_channel = out_chl,
W = G(mean = 0, std = ((1 + int(isrelu)) / (ker_shape**2 * inp.partial_shape[1]))**0.5),
nonlinearity = Identity()
)
l2 = BN("bn{}".format(idx), l1, eps = 1e-9)
l2 = ElementwiseAffine("bnaff{}".format(idx), l2, shared_in_channels = False, k = C(1), b = C(0))
if isrelu:
l2 = arith.ReLU(l2)
return l2, l1
def dfconv(inp, chl, isrelu, ker_shape = 3, stride = 1, padding = 1, dx = [-1, 0, 1], dy = [-1, 0, 1], name = "dfconv"):  # 'name' was referenced in the body but missing from the original signature
inp = Conv2D(
name + "conv", inp, kernel_shape = 3, stride = 1, padding = 1,
output_nr_channel = ker_shape**2,
W = G(mean = 0, std = ((1) / (ker_shape**2 * inp.partial_shape[1]))**0.5),
nonlinearity = Identity()
)
inp = BN(name + "BN", inp, eps = 1e-9)
global idx
#idx += 1
gamma = 0.001
offsetx = inp.partial_shape[2] * Conv2D(
"conv{}_offsetx".format(idx + 1), inp, kernel_shape = ker_shape, stride = stride,
padding = padding,
output_nr_channel = ker_shape**2,
W = G(mean = 0, std = gamma / (ker_shape**2 * inp.partial_shape[2])),
nonlinearity = Identity()
)
offsety = inp.partial_shape[3] * Conv2D(
"conv{}_offsety".format(idx + 1), inp, kernel_shape = ker_shape, stride = stride,
padding = padding,
output_nr_channel = ker_shape**2,
W = G(mean = 0, std = gamma / (ker_shape**2 * inp.partial_shape[3])),
nonlinearity = Identity()
)
outputs = []
for sx in range(2):
for sy in range(2):
if sx == 0:
ofx = Floor(offsetx)
bilx = offsetx - ofx
else:
ofx = Ceil(offsetx)
bilx = ofx - offsetx
if sy == 0:
ofy = Floor(offsety)
bily = offsety - ofy
else:
ofy = Ceil(offsety)
bily = ofy - offsety
"""
No padding
padding1 = ConstProvider(np.zeros((inp.partial_shape[0], inp.partial_shape[1], 1, inp.partial_shape[3])))
padding2 = ConstProvider(np.zeros((inp.partial_shape[0], inp.partial_shape[1], inp.partial_shape[2] + 2, 1)))
arg_fea = Concat([padding1, inp, padding1], axis = 2)
arg_fea = Concat([padding2, arg_fea, padding2], axis = 3)
"""
arg_fea = inp
#one_mat = ConstProvider(np.ones((inp.partial_shape[2], inp.partial_shape[3])), dtype = np.int32)
one_mat = ConstProvider(1, dtype = np.int32).add_axis(0).broadcast((ofx.partial_shape[2], ofx.partial_shape[3]))
affx = (Cumsum(one_mat, axis = 0) - 1) * stride
affy = (Cumsum(one_mat, axis = 1) - 1) * stride
ofx = ofx + affx.dimshuffle('x', 'x', 0, 1)
ofy = ofy + affy.dimshuffle('x', 'x', 0, 1)
one_mat = ConstProvider(np.ones((ker_shape, ofx.partial_shape[2], ofx.partial_shape[3])))
#ofx[:, :ker_shape, :, :] -= 1
#ofx[:, ker_shape*2:, :, :] += 1
ofx += Concat([one_mat * i for i in dx], axis = 0).dimshuffle('x', 0, 1, 2)
#ofy[:, ::3, :, :] -= 1
#ofy[:, 2::3, :, :] += 1
one_mat = ones((1, ofx.partial_shape[2], ofx.partial_shape[3]))
one_mat = Concat([one_mat * i for i in dy], axis = 0)
one_mat = Concat([one_mat] * ker_shape, axis = 0)
ofy += one_mat.dimshuffle('x', 0, 1, 2)
ofx = Max(Min(ofx, arg_fea.partial_shape[2] - 1), 0)
ofy = Max(Min(ofy, arg_fea.partial_shape[3] - 1), 0)
def DeformReshape(inp, ker_shape):
inp = inp.reshape(inp.shape[0], ker_shape, ker_shape, inp.shape[2], inp.shape[3])
inp = inp.dimshuffle(0, 3, 1, 4, 2)
inp = inp.reshape(inp.shape[0], inp.shape[1] * inp.shape[2], inp.shape[3] * inp.shape[4])
return inp
ofx = DeformReshape(ofx, ker_shape)
ofy = DeformReshape(ofy, ker_shape)
bilx = DeformReshape(bilx, ker_shape)
bily = DeformReshape(bily, ker_shape)
of = ofx * arg_fea.shape[2] + ofy
arg_fea = arg_fea.reshape(arg_fea.shape[0], arg_fea.shape[1], -1)
of = of.reshape(ofx.shape[0], -1)
of = of.dimshuffle(0, 'x', 1)
#of = Concat([of] * arg_fea.partial_shape[1], axis = 1)
of = of.broadcast((of.shape[0], arg_fea.shape[1], of.shape[2]))
arx = Linspace(0, arg_fea.shape[0], arg_fea.shape[0], endpoint = False)
arx = arx.add_axis(1).add_axis(2).broadcast(of.shape)
ary = Linspace(0, arg_fea.shape[1], arg_fea.shape[1], endpoint = False)
ary = ary.add_axis(0).add_axis(2).broadcast(of.shape)
of = of.add_axis(3)
arx = arx.add_axis(3)
ary = ary.add_axis(3)
idxmap = Astype(Concat([arx, ary, of], axis = 3), np.int32)
"""
sample = []
for i in range(arg_fea.partial_shape[0]):
for j in range(arg_fea.partial_shape[1]):
sample.append(arg_fea[i][j].ai[of[i][j]].dimshuffle('x', 0))
sample = Concat(sample, axis = 0)
"""
sample = IndexingRemap(arg_fea, idxmap).reshape(inp.shape[0], inp.shape[1], bilx.shape[1], -1)
bilx = bilx.dimshuffle(0, 'x', 1, 2).broadcast(sample.shape)
bily = bily.dimshuffle(0, 'x', 1, 2).broadcast(sample.shape)
sample *= bilx * bily
outputs.append(sample)
output = outputs[0]
for i in outputs[1:]:
output += i
return conv_bn(output, ker_shape, 3, 0, chl, isrelu)
def dfpooling(name, inp, window = 2, padding = 0, dx = [0, 1], dy = [0, 1]):
#inp = ConstProvider([[[[1, 2], [3, 4]]]], dtype = np.float32)
"""
Add a new conv&bn to insure that the scale of the feature map is variance 1.
"""
ker_shape = window
stride = window
offsetlay = Conv2D(
name + "conv", inp, kernel_shape = 3, stride = 1, padding = 1,
output_nr_channel = ker_shape**2,
W = G(mean = 0, std = ((1) / (3**2 * inp.partial_shape[1]))**0.5),
nonlinearity = Identity()
)
#offsetlay = BN(name + "BN", offsetlay, eps = 1e-9)
offsetx = Conv2D(
name + "conv1x", offsetlay, kernel_shape = ker_shape, stride = stride,
padding = padding,
output_nr_channel = ker_shape**2,
W = G(mean = 0, std = (1 / (ker_shape**2 * inp.partial_shape[2]))**0.5),
nonlinearity = Identity()
)
offsety = Conv2D(
name + "conv1y", offsetlay, kernel_shape = ker_shape, stride = stride,
padding = padding,
output_nr_channel = ker_shape**2,
W = G(mean = 0, std = (1 / (ker_shape**2 * inp.partial_shape[3]))**0.5),
nonlinearity = Identity()
)
offset = Concat([offsetx, offsety], axis = 1)
ndim = ker_shape**2 * offsetx.partial_shape[2] * offsetx.partial_shape[3] * 2
offset = FullyConnected(
name + "offset", offsetx, output_dim = ndim,
W = G(mean = 0, std = (1 / ndim)**2),
#W = C(0),
b = C(0),
nonlinearity = Identity()
)
offsetx = offset[:, :ndim // 2].reshape(offsetx.shape)
offsety = offset[:, ndim // 2:].reshape(offsety.shape)
"""
offsetx = FullyConnected(
name + "offsetx", offsetx, output_dim = ndim,
W = G(mean = 0, std = gamma / ndim),
b = C(0),
nonlinearity = Identity()
)
offsetx = offsetx.reshape(offsety.shape)
offsety = FullyConnected(
name + "offsety", offsety, output_dim = ndim,
W = G(mean = 0, std = gamma / ndim),
b = C(0),
nonlinearity = Identity()
)
offsety = offsety.reshape(offsetx.shape)
print(offsety.partial_shape)
"""
#offsetx = ZeroGrad(offsetx)
#offsety = ZeroGrad(offsety)
outputs = []
for sx in range(2):
for sy in range(2):
if sx == 0:
ofx = Floor(offsetx)
bilx = 1 - (offsetx - ofx)
else:
ofx = Ceil(offsetx)
bilx = 1 - (ofx - offsetx)
if sy == 0:
ofy = Floor(offsety)
bily = 1 - (offsety - ofy)
else:
ofy = Ceil(offsety)
bily = 1 - (ofy - offsety)
"""
No padding
padding1 = ConstProvider(np.zeros((inp.partial_shape[0], inp.partial_shape[1], 1, inp.partial_shape[3])))
padding2 = ConstProvider(np.zeros((inp.partial_shape[0], inp.partial_shape[1], inp.partial_shape[2] + 2, 1)))
arg_fea = Concat([padding1, inp, padding1], axis = 2)
arg_fea = Concat([padding2, arg_fea, padding2], axis = 3)
"""
arg_fea = inp
#one_mat = ConstProvider(np.ones((inp.partial_shape[2], inp.partial_shape[3])), dtype = np.int32)
one_mat = ConstProvider(1, dtype = np.int32).add_axis(0).broadcast((ofx.shape[2], ofx.shape[3]))
affx = (Cumsum(one_mat, axis = 0) - 1) * stride
affy = (Cumsum(one_mat, axis = 1) - 1) * stride
ofx = ofx + affx.dimshuffle('x', 'x', 0, 1)
ofy = ofy + affy.dimshuffle('x', 'x', 0, 1)
one_mat = ConstProvider(np.ones((ker_shape, ofx.partial_shape[2], ofx.partial_shape[3])))
#ofx[:, :ker_shape, :, :] -= 1
#ofx[:, ker_shape*2:, :, :] += 1
ofx += Concat([one_mat * i for i in dx], axis = 0).dimshuffle('x', 0, 1, 2)
#ofy[:, ::3, :, :] -= 1
#ofy[:, 2::3, :, :] += 1
one_mat = ones((1, ofx.partial_shape[2], ofx.partial_shape[3]))
one_mat = Concat([one_mat * i for i in dy], axis = 0)
one_mat = Concat([one_mat] * ker_shape, axis = 0)
ofy += one_mat.dimshuffle('x', 0, 1, 2)
ofx = Max(Min(ofx, arg_fea.partial_shape[2] - 1), 0)
ofy = Max(Min(ofy, arg_fea.partial_shape[3] - 1), 0)
def DeformReshape(inp, ker_shape):
inp = inp.reshape(inp.shape[0], ker_shape, ker_shape, inp.shape[2], inp.partial_shape[3])
inp = inp.dimshuffle(0, 3, 1, 4, 2)
inp = inp.reshape(inp.shape[0], inp.shape[1] * inp.shape[2], inp.shape[3] * inp.shape[4])
return inp
ofx = DeformReshape(ofx, ker_shape)
ofy = DeformReshape(ofy, ker_shape)
bilx = DeformReshape(bilx, ker_shape)
bily = DeformReshape(bily, ker_shape)
of = ofx * arg_fea.partial_shape[2] + ofy
arg_fea = arg_fea.reshape(arg_fea.shape[0], arg_fea.shape[1], -1)
of = of.reshape(ofx.shape[0], -1)
of = of.dimshuffle(0, 'x', 1)
#of = Concat([of] * arg_fea.partial_shape[1], axis = 1)
of = of.broadcast((of.shape[0], arg_fea.shape[1], of.shape[2]))
arx = Linspace(0, arg_fea.shape[0], arg_fea.shape[0], endpoint = False)
arx = arx.add_axis(1).add_axis(2).broadcast(of.shape)
ary = Linspace(0, arg_fea.shape[1], arg_fea.shape[1], endpoint = False)
ary = ary.add_axis(0).add_axis(2).broadcast(of.shape)
of = of.add_axis(3)
arx = arx.add_axis(3)
ary = ary.add_axis(3)
idxmap = Astype(Concat([arx, ary, of], axis = 3), np.int32)
"""
sample = []
for i in range(arg_fea.partial_shape[0]):
for j in range(arg_fea.partial_shape[1]):
sample.append(arg_fea[i][j].ai[of[i][j]].dimshuffle('x', 0))
sample = Concat(sample, axis = 0)
"""
sample = IndexingRemap(arg_fea, idxmap).reshape(inp.shape[0], inp.shape[1], bilx.shape[1], -1)
bilx = bilx.dimshuffle(0, 'x', 1, 2).broadcast(sample.shape)
bily = bily.dimshuffle(0, 'x', 1, 2).broadcast(sample.shape)
sample *= bilx * bily
outputs.append(sample)
output = outputs[0]
for i in outputs[1:]:
output += i
return Pooling2D(name, output, window = 2, mode = "AVERAGE")
def make_network(minibatch_size = 128):
patch_size = 32
inp = DataProvider("data", shape = (minibatch_size, 3, patch_size, patch_size))
label = DataProvider("label", shape = (minibatch_size, ))
#lay = bn_relu_conv(inp, 3, 1, 1, 16, False, False)
lay, conv = conv_bn(inp, 3, 1, 1, 16, True)
out = [conv]
for chl in [32, 64, 128]:
for i in range(10):
lay, conv = conv_bn(lay, 3, 1, 1, chl, True)
out.append(conv)
if chl != 128:
lay = dfpooling("pooling{}".format(chl), lay)
#global average pooling
print(lay.partial_shape)
feature = lay.mean(axis = 2).mean(axis = 2)
#feature = Pooling2D("glbpoling", lay, window = 8, stride = 8, mode = "AVERAGE")
pred = Softmax("pred", FullyConnected(
"fc0", feature, output_dim = 10,
W = G(mean = 0, std = (1 / feature.partial_shape[1])**0.5),
b = C(0),
nonlinearity = Identity()
))
network = Network(outputs = [pred] + out)
network.loss_var = CrossEntropyLoss(pred, label)
return network
if __name__ == '__main__':
make_network()
|
[
"315603442@qq.com"
] |
315603442@qq.com
|
f2a847b465f50e422b5e138f8c78615c567e22df
|
8f494a0663fa056812ca1edb5fbb866ddb22aa37
|
/bin/plot_stars_and_frames.py
|
9bd401dcd4065ae7446637628d6c08725bc9e99c
|
[] |
permissive
|
desihub/teststand
|
c2ea0ea3efbcb2e7e5d041261d40a3b7378c88c8
|
fc1bc9630b6a295140ef98ed35ebfe0acfb199d2
|
refs/heads/master
| 2020-02-26T16:37:39.111241
| 2020-02-25T23:48:49
| 2020-02-25T23:48:49
| 55,595,894
| 0
| 1
|
BSD-3-Clause
| 2019-04-26T17:51:22
| 2016-04-06T10:10:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,445
|
py
|
#!/usr/bin/env python
import os
import sys
import argparse
import numpy as np
import astropy.io.fits as pyfits
import astropy.units as units
import matplotlib.pyplot as plt
from desispec.io import read_frame
from desispec.interpolation import resample_flux
from desispec.io.filters import load_legacy_survey_filter
band="R"
fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Display spectra, looping over targets if targetid not set, and optionally show best fit from redrock"
)
parser.add_argument('--cframes', type = str, default = None, required = True, nargs="*",
help = 'path to cframe fits files')
parser.add_argument('--stdstars', type = str, default = None, required = True,
help = 'path to stdstars fits files')
args = parser.parse_args()
stars_filename = args.stdstars #"stdstars-0-00051001.fits"
frame_filenames = args.cframes #["cframe-r0-00051001.fits","cframe-b0-00051001.fits","cframe-z0-00051001.fits"]
h=pyfits.open(stars_filename)
h.info()
fibers=h["FIBERS"].data
table=h["METADATA"].data
print("std stars fibers=",fibers)
model_wave = h["WAVELENGTH"].data
model_flux = h["FLUX"].data
frames=[]
for frame_filename in frame_filenames :
frame = read_frame(frame_filename)
selection=np.intersect1d(frame.fibermap["FIBER"],fibers)
frame = frame[selection]
frames.append(frame)
for i,fiber in enumerate(fibers) :
j=np.where(frame.fibermap['FIBER']==fiber)[0][0]
print("fiber={}, i={}, j={}".format(fiber,i,j))
photsys = frame.fibermap['PHOTSYS'][j]
filter_response=load_legacy_survey_filter("R",photsys)
model_mag=filter_response.get_ab_magnitude(model_flux[i]*fluxunits,model_wave)
fiber_mag=-2.5*np.log10(frame.fibermap['FLUX_R'][j])+22.5
print("model mag={:4.2f} fiber mag={:4.2f}".format(model_mag,fiber_mag))
a=0
for frame in frames :
mflux = resample_flux(frame.wave,model_wave,model_flux[i])
rflux = frame.R[j].dot(mflux)
plt.plot(frame.wave,frame.flux[j])
plt.plot(frame.wave,rflux,c="k",alpha=0.6)
if a==0 :
a=np.sum(frame.flux[j]*rflux)/np.sum(rflux**2)
print("scale a={}".format(a))
plt.plot(frame.wave,rflux*a,c="gray",alpha=0.6)
plt.show()
|
[
"jguy@lbl.gov"
] |
jguy@lbl.gov
|
2f6fb73fff92314c3df9607f2a9a43dde4896e14
|
f949d54a1bdb26a124fb0f690f16c46cfcd6ecc3
|
/2 - Even Fibonacci Numbers/Brute Force.py
|
c81379ba6ac5f903349a5172e55ef50db5604624
|
[] |
no_license
|
Brain13/Project-Euler
|
d57b35b7e7ca19081f94d21a70bece7a25caa329
|
b88e54aa9c37174b37cde3f10b480dfce9fc7776
|
refs/heads/master
| 2021-01-19T07:13:32.147420
| 2015-04-11T05:18:22
| 2015-04-11T05:20:01
| 33,696,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
#!/usr/bin/env python3
MAX_VALUE = 4000000
# iterative fibonacci
# this sequence starts with 1, 2, 3, 5...
fib1 = 1
fib2 = 2
runningTotal = 0
while fib1 < MAX_VALUE:
if fib1 % 2 == 0:
runningTotal += fib1
fib3 = fib1 + fib2
fib1 = fib2
fib2 = fib3
print(runningTotal)
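# Equivalent sketch (not part of the original solution): every third Fibonacci
# number is even, and the even terms obey E(n) = 4*E(n-1) + E(n-2) with
# E(1) = 2, E(2) = 8, so the even values can be generated directly:
evenA, evenB = 2, 8
evenTotal = 0
while evenA < MAX_VALUE:
    evenTotal += evenA
    evenA, evenB = evenB, 4 * evenB + evenA
print(evenTotal)  # same result as runningTotal above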
|
[
"BrianKlinect@gmail.com"
] |
BrianKlinect@gmail.com
|
39cdc6f248a4a68259c53e7807d24f3070950075
|
788e202b4a9d33b419e0b32dc1aaf1325e5cc3db
|
/Lib/site-packages/pybitbucket/build.py
|
33ad6bf82c6deb3d5fe9ebcf3c6753465338addc
|
[] |
no_license
|
DShaw14/supportal-web
|
a8de71042d5c277e7702fbfaf116070022e3aac7
|
152c8f8030a673791de2af50f92106c4752af6ed
|
refs/heads/master
| 2022-10-15T07:21:54.517723
| 2021-01-27T04:58:10
| 2021-01-27T04:58:10
| 74,401,106
| 0
| 1
| null | 2022-10-05T13:19:43
| 2016-11-21T20:03:15
|
Python
|
UTF-8
|
Python
| false
| false
| 4,375
|
py
|
# -*- coding: utf-8 -*-
"""
Defines the BuildStatus resource and registers the type with the Client.
Classes:
- BuildStatusStates: enumerates the possible states of a build status
- BuildStatus: represents the result of a build
"""
from uritemplate import expand
from pybitbucket.bitbucket import Bitbucket, BitbucketBase, Client, enum
BuildStatusStates = enum(
'BuildStatusStates',
INPROGRESS='INPROGRESS',
SUCCESSFUL='SUCCESSFUL',
FAILED='FAILED')
class BuildStatus(BitbucketBase):
id_attribute = 'key'
resource_type = 'build'
@staticmethod
def is_type(data):
return (BuildStatus.has_v2_self_url(data))
@staticmethod
def make_payload(
key,
state,
url,
name=None,
description=None):
BuildStatusStates.expect_valid_value(state)
payload = {
'key': key,
'state': state,
'url': url,
}
# Since server defaults may change, method defaults are None.
# If the parameters are not provided, then don't send them
# so the server can decide what defaults to use.
if name is not None:
payload.update({'name': name})
if description is not None:
payload.update({'description': description})
return payload
@staticmethod
def create_buildstatus(
owner,
repository_name,
revision,
key,
state,
url,
name=None,
description=None,
client=Client()):
template = (
'{+bitbucket_url}' +
'/2.0/repositories{/owner,repository_name}' +
'/commit{/revision}/statuses/build')
# owner, repository_name, and revision are required
api_url = expand(
template, {
'bitbucket_url': client.get_bitbucket_url(),
'owner': owner,
'repository_name': repository_name,
'revision': revision
})
payload = BuildStatus.make_payload(
key=key,
state=state,
url=url,
name=name,
description=description)
return BuildStatus.post(api_url, json=payload, client=client)
"""
A convenience method for changing the current build status.
"""
def modify(
self,
key=None,
state=None,
url=None,
name=None,
description=None):
if (state is None):
state = self.state
if (key is None):
key = self.key
if (url is None):
url = self.url
if (name is None):
name = self.name
if (description is None):
description = self.description
payload = self.make_payload(
state=state,
key=key,
name=name,
url=url,
description=description)
return self.put(json=payload)
"""
A convenience method for finding a specific build status.
In contrast to the pure hypermedia driven method on the Bitbucket
class, this method returns a BuildStatus object, instead of the
generator.
"""
@staticmethod
def find_buildstatus_for_repository_commit_by_key(
repository_name,
revision,
key,
owner=None,
client=Client()):
if (owner is None):
owner = client.get_username()
return next(
Bitbucket(client=client).repositoryCommitBuildStatusByKey(
owner=owner,
repository_name=repository_name,
revision=revision,
key=key))
"""
A convenience method for finding build statuses
for a repository's commit.
The method is a generator BuildStatus objects.
"""
@staticmethod
def find_buildstatuses_for_repository_commit(
repository_name,
revision,
owner=None,
client=Client()):
if (owner is None):
owner = client.get_username()
return Bitbucket(client=client).repositoryCommitBuildStatuses(
owner=owner,
repository_name=repository_name,
revision=revision)
Client.bitbucket_types.add(BuildStatus)
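# Minimal usage sketch (illustrative; the owner/repository/revision values and a
# pre-configured Client are assumptions, not part of this module):
#
#   client = Client()  # assumes credentials are already set up
#   BuildStatus.create_buildstatus(
#       owner='myteam', repository_name='myrepo', revision='abc123',
#       key='CI-1', state=BuildStatusStates.SUCCESSFUL,
#       url='https://ci.example.com/build/1', client=client)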
|
[
"david14shaw@gmail.com"
] |
david14shaw@gmail.com
|
0cbd04b6ef65ee08c9dc3e2028930c50f679cbb3
|
c800cba645625b24ff9b2e5fd75812f950f3aa2d
|
/main/migrations/0001_initial.py
|
33c4d1498022ccb09c506d1454b26d526450c953
|
[] |
no_license
|
ZICCORP/booknow
|
0d68592b8604a89432da6b1d0c9733da72d967b4
|
7815471c03d503cdebeee5ba9e96cff2f62a4add
|
refs/heads/master
| 2022-12-13T05:21:25.688799
| 2020-09-12T01:24:26
| 2020-09-12T01:24:26
| 290,068,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
# Generated by Django 2.2 on 2020-09-03 15:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('description', models.TextField(blank=True)),
('price', models.DecimalField(decimal_places=2, max_digits=6)),
('slug', models.SlugField(max_length=48)),
('active', models.BooleanField(default=True)),
('in_stock', models.BooleanField(default=True)),
('date_updated', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"frankchuka250@gmail.com"
] |
frankchuka250@gmail.com
|
4002df1f037029048dc4fdc1f9909bf1a602c0d6
|
4c0707c00eb437fe80cbae46ebcf90ae28690430
|
/experts/migrations/0027_auto__del_field_langue_nom__add_field_langue_nomlangue.py
|
a51fa413e427f4c5b84a94f66c553534a52537dd
|
[] |
no_license
|
matnode/lesexpertsauf
|
4d2ea8c9a7348a825eeb648df6c11385425a608b
|
106b1982ac4b35015b91e4b9487c402a334ddae3
|
refs/heads/master
| 2021-01-02T22:32:34.014121
| 2015-09-29T11:34:03
| 2015-09-29T11:34:03
| 42,111,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,688
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Langue.nom'
db.delete_column('experts_langue', 'nom')
# Adding field 'Langue.nomlangue'
db.add_column('experts_langue', 'nomlangue',
self.gf('django.db.models.fields.CharField')(default=0, max_length=255),
keep_default=False)
def backwards(self, orm):
# Adding field 'Langue.nom'
db.add_column('experts_langue', 'nom',
self.gf('django.db.models.fields.CharField')(default=0, max_length=255),
keep_default=False)
# Deleting field 'Langue.nomlangue'
db.delete_column('experts_langue', 'nomlangue')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'experts.competence': {
'Meta': {'object_name': 'Competence'},
'description': ('django.db.models.fields.TextField', [], {}),
'human': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experts.Human']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nom': ('django.db.models.fields.TextField', [], {})
},
'experts.entreprise': {
'Meta': {'object_name': 'Entreprise'},
'activite': ('django.db.models.fields.TextField', [], {}),
'adresse': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'codepostale': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'datedefondation': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nom': ('django.db.models.fields.TextField', [], {}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'siteweb': ('django.db.models.fields.TextField', [], {}),
'taille': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'ville': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'experts.formation': {
'Meta': {'object_name': 'Formation'},
'datedebut': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datefin': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {}),
'diplome': ('django.db.models.fields.TextField', [], {}),
'ecole': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'human': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experts.Human']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intitule': ('django.db.models.fields.TextField', [], {}),
'lieu': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'experts.human': {
'Meta': {'object_name': 'Human'},
'adresse': ('django.db.models.fields.TextField', [], {}),
'civilite': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'codepostale': ('django.db.models.fields.TextField', [], {}),
'datecreation': ('django.db.models.fields.DateTimeField', [], {}),
'datenaissance': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'niveauetude': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'nom': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'online': ('django.db.models.fields.IntegerField', [], {}),
'pays': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'prenom': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'signature': ('django.db.models.fields.TextField', [], {}),
'siteweb': ('django.db.models.fields.TextField', [], {}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'ville': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'experts.langue': {
'Meta': {'object_name': 'Langue'},
'human': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experts.Human']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'niveau': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nomlangue': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'experts.loisir': {
'Meta': {'object_name': 'Loisir'},
'description': ('django.db.models.fields.TextField', [], {}),
'human': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experts.Human']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titre': ('django.db.models.fields.TextField', [], {})
},
'experts.mission': {
'Meta': {'object_name': 'Mission'},
'competence': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['experts.Competence']", 'symmetrical': 'False'}),
'competenceutilisee': ('django.db.models.fields.TextField', [], {}),
'datedebut': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datefin': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {}),
'entreprise': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fonction': ('django.db.models.fields.TextField', [], {}),
'human': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experts.Human']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titre': ('django.db.models.fields.TextField', [], {})
},
'experts.offre': {
'Meta': {'object_name': 'Offre'},
'contactoffre': ('django.db.models.fields.TextField', [], {}),
'datedebut': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'datefin': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'description': ('django.db.models.fields.TextField', [], {}),
'entreprise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experts.Entreprise']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intitule': ('django.db.models.fields.TextField', [], {}),
'mission': ('django.db.models.fields.TextField', [], {}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'salairemax': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'salairemin': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'secteuractivite': ('django.db.models.fields.TextField', [], {}),
'typeoffre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'ville': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'experts.typecompte': {
'Meta': {'object_name': 'Typecompte'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'typedecompte': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['experts']
|
[
"martial.nodem@auf.org"
] |
martial.nodem@auf.org
|
97fd972b1c27f2217820fd2196cd6048b8d568eb
|
abb3daa870fc818980af90893e197517b95467af
|
/DataCollection/eventbriteData.py
|
cec994bb110bd8558443457374bd3db244f47938
|
[] |
no_license
|
souless94/main-1
|
9ecc19954ba7caa1e083f812faee8d2ce3219c21
|
2b95ab940c0f618e67087d80a7215d46800fe1ef
|
refs/heads/master
| 2020-05-07T12:25:23.815342
| 2019-06-04T04:00:25
| 2019-06-04T04:00:25
| 179,941,599
| 0
| 0
| null | 2019-04-07T09:08:34
| 2019-04-07T09:08:34
| null |
UTF-8
|
Python
| false
| false
| 3,283
|
py
|
# file to collect data from eventbrite
from requests import get
import os
from datetime import datetime
class EventbriteData:
def __init__(self):
# f = open(r"C:\Users\wen kai\Downloads\y4s2\event-advisor\DataCollection\token.txt","r")
# credentials = f.read()
# f.close()
# txt_arr = credentials.split("\n")
# self._token = txt_arr[0]
self._token = os.environ.get('event-token')
self._url = "https://www.eventbriteapi.com/v3/events/search"
self.category_dict = {"Arts": "Performing & Visual Arts",
"Business": "Business & Professional",
"Charity": "Charity & Causes",
"Culture": "Community & Culture",
"Education": "Family & Education",
"Family": "Family & Education",
"Fashion": "Fashion & Beauty",
"Film": "Film, Media & Entertainment",
"Food": "Food & Drink",
"Health": "Health & Wellness",
"Hobbies": "Hobbies & Special Interest",
"Music": "Music",
"Outdoors": "Travel & Outdoor",
"Religion": "Religion & Spirituality",
"Tech": "Science & Technology",
"Sports": "Sports & Fitness"}
def getData(self,page,searchtxt):
eventsearchtxt = self.category_dict.get(searchtxt)
if (eventsearchtxt is not None):
searchtxt = eventsearchtxt
payload = {"q":searchtxt,
"location.address":"singapore",
"page":page,
"sort_by": "date",
"expand":"venue",
"token":self._token}
response = get(self._url,params=payload).json()["events"]
return response
def getEventName(self,items):
names = []
for i in range(len(items)):
name = "Eventbrite " + items[i].get("name").get("text")
names.append(name)
return names
def getEventUrl(self,items):
urls = []
for i in range(len(items)):
url = items[i].get("url")
urls.append(url)
return urls
def getEventlocation(self,items):
eventLocations=[]
for i in range(len(items)):
eventLocation = items[i].get('venue').get('address')
eventAddress = eventLocation.get('localized_address_display')
eventLocations.append(eventAddress)
return eventLocations
def getEventTime(self,items):
eventTimes=[]
for i in range(len(items)):
# Date & Start Time
date_time = items[i].get("start")["local"].split("T")
starttime = datetime.strptime(date_time[1],"%H:%M:%S")
eventTimes.append(str(starttime))
return eventTimes
def getEventDate(self,items):
eventDates=[]
for i in range(len(items)):
# Date & Start Time
date_time = items[i].get("start")["local"].split("T")
date = datetime.strptime(date_time[0], "%Y-%m-%d")
date = datetime.strftime(date,"%Y-%m-%d")
eventDates.append(str(date))
return eventDates
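if __name__ == "__main__":
    # Illustrative usage sketch (assumes the 'event-token' environment variable
    # holds a valid Eventbrite API token; performs a live HTTP request):
    eb = EventbriteData()
    items = eb.getData(1, "Tech")
    for name, url in zip(eb.getEventName(items), eb.getEventUrl(items)):
        print(name, "->", url)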
|
[
"cl.wk@hotmail.com"
] |
cl.wk@hotmail.com
|
cdefd2ad395a69ada95e1118b6ce8f7617d09066
|
acfab9011d276323ce4aa24075aee35f470c17b8
|
/13. Exceptions/99.2.IndexError.py
|
f2da757e4c9c5d3013194297b85368b872a98dc8
|
[] |
no_license
|
Engi20/Python-Programming-for-Beginners
|
ad84d66a5ce9dd4e6ab23acde13f74607b87ead1
|
fa02fcd265f8d7145e554267435c7e73ed562e36
|
refs/heads/master
| 2022-06-07T05:56:17.326070
| 2020-05-02T17:41:20
| 2020-05-02T17:41:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
l = [10,20,30,40]
#print(l[10])
#print(l)
try:
print(l[10])
except IndexError as e:
print(e)
print(l)
|
[
"noreply@github.com"
] |
Engi20.noreply@github.com
|
6ad3be5505734d9c10992339771c730efc9c9099
|
3fbd28e72606e5358328bfe4b99eb0349ca6a54f
|
/.history/a_Young_Physicist_20210607192618.py
|
36507965512f61ea782fbecec4eb9fd7112b6c47
|
[] |
no_license
|
Tarun1001/codeforces
|
f0a2ef618fbd45e3cdda3fa961e249248ca56fdb
|
576b505d4b8b8652a3f116f32d8d7cda4a6644a1
|
refs/heads/master
| 2023-05-13T04:50:01.780931
| 2021-06-07T21:35:26
| 2021-06-07T21:35:26
| 374,399,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
n= int(input())
x=[]
for i in range(n):
    p=list(map(int,input().split()))
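    # The .history snapshot stops mid-loop; below is an illustrative completion
    # (Codeforces 69A "A Young Physicist": the body is in equilibrium iff every
    # coordinate sums to zero).
    x.append(p)
print("YES" if all(sum(c) == 0 for c in zip(*x)) else "NO")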
|
[
"tarunsivasai8@gmail.com"
] |
tarunsivasai8@gmail.com
|
abcbf44cdf9d64ce61f4ab61659e75a0b56b5847
|
83fe081b5ab66e63116774858bebd32a8e5f50ce
|
/resolution/harmonic.py
|
daf618fc0c9d0f8e0a1c603472afdcddae85006c
|
[] |
no_license
|
dborzov/Light-in-Flight
|
d0ff638f0ea9aaade242d84d2d1c02d2898e3597
|
798627e4d98d5d5b66ecbc68bd665bc1263b98d9
|
refs/heads/master
| 2020-06-09T02:34:30.702774
| 2013-07-15T00:50:05
| 2013-07-15T00:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def component(m):
flips = [x/float(m) + 1./float(2.*m) for x in range(m)]
flips.append(1.)
return flips
def l2(interval,m):
sign = (-1.)**len([x for x in component(m) if interval[0] >= x])
inside = [x for x in component(m) if interval[0] < x and interval[1] > x]
array = [interval[0]] + inside + [interval[1]]
l2 = sign * sum([(-1)**i*(array[i+1]-x) for i,x in enumerate(array[:-1])])
return l2
INTERVAL = [0.07,0.3]
fourier = [l2(INTERVAL,n) for n in range(1,100)]
norma = np.abs(fourier[0])
significant = [i for i,x in enumerate(fourier) if np.abs(x) > 0.05*norma]
threshold = max(significant)
print(threshold)
xx = [x for x, _ in enumerate(fourier)]
print(fourier)
plt.axhline(0, color='black', lw=2)
plt.vlines(xx,[0],fourier)
plt.vlines(threshold,-norma,norma,color='r')
plt.savefig('fourier.png')
plt.clf()
|
[
"tihoutrom@gmail.com"
] |
tihoutrom@gmail.com
|
532e2122130f05f313aa1e7cd3a9aed542525cf1
|
f3d5a903c4dfb87b0a53d46c047a1cd39e147577
|
/colorplor.py
|
a153d33e0a57f2969cf4816f3b5d57d2b11517e1
|
[] |
no_license
|
vishalgolcha/Computational-Physics
|
60b40272cb5265822bc4717adee1f0ddd0070217
|
dd9c97bc160cb4860cf63bb2fc078d66ac374713
|
refs/heads/master
| 2021-06-10T17:09:36.634995
| 2017-01-19T04:10:20
| 2017-01-19T04:10:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
import numpy as np
import matplotlib
matplotlib.use('QT4Agg')
import matplotlib.pyplot as plt
H = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]]) # added some commas and array creation code
fig = plt.figure(figsize=(6, 3.2))
ax = fig.add_subplot(111)
ax.set_title('colorMap')
plt.imshow(H)
ax.set_aspect('equal')
cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
cax.patch.set_alpha(0)
cax.set_frame_on(False)
plt.colorbar(orientation='vertical')
plt.savefig("colorplot.png")  # save before show(): after show() the canvas may be empty
plt.show()
|
[
"vishalgolcha@hotmail.com"
] |
vishalgolcha@hotmail.com
|
78b88ce44a1b69163f787739f2f16b29b7c3a456
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02400/s247086956.py
|
5fa80293019ffc5a8ea215926eec8e94cb127c10
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
import math
r = float(input())
s = r * r * math.pi
l = r * 2.0 * math.pi
print("{:f} {:f}".format(s, l))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9167ad416df88889048241c2117b86f525de23fb
|
f27d87c7039a978824251a2f8bbef7c3524f5e7b
|
/whole/lat.py
|
5acb0af39bc504a6a99980b256d8c8bc91fa65c5
|
[] |
no_license
|
tonyre4/CSPmio
|
4ecd22d4511fb7a79183fa8551f2d9e1b7b4b07f
|
4a037973bce2694f247914e872a51bf088a8472c
|
refs/heads/master
| 2022-05-20T02:26:40.817934
| 2020-03-19T00:40:23
| 2020-03-19T00:40:23
| 242,209,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
from pylatex import (Document, TikZ, TikZNode,
TikZDraw, TikZCoordinate,
NoEscape,TikZUserPath, TikZOptions)
import pylatex as pl
def cutReport(num_part,data):
space = 0.8
s = ""
s+= """\\begin{center}Reporte de cortes\\end{center}\\newline\\newline\bNumero de parte:
%s\\hline\\hline\\newline\\newline""" %(num_part)
for cut in data:
s+= "x %d" % cut[0]
for c in cut[1]:
cc = c*space
s+= """\\begin{tikzpicture}
\\draw (0,0) rectangle (%f\\linewidth,1) node[pos=0.5] {Test};
\\end{tikzpicture}""" % cc
s+="\\hline\\newline"
doc = Document()
doc.append(TikZ())
doc.append(pl.utils.NoEscape(s))
doc.generate_pdf('PDFexit', clean_tex=False)
print (s)
cutReport("6050",[[20,[0.5,0.1,0.1,0.2,0.15,0.15],[0.0,0.0]],[21,[0.1,0.1,0.1],[0.1,17.0]]])
|
[
"tonyre4@gmail.com"
] |
tonyre4@gmail.com
|
e3f5ce740dabae5d2ac3f96257e4f3b50c6647a7
|
e7a88984a771e34f125fa9ba550bfd5ba47f2c13
|
/test/test.py
|
f67d51f44c7ca0b50ebd51bc4dc7fe7b7864218d
|
[] |
no_license
|
ljljlj23/flask
|
b53d3e7835f8cd9440022420ae8ff35bd5593e16
|
02a099611e347e2c3818eb8b848c7aa70d0d3ecc
|
refs/heads/master
| 2022-12-10T17:09:54.243851
| 2019-10-15T12:55:11
| 2019-10-15T12:55:11
| 214,779,820
| 0
| 0
| null | 2022-11-23T03:33:10
| 2019-10-13T07:37:39
|
CSS
|
UTF-8
|
Python
| false
| false
| 113
|
py
|
def test(**kwargs):
print(kwargs)
def newfunc(**kwargs):
test(**kwargs)
newfunc(name='zhangsan',age=90)
|
[
"1074819863@qq.com"
] |
1074819863@qq.com
|
5f23b71eab551abbda9843c431936e26edeb3eb1
|
6d7a3764b52fa29cc258974e8390e1f1d3c714ad
|
/2task5.py
|
94b1e1e0e42c31ab70e9bb206775fa21f93f1254
|
[] |
no_license
|
LinJiaB00755804/inwk6312fall
|
1cae60946245f95e458c75a7d9487297a97d4bbc
|
b3e8cd6f8f9e591227c506a51f5aa212474acf0a
|
refs/heads/master
| 2021-07-06T21:02:31.268074
| 2017-10-03T20:47:23
| 2017-10-03T20:47:23
| 103,432,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
import turtle
def lt(t,s):
t.lt(s)
def polygon(t,l,n):
for i in range(n):
t.fd(l)
lt(t,360/n)
bob = turtle.Turtle()
print(bob)
polygon(bob,100,6)
turtle.mainloop()
|
[
"noreply@github.com"
] |
LinJiaB00755804.noreply@github.com
|
05bc7e1cddab918e4effdc029f0e775a0bcfd668
|
6afe8d05916871d08547afe184674acbe5a10ef3
|
/binary_tree.py
|
7ca32e970397816abc58c285d1cf7eae6f5d76af
|
[] |
no_license
|
ryenumu2/Python-Database
|
063988e13ca1da8d8cbb3c79c555d16cfba79af2
|
699cec17c4530843a0c14723cd5aa4230015f029
|
refs/heads/master
| 2022-11-30T15:32:57.011820
| 2020-08-10T23:33:48
| 2020-08-10T23:33:48
| 285,759,750
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
from logic import LogicBase
class BinaryTree(LogicBase): #the binary search tree implementation: child nodes to the left are smaller than their parent nodes and child nodes to the right are larger than their parent nodes
#def __init__(self, node):
# self._pathTo(node) = None
def _get(self,node,key): #iterate through the binary search tree by comparing the passed in key with the node that the BST is currently on.
while node != None:
if key < node.key:
node = self._pathTo(node.left_ref)
elif node.key < key:
node = self._pathTo(node.right_ref)
else:
return self._pathTo(node.value_ref)
raise KeyError
def _insert(self, node, key, value_ref): #recursively run this function definition to add a new node to the binary search tree
if node == None:
newest_node = BinaryNode()
|
[
"ryenumu2@ncsu.edu"
] |
ryenumu2@ncsu.edu
|
d591953bb2e16de6a6843ab053f2d020532add83
|
c4223c042fbb2087b7008ee924b4f2cd1af6276d
|
/deals/models.py
|
9174e722f7f2859226b481f5a57af5f761d6ddc4
|
[] |
no_license
|
Difroz/test
|
fcd51437fa6a0f84dd784c4a88918bef31d7cb68
|
69b88e10d3a01d12c6d597f5c7677d18450b677f
|
refs/heads/main
| 2023-07-09T01:22:56.206728
| 2020-12-17T04:16:07
| 2020-12-17T04:16:07
| 322,042,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,039
|
py
|
from django.db import models, transaction
import csv
import io
class Deal(models.Model):
customer = models.CharField(max_length=200)
item = models.CharField(max_length=200)
total = models.DecimalField(max_digits=7, decimal_places=2)
quantity = models.PositiveIntegerField()
date = models.DateTimeField()
def __str__(self):
return self.customer
@classmethod
def upload_data(cls, csv_file):
"""
Загружает информацию из csv файла в БД
"""
file = csv_file.read().decode('utf-8')
reader = csv.DictReader(io.StringIO(file))
data = [line for line in reader]
with transaction.atomic():
for row in data:
Deal.objects.get_or_create(**row)
return data
@classmethod
def data_processing(cls, start_date=None, end_date=None):
"""
Обрабатывае загруженные в БД данные
"""
result_list = []
if start_date and end_date:
data = Deal.objects.filter(models.Q(date__gte=start_date) & models.Q(date__lte=end_date))
else:
data = Deal.objects.all()
users = data.values('customer').annotate(spend_money=models.Sum('total')).order_by('-spend_money')[:5]
gems_list = Deal.objects.values('item', 'customer').filter(
customer__in=users.values_list('customer', flat=True))
gems = gems_list.values('item').annotate(unique_usr=models.Count('customer', distinct=True)).filter(
unique_usr__gte=2)
for i in users:
user_dict = {}
user_dict['username'] = i['customer']
user_dict['spend_money'] = i['spend_money']
gem = gems_list.values('item').filter(models.Q(item__in=gems.values('item')) & models.Q(customer=i['customer'])).distinct()
user_dict['gems'] = list(gem.values_list('item', flat=True))
result_list.append(user_dict)
return result_list
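# Illustrative usage sketch (assumes a Django shell with migrations applied;
# the file name and date strings below are placeholders):
#
#   with open('deals.csv', 'rb') as f:
#       Deal.upload_data(f)
#   top5 = Deal.data_processing(start_date='2020-01-01', end_date='2020-12-31')
#   # -> [{'username': ..., 'spend_money': ..., 'gems': [...]}, ...]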
|
[
"proger@company.com"
] |
proger@company.com
|
171a3cdb427b5ed0f0934add26311b837838c58b
|
645d1e9b73f382da28e2d0f6494b05fa5e278bf2
|
/answers/lowestCommonAncestor.py
|
f81cf6aa48df56412fb40064598eba03974344dd
|
[] |
no_license
|
xxbeam/leetcode-python
|
63efcba4f0fc1c09ceebca725778dacd9dfd27fd
|
5f1282abb64651c4b67ce0b262456920827fe7dc
|
refs/heads/master
| 2023-07-17T23:33:08.783011
| 2021-09-10T01:47:46
| 2021-09-10T01:47:46
| 345,580,038
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
# 236. Lowest Common Ancestor of a Binary Tree
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if p == q:
return p
node_map = {}
queue = [root]
p_flag = False
q_flag = False
while queue:
temp = []
for node in queue:
if node == p:
p_flag = True
if node == q:
q_flag = True
if p_flag and q_flag:
break
if node.left:
node_map[node.left] = node
temp.append(node.left)
if node.right:
node_map[node.right] = node
temp.append(node.right)
queue = temp
visit = set()
while q:
visit.add(q)
if q in node_map:
q = node_map[q]
else:
q = None
while p:
if p in visit:
return p
if p in node_map:
p = node_map[p]
else:
p = None
return None
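if __name__ == "__main__":
    # Illustrative check (not part of the LeetCode submission): in the tree
    #   3
    #  / \
    # 5   1
    # the lowest common ancestor of 5 and 1 is 3.
    root = TreeNode(3)
    root.left, root.right = TreeNode(5), TreeNode(1)
    print(Solution().lowestCommonAncestor(root, root.left, root.right).val)  # 3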
|
[
"xiongxin@songxiaocai.com"
] |
xiongxin@songxiaocai.com
|
4837ebd038cc18b587ef85ec7d63576591ad3473
|
89ba31bb75f4f3c9b693131512952e21af050350
|
/P04_graficos_B_double.py
|
2f2bf593546a17e6f1f197626495d5873e88ddb4
|
[] |
no_license
|
pablo14simon/pablo-simon-P0
|
1754001accd9d52f584a8abb8389ed690a8803a3
|
3d72b757a933f06df7a0489040932281d64899bf
|
refs/heads/main
| 2023-07-17T23:12:40.652225
| 2021-09-03T23:40:54
| 2021-09-03T23:40:54
| 392,175,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,841
|
py
|
import matplotlib.pyplot as plt
arch1 = open("Caso B.1 con float64 (double).txt", "r")
arch2 = open("Caso B.2 con float64 (double).txt", "r")
arch3 = open("Caso B.3 con float64 (double).txt", "r")
arch4 = open("Caso B.4 con float64 (double).txt", "r")
arch5 = open("Caso B.5 con float64 (double).txt", "r")
arch6 = open("Caso B.6 con float64 (double).txt", "r")
arch6 = open("Caso B.6 con float64 (double).txt", "r")
arch7 = open("Caso B.7 con float64 (double).txt", "r")
arch8 = open("Caso B.8 con float64 (double).txt", "r")
arch9 = open("Caso B.9 con float64 (double).txt", "r")
mat1 = arch1.read()
mat1 = mat1.split("\n")
mat1.pop(0)
mat1.pop(-1)
x=0
uno=0
un=0
e=0
tiempo1 = []
tamaño1 = []
prom1 =[]
M = int(len(mat1)/2)
for d in mat1:
l=d.split()
tiempo1.append(float(l[0]))
tamaño1.append(float(l[1]))
x+=1
while uno<len(tiempo1):
while e<len(mat1):
un+=tiempo1[e]
e+=M
prom1.append(un/10)
e+=1
uno+=10
arch1.close()
mat2 = arch2.read()
mat2 = mat2.split("\n")
mat2.pop(0)
mat2.pop(-1)
x=0
tiempo2 = []
tamaño2 = []
prom=[]
M = int(len(mat2)/2)
for d in mat2:
l=d.split()
tiempo2.append(float(l[0]))
tamaño2.append(float(l[1]))
prom.append((float(l[0]))/10)
x+=1
arch2.close()
mat3 = arch3.read()
mat3 = mat3.split("\n")
mat3.pop(0)
mat3.pop(-1)
x=0
tiempo3 = []
tamaño3 = []
prom3=[]
M = int(len(mat3)/2)
for d in mat3:
l=d.split()
tiempo3.append(float(l[0]))
tamaño3.append(float(l[1]))
prom3.append((float(l[0]))/10)
x+=1
arch3.close()
mat4 = arch4.read()
mat4 = mat4.split("\n")
mat4.pop(0)
mat4.pop(-1)
x=0
uno=0
un=0
e=0
tiempo4 = []
tamaño4 = []
prom4 =[]
M = int(len(mat4)/2)
for d in mat4:
l=d.split()
    tiempo4.append(float(l[0]))
    tamaño4.append(float(l[1]))
x+=1
while uno<len(tiempo4):
while e<len(mat4):
un+=tiempo4[e]
e+=M
prom4.append(un/10)
e+=1
uno+=10
arch4.close()
mat5 = arch5.read()
mat5 = mat5.split("\n")
mat5.pop(0)
mat5.pop(-1)
x=0
uno=0
un=0
e=0
tiempo5 = []
tamaño5 = []
prom5 =[]
M = int(len(mat5)/2)
for d in mat5:
l=d.split()
tiempo5.append(float(l[0]))
tamaño5.append(float(l[1]))
x+=1
while uno<len(tiempo5):
while e<len(mat5):
un+=tiempo5[e]
e+=M
prom5.append(un/10)
e+=1
uno+=10
arch5.close()
mat6 = arch6.read()
mat6 = mat6.split("\n")
mat6.pop(0)
mat6.pop(-1)
x=0
uno=0
un=0
e=0
tiempo6 = []
tamaño6 = []
prom6 =[]
M = int(len(mat6)/2)
for d in mat6:
l=d.split()
tiempo6.append(float(l[0]))
tamaño6.append(float(l[1]))
x+=1
while uno<len(tiempo6):
while e<len(mat6):
un+=tiempo6[e]
e+=M
prom6.append(un/10)
e+=1
uno+=10
arch6.close()
mat7 = arch7.read()
mat7 = mat7.split("\n")
mat7.pop(0)
mat7.pop(-1)
x=0
uno=0
un=0
e=0
tiempo7 = []
tamaño7 = []
prom7 =[]
M = int(len(mat7)/2)
for d in mat7:
l=d.split()
tiempo7.append(float(l[0]))
tamaño7.append(float(l[1]))
x+=1
while uno<len(tiempo7):
while e<len(mat7):
        un+=tiempo7[e]
e+=M
prom7.append(un/10)
e+=1
uno+=10
arch7.close()
mat8 = arch8.read()
mat8 = mat8.split("\n")
mat8.pop(0)
mat8.pop(-1)
x=0
uno=0
un=0
e=0
tiempo8 = []
tamaño8 = []
prom8 =[]
M = int(len(mat8)/2)
for d in mat8:
l=d.split()
tiempo8.append(float(l[0]))
tamaño8.append(float(l[1]))
x+=1
while uno<len(tiempo8):
while e<len(mat8):
        un+=tiempo8[e]
e+=M
prom8.append(un/10)
e+=1
uno+=10
arch8.close()
mat9 = arch9.read()
mat9 = mat9.split("\n")
mat9.pop(0)
mat9.pop(-1)
x=0
uno=0
un=0
e=0
tiempo9 = []
tamaño9 = []
prom9 =[]
M = int(len(mat9)/2)
for d in mat9:
l=d.split()
tiempo9.append(float(l[0]))
tamaño9.append(float(l[1]))
x+=1
while uno<len(tiempo9):
while e<len(mat9):
        un+=tiempo9[e]
e+=M
prom9.append(un/10)
e+=1
uno+=10
arch9.close()
x1 = ["10","20","50","100","200","500","1000","2000","5000","10000","20000"]
T1 = [10,20,50,100,200,500,1000,2000,5000,10000,20000]
y1 = ["0.1 ms", "1 ms", "10 ms","0.1 s", "1 s", "10 s", "1 min", "10 min"]
dt1 = [0.1/1000,1/1000,10/1000,0.1,1,10,60,60*10]
y2 = ["1 KB", "10KB", "100 KB", "1 MB", "10 MB", "100 MB", "1 GB", "10 GB"]
m1 = [1*10**3,10*10**3,100*10**3,1*10**6,10*10**6,100*10**6,1*10**9,10*10**9]
plt.figure(1)
plt.subplot(2,1,1)
plt.title("Rendimiento B todos los casos double")
j=0
while j < len(mat1):
    plt.loglog(tamaño1[j:j+22],tiempo1[0:22], "o-")
    plt.loglog(tamaño2[0:22],tiempo2[0:22], "o-")
    plt.loglog(tamaño3[0:22],tiempo3[0:22], "o-")
    plt.loglog(tamaño4[j:j+22],tiempo4[0:22], "o-")
    plt.loglog(tamaño5[0:22],tiempo5[0:22], "o-")
    plt.loglog(tamaño6[0:22],tiempo6[0:22], "o-")
    plt.loglog(tamaño7[0:22],tiempo7[0:22], "o-")
    plt.loglog(tamaño8[0:22],tiempo8[0:22], "o-")
    plt.loglog(tamaño9[0:22],tiempo9[0:22], "o-")
    j+=M
plt.yticks(dt1, y1)
plt.xticks(T1, x1, rotation=45)
plt.xlim(right=20000)
plt.grid()
plt.ylabel("Tiempo transcurrido (s)")
plt.xlabel("Tamaño matriz M")
plt.savefig("Redimiento de inversion")
plt.show()
|
[
"noreply@github.com"
] |
pablo14simon.noreply@github.com
|
aebecf84a9bd58c8a5a36840047bb1925f631d96
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/framework/e2e/jit/test_Linear_base.py
|
609e726630a951b88424fbc92f5769c4d4b3c8f1
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 617
|
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_Linear_base():
"""test Linear_base"""
jit_case = JitTrans(case=yml.get_case_info("Linear_base"))
jit_case.jit_run()
|
[
"825276847@qq.com"
] |
825276847@qq.com
|
7a9be2675b22dfe3ccc0eefb4cdbc51cb3cb3e1c
|
1f05f79f4dfe196da7849632243f98473cb16be7
|
/knosk/__init__.py
|
cce2a0c3f10b54770b9a5a818808b68d43b50f54
|
[
"MIT"
] |
permissive
|
knosk/knosk-core
|
b15c01b2b2bc38557e8df9d0916dab2340f8a200
|
1d26f7c2f64ace8bed49a42eb1ed50f03ffb6dfe
|
refs/heads/master
| 2020-08-04T13:52:27.139931
| 2019-10-07T18:53:44
| 2019-10-07T18:53:44
| 212,158,152
| 2
| 1
|
MIT
| 2019-10-07T18:53:45
| 2019-10-01T17:31:09
|
Python
|
UTF-8
|
Python
| false
| false
| 141
|
py
|
from knosk.choosers import *
from knosk.core import *
from knosk.fields import *
from knosk.matchers import *
from knosk.suggesters import *
|
[
"makcimkos@gmail.com"
] |
makcimkos@gmail.com
|
61298a4f8b3d7a48d59e50876e618227c7e7ce24
|
518fd7db1a3bf65f142a913378a9d9e2eff0f6bd
|
/tests/remotes/ssh.py
|
c0514b363fb5b09476916c73807415b3a2b71817
|
[
"Apache-2.0"
] |
permissive
|
backwardn/dvc
|
6cd95c45862235e2554f65b7f6cae879f90acb34
|
83a2afc7b05e38014aca224ab85949b0974d9bec
|
refs/heads/master
| 2020-09-13T16:50:22.918981
| 2020-06-26T23:36:56
| 2020-06-26T23:36:56
| 222,845,893
| 0
| 0
|
Apache-2.0
| 2019-11-20T03:49:09
| 2019-11-20T03:49:07
| null |
UTF-8
|
Python
| false
| false
| 3,043
|
py
|
import getpass
import os
from subprocess import CalledProcessError, check_output
import pytest
from funcy import cached_property
from dvc.utils import env2bool
from .base import Base
from .local import Local
TEST_SSH_USER = "user"
TEST_SSH_KEY_PATH = os.path.join(
os.path.abspath(os.path.dirname(__file__)), f"{TEST_SSH_USER}.key"
)
class SSH:
@staticmethod
def should_test():
do_test = env2bool("DVC_TEST_SSH", undefined=None)
if do_test is not None:
return do_test
# FIXME: enable on windows
if os.name == "nt":
return False
try:
check_output(["ssh", "-o", "BatchMode=yes", "127.0.0.1", "ls"])
except (CalledProcessError, OSError):
return False
return True
@staticmethod
def get_url():
return "ssh://{}@127.0.0.1:22{}".format(
getpass.getuser(), Local.get_storagepath()
)
class SSHMocked(Base):
@staticmethod
def get_url(user, port):
path = Local.get_storagepath()
if os.name == "nt":
# NOTE: On Windows Local.get_storagepath() will return an
# ntpath that looks something like `C:\some\path`, which is not
# compatible with SFTP paths [1], so we need to convert it to
# a proper posixpath.
# To do that, we should construct a posixpath that would be
# relative to the server's root.
# In our case our ssh server is running with `c:/` as a root,
# and our URL format requires absolute paths, so the
# resulting path would look like `/some/path`.
#
# [1]https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-6
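            # Worked example (annotation, not in the original comment):
            # "C:\\some\\path" -> splitdrive -> ("C:", "\\some\\path")
            # -> replace backslashes -> "/some/path".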
drive, path = os.path.splitdrive(path)
assert drive.lower() == "c:"
path = path.replace("\\", "/")
url = f"ssh://{user}@127.0.0.1:{port}{path}"
return url
def __init__(self, server):
self.server = server
@cached_property
def url(self):
return self.get_url(TEST_SSH_USER, self.server.port)
@cached_property
def config(self):
return {
"url": self.url,
"keyfile": TEST_SSH_KEY_PATH,
}
@pytest.fixture
def ssh_server():
import mockssh
users = {TEST_SSH_USER: TEST_SSH_KEY_PATH}
with mockssh.Server(users) as s:
yield s
@pytest.fixture
def ssh_connection(ssh_server):
from dvc.remote.ssh.connection import SSHConnection
yield SSHConnection(
host=ssh_server.host,
port=ssh_server.port,
username=TEST_SSH_USER,
key_filename=TEST_SSH_KEY_PATH,
)
@pytest.fixture
def ssh(ssh_server, monkeypatch):
from dvc.remote.ssh import SSHRemoteTree
# NOTE: see http://github.com/iterative/dvc/pull/3501
monkeypatch.setattr(SSHRemoteTree, "CAN_TRAVERSE", False)
return SSHMocked(ssh_server)
@pytest.fixture
def ssh_remote(tmp_dir, dvc, ssh):
tmp_dir.add_remote(config=ssh.config)
yield ssh
|
[
"noreply@github.com"
] |
backwardn.noreply@github.com
|
3e59e5eacd253275ed1c9ad571b36ac64bf88496
|
ae5af2cadc237e18a23d1e4dd3d4a89d2df544d2
|
/ansible_syntax_check.py
|
f68dd7973e702cf7924082ee5012825f9999164e
|
[] |
no_license
|
balasaajay/python_ops_automation
|
96a1f31129040f7aa7a026f127865a6b09fbe433
|
49cf0fec69ad43529e749a91bb34ebeec38bd88b
|
refs/heads/develop
| 2021-01-20T12:12:32.696309
| 2016-06-12T07:06:38
| 2016-06-12T07:06:38
| 59,611,419
| 1
| 1
| null | 2016-05-26T05:19:22
| 2016-05-24T21:56:25
|
Python
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# syntax check for an ansible yaml
import yaml
import sys
try:
    with open('/path/to/playbook.yml', 'r') as f:  # Update with path to your playbook
        playbook = yaml.safe_load(f)
except (IOError, yaml.YAMLError) as e:
    print("Error loading the ansible-playbook, must be a yaml syntax problem: %s" % e)
    sys.exit(1)
else:
    print("YAML syntax looks good.")
|
[
"ajaybre1987@gmail.com"
] |
ajaybre1987@gmail.com
|
ac58df38bea86bc7de2141fe27a069d62b652058
|
74d8b2f864e0e6656c41697e9d2ea180036924ba
|
/0416/0416/ntust/mysite/cms/models.py
|
d44e7259f00997dc43412b00a9e05a71fcb4cc73
|
[] |
no_license
|
ssandylin/JerryHW
|
da124e249d5726668b66ae835b3a8be360512a93
|
313867d4320d70d9f848680128822ae7152799e0
|
refs/heads/master
| 2022-12-06T22:47:56.369762
| 2018-06-04T04:25:20
| 2018-06-04T04:25:20
| 125,645,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from django.db import models
class person(models.Model):
name = models.CharField(max_length=10)
birthday = models.CharField(max_length=10)
    is_girl = models.BooleanField(default=False)
def __str__(self):
return self.name
# Create your models here.
|
[
"37054703+ssandylin@users.noreply.github.com"
] |
37054703+ssandylin@users.noreply.github.com
|
ce1852be828f534ad3ec8cc0ebe37b8417b276a9
|
51899c24fe2ab1ee1a0dd40b58867532f5428004
|
/DC/main.py
|
f5e64c5b20c5ab51afe661e607e07513c179847f
|
[] |
no_license
|
763272955/python
|
a2d07ee37d31c919d0fecb0a798a87bc024b6970
|
b1952ee702227d5624a76410e365dfc38e9beb5c
|
refs/heads/master
| 2021-05-11T11:33:55.246035
| 2018-01-16T06:20:28
| 2018-01-16T06:20:28
| 117,638,934
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
# -*- coding:utf-8 -*-
import os
import urlparse
from methods import google_
from methods import see_page
import sys
reload(sys)
sys.setdefaultencoding("utf-8" )
class DC_Main(object):
def __init__(self, url):
self.url = url
self.keyword = []
self.google = ['inurl:', 'site:']
def get_Keyword(self):
file = open('keyword.txt', 'r')
key = file.readlines()
for k in key:
k = k.replace('\n', '')
if k == '':
continue
self.keyword.append(k)
def run(self):
self.get_Keyword()
e_main = ''
        print u'====== Checking website ======'
print 'url: %s' % self.url
try:
obj = see_page.See_MainPage(self.url, self.keyword)
reason = obj.run()
except IOError, e:
e_main = e.message
print '_______________________'
try:
obj = google_.Google(self.url, self.google, self.keyword)
            print u'== Google search for hidden-link URLs =='
dc = obj.run()
if e_main == '':
if len(dc) == 0 and reason == False:
file = open('output/no.txt', 'a+')
file.write(self.url + '\n')
file.close()
elif len(dc) !=0 and reason == False:
file = open('output/' + urlparse.urlparse(self.url).netloc + '.txt', 'a+')
file.write(u"主页未发现暗链" + '\n')
file.write("________________________________" + '\n')
for x in dc:
file.write(x + '\n')
file.close()
else:
file = open('output/' + urlparse.urlparse(self.url).netloc + '.txt', 'a+')
file.write("________________________________" + '\n')
for x in dc:
file.write(x + '\n')
file.close()
else:
if len(dc) != 0:
file = open('output/' + urlparse.urlparse(self.url).netloc + '.txt', 'a+')
file.write(u"主页连接失败, 原因: %s" % e_main + '\n')
file.write("________________________________" + '\n')
for x in dc:
file.write(x + '\n')
file.close()
else:
file = open('output/no.txt', 'a+')
file.write(self.url + '\n')
file.close()
print '_______________________'
except IOError, e:
e_google = e.message
print e_google
if __name__ == "__main__":
file = open('url.txt', 'r')
for url in file.readlines():
url = url.replace('\n', '')
if url == '':
continue
url_parser = urlparse.urlparse(url)
if url_parser.scheme != '':
url = url_parser.netloc
if os.path.exists('output/' + url + '.txt'):
file = open('output/' + url + '.txt', 'w')
file.write('')
file.close()
url_parser = urlparse.urlparse(url)
if url_parser.scheme == '':
url = 'http://' + url
DC_Main(url).run()
file.close()
|
[
"33079717+763272955@users.noreply.github.com"
] |
33079717+763272955@users.noreply.github.com
|
f7f5297ba162fe5008c618168f67f3604586d3e7
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/scripts/speech_recognition/confidence/benchmark_asr_confidence.py
|
8922fe09176db4b0b0767adb7d1e7ad616a721cd
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 12,042
|
py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from dataclasses import dataclass, is_dataclass
from pathlib import Path
from typing import Optional
import pytorch_lightning as pl
import torch
from omegaconf import MISSING, OmegaConf
from sklearn.model_selection import ParameterGrid
from nemo.collections.asr.metrics.rnnt_wer import RNNTDecodingConfig
from nemo.collections.asr.metrics.wer import CTCDecodingConfig
from nemo.collections.asr.models import ASRModel, EncDecRNNTModel
from nemo.collections.asr.parts.utils.asr_confidence_benchmarking_utils import (
apply_confidence_parameters,
run_confidence_benchmark,
)
from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceConfig
from nemo.core.config import hydra_runner
from nemo.utils import logging, model_utils
"""
Get confidence metrics and curve plots for a given model, dataset, and confidence parameters.
# Arguments
model_path: Path to .nemo ASR checkpoint
pretrained_name: Name of pretrained ASR model (from NGC registry)
dataset_manifest: Path to dataset JSON manifest file (in NeMo format)
output_dir: Output directory to store a report and curve plot directories
batch_size: batch size during inference
num_workers: number of workers during inference
cuda: Optional int to enable or disable execution of model on certain CUDA device
amp: Bool to decide if Automatic Mixed Precision should be used during inference
audio_type: Str filetype of the audio. Supported = wav, flac, mp3
target_level: Word- or token-level confidence. Supported = word, token, auto (for computing both word and token)
confidence_cfg: Config with confidence parameters
grid_params: Dictionary with lists of parameters to iteratively benchmark on
# Usage
ASR model can be specified by either "model_path" or "pretrained_name".
Data for transcription are defined with "dataset_manifest".
Results are returned as a benchmark report and curve plots.
python benchmark_asr_confidence.py \
model_path=null \
pretrained_name=null \
dataset_manifest="" \
output_dir="" \
batch_size=64 \
num_workers=8 \
cuda=0 \
amp=True \
target_level="word" \
confidence_cfg.exclude_blank=False \
'grid_params="{\"aggregation\": [\"min\", \"prod\"], \"alpha\": [0.33, 0.5]}"'
"""
def get_experiment_params(cfg):
"""Get experiment parameters from a confidence config and generate the experiment name.
Returns:
List of experiment parameters.
String with the experiment name.
"""
blank = "no_blank" if cfg.exclude_blank else "blank"
aggregation = cfg.aggregation
method_name = cfg.measure_cfg.name
alpha = cfg.measure_cfg.alpha
if method_name == "entropy":
entropy_type = cfg.measure_cfg.entropy_type
entropy_norm = cfg.measure_cfg.entropy_norm
experiment_param_list = [
aggregation,
str(cfg.exclude_blank),
method_name,
entropy_type,
entropy_norm,
str(alpha),
]
experiment_str = "-".join([aggregation, blank, method_name, entropy_type, entropy_norm, str(alpha)])
else:
experiment_param_list = [aggregation, str(cfg.exclude_blank), method_name, "-", "-", str(alpha)]
experiment_str = "-".join([aggregation, blank, method_name, str(alpha)])
return experiment_param_list, experiment_str
@dataclass
class ConfidenceBenchmarkingConfig:
# Required configs
model_path: Optional[str] = None # Path to a .nemo file
pretrained_name: Optional[str] = None # Name of a pretrained model
dataset_manifest: str = MISSING
output_dir: str = MISSING
# General configs
batch_size: int = 32
num_workers: int = 4
# Set `cuda` to int to define CUDA device. If 'None', will look for CUDA
# device anyway, and do inference on CPU only if CUDA device is not found.
# If `cuda` is a negative number, inference will be on CPU only.
cuda: Optional[int] = None
amp: bool = False
audio_type: str = "wav"
# Confidence configs
target_level: str = "auto" # Choices: "word", "token", "auto" (for both word- and token-level confidence)
confidence_cfg: ConfidenceConfig = ConfidenceConfig(preserve_word_confidence=True, preserve_token_confidence=True)
grid_params: Optional[str] = None # a dictionary with lists of parameters to iteratively benchmark on
@hydra_runner(config_name="ConfidenceBenchmarkingConfig", schema=ConfidenceBenchmarkingConfig)
def main(cfg: ConfidenceBenchmarkingConfig):
torch.set_grad_enabled(False)
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!")
# setup GPU
if cfg.cuda is None:
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
else:
device = [cfg.cuda]
accelerator = 'gpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
# setup model
if cfg.model_path is not None:
# restore model from .nemo file path
model_cfg = ASRModel.restore_from(restore_path=cfg.model_path, return_config=True)
classpath = model_cfg.target # original class path
imported_class = model_utils.import_class_by_path(classpath) # type: ASRModel
logging.info(f"Restoring model : {imported_class.__name__}")
asr_model = imported_class.restore_from(
restore_path=cfg.model_path, map_location=map_location
) # type: ASRModel
else:
# restore model by name
asr_model = ASRModel.from_pretrained(
model_name=cfg.pretrained_name, map_location=map_location
) # type: ASRModel
trainer = pl.Trainer(devices=device, accelerator=accelerator)
asr_model.set_trainer(trainer)
asr_model = asr_model.eval()
# Check if ctc or rnnt model
is_rnnt = isinstance(asr_model, EncDecRNNTModel)
# Check that the model has the `change_decoding_strategy` method
if not hasattr(asr_model, 'change_decoding_strategy'):
raise RuntimeError("The asr_model you are using must have the `change_decoding_strategy` method.")
# get filenames and reference texts from manifest
filepaths = []
reference_texts = []
if os.stat(cfg.dataset_manifest).st_size == 0:
logging.error(f"The input dataset_manifest {cfg.dataset_manifest} is empty. Exiting!")
return None
manifest_dir = Path(cfg.dataset_manifest).parent
with open(cfg.dataset_manifest, 'r') as f:
for line in f:
item = json.loads(line)
audio_file = Path(item['audio_filepath'])
if not audio_file.is_file() and not audio_file.is_absolute():
audio_file = manifest_dir / audio_file
filepaths.append(str(audio_file.absolute()))
reference_texts.append(item['text'])
# setup AMP (optional)
autocast = None
if cfg.amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
# do grid-based benchmarking if grid_params is provided, otherwise a regular one
work_dir = Path(cfg.output_dir)
os.makedirs(work_dir, exist_ok=True)
report_legend = (
",".join(
[
"model_type",
"aggregation",
"blank",
"method_name",
"entropy_type",
"entropy_norm",
"alpha",
"target_level",
"auc_roc",
"auc_pr",
"auc_nt",
"nce",
"ece",
"auc_yc",
"std_yc",
"max_yc",
]
)
+ "\n"
)
model_typename = "RNNT" if is_rnnt else "CTC"
report_file = work_dir / Path("report.csv")
if cfg.grid_params:
asr_model.change_decoding_strategy(
RNNTDecodingConfig(fused_batch_size=-1, strategy="greedy_batch", confidence_cfg=cfg.confidence_cfg)
if is_rnnt
else CTCDecodingConfig(confidence_cfg=cfg.confidence_cfg)
)
params = json.loads(cfg.grid_params)
hp_grid = ParameterGrid(params)
hp_grid = list(hp_grid)
logging.info(f"==============================Running a benchmarking with grid search=========================")
logging.info(f"Grid search size: {len(hp_grid)}")
logging.info(f"Results will be written to:\nreport file `{report_file}`\nand plot directories near the file")
logging.info(f"==============================================================================================")
with open(report_file, "tw", encoding="utf-8") as f:
f.write(report_legend)
f.flush()
for i, hp in enumerate(hp_grid):
logging.info(f"Run # {i + 1}, grid: `{hp}`")
asr_model.change_decoding_strategy(apply_confidence_parameters(asr_model.cfg.decoding, hp))
param_list, experiment_name = get_experiment_params(asr_model.cfg.decoding.confidence_cfg)
plot_dir = work_dir / Path(experiment_name)
results = run_confidence_benchmark(
asr_model,
cfg.target_level,
filepaths,
reference_texts,
cfg.batch_size,
cfg.num_workers,
plot_dir,
autocast,
)
for level, result in results.items():
f.write(f"{model_typename},{','.join(param_list)},{level},{','.join([str(r) for r in result])}\n")
f.flush()
else:
asr_model.change_decoding_strategy(
RNNTDecodingConfig(fused_batch_size=-1, strategy="greedy_batch", confidence_cfg=cfg.confidence_cfg)
if is_rnnt
else CTCDecodingConfig(confidence_cfg=cfg.confidence_cfg)
)
param_list, experiment_name = get_experiment_params(asr_model.cfg.decoding.confidence_cfg)
plot_dir = work_dir / Path(experiment_name)
logging.info(f"==============================Running a single benchmarking===================================")
logging.info(f"Results will be written to:\nreport file `{report_file}`\nand plot directory `{plot_dir}`")
with open(report_file, "tw", encoding="utf-8") as f:
f.write(report_legend)
f.flush()
        results = run_confidence_benchmark(
            asr_model,
            cfg.target_level,
            filepaths,
            reference_texts,
            cfg.batch_size,
            cfg.num_workers,
            plot_dir,
            autocast,
        )
for level, result in results.items():
f.write(f"{model_typename},{','.join(param_list)},{level},{','.join([str(r) for r in result])}\n")
logging.info(f"===========================================Done===============================================")
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
NVIDIA.noreply@github.com
|
5382140e9fa0a39c22d11d9d2e26a67d161e55f9
|
396787df1b472ddfab7d934c149b150352342f03
|
/python_fundemental/80_Consecutive_Numbers_Sum.py
|
621b036d6cad85320f5dc18d8916f11e8fd3ff97
|
[] |
no_license
|
Deanwinger/python_project
|
a47b50a9dfc88853a5557da090b0a2ac3f3ce191
|
8c0c2a8bcd51825e6902e4d03dabbaf6f303ba83
|
refs/heads/master
| 2022-07-10T16:41:56.853165
| 2019-07-21T13:08:48
| 2019-07-21T13:08:48
| 107,653,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
# leetcode 829. Consecutive Numbers Sum
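# A minimal sketch of one standard approach (my addition; the original file is
# only a stub). N is a sum of k consecutive positive integers starting at a
# iff N = k*a + k*(k-1)/2 with a >= 1, so we count the k for which
# (N - k*(k-1)/2) is a positive multiple of k; only O(sqrt(N)) values of k
# can satisfy the loop condition.
def consecutive_numbers_sum(n):
    count = 0
    k = 1
    while k * (k - 1) // 2 < n:
        if (n - k * (k - 1) // 2) % k == 0:
            count += 1
        k += 1
    return count
# e.g. consecutive_numbers_sum(15) == 4  (15, 7+8, 4+5+6, 1+2+3+4+5)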
|
[
"a541203951@163.com"
] |
a541203951@163.com
|
aaf603f12269c28ff5c4caaab8ec16a82eecd936
|
29dfec7f0cccfba38d1fcb3eca586284290e3211
|
/data_pre-processing.py
|
5c339f05a4a43d4877f597ecf4bdb98ee00e1a85
|
[] |
no_license
|
anondo1969/baseline-emotion-recognizer
|
a85855bcc2a0810411ac7fff2260dfbc457f66cf
|
f53acdc5b5c79a3b989a9ebfbb27fe83b8df87a8
|
refs/heads/master
| 2020-12-30T06:30:16.646665
| 2016-06-25T19:29:09
| 2016-06-25T19:29:09
| 238,893,378
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,982
|
py
|
#=====================================================================================
#this script will eliminate the unwanted portions of feature file-
#generated by OpenSmile and store them in Numpy array formatted text files.
# Written by Mahbub Ul Alam
#=====================================================================================
#!/usr/bin/python
import os
import sys
#location of the feature file generated from openSmile.
#If you are using a newer version of Python, please use "input" instead of "raw_input".
file_path = raw_input("Enter OpenSmile generated Feature containing CSV File location: ")
#if K-fold cross-validation is needed to be performed on data
#then please select All (A).
data_type = raw_input("Enter output Data Type, T for Training, D for Development, A for All: ")
# processed data will be saved in text files.
if(data_type=="T"):
output_file_name = open("training_data.txt",'w')
print("Data will be generated shortly, please check training_data.txt")
elif(data_type=="D"):
output_file_name = open("development_data.txt",'w')
print("Data will be generated shortly, please check development_data.txt file")
else:
output_file_name = open("processed_data.txt",'w')
print("Data will be generated shortly, please check processed_data.txt file")
sys.stdout = output_file_name
#data will be saved in Numpy array format
input_file = open(file_path)
count=-1
token=-1
for line in input_file.read().split('\n'):
count=-1
token=-1
if(line!='') :
if(line[0]!='@'):
for value in line.split(","):
count+=1
if(count==0):
for word in value.split("_"):
token+=1
if(token==0):
break
					line = line.replace(value, word)
					break
line = line.replace('\'','')
line = line.replace(",?",'')
print (line)
|
[
"alammb@ims.uni-stuttgart.de"
] |
alammb@ims.uni-stuttgart.de
|
ace9b59b193f5bf5eee08edf92be7370d22caaa6
|
d437120d191e37691f9ec824e753faa05ddb3b31
|
/Practice/Interview/6.从尾到头打印链表.py
|
14b9ca41a2ab57f23f0188834edd6fed59ef760f
|
[] |
no_license
|
ICESDHR/Bear-and-Pig
|
c1153345c13ec52f0b000acccede773ad2421ad4
|
2fb98240258de285b43eae92c187bf36372c9668
|
refs/heads/master
| 2022-03-06T09:35:35.164032
| 2022-02-24T08:49:23
| 2022-02-24T08:49:23
| 127,283,094
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
# -*- coding:utf-8 -*-
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def PrintListFromTailToHead(listNode):
if listNode != None:
temp = [listNode.val]
nodes = listNode
else:
return []
while nodes.next != None:
temp.append(nodes.next.val)
nodes = nodes.next
return temp[::-1]
# Using recursion
def PrintListFromTailToHead2(listNode):
if listNode != None:
return PrintListFromTailToHead2(listNode.next)+[listNode.val]
else:
return []
if __name__ == '__main__':
listNode = ListNode(5)
listNode.next = ListNode(7)
ans = PrintListFromTailToHead2(listNode)
print(ans)
|
[
"smilewangyizhe@163.com"
] |
smilewangyizhe@163.com
|
bdad26231a915897f8611a7c72d2e4aad1eaed8a
|
2c3f5692e50fa4e7c7561faced7501650de7f83d
|
/App.py + csvfiles/app.py
|
4f1f33aaa8a07f6e115c8b3bd3e6edeb1026d6ba
|
[] |
no_license
|
cageofan21/sqlalchemy-challenge
|
6c4a9c764d589a52278d77f59dc45a9f2b9190f7
|
f3313f8055d2fdd909b48a9c1f87ff776d387ace
|
refs/heads/master
| 2020-12-28T13:57:28.520671
| 2020-04-09T07:59:49
| 2020-04-09T07:59:49
| 238,359,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,109
|
py
|
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
station = Base.classes.station
measurement = Base.classes.measurement
##########################################
# Flask Setup
##########################################
session = Session(engine)
app = Flask(__name__)
##########################################
# Flask Routes
##########################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start<br/>"
f"/api/v1.0/start-end"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
last_12months = (dt.date(2017, 8, 23)) - (dt.timedelta(days=365))
lastyr_precip = session.query(measurement.date, func.avg(measurement.prcp)).\
filter(measurement.date >= last_12months).\
group_by(measurement.date).all()
all_precip = []
for date, prcp in lastyr_precip:
rain_dict = {}
rain_dict["date"] = date
rain_dict["prcp"] = prcp
all_precip.append(rain_dict)
return jsonify(all_precip)
@app.route("/api/v1.0/stations")
def stations():
station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
results = session.query(station.station, station.name).all()
stations_list = []
for station, name in results:
stations_dict = {}
stations_dict["station"] = station
stations_dict["name"] = name
stations_list.append(stations_dict)
return jsonify(stations_list)
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
last_12months = (dt.date(2017, 8, 23)) - (dt.timedelta(days=365))
tobs_observ = session.query(measurement.date, measurement.tobs).\
filter(measurement.date >= last_12months).\
group_by(measurement.date).all()
tobs_list = []
for date, tobs in tobs_observ:
tobs_dict = {}
tobs_dict["date"] = date
tobs_dict["tobs"] = tobs
tobs_list.append(tobs_dict)
return jsonify(tobs_list)
@app.route("/api/v1.0/start")
def temp (start = "2016-08-23"):
# Create our session (link) from Python to the DB
session = Session(engine)
# Given start date, to calculate all dates greater than and equal to the start date.
start = "2016-08-23"
temp_start = session.query(measurement.date, func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\
filter(measurement.date >= start).group_by(measurement.date).all()
tempstart_list=list(temp_start)
return jsonify(tempstart_list)
@app.route("/api/v1.0/start-end")
def tempend(start = "2016-08-23", end = "2017-08-23"):
# Create our session (link) from Python to the DB
session = Session(engine)
# Given start date, to calculate for dates between the start and end date inclusive.
start = "2016-08-23"
end = "2017-08-23"
temp_start1 = session.query(measurement.date, func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\
filter(measurement.date >= start).filter(measurement.date <= end).group_by(measurement.date).all()
tempstart_list1=list(temp_start1)
return jsonify(tempstart_list1)
if __name__ == '__main__':
app.run(debug=True)
|
[
"adib.cena@gmail.com"
] |
adib.cena@gmail.com
|
093b920439e7837b4a8f668ec9e2bfda1feeb331
|
12ce225916bbee1c4fc83104478a695a9afc8b4f
|
/user/migrations/0001_initial.py
|
6400393f1988638c361642e57fe4802875a179d8
|
[] |
no_license
|
suman-kr/prism
|
4985711edbd7f178663e7de8034c13f378777034
|
7b731304bd1a441c29d87edb0944e95026e125a1
|
refs/heads/master
| 2022-12-21T21:37:56.697211
| 2020-09-25T08:58:28
| 2020-09-25T08:58:28
| 298,069,027
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
# Generated by Django 3.1.1 on 2020-09-21 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('contact', models.CharField(max_length=50, verbose_name='Phone Number')),
('h_no', models.CharField(max_length=50, verbose_name='House/Flat No')),
('street_one', models.CharField(blank=True, max_length=50, null=True)),
('street_two', models.CharField(blank=True, max_length=50, null=True)),
('city', models.CharField(max_length=50)),
('state', models.CharField(max_length=50)),
('pin', models.CharField(max_length=50, verbose_name='Pincode')),
('roles', models.CharField(choices=[('ADV', 'Advertiser'), ('PTR', 'Partner')], max_length=3)),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
]
|
[
"skcool.123bgp@gmail.com"
] |
skcool.123bgp@gmail.com
|
acb4d49de13e8c5b8f71ab3a8a6a31918cebbe30
|
191969ffc6f6cd164964129f2a986c9f3a3c045e
|
/05day/07-变量.py
|
a635c8e36ead4c3308e5ed749fbe5468dd1d8aa5
|
[] |
no_license
|
2001128/p1805
|
72844bde17b23de85a60308253da8a39750659fb
|
65538d83229639c237ea6de56dc2105bd3ca06a2
|
refs/heads/master
| 2020-03-19T11:32:06.878033
| 2018-06-28T02:50:36
| 2018-06-28T02:50:36
| 136,460,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
# = is the assignment operator
a=2
b=5
c=a+b
print(c)
d=a-b
print(d)
e=a*b
print(e)
f=a/b
print(f)
g=a//b
print(g)
h=a%b
print(h)
i=a**b
print(i)
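# Expected output for a=2, b=5 (annotation added for clarity):
# 7, -3, 10, 0.4, 0 (floor division), 2 (modulo), 32 (2**5)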
|
[
"335775879@qq.com"
] |
335775879@qq.com
|
9cdb66c2b0c31cb1f388722770337ffa10ab9818
|
743be419d9af6be760a4c9754a9fb946b84827ec
|
/videos/migrations/0003_video_share_message.py
|
55f26e1f4ad49f07086cc256cd27a73eda0033cb
|
[] |
no_license
|
mathbeal/videomembership-django
|
f76c9debaef1b00171d79e8fd1e9409e24705f68
|
3fa779458197a245aacb82d00ff0e7102a3cb831
|
refs/heads/master
| 2021-06-04T20:13:38.457946
| 2016-09-13T13:37:05
| 2016-09-13T13:37:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-22 13:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('videos', '0002_auto_20160219_1433'),
]
operations = [
migrations.AddField(
model_name='video',
name='share_message',
field=models.TextField(default='\nCheck out this video!\n'),
),
]
|
[
"leo.maltrait@gmail.com"
] |
leo.maltrait@gmail.com
|
334259432a3f79cae1881f83df692c4d55ebd401
|
241dc11ca83565b0e4626277c2b4226d2bb2a7d0
|
/Dhein_Elegans_Projects/Code/draw_plots.py
|
61d7ef9adef8bc5e7aa4d1770432780294a25181
|
[] |
no_license
|
SES591/C.-elegans
|
7badaaf0317e6b5f67fd41e6a9d867d2f569a2cd
|
08f0ef49f7002dd4847b27c7dc9afac8e75da989
|
refs/heads/master
| 2016-08-12T13:39:38.032623
| 2016-05-05T23:26:30
| 2016-05-05T23:26:30
| 50,062,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,149
|
py
|
#!/usr/bin/python
#bionetworks.py
#last update : 14 Aug 2014
__author__ = '''Hyunju Kim'''
import networkx as nx
import os
import sys
import random as ran
from math import log
from optparse import OptionParser, OptionGroup
from scipy import *
from collections import defaultdict
import matplotlib.pyplot as plt
#from info_measure import *
import itertools
from pylab import *
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker
import operator
import copy
def a_line(xlist, ylist, axis_labels, file_name):
plt.figure(figsize=(12,8))
plt.plot(xlist, ylist, '-o')
plt.xticks(xlist, axis_labels, rotation='vertical')
plt.grid()
plt.margins(0.2)
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.25)
plt.savefig(file_name)
plt.show()
def plot_AI_scale(dictO, result_file_name, viz_file_name):
dictA = copy.deepcopy(dictO)
#print "DDDDDDD"
xlist = [x for x in range(len(dictA.keys()))]
#list_node_names = list(dicA.keys())
dictA_values = list(dictA.values())
sorted_dicA_values = sorted(dictA_values)
sorted_dicA_values.reverse()
ylist = sorted_dicA_values
axis_labels = []
for u in ylist:
for i, j in dictA.iteritems():
if j == u:
axis_labels.append(i)
del dictA[i]
break
result_file = open(result_file_name, 'w')
    for i in range(len(xlist)):
        result_file.write('%s\t%f\n'%(axis_labels[i], ylist[i]))
    result_file.close()
plt.figure(figsize=(12,8))
plt.plot(xlist, ylist, '-o')
plt.xticks(xlist, axis_labels, rotation='vertical')
plt.grid()
plt.margins(0.2)
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.25)
plt.savefig(viz_file_name)
#plt.show()
def plot_TE_scale(dictO, result_file_name, viz_file_name):
dictA = copy.deepcopy(dictO)
#print "DDDDDDD"
xlist = [x for x in range(len(dictA.keys()))]
#list_node_names = list(dicA.keys())
dictA_values = list(dictA.values())
sorted_dicA_values = sorted(dictA_values)
sorted_dicA_values.reverse()
ylist = sorted_dicA_values
axis_labels = []
for u in ylist:
for i, j in dictA.iteritems():
if j == u:
axis_labels.append(i)
del dictA[i]
break
result_file = open(result_file_name, 'w')
    for i in range(len(xlist)):
        result_file.write('%s\t%f\n'%(axis_labels[i], ylist[i]))
    result_file.close()
plt.figure(figsize=(12,8))
plt.plot(xlist, ylist, '-o')
#plt.xticks(xlist, axis_labels, rotation='vertical')
plt.grid()
plt.margins(0.2)
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.25)
plt.savefig(viz_file_name)
#plt.show()
def a_line_nolabel(xlist, ylist, file_name):
plt.figure(figsize=(12,8))
plt.plot(xlist, ylist, '-o')
plt.grid()
plt.margins(0.2)
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.25)
plt.savefig(file_name)
#plt.show()
def heatmap(nodes_list, hpcell, output_file_name):
xlabels = list(nodes_list)
# hpcell = {}
# print input_file_name
# input_file = open(input_file_name, 'r')
# for line in input_file:
# items = [x.strip() for x in line.rstrip().split('\t')]
# ynode = items[0]
# xnode = items[1]
# hpcell[(ynode, xnode)] = float(items[2])
M = []
for ynode in xlabels:
rM = []
for xnode in xlabels:
rM.append(hpcell[(ynode, xnode)])
M.append(rM)
M = np.array(M)
fig1 = plt.figure(figsize=(10,8))
plt.yticks(np.arange(len(xlabels))+0.5, xlabels, size = 15, rotation=0, va="center", ha="right")
plt.xticks(np.arange(len(xlabels))+0.5, xlabels, size = 15, rotation=90, va="top", ha="center")
ax1 = fig1.add_subplot(111)
cax1=ax1.pcolor(M, cmap=plt.cm.OrRd)
plt.gca().set_aspect('equal')
#ax1.set_title('Jaccard index of 27 pathways (edges)', size = 30)
fig1.colorbar(cax1)
plt.subplots_adjust(bottom=0.3)
plt.savefig(output_file_name)
#plt.show()
|
[
"Kelle Dhein"
] |
Kelle Dhein
|
adbc022b93ab8d6fd6995816263a766c5680a84f
|
c2640725115d0d62fd815539b9f3d33006586b81
|
/VertexCover.py
|
0cd9697cf61a71b297187fb1d0cf27b8323b1396
|
[] |
no_license
|
GiridharaSPK/Advanced-Analysis-and-Design-of-Algorithms
|
79f91241022075ae604ab287f1505b6c13ccc3be
|
f189d4c99d48b1dd71dd00b9b896c3e0cafbc348
|
refs/heads/master
| 2021-10-29T02:16:43.225151
| 2021-10-19T07:42:30
| 2021-10-19T07:42:30
| 143,556,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
from pulp import *
A= [[0,1,1,0,0,0,0],
[1,0,1,1,0,0,0],
[1,1,0,0,1,0,1],
[0,1,0,0,0,1,0],
[0,0,1,0,0,1,0],
[0,0,0,1,1,0,1],
[0,0,0,0,0,1,0]
]
prob = LpProblem("VertexCover",LpMinimize)
variables=[]
for i in range(len(A[0])):
variables.append(LpVariable("A_{}".format(i),0,1))
# objective: minimize the number of selected vertices (added once, not per row)
prob += lpSum(variables)
#constraints
for i in range(len(A[0])):
for j in range(len(A[0])):
if A[i][j]==1:
prob+=variables[i]+variables[j]>=1
prob.writeLP("VertexCover.lp")
prob.solve()
print("Status:", LpStatus[prob.status])
for v in prob.variables():
print(v.name," : ", v.varValue)
print("objective=", value(prob.objective))
|
[
"noreply@github.com"
] |
GiridharaSPK.noreply@github.com
|
7649769d02263db44beb3e9dabb1074a73afe3bd
|
76a3099faad6720ad1d3f7595cb7fbe660c640cc
|
/reverse.py
|
c0aac686a9a199e07d69b63bab55deabea0b3bf1
|
[] |
no_license
|
AcidPenguin/learning-python
|
fab019020c74945cac9f7dae804272f80932e539
|
8b9282863c33e6f708caf394564c9ece7ad6f023
|
refs/heads/master
| 2021-07-10T12:22:25.533284
| 2020-07-22T22:25:36
| 2020-07-22T22:25:36
| 168,377,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
# print whatever results from the brackets
print(" ".join(input().split()[::-1]))
|
[
"addictedpenguins@icloud.com"
] |
addictedpenguins@icloud.com
|
7e47cc01ebb88fb90a91148d4b000cd84c660f05
|
47b6ca131212a3e434b701d5e67d1d2e49ab21b2
|
/Exercise 5.py
|
5afc6bdd2ddaa97d4805057984e4084b7996a05a
|
[] |
no_license
|
ZarkoHDS/MuhammadYusuf_ITP2017_Exercise5
|
472110787f6f9a6fe9d3396113d3bafb0abac97d
|
49fb3ed3d56c3a603051230b6dba6733b90b877f
|
refs/heads/master
| 2021-07-10T12:06:53.856432
| 2017-10-08T11:09:47
| 2017-10-08T11:09:47
| 106,169,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
#exercise 5
def calculator(num1, num2, operator, fmt):
    if operator == "+":
        x = num1 + num2
    elif operator == "-":
        x = num1 - num2
    elif operator == "*":
        x = num1 * num2
    elif operator == "/":
        x = num1 / num2
    else:
        x = num1 + num2
    if fmt == "integer":
        print(int(x))
    elif fmt == "float":
        print(float(x))
operator = input("Insert Your symbols of math :")
num1 = int(input("Insert First Number :"))
num2 = int(input("Insert Second Number :"))
fmt = input("Format :")
calculator(num1, num2, operator, fmt)
|
[
"muhammad.andiyusuf@ymail.com"
] |
muhammad.andiyusuf@ymail.com
|
d7263ad2898835a25b3012e0164c9c1abf279507
|
4c9bfaa5f83d76b00db63f14e5c8484a58ed7d99
|
/ll_env/bin/django-admin
|
f8372abe329761312875b9b51ea2a766734fff95
|
[] |
no_license
|
davidthurman/My-Website
|
0fb14ed4aa45ea905f259c5091fef41f5c2bea2e
|
55bd5aa2e72e2d811da3f01f5a97b2715ad16c1b
|
refs/heads/master
| 2020-12-01T05:27:25.521149
| 2016-10-01T15:27:52
| 2016-10-01T15:27:52
| 67,652,235
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
#!/Users/davidthurman/Desktop/Python/Django/My-Website/ll_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"davidthurmanwork@gmail.com"
] |
davidthurmanwork@gmail.com
|
|
a702e97955980bf12bf92cfbc414efd0e9a8c369
|
35d1b6ba84ffcc682e361e929bfb3b81caf0402f
|
/invenio_records/systemfields/model.py
|
efc74c3cf19daa7ce1f4a3d4d60900de4ba83da4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ppanero/invenio-records
|
59f87f43f02760f4c16d39954e062843b9b545fc
|
b189ce21d89f2e186d51a368931d5ab3e721ffae
|
refs/heads/master
| 2022-09-14T19:34:26.877056
| 2022-03-17T09:13:43
| 2022-03-17T09:13:43
| 179,234,572
| 0
| 0
|
MIT
| 2019-04-03T07:28:48
| 2019-04-03T07:28:47
| null |
UTF-8
|
Python
| false
| false
| 2,645
|
py
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2020 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Constant system field."""
from ..dictutils import dict_lookup
from .base import SystemField
class ModelField(SystemField):
"""Model field for providing get and set access on a model field."""
def __init__(self, model_field_name=None, dump=True, dump_key=None,
dump_type=None):
"""Initialize the field.
:param model_field_name: Name of field on the database model.
:param dump: Set to false to not dump the field.
:param dump_key: The dictionary key to use in dumps.
:param dump_type: The data type used to determine how to serialize the
model field.
"""
self._model_field_name = model_field_name
self.dump = dump
self._dump_key = dump_key
self._dump_type = dump_type
#
# Helpers
#
@property
def model_field_name(self):
"""The name of the SQLAlchemy field on the model.
Defaults to the attribute name used on the class.
"""
return self._model_field_name or self.attr_name
@property
def dump_key(self):
"""The dictionary key to use in dump output.
Note, it's up to the dumper to choose if it respects this name.
The name defaults to the model field name.
"""
return self._dump_key or self.model_field_name
@property
def dump_type(self):
"""The data type used to determine how to serialize the model field.
Defaults to none, meaning the dumper will determine how to dump it.
"""
return self._dump_type
def _set(self, model, value):
"""Internal method to set value on the model's field."""
setattr(model, self.model_field_name, value)
#
# Data descriptor
#
def __get__(self, record, owner=None):
"""Accessing the attribute."""
# Class access
if record is None:
return self
# Instance access
try:
return getattr(record.model, self.model_field_name)
except AttributeError:
return None
def __set__(self, instance, value):
"""Accessing the attribute."""
self._set(instance.model, value)
#
# Record extension
#
def post_init(self, record, data, model=None, field_data=None):
"""Initialise the model field."""
if field_data is not None:
self._set(model, field_data)
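# Minimal usage sketch (added for illustration; not part of the module). It
# assumes only what the class above requires: a record object exposing its
# database row as `.model`. `SimpleNamespace` stands in for a real SQLAlchemy
# model, and the field name is passed explicitly so no metaclass is needed.
if __name__ == '__main__':
    from types import SimpleNamespace
    class DemoRecord:
        expires_at = ModelField(model_field_name='expires_at')
        def __init__(self, model):
            self.model = model
    rec = DemoRecord(SimpleNamespace(expires_at=None))
    rec.expires_at = '2022-01-01'          # __set__ writes through to the model
    assert rec.model.expires_at == '2022-01-01'
    assert rec.expires_at == '2022-01-01'  # __get__ reads from the model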
|
[
"lars.holm.nielsen@cern.ch"
] |
lars.holm.nielsen@cern.ch
|
2d99be2947ed06a48d470ebeae9181f5060181a0
|
b6eedb13dc8968bff95da19c82cc3c390d57419c
|
/SPrEader.py
|
c036387ed9cb0f59a8a10fa604f75d61df0cc79b
|
[] |
no_license
|
Alirezabln/RubyRead-3
|
65365791385778fb30a80dcb746973a749031301
|
e37cf2469dc995c2dd0ae81c575d0cbc43641824
|
refs/heads/master
| 2023-04-24T18:59:37.297387
| 2021-05-19T18:20:44
| 2021-05-19T18:20:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,065
|
py
|
''' winspec.py - read SPE files created by WinSpec with Princeton Instruments' cameras. '''
import ctypes, os
import struct
import numpy as np
import logging
__all__ = ['SpeFile', 'print_offsets']
__author__ = "Anton Loukianov"
__email__ = "anton.loukianov@gmail.com"
__license__ = "BSD"
__version__ = "0.2.1"
log = logging.getLogger('winspec')
# Definitions of types
spe_byte = ctypes.c_ubyte
spe_word = ctypes.c_ushort
spe_dword = ctypes.c_uint
spe_char = ctypes.c_char # 1 byte
spe_short = ctypes.c_short # 2 bytes
# long is 4 bytes in the manual. It is 8 bytes on my machine
spe_long = ctypes.c_int # 4 bytes
spe_float = ctypes.c_float # 4 bytes
spe_double = ctypes.c_double # 8 bytes
class ROIinfo(ctypes.Structure):
pass
class AxisCalibration(ctypes.Structure):
pass
class Header(ctypes.Structure):
pass
def print_offsets():
''' Print the attribute names, sizes and offsets in the C structure
Assuming that the sizes are correct and add up to an offset of 4100 bytes,
everything should add up correctly. This information was taken from the
WinSpec 2.6 Spectroscopy Software User Manual version 2.6B, page 251.
If this table doesn't add up, something changed in the definitions of the
datatype widths. Fix this in winspec.structs file and let me know!
'''
import inspect, re
A = Header()
for i in [Header, AxisCalibration, ROIinfo]:
fields = []
print('\n{:30s}[{:4s}]\tsize'.format(repr(i), 'offs'))
for name,obj in inspect.getmembers(i):
if inspect.isdatadescriptor(obj) and not inspect.ismemberdescriptor(obj) \
and not inspect.isgetsetdescriptor(obj):
fields.append((name, obj))
fields = sorted(fields, key=lambda x: x[1].offset)
for name, obj in fields:
print('{:30s}[{:4d}]\t{:4d}'.format(name, obj.size, obj.offset))
class SpeFile(object):
''' A file that represents the SPE file.
All details written in the file are contained in the `header` structure. Data is
accessed by using the `data` property.
Once the object is created and data accessed, the file is NOT read again. Create
a new object if you want to reread the file.
'''
# Map between header datatype field and numpy datatype
_datatype_map = {0 : np.float32, 1 : np.int32, 2 : np.int16, 3 : np.uint16}
def __init__(self, name):
''' Open file `name` to read the header.'''
with open(name, mode='rb') as f:
self.header = Header()
self.path = os.path.realpath(name)
self._data = None
self._xaxis = None
self._yaxis = None
# Deprecated method, but FileIO apparently can't be used with numpy
f.readinto(self.header)
# set some useful properties
self.reversed = True if self.header.geometric == 2 else False
self.gain = self.header.gain
if self.header.ADCtype == 8:
self.adc = 'Low Noise'
elif self.header.ADCtype == 9:
self.adc = 'High Capacity'
else:
self.adc = 'Unknown'
if self.header.ADCrate == 12:
self.adc_rate = '2 MHz'
elif self.header.ADCrate == 6:
self.adc_rate = '100 KHz'
else:
self.adc_rate = 'Unknown'
self.readout_time = self.header.ReadoutTime
def _read(self):
''' Read the data segment of the file and create an appropriately-shaped numpy array
Based on the header, the right datatype is selected and returned as a numpy array. I took
the convention that the frame index is the first, followed by the x,y coordinates.
'''
if self._data is not None:
log.debug('using cached data')
return self._data
        # In python 2.7, apparently file and FileIO cannot be used interchangeably
with open(self.path, mode='rb') as f:
f.seek(4100) # Skip header (4100 bytes)
_count = self.header.xdim * self.header.ydim * self.header.NumFrames
self._data = np.fromfile(f, dtype=SpeFile._datatype_map[self.header.datatype], count=_count)
# Also, apparently the ordering of the data corresponds to how it is stored by the shift register
# Thus, it appears a little backwards...
self._data = self._data.reshape((self.header.NumFrames, self.header.ydim, self.header.xdim))
# Orient the structure so that it is indexed like [NumFrames][x, y]
self._data = np.rollaxis(self._data, 2, 1)
# flip data
if all([self.reversed == True, self.adc == '100 KHz']):
pass
elif any([self.reversed == True, self.adc == '100 KHz']):
self._data = self._data[:, ::-1, :]
log.debug('flipped data because of nonstandard ADC setting ' + \
'or reversed setting')
return self._data
@property
def xaxis(self):
if self._xaxis is not None:
log.debug('using cached xaxis')
return self._xaxis
px, py = self._make_axes()
return px
@property
def yaxis(self):
if self._yaxis is not None:
log.debug('using cached yaxis')
return self._yaxis
px, py = self._make_axes()
return py
@property
def xaxis_label(self):
'''Read the x axis label
'''
return self.header.xcalibration.string.decode('ascii')
@property
def yaxis_label(self):
'''Read the y axis label
'''
return self.header.ycalibration.string.decode('ascii')
def _make_axes(self):
'''Construct axes from calibration fields in header file
'''
xcalib = self.header.xcalibration
ycalib = self.header.ycalibration
        xcalib_valid, = struct.unpack('?', xcalib.calib_valid)
        if xcalib_valid:
xcalib_order, = struct.unpack('>B', xcalib.polynom_order) # polynomial order
px = xcalib.polynom_coeff[:xcalib_order+1]
px = np.array(px[::-1]) # reverse coefficients to use numpy polyval
pixels = np.arange(1, self.header.xdim + 1)
px = np.polyval(px, pixels)
else:
px = np.arange(1, self.header.xdim + 1)
        ycalib_valid, = struct.unpack('?', ycalib.calib_valid)
        if ycalib_valid:
ycalib_order, = struct.unpack('>B', ycalib.polynom_order) # polynomial order
py = ycalib.polynom_coeff[:ycalib_order+1]
py = np.array(py[::-1]) # reverse coefficients to use numpy polyval
pixels = np.arange(1, self.header.ydim + 1)
py = np.polyval(py, pixels)
else:
py = np.arange(1, self.header.ydim + 1)
self._xaxis = px
self._yaxis = py
return px, py
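    # Worked example (annotation): calibration coefficients (a0, a1, a2, ...)
    # define a0 + a1*p + a2*p**2 + ... at pixel p; they are reversed above
    # because np.polyval expects the highest-order coefficient first.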
''' Data recorded in the file, returned as a numpy array.
The convention for indexes is that the first index is the frame index, followed by x,y region of
interest.
'''
data = property(fget=_read)
def __str__(self):
return 'SPE File \n\t{:d}x{:d} area, {:d} frames\n\tTaken on {:s}' \
.format(self.header.xdim, self.header.ydim,
self.header.NumFrames, self.header.date.decode())
def __repr__(self):
return str(self)
# Lengths of arrays used in header
HDRNAMEMAX = 120
USERINFOMAX = 1000
COMMENTMAX = 80
LABELMAX = 16
FILEVERMAX = 16
DATEMAX = 10
ROIMAX = 10
TIMEMAX = 7
# Definitions of WinSpec structures
# Region of interest defs
ROIinfo._pack_ = 1
ROIinfo._fields_ = [
('startx', spe_word),
('endx', spe_word),
('groupx', spe_word),
('starty', spe_word),
('endy', spe_word),
('groupy', spe_word)]
# Calibration structure for X and Y axes
AxisCalibration._pack_ = 1
AxisCalibration._fields_ = [
('offset', spe_double),
('factor', spe_double),
('current_unit', spe_char),
('reserved1', spe_char),
('string', spe_char * 40),
('reserved2', spe_char * 40),
('calib_valid', spe_char),
('input_unit', spe_char),
('polynom_unit', spe_char),
('polynom_order', spe_char),
('calib_count', spe_char),
('pixel_position', spe_double * 10),
('calib_value', spe_double * 10),
('polynom_coeff', spe_double * 6),
('laser_position', spe_double),
('reserved3', spe_char),
('new_calib_flag', spe_byte),
('calib_label', spe_char * 81),
('expansion', spe_char * 87)]
# Full header definition
Header._pack_ = 1
Header._fields_ = [
('ControllerVersion', spe_short),
('LogicOutput', spe_short),
('AmpHiCapLowNoise', spe_word),
('xDimDet', spe_word),
('mode', spe_short),
('exp_sec', spe_float),
('VChipXdim', spe_short),
('VChipYdim', spe_short),
('yDimDet', spe_word),
('date', spe_char * DATEMAX),
('VirtualChipFlag', spe_short),
('Spare_1', spe_char * 2), # Unused data
('noscan', spe_short),
('DetTemperature', spe_float),
('DetType', spe_short),
('xdim', spe_word),
('stdiode', spe_short),
('DelayTime', spe_float),
('ShutterControl', spe_word),
('AbsorbLive', spe_short),
('AbsorbMode', spe_word),
('CanDoVirtualChipFlag', spe_short),
('ThresholdMinLive', spe_short),
('ThresholdMinVal', spe_float),
('ThresholdMaxLive', spe_short),
('ThresholdMaxVal', spe_float),
('SpecAutoSpectroMode', spe_short),
('SpecCenterWlNm', spe_float),
('SpecGlueFlag', spe_short),
('SpecGlueStartWlNm', spe_float),
('SpecGlueEndWlNm', spe_float),
('SpecGlueMinOvrlpNm', spe_float),
('SpecGlueFinalResNm', spe_float),
('PulserType', spe_short),
('CustomChipFlag', spe_short),
('XPrePixels', spe_short),
('XPostPixels', spe_short),
('YPrePixels', spe_short),
('YPostPixels', spe_short),
('asynen', spe_short),
('datatype', spe_short), # 0 - float, 1 - long, 2 - short, 3 - ushort
('PulserMode', spe_short),
('PulserOnChipAccums', spe_word),
('PulserRepeatExp', spe_dword),
('PulseRepWidth', spe_float),
('PulseRepDelay', spe_float),
('PulseSeqStartWidth', spe_float),
('PulseSeqEndWidth', spe_float),
('PulseSeqStartDelay', spe_float),
('PulseSeqEndDelay', spe_float),
('PulseSeqIncMode', spe_short),
('PImaxUsed', spe_short),
('PImaxMode', spe_short),
('PImaxGain', spe_short),
('BackGrndApplied', spe_short),
('PImax2nsBrdUsed', spe_short),
('minblk', spe_word),
('numminblk', spe_word),
('SpecMirrorLocation', spe_short * 2),
('SpecSlitLocation', spe_short * 4),
('CustomTimingFlag', spe_short),
('ExperimentTimeLocal', spe_char * TIMEMAX),
('ExperimentTimeUTC', spe_char * TIMEMAX),
('ExposUnits', spe_short),
('ADCoffset', spe_word),
('ADCrate', spe_word),
('ADCtype', spe_word),
('ADCresolution', spe_word),
('ADCbitAdjust', spe_word),
('gain', spe_word),
('Comments', spe_char * 5 * COMMENTMAX),
('geometric', spe_word), # x01 - rotate, x02 - reverse, x04 flip
('xlabel', spe_char * LABELMAX),
('cleans', spe_word),
('NumSkpPerCln', spe_word),
('SpecMirrorPos', spe_short * 2),
('SpecSlitPos', spe_float * 4),
('AutoCleansActive', spe_short),
('UseContCleansInst', spe_short),
('AbsorbStripNum', spe_short),
('SpecSlipPosUnits', spe_short),
('SpecGrooves', spe_float),
('srccmp', spe_short),
('ydim', spe_word),
('scramble', spe_short),
('ContinuousCleansFlag', spe_short),
('ExternalTriggerFlag', spe_short),
('lnoscan', spe_long), # Longs are 4 bytes
('lavgexp', spe_long), # 4 bytes
('ReadoutTime', spe_float),
('TriggeredModeFlag', spe_short),
('Spare_2', spe_char * 10),
('sw_version', spe_char * FILEVERMAX),
('type', spe_short),
('flatFieldApplied', spe_short),
('Spare_3', spe_char * 16),
('kin_trig_mode', spe_short),
('dlabel', spe_char * LABELMAX),
('Spare_4', spe_char * 436),
('PulseFileName', spe_char * HDRNAMEMAX),
('AbsorbFileName', spe_char * HDRNAMEMAX),
('NumExpRepeats', spe_dword),
('NumExpAccums', spe_dword),
('YT_Flag', spe_short),
('clkspd_us', spe_float),
('HWaccumFlag', spe_short),
('StoreSync', spe_short),
('BlemishApplied', spe_short),
('CosmicApplied', spe_short),
('CosmicType', spe_short),
('CosmicThreshold', spe_float),
('NumFrames', spe_long),
('MaxIntensity', spe_float),
('MinIntensity', spe_float),
('ylabel', spe_char * LABELMAX),
('ShutterType', spe_word),
('shutterComp', spe_float),
('readoutMode', spe_word),
('WindowSize', spe_word),
('clkspd', spe_word),
('interface_type', spe_word),
('NumROIsInExperiment', spe_short),
('Spare_5', spe_char * 16),
('controllerNum', spe_word),
('SWmade', spe_word),
('NumROI', spe_short),
('ROIinfblk', ROIinfo * ROIMAX),
('FlatField', spe_char * HDRNAMEMAX),
('background', spe_char * HDRNAMEMAX),
('blemish', spe_char * HDRNAMEMAX),
('file_header_ver', spe_float),
('YT_Info', spe_char * 1000),
('WinView_id', spe_long),
('xcalibration', AxisCalibration),
('ycalibration', AxisCalibration),
('Istring', spe_char * 40),
('Spare_6', spe_char * 25),
('SpecType', spe_byte),
('SpecModel', spe_byte),
('PulseBurstUsed', spe_byte),
('PulseBurstCount', spe_dword),
('PulseBurstPeriod', spe_double),
('PulseBracketUsed', spe_byte),
('PulseBracketType', spe_byte),
('PulseTimeConstFast', spe_double),
('PulseAmplitudeFast', spe_double),
('PulseTimeConstSlow', spe_double),
('PulseAmplitudeSlow', spe_double),
('AnalogGain', spe_short),
('AvGainUsed', spe_short),
('AvGain', spe_short),
('lastvalue', spe_short)]
# ###test = SpeFile('Bi-cell4-ruby7.SPE')
# ###print(test)
# ###print(test.header.ExperimentTimeUTC)
# ###
# ###print(test.data.shape)
# ###print(test.data[0].shape)
# ###print(print(test.header))
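# ###Hedged usage sketch (added), mirroring the commented test above:
# ###spe = SpeFile('your_file.SPE')        # hypothetical file name
# ###frames = spe.data                     # numpy array indexed [frame][x, y]
# ###wavelengths = spe.xaxis               # axis built from the header calibration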
|
[
"jssmith@anl.gov"
] |
jssmith@anl.gov
|
67ee694b86cd7dc3c2c0e4b65be8f0f59bff7f81
|
3a343e05afa4a3b2485aa6bba8386011ade0f32c
|
/gohappyserver/authviews.py
|
368e431fb4e8eb9d5e4644f7a7ad4b6c81bedf9b
|
[] |
no_license
|
kazemnejad/gohappy-filemanager-server
|
21c3cc5ede3dadafb3f898513098eb3ad209f2cc
|
e9ca82cb567da62cf3cf48cf1bdeb4dbb0058095
|
refs/heads/master
| 2021-01-10T08:48:41.119086
| 2016-03-27T09:53:15
| 2016-03-27T09:53:15
| 54,634,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
from flask import abort
from flask import request, jsonify, render_template
from gohappyserver.database import db_session
from gohappyserver.models import User
from gohappyserver.server import app
class AuthResponceCode:
SUCCESS = 10
FAIL = 11
USER_EXISTS = 12
INVALID_CREDENTIALS = 13
@app.route("/")
def main_page():
return render_template("index.html")
@app.route("/auth/login", methods=['POST'])
def login():
username = request.form['username']
password = request.form['password']
response = {}
user = User.query.filter_by(username=username).first()
if not user or not user.verify_password(password):
response["result"] = AuthResponceCode.FAIL
response["message"] = AuthResponceCode.INVALID_CREDENTIALS
else:
response["result"] = AuthResponceCode.SUCCESS
response["token"] = user.generate_auth_token().decode('ascii')
return jsonify(response), 200,
@app.route("/auth/register", methods=["POST"])
def register():
username = request.form['username']
password = request.form['password']
if len(username) == 0 or len(password) == 0:
abort(400)
response = {}
if User.query.filter_by(username=username).first() is not None:
response["result"] = AuthResponceCode.FAIL
response["message"] = AuthResponceCode.USER_EXISTS
else:
user = User(username=username, password=password)
db_session.add(user)
db_session.commit()
response["result"] = AuthResponceCode.SUCCESS
response["id"] = user.id
response["token"] = user.generate_auth_token().decode('ascii')
    print(response)
return jsonify(response), 200,
@app.route("/users/online", methods=["GET"])
def list_users():
token = str(request.headers.get("Authorization")).split(" ")[1]
if not User.verify_auth_token(token):
abort(403)
    online_users = User.query.filter(User.socket_id != None).all()
    return jsonify([user.username for user in online_users]), 200,
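# Hedged client-side sketch (added; not part of the original module): exercising
# the endpoints above with the 'requests' library. The base URL is an assumption.
if __name__ == '__main__':
    import requests
    base = 'http://localhost:5000'
    resp = requests.post(base + '/auth/register',
                         data={'username': 'alice', 'password': 's3cret'})
    token = resp.json().get('token')
    # list_users() expects an 'Authorization: Bearer <token>' header
    resp = requests.get(base + '/users/online',
                        headers={'Authorization': 'Bearer %s' % token})
    print(resp.status_code)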
|
[
"ub.maka@gmail.com"
] |
ub.maka@gmail.com
|
4d0112a32f6eca81ffc2fdd40fa60162f829a9b4
|
07d01fa4ec60b5a6cb0d157e97b4352847f8ef36
|
/.venv/Lib/site-packages/pathspec/tests/test_pathspec.py
|
a76ec0701f627ac13736aeddc56178808c7bd9eb
|
[] |
no_license
|
freddieaviator/simple_python_pipeline
|
9b833df352a3310e10929313d46bc76b23ae9490
|
34c11a04d028906aa8de73d5b8a6a6f143c94de0
|
refs/heads/main
| 2023-06-30T04:41:13.738241
| 2021-08-05T14:25:30
| 2021-08-05T14:25:30
| 392,977,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,416
|
py
|
# encoding: utf-8
"""
This script tests ``PathSpec``.
"""
import unittest
import pathspec
class PathSpecTest(unittest.TestCase):
"""
The ``PathSpecTest`` class tests the ``PathSpec`` class.
"""
def test_01_absolute_dir_paths_1(self):
"""
Tests that absolute paths will be properly normalized and matched.
"""
spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"foo",
],
)
results = set(
spec.match_files(
[
"/a.py",
"/foo/a.py",
"/x/a.py",
"/x/foo/a.py",
"a.py",
"foo/a.py",
"x/a.py",
"x/foo/a.py",
]
)
)
self.assertEqual(
results,
{
"/foo/a.py",
"/x/foo/a.py",
"foo/a.py",
"x/foo/a.py",
},
)
def test_01_absolute_dir_paths_2(self):
"""
Tests that absolute paths will be properly normalized and matched.
"""
spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"/foo",
],
)
results = set(
spec.match_files(
[
"/a.py",
"/foo/a.py",
"/x/a.py",
"/x/foo/a.py",
"a.py",
"foo/a.py",
"x/a.py",
"x/foo/a.py",
]
)
)
self.assertEqual(
results,
{
"/foo/a.py",
"foo/a.py",
},
)
def test_01_current_dir_paths(self):
"""
Tests that paths referencing the current directory will be properly
normalized and matched.
"""
spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"*.txt",
"!test1/",
],
)
results = set(
spec.match_files(
[
"./src/test1/a.txt",
"./src/test1/b.txt",
"./src/test1/c/c.txt",
"./src/test2/a.txt",
"./src/test2/b.txt",
"./src/test2/c/c.txt",
]
)
)
self.assertEqual(
results,
{
"./src/test2/a.txt",
"./src/test2/b.txt",
"./src/test2/c/c.txt",
},
)
def test_01_match_files(self):
"""
Tests that matching files one at a time yields the same results as
matching multiples files at once.
"""
spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"*.txt",
"!test1/",
],
)
test_files = [
"src/test1/a.txt",
"src/test1/b.txt",
"src/test1/c/c.txt",
"src/test2/a.txt",
"src/test2/b.txt",
"src/test2/c/c.txt",
]
single_results = set(filter(spec.match_file, test_files))
multi_results = set(spec.match_files(test_files))
self.assertEqual(single_results, multi_results)
def test_01_windows_current_dir_paths(self):
"""
Tests that paths referencing the current directory will be properly
normalized and matched.
"""
spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"*.txt",
"!test1/",
],
)
results = set(
spec.match_files(
[
".\\src\\test1\\a.txt",
".\\src\\test1\\b.txt",
".\\src\\test1\\c\\c.txt",
".\\src\\test2\\a.txt",
".\\src\\test2\\b.txt",
".\\src\\test2\\c\\c.txt",
],
separators=("\\",),
)
)
self.assertEqual(
results,
{
".\\src\\test2\\a.txt",
".\\src\\test2\\b.txt",
".\\src\\test2\\c\\c.txt",
},
)
def test_01_windows_paths(self):
"""
Tests that Windows paths will be properly normalized and matched.
"""
spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"*.txt",
"!test1/",
],
)
results = set(
spec.match_files(
[
"src\\test1\\a.txt",
"src\\test1\\b.txt",
"src\\test1\\c\\c.txt",
"src\\test2\\a.txt",
"src\\test2\\b.txt",
"src\\test2\\c\\c.txt",
],
separators=("\\",),
)
)
self.assertEqual(
results,
{
"src\\test2\\a.txt",
"src\\test2\\b.txt",
"src\\test2\\c\\c.txt",
},
)
def test_02_eq(self):
"""
Tests equality.
"""
first_spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"*.txt",
"!test1/",
],
)
second_spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"*.txt",
"!test1/",
],
)
self.assertEqual(first_spec, second_spec)
def test_02_ne(self):
"""
Tests equality.
"""
first_spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"*.txt",
],
)
second_spec = pathspec.PathSpec.from_lines(
"gitwildmatch",
[
"!*.txt",
],
)
self.assertNotEqual(first_spec, second_spec)
def test_01_addition(self):
"""
Test pattern addition using + operator
"""
first_spec = pathspec.PathSpec.from_lines(
"gitwildmatch", ["test.txt", "test.png"]
)
second_spec = pathspec.PathSpec.from_lines(
"gitwildmatch", ["test.html", "test.jpg"]
)
combined_spec = first_spec + second_spec
results = set(
combined_spec.match_files(
["test.txt", "test.png", "test.html", "test.jpg"], separators=("\\",)
)
)
self.assertEqual(results, {"test.txt", "test.png", "test.html", "test.jpg"})
def test_02_addition(self):
"""
Test pattern addition using += operator
"""
spec = pathspec.PathSpec.from_lines("gitwildmatch", ["test.txt", "test.png"])
spec += pathspec.PathSpec.from_lines("gitwildmatch", ["test.html", "test.jpg"])
results = set(
spec.match_files(
["test.txt", "test.png", "test.html", "test.jpg"], separators=("\\",)
)
)
self.assertEqual(results, {"test.txt", "test.png", "test.html", "test.jpg"})
|
[
"harberg@kth.se"
] |
harberg@kth.se
|
79b993423382b3e092e440f39a6078aea418e1fc
|
5ac5440db74b41e46ca5ac4de10430251a3076f9
|
/Copia de velocidades.py
|
948249c5881035ff6713195c642cc8e24b44fcf9
|
[] |
no_license
|
JMicrobium/progra
|
f75663cfe18b39367fb218919b190aad51581c62
|
47c24c27b1cde1401bde10a760244ee7a21041c7
|
refs/heads/master
| 2020-08-08T06:16:47.236430
| 2019-11-07T03:41:35
| 2019-11-07T03:41:35
| 213,751,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
"""# division of lists
# using zip() + list comprehension
res = [i / j for i, j in zip(test_list1, test_list2)]"""
deltasA=[50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110]
tiemposA=[0.0116, 0.0113, 0.0115, 0.0113, 0.0113, 0.0112, 0.0111, 0.0109, 0.0103, 0.0098, 0.0094, 0.0089, 0.0085, 0.0087, 0.0086, 0.0085, 0.0083, 0.0085, 0.0085, 0.0090, 0.0085 ,0.0086, 0.0088, 0.0089, 0.0080, 0.0080, 0.0082, 0.0081, 0.0081, 0.0083, 0.0076, 0.0074, 0.0073, 0.0072, 0.0071, 0.0071, 0.0071, 0.0072, 0.0070, 0.0071, 0.0070, 0.0069, 0.0068, 0.0067, 0.0067, 0.0067, 0.0066, 0.0066, 0.0066, 0.0066, 0.0065, 0.0066, 0.0065, 0.0064, 0.0064, 0.0062, 0.0062, 0.0061, 0.0060, 0.0060, 0.0060]
A=[(i-20) for i in deltasA]
velocidadesA=[k/j for k,j in zip(A,tiemposA)]
print("'velocidadesA':",velocidadesA)
|
[
"54874211+JMicrobium@users.noreply.github.com"
] |
54874211+JMicrobium@users.noreply.github.com
|
32ed04f60ded1e497874c903fcb12015b3432175
|
af217fb6a724a0450917dc365a0dff3c38fbe94f
|
/grocy/model/inline_object7.py
|
33a02074e26fb3493103c6992b18b2322518fd35
|
[] |
no_license
|
fipwmaqzufheoxq92ebc/grocy-python-openapi
|
b8f7ab5eba96ff28d6845ada22493bf233ac1900
|
014c7b3ef88c9a2e11d6a59c4ef2f8037c623bc6
|
refs/heads/main
| 2023-06-02T16:59:20.253117
| 2021-06-24T13:23:02
| 2021-06-24T13:23:02
| 379,931,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,177
|
py
|
"""
grocy REST API
Authentication is done via API keys (header *GROCY-API-KEY* or same named query parameter), which you can manage [here](http://localhost:8111/manageapikeys).<br>Additionally requests from within the frontend are also valid (via session cookie). # noqa: E501
The version of the OpenAPI document: 3.0.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from grocy.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from grocy.exceptions import ApiAttributeError
class InlineObject7(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'amount': (float,), # noqa: E501
'stock_entry_id': (str,), # noqa: E501
'allow_subproduct_substitution': (bool,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'amount': 'amount', # noqa: E501
'stock_entry_id': 'stock_entry_id', # noqa: E501
'allow_subproduct_substitution': 'allow_subproduct_substitution', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineObject7 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
amount (float): The amount to mark as opened. [optional] # noqa: E501
stock_entry_id (str): A specific stock entry id to open, if used, the amount has to be 1. [optional] # noqa: E501
allow_subproduct_substitution (bool): `True` when any in-stock sub product should be used when the given product is a parent product and currently not in-stock. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineObject7 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
amount (float): The amount to mark as opened. [optional] # noqa: E501
stock_entry_id (str): A specific stock entry id to open, if used, the amount has to be 1. [optional] # noqa: E501
allow_subproduct_substitution (bool): `True` when any in-stock sub product should be used when the given product is a parent product and currently not in-stock. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
[
"29818044+fipwmaqzufheoxq92ebc@users.noreply.github.com"
] |
29818044+fipwmaqzufheoxq92ebc@users.noreply.github.com
|
a8f892dce29287628553c4c94badbe69cb409949
|
e593273909dd6b5148357c775bb3acb15fad3779
|
/b2/part.py
|
5e93e234327b55a13dc365d62b51bae7f652945d
|
[
"MIT"
] |
permissive
|
Python3pkg/B2_Command_Line_Tool
|
e2ed8cfaf1cbd2b13d4fe8479f17b6ecb97d6961
|
15f5a235d831e3fe1987ecfb787893e9e01e279d
|
refs/heads/master
| 2021-01-21T09:23:43.104569
| 2017-05-18T08:45:35
| 2017-05-18T08:45:35
| 91,653,925
| 1
| 0
| null | 2017-05-18T05:50:33
| 2017-05-18T05:50:32
| null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
######################################################################
#
# File: b2/part.py
#
# Copyright 2016 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
class PartFactory(object):
@classmethod
def from_list_parts_dict(cls, part_dict):
return Part(
part_dict['fileId'], part_dict['partNumber'], part_dict['contentLength'],
part_dict['contentSha1']
)
class Part(object):
def __init__(self, file_id, part_number, content_length, content_sha1):
self.file_id = file_id
self.part_number = part_number
self.content_length = content_length
self.content_sha1 = content_sha1
def __repr__(self):
return '<%s %s %s %s %s>' % (
self.__class__.__name__, self.file_id, self.part_number, self.content_length,
self.content_sha1
)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
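# Hedged usage sketch (added; not part of the original file): building a Part
# from the dict shape the list_parts API returns; the field values are made up.
if __name__ == '__main__':
    part = PartFactory.from_list_parts_dict({
        'fileId': '4_zexamplefileid',
        'partNumber': 1,
        'contentLength': 1048576,
        'contentSha1': 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
    })
    print(part)  # <Part 4_zexamplefileid 1 1048576 da39a3ee...>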
|
[
"coder@beachfamily.net"
] |
coder@beachfamily.net
|
71967d56f56ea90ba5abb1691448706e5b956b69
|
e7881505ecc7e993b1d87be5bf604448fa2a3e2d
|
/server/taxi/taxi/urls.py
|
a8e995c75829d2cc0fbf6ee7d3e0090012af6d15
|
[] |
no_license
|
pmaturure3/React_Django_Taxi_App
|
8bf7e78174dd6eab6e4cdb1da70aa58af4d2ae05
|
1e69d4120b83be2bb430decea3fbc16d74a77b31
|
refs/heads/master
| 2023-02-11T02:33:18.081483
| 2020-07-26T21:58:46
| 2020-07-26T21:58:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
from django.contrib import admin
from django.urls import include, path
from rest_framework_simplejwt.views import TokenRefreshView
from trips.views import SignUpView, LogInView
urlpatterns = [
path('admin/', admin.site.urls),
path('api/sign_up/', SignUpView.as_view(), name='sign_up'),
path('api/log_in/', LogInView.as_view(), name='log_in'),
path('api/token/refresh/', TokenRefreshView.as_view(),
name='token_refresh'),
path('api/trip/', include('trips.urls', 'trip',)),
]
|
[
"sjogleka@uncc.edu"
] |
sjogleka@uncc.edu
|
c55d8ac50017b3cbc3399ad4b902e24e61709f95
|
e30aa4cbaecf14398ca72eac996e7dfda55d85cd
|
/setup.py
|
2b72191566b82e2d64d07f6393417b32da4e4f65
|
[
"MIT"
] |
permissive
|
DanielLSM/crypto-insider
|
2adfa9fc707a69d58440bc75cb3f2ebb4b6ef14d
|
f668f184e5d748e82918f4ec4b57f8ae9b417804
|
refs/heads/master
| 2021-04-28T18:44:54.614934
| 2018-02-24T21:07:11
| 2018-02-24T21:07:11
| 121,878,453
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
from setuptools import setup, find_packages
import sys
if sys.version_info.major != 3:
print('This Python is only compatible with Python 3, but you are running '
'Python {}. The installation will likely fail.'.format(sys.version_info.major))
setup(name='baselines',
packages=[package for package in find_packages()
if package.startswith('crypto-insider')],
install_requires=[
'lxml',
'requests',
],
      description='A library to scrape popular cryptocurrency websites',
author='Marta. Daniel Luis',
url='https://github.com/DanielLSM/crypto-insider',
author_email='daniellsmarta@gmail.com',
version='0.0.1')
|
[
"daniellsmarta@gmail.com"
] |
daniellsmarta@gmail.com
|
8a43744f7e3df08e09834cfc54ecc99bb72ea18f
|
c63bf01b632c52dcfb19e78b47c36fb5efcab507
|
/src/components/enemy.py
|
ca6b6fdbea24c7d86a63f385b3af1085c8dd35c9
|
[] |
no_license
|
Grimmys/BubbleTanks2
|
3292173eb6abd66d40aa5306e65af381a47867bd
|
a015ece36b4bea80b92656ffc37e947b0919a536
|
refs/heads/main
| 2023-06-26T12:27:15.150425
| 2021-07-29T19:47:51
| 2021-07-29T19:47:51
| 400,833,006
| 1
| 0
| null | 2021-08-28T15:58:14
| 2021-08-28T15:58:13
| null |
UTF-8
|
Python
| false
| false
| 10,242
|
py
|
from random import uniform
from math import pi, cos, sin, hypot
import pygame as pg
from assets.paths import *
from data.constants import *
from data.enemies import ENEMIES
from components.utils import *
from components.base_mob import BaseMob
from components.enemy_body import EnemyBody
from components.enemy_weapons import EnemyWeapons
from components.enemy_event import EnemyEvent
from components.special_effects import infection_surfaces
sticky_w = H(108.391)
sticky_h = H(99.248)
sticky_image = pg.image.load(STICKY_IMAGE).convert_alpha()
sticky_image = pg.transform.smoothscale(sticky_image, (sticky_w, sticky_h))
class Enemy(BaseMob):
def __init__(self, game, name):
self.name = name
data = ENEMIES[name]
super().__init__(*self.start_pos(), data["max health"], data["max health"],
data["radius"], EnemyBody(self, game.rect, data), EnemyWeapons(self, game, data))
self.game = game
self.death_award = data["death award"]
self.screen_rect = game.rect
self.rect = pg.Rect(0, 0, data["rect size"], data["rect size"])
self.rect.center = self.x, self.y
self.update_component_states()
self.events = [EnemyEvent(self, game, event_data) for event_data in data["events"]]
self.velocity = data["velocity"]
self.vel_x = 0
self.vel_y = 0
self.body.angle = uniform(0, 2*pi) if self.velocity != 0 else 0
self.set_velocity()
self.angle_to_turn = 0
self.last_angle = 0
self.safety_turn = False
self.time_to_turn = 0
self.time_to_hold_turning = 0
self.spawners_data = data["spawners"]
self.killed = False
self.chasing_infectors = set()
self.infected = False
self.infection_time = 0
self.infection_cooldown = 170
self.infection_effect_time = 0
@property
def is_on_screen(self):
return self.rect.colliderect(self.screen_rect)
@property
def about_to_exit(self):
if self.velocity == 0:
return False
distance = self.rect.w/2 + HF(132)
x = self.x + distance * self.vel_x / self.velocity
y = self.y + distance * self.vel_y / self.velocity
return hypot(x - SCR_W2, y - SCR_H2) > ROOM_RADIUS
@staticmethod
def start_pos():
distance = uniform(0, ROOM_RADIUS * 0.7)
angle = uniform(0, 2*pi)
x = SCR_W2 + distance * cos(angle)
y = SCR_H2 - distance * sin(angle)
return x, y
def update_health(self, delta_health: int):
self.health = min(self.max_health, self.health + delta_health)
self.update_component_states()
if self.health <= 0:
self.killed = True
def become_infected(self):
if not self.infected:
self.infected = True
self.body.become_infected()
self.weapons.become_infected()
def update_infected_state(self, dt):
if self.infected:
self.infection_time += dt
if self.infection_time >= self.infection_cooldown:
self.receive_damage(-1, play_sound=False)
self.infection_time = 0
def get_angle_pos(self):
return calculate_angle(SCR_W2, SCR_H2, self.x, self.y)
def set_velocity(self):
self.vel_x = self.velocity * cos(self.body.angle)
self.vel_y = -self.velocity * sin(self.body.angle)
def set_pos(self, x, y):
self.x = x
self.y = y
self.rect.center = x, y
def move_by_time(self, dt):
self.move(self.vel_x * dt, self.vel_y * dt)
def update_angle(self, delta_angle):
self.body.angle += delta_angle
self.set_velocity()
def update_pos(self, dt):
if self.stunned or self.sticky or self.velocity == 0:
self.rect.center = self.x, self.y
self.weapons.update_pos()
return
last_angle_pos = self.get_angle_pos()
self.move_by_time(dt)
about_to_exit = self.about_to_exit
if self.time_to_turn == 0 or (about_to_exit and not self.safety_turn):
if about_to_exit:
angle_pos = self.get_angle_pos()
angle_to_turn = uniform(-pi, -pi/2)
if angle_pos > last_angle_pos:
angle_to_turn *= -1
k = HF(2.4 * 180 / pi)
self.time_to_turn = abs(angle_to_turn) / self.velocity * k
self.angle_to_turn = self.velocity * sign(angle_to_turn) / k
self.time_to_hold_turning = 1800
self.safety_turn = True
else:
self.time_to_turn = max(0, self.time_to_turn - dt)
self.update_angle(self.angle_to_turn * dt)
if self.time_to_hold_turning > 0:
self.time_to_hold_turning -= dt
elif self.time_to_turn == 0:
if dt != 0 and uniform(0, 1000/dt) < 1:
distance = uniform(-100, 100)
k = HF(2.4)
self.time_to_turn = abs(distance) / self.velocity * k
self.angle_to_turn = self.velocity * sign(distance) / k * pi/180
self.safety_turn = False
self.weapons.update_pos()
def update_shape(self, dt):
if self.is_on_screen:
self.body.update_shape(dt)
self.weapons.update_shape(dt)
self.infection_effect_time = (self.infection_effect_time + dt) % 320
def update_shooting(self, dt):
if not self.stunned:
self.weapons.update_shooting(dt)
def receive_damage(self, damage, play_sound=True):
super().receive_damage(damage)
for event in self.events:
if self.health <= event.trigger_value and not event.hit or event.trigger_value == -1:
event.hit = True
event.action()
if self.killed:
self.game.sound_player.play_sound(ENEMY_DEATH)
self.game.pause_menu.update_counter(0, 1)
if play_sound:
self.game.sound_player.play_sound(ENEMY_HIT)
def update(self, dt):
self.update_sticky_state(dt)
self.update_stunned_state(dt)
self.update_infected_state(dt)
self.update_pos(dt)
self.update_shape(dt)
self.update_shooting(dt)
def draw_sticky(self, screen, dx, dy):
x = self.x - dx - sticky_w/2
y = self.y - dy - sticky_h/2
screen.blit(sticky_image, (x, y))
def draw_infected(self, screen, dx, dy):
index = int(17 * self.infection_effect_time/320)
if 9 <= index <= 15:
surface = infection_surfaces[index - 9]
x = self.x - dx - surface.get_width()/2
y = self.y - dy - surface.get_height()/2
screen.blit(surface, (x, y))
def draw(self, screen, dx=0, dy=0):
if self.is_on_screen:
self.body.draw(screen, dx, dy)
self.weapons.draw(screen, dx, dy)
if self.sticky:
self.draw_sticky(screen, dx, dy)
if self.infected:
self.draw_infected(screen, dx, dy)
class BossHead(Enemy):
def __init__(self, game, name):
super().__init__(game, name)
self.body.angle = -0.5 * pi
self.delta_angle = 0
self.target = game.player
self.rect_offset = HF(192.575)
@staticmethod
def start_pos():
return SCR_W2, -HF(480)
def collide_bullet(self, bullet) -> bool:
return (self.rect.colliderect(bullet.rect) and
circle_collidepoint(*self.rect.center, self.radius + bullet.radius, bullet.x, bullet.y))
def move(self, dx, dy):
self.x += dx
self.y += dy
self.rect.centerx = self.x + self.rect_offset * cos(self.body.angle)
self.rect.centery = self.y - self.rect_offset * sin(self.body.angle)
def update_pos(self, dt):
if self.sticky or self.stunned:
self.weapons.update_pos()
return
angle = calculate_angle(self.x, self.y, self.target.x, self.target.y) + 0.5 * pi
if angle > pi:
angle = -angle + 0.5 * pi
if angle > self.delta_angle:
self.delta_angle = min(angle, self.delta_angle + 0.00072 * dt, 0.23 * pi)
else:
self.delta_angle = max(angle, self.delta_angle - 0.00072 * dt, -0.23 * pi)
self.body.angle = -0.5 * pi + self.delta_angle
self.rect.centerx = self.x + self.rect_offset * cos(self.body.angle)
self.rect.centery = self.y - self.rect_offset * sin(self.body.angle)
self.weapons.update_pos()
class BossLeg(Enemy):
def __init__(self, game, name):
super().__init__(game, name)
self.body.angle = 0.5 * pi
self.rect_offset = HF(124.374)
self.rect.centerx = self.x + self.rect_offset * cos(self.body.angle)
self.rect.centery = self.y - self.rect_offset * sin(self.body.angle)
@staticmethod
def start_pos():
return SCR_W2, HF(1280)
def collide_bullet(self, bullet) -> bool:
return (self.rect.colliderect(bullet.rect) and
circle_collidepoint(*self.rect.center, self.radius + bullet.radius, bullet.x, bullet.y))
def move(self, dx, dy):
self.x += dx
self.y += dy
self.rect.centerx = self.x + self.rect_offset * cos(self.body.angle)
self.rect.centery = self.y - self.rect_offset * sin(self.body.angle)
def update_pos(self, dt):
self.weapons.update_pos()
class BossHand(Enemy):
def __init__(self, game, name):
super().__init__(game, name)
if self.name == "BossLeftHand":
self.body.angle = -0.2 * pi
else:
self.body.angle = -0.8 * pi
def start_pos(self):
if self.name == "BossLeftHand":
return SCR_W2 - HF(600), -HF(80)
return SCR_W2 + HF(600), -HF(80)
def update_pos(self, dt):
self.weapons.update_pos()
def make_enemy(game, name):
if name == "BossHead":
return BossHead(game, name)
if name == "BossLeg":
return BossLeg(game, name)
if name in ("BossLeftHand", "BossRightHand"):
return BossHand(game, name)
return Enemy(game, name)
__all__ = ["Enemy", "make_enemy"]
|
[
"ildar.239@mail.ru"
] |
ildar.239@mail.ru
|
c8dcd6449bc4e03816534630d254503edabffe65
|
f42431fa2dd76706ebf694bd421e8416b9ba83a5
|
/training/train_with_action_masking_3/callbacks.py
|
6067e2a9855f116eb7ffc5acdced23c3cc65c800
|
[] |
no_license
|
Woitoxx/bomberman-rl
|
95e633bc711ebd6f42914c68a7f7565199d58fba
|
6071bfcb5a8d4d5398e5c9e43221d83361932209
|
refs/heads/master
| 2023-03-26T13:10:52.269182
| 2021-03-29T13:34:44
| 2021-03-29T13:34:44
| 338,549,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,682
|
py
|
import copy
import random
from typing import Dict
import numpy as np
from ray.rllib import RolloutWorker, BaseEnv, Policy, SampleBatch
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.evaluation import MultiAgentEpisode
class MyCallbacks(DefaultCallbacks):
def __init__(self):
super().__init__()
self.policies = []
self.player_scores = []
self.opponent_scores = []
def on_episode_start(self, worker: RolloutWorker, base_env: BaseEnv,
policies: Dict[str, Policy],
episode: MultiAgentEpisode, **kwargs):
pass
def on_episode_step(self, worker: RolloutWorker, base_env: BaseEnv,
episode: MultiAgentEpisode, **kwargs):
pass
def on_episode_end(self, worker: RolloutWorker, base_env: BaseEnv,
policies: Dict[str, Policy], episode: MultiAgentEpisode,
**kwargs):
        self.player_scores.append(episode.last_info_for('agent_0'))
for i in range(1,4):
self.opponent_scores.append(episode.last_info_for(f'agent_{i}'))
def on_sample_end(self, worker: RolloutWorker, samples: SampleBatch,
**kwargs):
print(f'Player max score: {np.max(self.player_scores)}')
print(f'Player avg score: {np.average(self.player_scores)}')
print(f'Opp max score: {np.max(self.opponent_scores)}')
print(f'Opp avg score: {np.average(self.opponent_scores)}')
self.player_scores.clear()
self.opponent_scores.clear()
pass
    # probably no longer required
    @staticmethod
    def copy_weights(src_policy, dest_policy):
P0key_P1val = {} # temp storage with "policy_0" keys & "policy_1" values
for (k, v), (k2, v2) in zip(dest_policy.get_weights().items(),
src_policy.items()):
P0key_P1val[k] = v2
# set weights
dest_policy.set_weights(P0key_P1val)
def on_train_result(self, trainer, result: dict, **kwargs):
print("trainer.train() result: {} -> {} episodes".format(
trainer, result["episodes_this_iter"]))
# Add current policy to the menagerie
current_policy = trainer.get_policy('policy_01').get_weights()
if result["policy_reward_mean"]["policy_01"] > 0.02 or len(self.policies) == 0:
self.policies.append(current_policy)
# Maintain only the latest 100 previous policies
if len(self.policies) > 100:
self.policies.pop(0)
#self.copy_weights(current_policy if np.random.rand() > 0.2 else np.random.choice(self.policies), trainer.get_policy('policy_02'))
# Choose either current policy (80%) or random previous policy (20%) for our opponents
#new_policy = current_policy if np.random.rand() > 0.2 else random.choice(self.policies)
#trainer.workers.foreach_worker(lambda w: w.get_policy('policy_02').set_weights(new_policy))
trainer.workers.foreach_worker(lambda w: self.copy_weights(current_policy if np.random.rand() > 0.2 else np.random.choice(self.policies), w.get_policy('policy_02')))
# Checkpoint
if result["iterations_since_restore"] % 10 == 0:
print(f'Checkpoint saved at iter {result["iterations_since_restore"]}')
trainer.save()
def on_postprocess_trajectory(
self, worker: RolloutWorker, episode: MultiAgentEpisode,
agent_id: str, policy_id: str, policies: Dict[str, Policy],
postprocessed_batch: SampleBatch,
original_batches: Dict[str, SampleBatch], **kwargs):
pass
|
[
"15731141+Woitoxx@users.noreply.github.com"
] |
15731141+Woitoxx@users.noreply.github.com
|
6fde26ec65ad1921499ea7fa51e39286788804c3
|
b21a81b7f8ad98c87321b735487c10069e6fc72a
|
/attendance/urls.py
|
08a5e7f46a8c08376ca3c2f4d83d6381095701d0
|
[] |
no_license
|
cabilangan112/Attendace
|
2bf809a1ce6db849cc9dcf7f311600e453f52f9c
|
bfb4aa86293f4321cd2f121a5cdc957d61ea704a
|
refs/heads/master
| 2021-08-24T11:29:53.448940
| 2017-11-21T05:52:28
| 2017-11-21T05:52:28
| 111,504,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
"""attendace URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
[
"jassencabilangan@gmail.com"
] |
jassencabilangan@gmail.com
|
c533d73f97e5d29a1450cb193a09ef1e06419fc7
|
bf01f2ee9dc903810dfaba3bff6dda56d31f41b9
|
/ctpnsource/lib/fast_rcnn/train.py
|
abfaf281cb80815ea2f43a088ffa22019ae3286f
|
[
"MIT"
] |
permissive
|
cwxcode/OCR_caption_recognition
|
e978741ccae2889ceb9cda055e9694dad40b80b0
|
b2c5641e37e0d98b52e659ff42f549eb33840ea2
|
refs/heads/master
| 2020-04-07T01:31:35.243289
| 2018-11-17T03:42:45
| 2018-11-17T03:42:45
| 157,943,881
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,190
|
py
|
import numpy as np
import os
import tensorflow as tf
from lib.roi_data_layer.layer import RoIDataLayer
from lib.utils.timer import Timer
from lib.roi_data_layer import roidb as rdl_roidb
from lib.fast_rcnn.config import cfg
_DEBUG = False
class SolverWrapper(object):
def __init__(self, sess, network, imdb, roidb, output_dir, logdir, pretrained_model=None):
"""Initialize the SolverWrapper."""
self.net = network
self.imdb = imdb
self.roidb = roidb
self.output_dir = output_dir
self.pretrained_model = pretrained_model
print('Computing bounding-box regression targets...')
if cfg.TRAIN.BBOX_REG:
self.bbox_means, self.bbox_stds = rdl_roidb.add_bbox_regression_targets(roidb)
print('done')
# For checkpoint
self.saver = tf.train.Saver(max_to_keep=100,write_version=tf.train.SaverDef.V2)
self.writer = tf.summary.FileWriter(logdir=logdir,
graph=tf.get_default_graph(),
flush_secs=5)
def snapshot(self, sess, iter):
net = self.net
if cfg.TRAIN.BBOX_REG and 'bbox_pred' in net.layers and cfg.TRAIN.BBOX_NORMALIZE_TARGETS:
# save original values
with tf.variable_scope('bbox_pred', reuse=True):
weights = tf.get_variable("weights")
biases = tf.get_variable("biases")
orig_0 = weights.eval()
orig_1 = biases.eval()
# scale and shift with bbox reg unnormalization; then save snapshot
weights_shape = weights.get_shape().as_list()
sess.run(weights.assign(orig_0 * np.tile(self.bbox_stds, (weights_shape[0],1))))
sess.run(biases.assign(orig_1 * self.bbox_stds + self.bbox_means))
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix +
'_iter_{:d}'.format(iter+1) + '.ckpt')
filename = os.path.join(self.output_dir, filename)
self.saver.save(sess, filename)
print('Wrote snapshot to: {:s}'.format(filename))
if cfg.TRAIN.BBOX_REG and 'bbox_pred' in net.layers:
# restore net to original state
sess.run(weights.assign(orig_0))
sess.run(biases.assign(orig_1))
def build_image_summary(self):
# A simple graph for write image summary
log_image_data = tf.placeholder(tf.uint8, [None, None, 3])
log_image_name = tf.placeholder(tf.string)
# import tensorflow.python.ops.gen_logging_ops as logging_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.framework import ops as _ops
log_image = gen_logging_ops._image_summary(log_image_name, tf.expand_dims(log_image_data, 0), max_images=1)
_ops.add_to_collection(_ops.GraphKeys.SUMMARIES, log_image)
# log_image = tf.summary.image(log_image_name, tf.expand_dims(log_image_data, 0), max_outputs=1)
return log_image, log_image_data, log_image_name
def train_model(self, sess, max_iters, restore=False):
"""Network training loop."""
data_layer = get_data_layer(self.roidb, self.imdb.num_classes)
total_loss,model_loss, rpn_cross_entropy, rpn_loss_box=self.net.build_loss(ohem=cfg.TRAIN.OHEM)
# scalar summary
tf.summary.scalar('rpn_reg_loss', rpn_loss_box)
tf.summary.scalar('rpn_cls_loss', rpn_cross_entropy)
tf.summary.scalar('model_loss', model_loss)
tf.summary.scalar('total_loss',total_loss)
summary_op = tf.summary.merge_all()
log_image, log_image_data, log_image_name =\
self.build_image_summary()
# optimizer
lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
if cfg.TRAIN.SOLVER == 'Adam':
opt = tf.train.AdamOptimizer(cfg.TRAIN.LEARNING_RATE)
elif cfg.TRAIN.SOLVER == 'RMS':
opt = tf.train.RMSPropOptimizer(cfg.TRAIN.LEARNING_RATE)
else:
# lr = tf.Variable(0.0, trainable=False)
momentum = cfg.TRAIN.MOMENTUM
opt = tf.train.MomentumOptimizer(lr, momentum)
global_step = tf.Variable(0, trainable=False)
with_clip = True
if with_clip:
tvars = tf.trainable_variables()
grads, norm = tf.clip_by_global_norm(tf.gradients(total_loss, tvars), 10.0)
train_op = opt.apply_gradients(list(zip(grads, tvars)), global_step=global_step)
else:
train_op = opt.minimize(total_loss, global_step=global_step)
# intialize variables
sess.run(tf.global_variables_initializer())
restore_iter = 0
# load vgg16
if self.pretrained_model is not None and not restore:
try:
print(('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model))
self.net.load(self.pretrained_model, sess, True)
except:
raise Exception('Check your pretrained model {:s}'.format(self.pretrained_model))
# resuming a trainer
if restore:
try:
ckpt = tf.train.get_checkpoint_state(self.output_dir)
print('Restoring from {}...'.format(ckpt.model_checkpoint_path), end=' ')
self.saver.restore(sess, ckpt.model_checkpoint_path)
stem = os.path.splitext(os.path.basename(ckpt.model_checkpoint_path))[0]
restore_iter = int(stem.split('_')[-1])
sess.run(global_step.assign(restore_iter))
print('done')
except:
                raise Exception('Check your pretrained {:s}'.format(ckpt.model_checkpoint_path))
last_snapshot_iter = -1
timer = Timer()
for iter in range(restore_iter, max_iters):
timer.tic()
# learning rate
if iter != 0 and iter % cfg.TRAIN.STEPSIZE == 0:
sess.run(tf.assign(lr, lr.eval() * cfg.TRAIN.GAMMA))
print(lr)
# get one batch
blobs = data_layer.forward()
feed_dict={
self.net.data: blobs['data'],
self.net.im_info: blobs['im_info'],
self.net.keep_prob: 0.5,
self.net.gt_boxes: blobs['gt_boxes'],
self.net.gt_ishard: blobs['gt_ishard'],
self.net.dontcare_areas: blobs['dontcare_areas']
}
res_fetches=[]
fetch_list = [total_loss,model_loss, rpn_cross_entropy, rpn_loss_box,
summary_op,
train_op] + res_fetches
total_loss_val,model_loss_val, rpn_loss_cls_val, rpn_loss_box_val, \
summary_str, _ = sess.run(fetches=fetch_list, feed_dict=feed_dict)
self.writer.add_summary(summary=summary_str, global_step=global_step.eval())
_diff_time = timer.toc(average=False)
if (iter) % (cfg.TRAIN.DISPLAY) == 0:
print('iter: %d / %d, total loss: %.4f, model loss: %.4f, rpn_loss_cls: %.4f, rpn_loss_box: %.4f, lr: %f'%\
(iter, max_iters, total_loss_val,model_loss_val,rpn_loss_cls_val,rpn_loss_box_val,lr.eval()))
print('speed: {:.3f}s / iter'.format(_diff_time))
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
if cfg.TRAIN.HAS_RPN:
rdl_roidb.prepare_roidb(imdb)
else:
rdl_roidb.prepare_roidb(imdb)
print('done')
return imdb.roidb
def get_data_layer(roidb, num_classes):
"""return a data layer."""
if cfg.TRAIN.HAS_RPN:
if cfg.IS_MULTISCALE:
# obsolete
# layer = GtDataLayer(roidb)
raise "Calling caffe modules..."
else:
layer = RoIDataLayer(roidb, num_classes)
else:
layer = RoIDataLayer(roidb, num_classes)
return layer
def train_net(network, imdb, roidb, output_dir, log_dir, pretrained_model=None, max_iters=40000, restore=False):
"""Train a Fast R-CNN network."""
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allocator_type = 'BFC'
config.gpu_options.per_process_gpu_memory_fraction = 0.75
with tf.Session(config=config) as sess:
sw = SolverWrapper(sess, network, imdb, roidb, output_dir, logdir= log_dir, pretrained_model=pretrained_model)
print('Solving...')
sw.train_model(sess, max_iters, restore=restore)
print('done solving')
|
[
"kl1411@126.com"
] |
kl1411@126.com
|
eded561f07a96acb72afcdd9528b380067981cdc
|
402125c206c68ddb33203d0d106e7dc681e47c9d
|
/13/part1.py
|
34cffa3e7708354d5b387e93143ab04435d311f7
|
[] |
no_license
|
zacharyliu/advent-of-code-2020
|
bcd02f6e2b4eaaa3ac35024b2e70e2e697bdebf4
|
0f2e69130d050a98ca9f83925a08345779edda58
|
refs/heads/main
| 2023-02-08T02:27:50.094890
| 2020-12-25T06:07:58
| 2020-12-25T06:07:58
| 321,265,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,623
|
py
|
'''
Your ferry can make it safely to a nearby port, but it won't get much further. When you call to book another ship, you discover that no ships embark from that port to your vacation island. You'll need to get from the port to the nearest airport.
Fortunately, a shuttle bus service is available to bring you from the sea port to the airport! Each bus has an ID number that also indicates how often the bus leaves for the airport.
Bus schedules are defined based on a timestamp that measures the number of minutes since some fixed reference point in the past. At timestamp 0, every bus simultaneously departed from the sea port. After that, each bus travels to the airport, then various other locations, and finally returns to the sea port to repeat its journey forever.
The time this loop takes a particular bus is also its ID number: the bus with ID 5 departs from the sea port at timestamps 0, 5, 10, 15, and so on. The bus with ID 11 departs at 0, 11, 22, 33, and so on. If you are there when the bus departs, you can ride that bus to the airport!
Your notes (your puzzle input) consist of two lines. The first line is your estimate of the earliest timestamp you could depart on a bus. The second line lists the bus IDs that are in service according to the shuttle company; entries that show x must be out of service, so you decide to ignore them.
To save time once you arrive, your goal is to figure out the earliest bus you can take to the airport. (There will be exactly one such bus.)
For example, suppose you have the following notes:
939
7,13,x,x,59,x,31,19
Here, the earliest timestamp you could depart is 939, and the bus IDs in service are 7, 13, 59, 31, and 19. Near timestamp 939, these bus IDs depart at the times marked D:
time bus 7 bus 13 bus 59 bus 31 bus 19
929 . . . . .
930 . . . D .
931 D . . . D
932 . . . . .
933 . . . . .
934 . . . . .
935 . . . . .
936 . D . . .
937 . . . . .
938 D . . . .
939 . . . . .
940 . . . . .
941 . . . . .
942 . . . . .
943 . . . . .
944 . . D . .
945 D . . . .
946 . . . . .
947 . . . . .
948 . . . . .
949 . D . . .
The earliest bus you could take is bus ID 59. It doesn't depart until timestamp 944, so you would need to wait 944 - 939 = 5 minutes before it departs. Multiplying the bus ID by the number of minutes you'd need to wait gives 295.
What is the ID of the earliest bus you can take to the airport multiplied by the number of minutes you'll need to wait for that bus?
'''
def run():
cmds = []
earliest = {}
with open('./input.txt') as f:
time = int(f.readline().strip())
for t in f.readline().strip().split(','):
if t == 'x':
continue
t = int(t)
cur = 0
while cur < time:
cur += t
earliest[t] = cur
best_id = None
best = None
for bus, t in earliest.items():
        if best is None or t < best:
best_id = bus
best = t
return best_id * (best - time)
print(run())
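# Worked check against the docstring example (added illustration): time = 939,
# bus 59 first departs at or after 939 at 59 * 16 = 944, wait = 944 - 939 = 5,
# and the answer is 59 * 5 = 295.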
|
[
"zach@zliu.io"
] |
zach@zliu.io
|
059b0a3788bb9be3058493f22296697b9d146e09
|
ec38bb5972e1c9bc401bc62c3d1bd9b2e9b2e8f6
|
/project/app/models/tortoiseORM.py
|
1d4a34f005af92195649b6f25df82efad6bcf767
|
[] |
no_license
|
AimVoma/fastapi-tdd-docker
|
87a4217bba5156dfb099ed46ab96530dbe177dd4
|
2046bda82d25a6fb824508a348eb829c7feb82e5
|
refs/heads/master
| 2023-05-09T05:20:37.985464
| 2021-05-29T17:04:10
| 2021-05-29T17:04:10
| 367,978,226
| 1
| 0
| null | 2021-06-06T19:08:03
| 2021-05-16T20:24:42
|
Python
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
# project/app/models/tortoise.py
from tortoise import fields, models
from tortoise.contrib.pydantic import pydantic_model_creator # new
class TextSummary(models.Model):
url = fields.TextField()
summary = fields.TextField()
created_at = fields.DatetimeField(auto_now_add=True)
def __str__(self):
return self.url
SummarySchema = pydantic_model_creator(TextSummary) # new
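# Hedged usage sketch (added; not part of the original module): serializing a
# row through the generated schema inside an async context.
async def summary_to_dict(summary_id):
    summary = await TextSummary.get(id=summary_id)
    schema = await SummarySchema.from_tortoise_orm(summary)
    return schema.dict()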
|
[
"aim.voma@outlook.com"
] |
aim.voma@outlook.com
|
aea11a35bba0d061397f5bdd20233c0cf6b90b7c
|
7928e262f4a02f660708a7e5c68df88d0687076f
|
/python/autscore/app/__init__.py
|
e9add9e0e15d85c5218949b4ee9361ae4a70d7df
|
[] |
no_license
|
sunyinggang/w_project
|
e61619922a4ea2c27ac8f40242a211ecaf9fb7a3
|
2c65457e91d84fc39cd96d3e2e8010af03b1ddd0
|
refs/heads/master
| 2022-12-14T20:10:22.552572
| 2020-09-05T11:09:47
| 2020-09-05T11:09:47
| 233,967,595
| 0
| 0
| null | 2022-12-08T04:22:17
| 2020-01-15T01:01:52
|
HTML
|
UTF-8
|
Python
| false
| false
| 761
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+pymysql://root:123456@localhost/wautscore"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SECRET_KEY'] = '8906ced739ec4d3a80c0bcecfb15fb8c'
app.debug = True
db = SQLAlchemy(app)
from app.home import home as home_blueprint
from app.admin import admin as admin_blueprint
from app.teacher import teacher as teacher_blueprint
from app.student import student as student_blueprint
app.register_blueprint(home_blueprint)
app.register_blueprint(admin_blueprint, url_prefix="/admin")
app.register_blueprint(teacher_blueprint, url_prefix="/teacher")
app.register_blueprint(student_blueprint, url_prefix="/student")
|
[
"136080416@qq.com"
] |
136080416@qq.com
|
b984ec0ae65ac9b2c3d218e69774959c6ec17e79
|
ed70c80a4dcd8438fd5873ad9f0166c49e51fb1d
|
/datasets/Vessels.py
|
4003140289681cb8eb281d4266f9060d74ae28af
|
[
"MIT"
] |
permissive
|
texsmv/point_cloud_reconstruction
|
65ae96aee53c2eef4ee79947adba91c75de899cc
|
ef348b0884245c818674d2bb5e3ba05cc832a639
|
refs/heads/master
| 2021-11-30T20:55:30.757450
| 2021-11-20T06:07:14
| 2021-11-20T06:07:14
| 205,484,127
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,278
|
py
|
from os import listdir, makedirs, remove
from os.path import exists, join
import os
import pickle
import pandas as pd
import numpy as np
import itertools
from torch.utils.data import Dataset
from utils.data import load_obj, load_obj_features, basis_point_set_random, load_HKS_features
class VesselDataset(Dataset):
def __init__(self, root_dir = "D/3D Models", split = 'train'):
self.root_dir = root_dir
self.pc_names = []
self.categories = listdir(root_dir)
#todo: some of the objets in these categories are corrupted
self.categories.remove("Amphora")
self.categories.remove("All Models")
self.categories.remove("Modern-Glass")
self.split = split
self.get_names()
def get_names(self):
self.pc_names = [ [file for file in listdir(join(self.root_dir, category))] for category in self.categories]
for i in range(len(self.categories)):
for j in range(len(self.pc_names[i])):
self.pc_names[i][j] = join(self.categories[i], self.pc_names[i][j])
if(self.split == 'train'):
self.pc_names = [ pc[:int(0.85 * len(pc))] for pc in self.pc_names ]
elif(self.split == 'test'):
self.pc_names = [ pc[int(0.85 * len(pc)):] for pc in self.pc_names ]
self.pc_names = list(itertools.chain.from_iterable(self.pc_names))
self.pc_names = np.array(self.pc_names)
self.pc_names = self.pc_names.flatten()
def __len__(self):
return len(self.pc_names)
def __getitem__(self, idx):
pc_filename = self.pc_names[idx]
pc_filepath = join(self.root_dir, pc_filename)
sample = load_obj(pc_filepath)
return sample, 0
class VesselDataset2(Dataset):
def __init__(self, root_dir = "/media/D/Datasets/Tesis/SimplifiedManifolds"):
# def __init__(self, root_dir = "/media/data/Datasets/Tesis/3D Models/Abstract"):
self.root_dir = root_dir
self.pc_names = []
self.get_names()
def get_names(self):
self.pc_names = np.array(listdir(self.root_dir))
self.pc_names.sort()
def __len__(self):
return len(self.pc_names)
def __getitem__(self, idx):
pc_filename = self.pc_names[idx]
pc_filepath = join(self.root_dir, pc_filename)
sample = load_obj(pc_filepath)
return sample, 0
class VesselDataset_Pset(Dataset):
def __init__(self, root_dir = "/home/texs/Documents/Repositorios/point_cloud_reconstruction/data/SimplifiedManifolds"):
self.root_dir = root_dir
self.pc_names = []
self.get_names()
self.basis_pset = []
if os.path.exists("basis_pytorch.pkl"):
self.basis_pset = pickle.load( open( "basis_pytorch.pkl", "rb" ) )
else:
self.basis_pset = basis_point_set_random(1.0, 1000)
pickle.dump( self.basis_pset, open( "basis_pytorch.pkl", "wb" ) )
def get_names(self):
self.pc_names = np.array(listdir(self.root_dir))
self.pc_names.sort()
def __len__(self):
return len(self.pc_names)
def __getitem__(self, idx):
pc_filename = self.pc_names[idx]
pc_filepath = join(self.root_dir, pc_filename)
features, points = load_obj_features(pc_filepath, self.basis_pset)
return features, points
class VesselDataset_HKS1(Dataset):
def __init__(self, root_dir_hks = "/home/texs/Documents/Repositorios/point_cloud_reconstruction/data/150", root_dir_pc = "/home/texs/Documents/Repositorios/point_cloud_reconstruction/data/SimplifiedManifolds"):
self.root_dir_hks = root_dir_hks
self.root_dir_pc = root_dir_pc
self.pc_names = []
self.get_names()
def get_names(self):
self.pc_names = np.array(listdir(self.root_dir_pc))
self.pc_names.sort()
self.pc_names_hks = np.array(listdir(self.root_dir_hks))
self.pc_names_hks.sort()
def __len__(self):
return len(self.pc_names)
def __getitem__(self, idx):
pc_filename = self.pc_names[idx]
pc_filepath = join(self.root_dir_pc, pc_filename)
pc_filename_hks = self.pc_names_hks[idx]
pc_filepath_hks = join(self.root_dir_hks, pc_filename_hks)
features, points = load_HKS_features(pc_filepath, pc_filepath_hks)
return features, points
class VesselDataset4(Dataset):
def __init__(self, root_dir = "/home/texs/Documents/Repositorios/point_cloud_reconstruction/data/SimplifiedManifolds"):
self.root_dir = root_dir
self.pc_names = []
self.get_names()
self.basis_pset = basis_point_set_random(1.0, 1000)
def get_names(self):
self.pc_names = np.array(listdir(self.root_dir))
self.pc_names.sort()
def __len__(self):
return len(self.pc_names)
def __getitem__(self, idx):
pc_filename = self.pc_names[idx]
pc_filepath = join(self.root_dir, pc_filename)
features, points = load_obj_features(pc_filepath, self.basis_pset)
return features, points
def get_noise(batch_size, n_points):
return np.random.normal(size=(batch_size, n_points, 3))
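# Hedged usage sketch (added; not part of the original file): batching one of
# the datasets above with a PyTorch DataLoader; the root_dir is an assumption.
# from torch.utils.data import DataLoader
# loader = DataLoader(VesselDataset2(root_dir='data/SimplifiedManifolds'),
#                     batch_size=8, shuffle=True)
# for sample, _ in loader:
#     ...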
|
[
"texs.mv@gmail.com"
] |
texs.mv@gmail.com
|
9dd531a45b01bb96b38f0b077553e091a9191659
|
6daab8f7776ef1f7dcfa656e7caefe2c100d214a
|
/part05/if elif else.py
|
dff6bacbbc29328652a27175e3ce5fcfaf821d4d
|
[] |
no_license
|
yoga-nugroho129/python-dasar-kelasterbuka
|
5b46bf20eb6fa5dfe4e58aa703c67c66bca83366
|
33dd66a01f048fea69d8e7775150905f55552eca
|
refs/heads/master
| 2022-07-16T16:42:45.768305
| 2020-05-21T04:00:03
| 2020-05-21T04:00:03
| 265,750,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
### CONDITIONALS
nilai = 80
### if syntax in python:
### if ..condition.. :
###     ..action..
### method 1) using 'equals' (==)
if nilai == 75:
    print("Your score is", nilai)
### method 2) using 'is'
### (note: 'is' compares object identity, not value; prefer '==' for numbers)
if nilai is 75:
    print("Your score is", nilai)
### NEGATION can use '!=' or 'is not'
if nilai != 75:
    print("Your score is not 75")
if 80 <= nilai <= 100:
    print("Your grade is A")
elif 70 <= nilai < 80:
    print("Your grade is B")
elif 60 <= nilai < 70:
    print("Your grade is C")
elif 50 <= nilai < 60:
    print("Your grade is D")
else:
    print("You did not pass, your grade is E")
print(100*"-")
### conditionals with list/array membership
### method 1
menu = ["pecel", "semur", "sate", "gule", "rendang"]
beli = "soto"
if beli in menu:
    print("OK,", beli, "will be delivered shortly")
else:
    print("Sorry,", beli, "is not on the menu")
### method 2
beli2 = "sate"
if beli2 not in menu:
    print("Sorry,", beli2, "is not on the menu")
else:
    print("OK,", beli2, "will be processed shortly")
### the conditionals above can also be used to check characters in strings and other data types
|
[
"yoga.nugroho129@gmail.com"
] |
yoga.nugroho129@gmail.com
|
af907f12b024fb1fc96229918c602bc5b99accf1
|
5f85a871faaffecf55f766cdd58d1ea6b48ad679
|
/natlas-server/app/auth/__init__.py
|
8695e60d1ef501afb4cb16063fe656b7e9f9d362
|
[
"Apache-2.0"
] |
permissive
|
fdcarl/natlas
|
859bbd56cf08d1cfb7e7a9d40c80e690d8c231f9
|
551038f3b7100546721a9185e194107521abbfc5
|
refs/heads/main
| 2022-10-22T04:01:43.747947
| 2020-05-12T03:58:26
| 2020-05-12T03:58:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
from flask import Blueprint
bp = Blueprint('auth', __name__)
from app.auth import routes, wrappers # noqa: F401
|
[
"0xdade@users.noreply.github.com"
] |
0xdade@users.noreply.github.com
|
626d43a51f20589955b459724b2c27d00796debf
|
43b50da74e752bc20bb3f71f04ad4b0d6b9d4d67
|
/6pro.py
|
0f5a2f7dc58a286421631edbb5fc357732f5d4a6
|
[] |
no_license
|
abinayavarshini/python
|
a547b35b0790a863267a218bec7d052bfd397555
|
1f2a5298ff1dea92fa503e8ff9623cae0a18d558
|
refs/heads/master
| 2020-06-12T07:50:46.982132
| 2019-08-12T16:33:58
| 2019-08-12T16:33:58
| 194,237,394
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
gh1=int(input())
gh2=list(map(int,input().split()))
ant=0
for j in range(len(gh2)-2):
for k in range(j+1,len(gh2)-1):
for l in range(k+1,len(gh2)):
if gh2[j]<gh2[k]<gh2[l] and j<k<l:
ant=ant+1
print(ant)
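# Worked check (added illustration): for input '5' then '1 2 3 4 5', every index
# triple j < k < l is also strictly increasing in value, so ant = C(5, 3) = 10.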
|
[
"noreply@github.com"
] |
abinayavarshini.noreply@github.com
|
a3724821f4d69568eb22265b90af7dd176e3a666
|
e72f1268c4f2737ae24f27f250c6b36e795dd94b
|
/lesson5/hashtable_add.py
|
5f63c16200baf2435e9ee891a9c336548f4de74f
|
[] |
no_license
|
maykjony90/intro_to_cs
|
33201aaf93114f5fb60cd22b58020d97b8fdd30c
|
39b52c566c7403163f99d8e18bdb5f2cf7e8228b
|
refs/heads/master
| 2021-01-20T13:10:47.726371
| 2017-10-09T20:56:05
| 2017-10-09T20:56:05
| 90,456,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
# Define a procedure,
#
# hashtable_add(htable,key,value)
#
# that adds the key to the hashtable (in
# the correct bucket), with the correct
# value and returns the new hashtable.
#
# (Note that the video question and answer
# do not return the hashtable, but your code
# should do this to pass the test cases.)
def hashtable_add(htable,keyword,value):
htable[hash_string(keyword, len(htable))].append([keyword, value])
return htable
def hashtable_get_bucket(htable,keyword):
return htable[hash_string(keyword,len(htable))]
def hash_string(keyword,buckets):
out = 0
for s in keyword:
out = (out + ord(s)) % buckets
return out
def make_hashtable(nbuckets):
table = []
for unused in range(0,nbuckets):
table.append([])
return table
#table = make_hashtable(5)
#hashtable_add(table,'Bill', 17)
#hashtable_add(table,'Coach', 4)
#hashtable_add(table,'Ellis', 11)
#hashtable_add(table,'Francis', 13)
#hashtable_add(table,'Louis', 29)
#hashtable_add(table,'Nick', 2)
#hashtable_add(table,'Rochelle', 4)
#hashtable_add(table,'Zoe', 14)
#print table
#>>> [[['Ellis', 11], ['Francis', 13]], [], [['Bill', 17], ['Zoe', 14]],
#>>> [['Coach', 4]], [['Louis', 29], ['Nick', 2], ['Rochelle', 4]]]
|
[
"aykilkilic@gmail.com"
] |
aykilkilic@gmail.com
|
79c1df71b44a8417307bb340d793675fc6e22870
|
5d42cfb9340f2b8bb3eae2de4fe459d16c1aba28
|
/Python_Challenge/PyBank/Main.py
|
66514060a866e3d41285372b3460c4a4442a6191
|
[] |
no_license
|
saniyasule/Python-Challenge
|
c699e951c030867cbd242c0ed536e0161831bf99
|
948c948f784c209e957726fc7b04141a90ad979c
|
refs/heads/master
| 2020-09-23T13:25:21.501095
| 2020-03-10T14:59:08
| 2020-03-10T14:59:08
| 225,511,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
import os
import csv
os.chdir(os.path.dirname(os.path.abspath(__file__)))
csvpath = os.path.join('Resources', 'budget_data.csv')
with open(csvpath) as csvfile:
csvreader = (csv.reader(csvfile, delimiter=","))
csv_header = next(csvreader)
sheet_row = []
sheet_column = []
maxandmin = []
for row in csvreader:
date = row[0]
profit_loss = row[1]
sheet_row.append(date)
sheet_column.append(profit_loss)
row_count = len(sheet_column)
#print (row_count)
with open(csvpath) as csvfile:
csvreader = (csv.reader(csvfile, delimiter=","))
csv_header = next(csvreader)
total = sum(int(r[1]) for r in csv.reader(csvfile))
#print ("total: $" + str(total))
percent = sheet_column[0]
#print (percent)
percent1 = sheet_column[-1]
#print (percent1)
change = (int(percent1) - int(percent))/(int(row_count) -1)
#print (change)
    for i in range(1, len(sheet_column)):
maxandmin.append(int(sheet_column[i]) - int(sheet_column[i-1]))
max_change = max(maxandmin)
min_change = min(maxandmin)
#print (max_change)
#print (min_change)
max_date = str(sheet_row[maxandmin.index(max(maxandmin))+1])
min_date = str(sheet_row[maxandmin.index(min(maxandmin))+1])
#print (max_date)
#print (min_date)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
output_path = os.path.join("Resources", "pybanktxt.txt")
with open(output_path, 'w', newline='') as txt_file:
txt_file.write("Financial Analysis \n")
txt_file.write("---------------------------------\n")
txt_file.write(f"Total Months : {(row_count)} \n")
txt_file.write(f"total: $ {(total)} \n")
txt_file.write(f"Average Change: ${round(change, 2)} \n")
txt_file.write(f"Greatest Increase in Profits: {(max_date)},(${(max_change)})\n")
txt_file.write(f"Greatest Decrease in Profits: {(min_date)},(${(min_change)})\n")
|
[
"noreply@github.com"
] |
saniyasule.noreply@github.com
|
1ae7156d2599d5a3a932e57176a82951011e5f59
|
4acabcefbdb4ddb8289bced532dc18adc4257558
|
/PA3/simple_de_bruijn.py
|
9208f5e47bbb120e0f43d25400a870ec34ea124e
|
[] |
no_license
|
insomnolent/CM122-projects
|
29f59f5770900a32899edbd2d91da388cf4bed4e
|
67e3baaaaeafe6ed9f4d19bcd526db424c9237d9
|
refs/heads/master
| 2021-01-20T09:21:21.970979
| 2017-03-24T07:32:48
| 2017-03-24T07:32:48
| 82,614,599
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,502
|
py
|
from os.path import join
import sys
import os
from collections import defaultdict, Counter
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("../.."))
from BIOINFO_M260B.helpers import read_reads
def read_assembly_reads(read_fn):
return read_reads(read_fn)
def simple_de_bruijn(sequence_reads, k):
"""
Creates A simple DeBruijn Graph with nodes that correspond to k-mers of size k.
:param sequence_reads: A list of reads from the genome
:param k: The length of the k-mers that are used as nodes of the DeBruijn graph
:return: A DeBruijn graph where the keys are k-mers and the values are the set
of k-mers that
"""
de_bruijn_counter = defaultdict(Counter)
    # You may also want to check the in-degree and out-degree of each node
    # to help you find the beginning and end of the sequence.
for read in sequence_reads:
# Cut the read into k-mers
kmers = [read[i: i + k] for i in range(len(read) - k)]
for i in range(len(kmers) - 1):
pvs_kmer = kmers[i]
next_kmer = kmers[i + 1]
de_bruijn_counter[pvs_kmer].update([next_kmer])
# This line removes the nodes from the DeBruijn Graph that we have not seen enough.
de_bruijn_graph = {key: {val for val in de_bruijn_counter[key] if de_bruijn_counter[key][val] > 2}
for key in de_bruijn_counter}
# This line removes the empty nodes from the DeBruijn graph
de_bruijn_graph = {key: de_bruijn_graph[key] for key in de_bruijn_graph if de_bruijn_graph[key]}
return de_bruijn_graph
def de_bruijn_reassemble(de_bruijn_graph):
"""
    Traverses the De Bruijn graph created by simple_de_bruijn and
    returns the contigs that come from it.
    :param de_bruijn_graph: A De Bruijn graph
    :return: a list of the assembled contig strings
"""
assembled_strings = []
while True:
n_values = sum([len(de_bruijn_graph[k]) for k in de_bruijn_graph])
if n_values == 0:
break
good_starts = [k for k in de_bruijn_graph if de_bruijn_graph[k]]
# You may want to find a better start
# position by looking at in and out-degrees,
# but this will work okay.
current_point = good_starts[0]
assembled_string = current_point
while True:
try:
next_values = de_bruijn_graph[current_point]
next_edge = next_values.pop()
assembled_string += next_edge[-1]
de_bruijn_graph[current_point] = next_values
current_point = next_edge
except KeyError:
assembled_strings.append(assembled_string)
break
return assembled_strings
if __name__ == "__main__":
chr_name = 'hw3all_A_3_chr_1'
input_folder = './{}'.format(chr_name)
reads_fn = join(input_folder, 'reads_{}.txt'.format(chr_name))
reads = read_assembly_reads(reads_fn)
db_graph = simple_de_bruijn(reads, 25)
    for k in list(db_graph.keys())[:40]:
        print(k, db_graph[k])
output = de_bruijn_reassemble(db_graph)
output_fn_end = 'assembled_{}.txt'.format(chr_name)
output_fn = join(input_folder, output_fn_end)
with open(output_fn, 'w') as output_file:
output_file.write('>' + chr_name + '\n')
output_file.write('>ASSEMBLY\n')
output_file.write('\n'.join(output))
|
[
"jbcccsun@gmail.com"
] |
jbcccsun@gmail.com
|
c6f5ac6fea9aff3e441cf19f8b673b69c380f927
|
9cabe395035e3e344dcf0d83baa20bcdefec969e
|
/LibreriasFallidas/pippi-master/pippi/wavetables.py
|
13c3e839f624d7a92b869a91c0fd4cf27453e374
|
[] |
no_license
|
dagomankle/LukeBox
|
11703ab6ea0079e98649f41e23b5f780e33d3611
|
2b86167f2c3f6717194751f2e6ee605fb48858a2
|
refs/heads/master
| 2020-03-12T17:50:11.556813
| 2018-04-23T19:29:52
| 2018-04-23T19:29:52
| 130,737,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,321
|
py
|
import collections
import random
import numpy as np
from . import interpolation
SINEWAVE_NAMES = set(('sin', 'sine', 'sinewave'))
COSINE_NAMES = set(('cos', 'cosine'))
TRIANGLE_NAMES = set(('tri', 'triangle'))
SAWTOOTH_NAMES = set(('saw', 'sawtooth', 'ramp', 'line', 'lin'))
RSAWTOOTH_NAMES = set(('isaw', 'rsaw', 'isawtooth', 'rsawtooth', 'reversesaw', 'phasor'))
HANNING_NAMES = set(('hanning', 'hann', 'han'))
HAMMING_NAMES = set(('hamming', 'hamm', 'ham'))
BLACKMAN_NAMES = set(('blackman', 'black', 'bla'))
BARTLETT_NAMES = set(('bartlett', 'bar'))
KAISER_NAMES = set(('kaiser', 'kai'))
SQUARE_NAMES = set(('square', 'sq'))
ALL_WINDOWS = SINEWAVE_NAMES | TRIANGLE_NAMES | \
SAWTOOTH_NAMES | RSAWTOOTH_NAMES | \
HANNING_NAMES | HAMMING_NAMES | \
BLACKMAN_NAMES | BARTLETT_NAMES | \
KAISER_NAMES
ALL_WAVETABLES = SINEWAVE_NAMES | COSINE_NAMES | \
TRIANGLE_NAMES | SAWTOOTH_NAMES | \
RSAWTOOTH_NAMES | SQUARE_NAMES
def window(window_type=None, length=None, data=None):
if data is not None:
return interpolation.linear(data, length)
wt = None
if window_type == 'random':
window_type = random.choice(list(ALL_WINDOWS))
if window_type in SINEWAVE_NAMES:
wt = np.linspace(0, np.pi, length, dtype='d')
wt = np.sin(wt)
if window_type in TRIANGLE_NAMES:
wt = np.linspace(0, 2, length, dtype='d')
wt = np.abs(np.abs(wt - 1) - 1)
if window_type in SAWTOOTH_NAMES:
wt = np.linspace(0, 1, length, dtype='d')
if window_type in RSAWTOOTH_NAMES:
wt = np.linspace(1, 0, length, dtype='d')
if window_type in HANNING_NAMES:
wt = np.hanning(length)
if window_type in HAMMING_NAMES:
wt = np.hamming(length)
if window_type in BARTLETT_NAMES:
wt = np.bartlett(length)
if window_type in BLACKMAN_NAMES:
wt = np.blackman(length)
if window_type in KAISER_NAMES:
wt = np.kaiser(length, 0)
if wt is None:
return window('sine', length)
return wt
def wavetable(wavetable_type=None, length=None, duty=0.5, data=None):
if data is not None:
return interpolation.linear(data, length)
wt = None
if wavetable_type is None:
wavetable_type = 'sine'
elif wavetable_type == 'random':
wavetable_type = random.choice(list(ALL_WAVETABLES))
if wavetable_type in SINEWAVE_NAMES:
wt = np.linspace(-np.pi, np.pi, length, dtype='d', endpoint=False)
wt = np.sin(wt)
if wavetable_type in COSINE_NAMES:
wt = np.linspace(-np.pi, np.pi, length, dtype='d', endpoint=False)
wt = np.cos(wt)
if wavetable_type in TRIANGLE_NAMES:
wt = np.linspace(-1, 1, length, dtype='d', endpoint=False)
wt = np.abs(wt)
wt = (wt - wt.mean()) * 2
if wavetable_type in SAWTOOTH_NAMES:
wt = np.linspace(-1, 1, length, dtype='d', endpoint=False)
if wavetable_type in RSAWTOOTH_NAMES:
wt = np.linspace(1, -1, length, dtype='d', endpoint=False)
if wavetable_type in SQUARE_NAMES:
wt = np.zeros(length)
duty = int(length * duty)
wt[:duty] = 1
wt[duty:] = -1
if wt is None:
return wavetable('sine', length)
return wt
|
[
"dagomankle@hotmail.com"
] |
dagomankle@hotmail.com
|
61e355d3284db96cc11f987258d161aa7254a7af
|
5b2c920946732fb2c03f37a83220491230e39aff
|
/testlyt.py
|
19b2fc038a1b8a2d92b7195cdb0cbaccf461aabc
|
[] |
no_license
|
gloriaiaiaiaia/test_123
|
2a4bb46f6b740696424d5a1b0bd23f86d3013267
|
2115b507bbe0403dc52c96115bc01fc3356b07e3
|
refs/heads/master
| 2021-07-05T14:53:40.310527
| 2017-09-29T17:31:11
| 2017-09-29T17:31:11
| 105,297,462
| 0
| 0
| null | 2017-09-29T17:04:58
| 2017-09-29T17:04:58
| null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
ddddddddddddd
jjkslaksl;ajodhqocdqwe
jjkk
kklklklll
|
[
"noreply@github.com"
] |
gloriaiaiaiaia.noreply@github.com
|
09af47de6a19917c807d1f56ea540c3629f06085
|
02a680759a50f523a42d274d844b9a3b13d8b98e
|
/local_settings-example.py
|
37e2ad76384a6786467c1b9c69320b9298474dbe
|
[] |
no_license
|
JuliFed/board_v2
|
86261772ac662e3347d757776eda3d728b8f653d
|
f15157633e0311ce21072941418bbcfb88f59ec9
|
refs/heads/master
| 2020-03-19T08:52:37.123119
| 2018-06-12T13:45:32
| 2018-06-12T13:45:32
| 136,241,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_TRACK_MODIFICATIONS = False
# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir,'app.db')
# SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
|
[
"fedorchenkojuli@gmail.com"
] |
fedorchenkojuli@gmail.com
|
54cc2229676d7ba820006f56c595adce01319a33
|
07e1f102993454350d0daaa8d61a11d4c018ca2f
|
/test4.py
|
f2341f6962b2cfc362321b945335e1bcccb1600a
|
[] |
no_license
|
ravinkece/Selenium-WebDriver-Python-
|
cc117ae804e1306f4986fdb3138699394e752461
|
2d19cef2abf082578552544fc9934d85dc293edc
|
refs/heads/master
| 2020-04-01T19:57:51.323785
| 2020-01-14T04:23:55
| 2020-01-14T04:23:55
| 153,580,711
| 0
| 1
| null | 2019-08-19T11:20:06
| 2018-10-18T07:21:53
|
Python
|
UTF-8
|
Python
| false
| false
| 282
|
py
|
import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get("http://google.com")
print ("Ada")
|
[
"noreply@github.com"
] |
ravinkece.noreply@github.com
|
3c71cf23f566faaac769fd3eb5aa1b028593dd60
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/googlecloudsdk/command_lib/compute/scope.py
|
3d89de4a7553e92bbf964aee1d1f46b7782b7ab6
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitiones compute scopes (locations)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import enum
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import properties
class ScopeEnum(enum.Enum):
"""Enum representing GCE scope."""
ZONE = ('zone', 'a ', properties.VALUES.compute.zone.Get)
REGION = ('region', 'a ', properties.VALUES.compute.region.Get)
GLOBAL = ('global', '', lambda: None)
def __init__(self, flag_name, prefix, property_func):
# Collection parameter name matches command line file in this case.
self.param_name = flag_name
self.flag_name = flag_name
self.prefix = prefix
self.property_func = property_func
@classmethod
def CollectionForScope(cls, scope):
if scope == cls.ZONE:
return 'compute.zones'
if scope == cls.REGION:
return 'compute.regions'
raise exceptions.Error(
'Expected scope to be ZONE or REGION, got {0!r}'.format(scope))
def IsSpecifiedForFlag(args, flag_name):
"""Returns True if the scope is specified for the flag.
Args:
args: The command-line flags.
flag_name: The name of the flag.
"""
return (getattr(args, '{}_region'.format(flag_name), None) is not None or
getattr(args, 'global_{}'.format(flag_name), None) is not None)
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
f687eba6bdacdff5305d1244a95c34500656513f
|
f43ef4a4291d65087407c1f1fd4bb0a532bf152d
|
/MoviesHub/views.py
|
b243e1ffc5e41b20cb76285222b56c3493add83a
|
[] |
no_license
|
dheerajiiitv/Spoon
|
807f1d03e0fbadeaa4d2bd10794ddaeb72345752
|
4f1e99a3d860c0ba7b9ac63eea1fa96912d0ab00
|
refs/heads/master
| 2022-12-21T12:03:11.263825
| 2018-12-07T00:32:07
| 2018-12-07T00:32:07
| 160,619,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,197
|
py
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib import messages
import requests
from threading import Thread
from Spoon.settings import TMDB_KEY
import queue
import time
# Create your views here.
def index(request):
# Function to show home page.
if request.method == 'POST':
# If user submitted a query
query = request.POST.get('query')
page = request.POST.get('page', 1)
if query:
link = "https://api.themoviedb.org/3/search/movie?api_key=" + TMDB_KEY + "&include_adult=true&query=" \
+ query + "&page=" + str(page)
response = requests.get(link)
if response.status_code == 200:
json_data = response.json()
results = json_data['results']
page_no = json_data['page']
total_pages = json_data['total_pages']
return render(request, 'MoviesHub/movies_list.html',
{'movies': results, 'query': query, 'page_no': page_no, 'total_pages': total_pages})
            else:
                messages.error(request, "TMDB API not working")
                return redirect('MoviesHub:index')
        else:
            messages.error(request, "No query provided")
            return redirect('MoviesHub:index')
# Call API
else:
return render(request, 'MoviesHub/index.html')
def get_movie_details(request, movie_id):
if movie_id:
primary_info_link = 'https://api.themoviedb.org/3/movie/'+str(movie_id)+'?api_key='+TMDB_KEY
alternative_link = 'https://api.themoviedb.org/3/movie/'+str(movie_id)+'/alternative_titles?api_key='+TMDB_KEY
cast_link = 'https://api.themoviedb.org/3/movie/'+str(movie_id)+'/credits?api_key='+TMDB_KEY
images_link = 'https://api.themoviedb.org/3/movie/'+str(movie_id)+'/images?api_key='+TMDB_KEY
plot_keywords_link = 'https://api.themoviedb.org/3/movie/'+str(movie_id)+'/keywords?api_key='+TMDB_KEY
release_info_link = 'https://api.themoviedb.org/3/movie/'+str(movie_id)+'/release_dates?api_key='+TMDB_KEY
videos_link = 'https://api.themoviedb.org/3/movie/'+str(movie_id)+'/videos?api_key='+TMDB_KEY
reviews_link = 'https://api.themoviedb.org/3/movie/'+str(movie_id)+'/reviews?api_key='+TMDB_KEY
# Taking too much time (calling multiple API) Using threads
# Queue is used to store all data
data = queue.Queue()
primary_info_data = GetData(primary_info_link,data,'primary_info_data')
alternative_data = GetData(alternative_link,data,'alternative_data')
cast_data = GetData(cast_link,data,'cast_data')
images_data = GetData(images_link,data,'images_data')
plot_keywords_data = GetData(plot_keywords_link,data,'plot_keywords_data')
release_info_data = GetData(release_info_link,data,'release_info_data')
videos_data = GetData(videos_link,data,'videos_data')
reviews_data = GetData(reviews_link,data,'reviews_data')
primary_info_data.start()
alternative_data.start()
cast_data.start()
images_data.start()
plot_keywords_data.start()
release_info_data.start()
videos_data.start()
reviews_data.start()
real_data = {}
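        # queue.Queue.get() blocks, so this loop waits until all 8 worker
        # threads have put their responses on the queue.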
for d in range(8):
temp = data.get()
real_data[temp[0]] = temp[1]
# Data Format
return render(request, 'MoviesHub/movie_details.html', {'real_data':real_data})
    else:
        return redirect('MoviesHub:index')
class GetData(Thread):
def __init__(self, link,q, field):
super(GetData, self).__init__()
self.link = link
self.q = q
self.field = field
def run(self):
response = requests.get(self.link)
if response.status_code == 200:
temp = []
temp.append(self.field)
temp.append(response.json())
self.q.put(temp)
else:
print('Error!', self.link)
return {}
|
[
"dheeraj.agarwal@monoxor.com"
] |
dheeraj.agarwal@monoxor.com
|
bf6b1db427afcec7a6112db1436fba53c06a09b2
|
ab3a53ef9f39e7124afa49a3de7c28359f036aff
|
/exemples/langue-bois.py
|
f02d7bb5c81b16701a90426544fe4df223c17379
|
[] |
no_license
|
marcyves/Python-Pratique
|
19ae70525b9ef399ee76f1d6ae7297f411352fc2
|
d1104252eab688778e31b1cc7f5e4cdb178847e0
|
refs/heads/master
| 2021-08-16T21:36:54.580105
| 2018-10-13T10:34:54
| 2018-10-13T10:34:54
| 134,450,855
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,290
|
py
|
from random import randint
fragment = []
fragment.append(
[
"Mesdames, messieurs, ",
"Je reste profondement persuadé que ",
"Dès lors, sachez que je me battrai pour faire admettre que ",
"Par ailleurs, c'est en toute connaissance de cause que je peux affirmer aujourd'hui que ",
"Je tiens à vous dire ici ma détermination sans faille pour clamer haut et fort que ",
"J'ai depuis longtemps (ai-je besoin de le rappeler ?), défendue l'idée que ",
"Et c'est en toute conscience que je déclare avec conviction que ",
"Et ce n'est certainement pas vous, mes chers compatriotes, qui me contredirez si je vous dis que ",
]
)
fragment.append(
[
"la conjoncture actuelle ",
"la situation d'exclusion que certains d'entre vous connaissez ",
"l'acuité des problèmes de la vie quotidienne ",
"la volonté farouche de sortir notre pays de la crise ",
"l'effort prioritaire en faveur du statut précaire des exclus ",
"le particularisme dû à notre histoire unique ",
"l'aspiration plus que légitime de chacun au progrès social ",
"la nécessité de répondre à votre inquiétude journalière, que vous soyez jeune ou âgés ",
]
)
fragment.append(
[
"doit s'intégrer à la finalisation globale ",
"oblige à la prise en compte encore plus effective ",
"interpelle le citoyen que je suis et nous oblige tous à aller de l'avant dans la voie ",
"a pour conséquence obligatoire l'urgente nécessité ",
"conforte mon désir incontestable d'aller dans le sens ",
"doit nous amener au choix réellement impératif ",
"doit prendre en compte les préoccupations de la population de base dans l'élaboration ",
"entraine une mission somme toute des plus exaltantes pour moi : l'élaboration ",
]
)
fragment.append(
[
"d'un processus allant vers plus d'égalité.",
"d'un avenir s'orientant vers plus de progrès et plus de justice.",
"d'une restructuration dans laquelle chacun pourra enfin retrouver sa dignité.",
"d'une valorisation sans concession de nos caractères spécifiques.",
"d'un plan coorespondant véritablement aux exigences légitimes de chacun.",
"de solutions rapides correspondant aux grands axes sociaux prioritaires.",
"d'un programme plus humain, plus fraternel et plus juste.",
"d'un projet porteur de véritables espoirs, notamment pour les plus démunis.",
]
)
# Welcome message
print("\t- - - - - - - - - - - - - - - - - - - - - - - - -")
print("\t Discours type ENA garanti avec langue de bois!!")
print("\t- - - - - - - - - - - - - - - - - - - - - - - - -")
# Loop on speeches
loop = True
while loop:
print("\n\t- - - - - - - - - - - - - - - - - - - - - - - - -")
print("\t Voici mon discours Pythonesque !")
print("\t- - - - - - - - - - - - - - - - - - - - - - - - -")
for col in range(4):
i = randint(0, 7)
print(fragment[col][i], end=" ")
reponse = input("\n\nUn autre discours ? (o/n): ")
if (reponse == "n") or (reponse == "N"):
loop = False
print("Merci d'utiliser notre générateur de discours Python")
|
[
"m.augier@me.com"
] |
m.augier@me.com
|
f34f7b206662ef49703b1ecbd2f758bb64d2328f
|
8a090cf9d8d27f62a0024774792783f751599db8
|
/array-string/max-product-subarray.py
|
5ee822653017dd7268992a8a1f58634936552089
|
[] |
no_license
|
kcaebe/interview-practice
|
1809964351fc7f81cc281cade18c2a1d85568b2b
|
a00847f49de646205ccb29f0619a8b6dead47cd0
|
refs/heads/master
| 2020-06-26T18:56:46.112861
| 2019-10-29T06:11:58
| 2019-10-29T06:11:58
| 199,721,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
def maxProd(nums):
    # Track both the largest and smallest product ending at the current index:
    # a negative number can turn the smallest product into the largest.
    max_here = min_here = nums[0]
    tot_max = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        prev_max = max_here  # snapshot, so min_here is not computed from the updated max_here
        max_here = max(num, prev_max * num, min_here * num)
        min_here = min(num, prev_max * num, min_here * num)
        tot_max = max(tot_max, max_here)
    return tot_max
print(maxProd([2, 3, -2, 4]))
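# Expected output: 6 (the best subarray is [2, 3])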
|
[
"kcaebe@gmail.com"
] |
kcaebe@gmail.com
|
8b2e3aea484729669fff0e7f931cf7252b691257
|
e6c442ed80d147d53759985b8b34abe1af47ec8a
|
/blog/migrations/0001_initial.py
|
892a40b9013c1c2aa117c941a247abf7cc82db4a
|
[] |
no_license
|
ConjureETS/site-2015
|
178db4b2693f2e4a6ae268eed40e3886dadc8f83
|
119fbd2b8215908b2ace9d4e03c2d594f0c8f124
|
refs/heads/master
| 2021-06-11T14:41:16.432678
| 2015-11-12T04:01:14
| 2015-11-12T04:01:14
| 38,846,537
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50)),
('text', models.TextField()),
('author', models.CharField(max_length=20)),
('photo', models.ImageField(upload_to=b'article_photos')),
],
options={
},
bases=(models.Model,),
),
]
|
[
"emile.filteau@gmail.com"
] |
emile.filteau@gmail.com
|
88693c9476a42420cea4bce4980ecc2686d2b249
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/containerregistry/latest/replication.py
|
88bad9240e4a22c7d76aa251b4b8af98bb69f71d
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 6,403
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['Replication']
class Replication(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
location: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
replication_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
An object that represents a replication for a container registry.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: The location of the resource. This cannot be changed after the resource is created.
:param pulumi.Input[str] registry_name: The name of the container registry.
:param pulumi.Input[str] replication_name: The name of the replication.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
if registry_name is None:
raise TypeError("Missing required property 'registry_name'")
__props__['registry_name'] = registry_name
if replication_name is None:
raise TypeError("Missing required property 'replication_name'")
__props__['replication_name'] = replication_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['status'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerregistry/v20170601preview:Replication"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20171001:Replication"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20190501:Replication"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20191201preview:Replication")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Replication, __self__).__init__(
'azure-nextgen:containerregistry/latest:Replication',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Replication':
"""
Get an existing Replication resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Replication(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The location of the resource. This cannot be changed after the resource is created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the replication at the time the operation was called.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.StatusResponse']:
"""
The status of the replication at the time the operation was called.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
5787b2a7f26f241cdc436266558d80aabdec547a
|
e66635486a8abc7710432b52aa15c2d6e4488c94
|
/vmware_nsx/services/qos/common/utils.py
|
9384070307b7fe32628078496486bb60e1070665
|
[
"Apache-2.0"
] |
permissive
|
yfauser/vmware-nsx
|
ba2bff4c3cc982b7af03ac7d9891a067018a7233
|
1fb08a7555efd820c2d5625665ab77d7e69d3b0c
|
refs/heads/master
| 2021-01-18T17:41:40.411620
| 2016-06-02T21:13:43
| 2016-06-02T21:13:43
| 60,336,943
| 2
| 0
| null | 2016-06-03T09:42:43
| 2016-06-03T09:42:43
| null |
UTF-8
|
Python
| false
| false
| 1,836
|
py
|
# Copyright 2016 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.objects.qos import policy as qos_policy
def update_network_policy_binding(context, net_id, new_policy_id):
# detach the old policy (if exists) from the network
old_policy = qos_policy.QosPolicy.get_network_policy(
context, net_id)
if old_policy:
if old_policy.id == new_policy_id:
return
old_policy.detach_network(net_id)
# attach the new policy (if exists) to the network
if new_policy_id is not None:
new_policy = qos_policy.QosPolicy.get_object(
context, id=new_policy_id)
if new_policy:
new_policy.attach_network(net_id)
def update_port_policy_binding(context, port_id, new_policy_id):
# detach the old policy (if exists) from the port
old_policy = qos_policy.QosPolicy.get_port_policy(
context, port_id)
if old_policy:
if old_policy.id == new_policy_id:
return
old_policy.detach_port(port_id)
# attach the new policy (if exists) to the port
if new_policy_id is not None:
new_policy = qos_policy.QosPolicy.get_object(
context, id=new_policy_id)
if new_policy:
new_policy.attach_port(port_id)
|
[
"asarfaty@vmware.com"
] |
asarfaty@vmware.com
|
96e6cf4dee890b0cf54958c1c5bf892d01bf4b23
|
8ba34c5c61105ef7e444d96ecfeb1fe049e16aee
|
/scripts/sofi.py
|
dc273143c2f7bb95e6dc6713bbaacc3b982f9e24
|
[] |
no_license
|
lifewinning/blackbox.finance
|
2bbc156155438c3e2190362721fac96b2c7e774e
|
fb484c76cfbf586b65d6347ef81190683c13d72c
|
refs/heads/master
| 2021-01-21T13:48:34.935530
| 2016-05-25T04:03:10
| 2016-05-25T04:03:10
| 53,981,462
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,717
|
py
|
#!/usr/bin/python
# MUST BE RUN AS ROOT (due to GPIO access)
#
# Required software includes Adafruit_Thermal, Python Imaging and PySerial
# libraries. Other libraries used are part of stock Python install.
#
# Resources:
# http://www.adafruit.com/products/597 Mini Thermal Receipt Printer
# http://www.adafruit.com/products/600 Printer starter pack
from __future__ import print_function
import RPi.GPIO as GPIO
import random, time, Image, socket
from Adafruit_Thermal import *
fanPin = 23
ledPin = 18
buttonPin = 25
printer = Adafruit_Thermal("/dev/ttyAMA0", 19200, timeout=5)
def sofi():
GPIO.output(fanPin, True)
x = random.randint(300,850)
value = ["totally great","not great at all","great enough","not especially great"]
v = random.choice(value)
printer.doubleHeightOn()
printer.setSize('L')
printer.justify('C')
printer.boldOn()
printer.feed(5)
printer.print(x)
printer.feed(3)
printer.print(v)
printer.feed(5)
def hold():
GPIO.output(ledPin, GPIO.HIGH)
sofi()
# Initialization
# Use Broadcom pin numbers (not Raspberry Pi pin numbers) for GPIO
GPIO.setmode(GPIO.BCM)
# Enable LED and button (w/pull-up on latter)
GPIO.setup(ledPin, GPIO.OUT)
GPIO.setup(fanPin, GPIO.OUT)
GPIO.output(fanPin, False)
GPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# LED on while working
GPIO.output(ledPin, GPIO.HIGH)
# Processor load is heavy at startup; wait a moment to avoid
# stalling during greeting.
time.sleep(5)
printer.feed(5)
# Catch Ctrl-C so the GPIO pins are released cleanly on exit.
try:
    while True:
        if GPIO.input(buttonPin) == False:
            print("Printing")
            sofi()
            GPIO.output(fanPin, False)
            print("Printed")
        time.sleep(.1)
except KeyboardInterrupt:
    GPIO.cleanup()
|
[
"lifewinning@gmail.com"
] |
lifewinning@gmail.com
|
4c7408245d0f7d1fcc88dbabb956dc8e46589020
|
5dccc472d1699f0ce0527c7c4966f937875452ae
|
/instagramy/__init__.py
|
af5d33c518798d232ac95c34d1de75261908424e
|
[
"MIT"
] |
permissive
|
gabcarvalhogama/instagramy
|
7dd2f8f79f7fb97cdbe164b99a1c158196a6485d
|
56aa3b3468942b3d2469075752784333b7ab6939
|
refs/heads/master
| 2023-04-01T22:28:48.208118
| 2021-04-08T03:25:29
| 2021-04-08T03:25:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
# -*- coding: utf-8 -*-
"""
Instagramy
~~~~~~~~~~
A python package for Instagram. It scrapes Instagram
content.
:license: MIT License
"""
__package__ = "instagramy"
__description__ = "A python package for Instagram. It scrapes Instagram content."
__url__ = "https://github.com/yogeshwaran01/instagramy"
__version__ = "4.3"
__author__ = "YOGESHWARAN R <yogeshin247@gmail.com>"
__license__ = "MIT License"
__copyright__ = "Copyright 2021 Yogeshwaran R"
__all__ = ["InstagramUser", "InstagramHashTag", "InstagramPost"]
from .InstagramUser import InstagramUser
from .InstagramPost import InstagramPost
from .InstagramHashTag import InstagramHashTag
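# Example usage (illustrative; "github" is a hypothetical username and the
# constructor signature is an assumption, check the project README):
# user = InstagramUser("github")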
|
[
"yogeshin247@gmail.com"
] |
yogeshin247@gmail.com
|
cace5e557a5ff1d7bc9aa35d633a24612384608f
|
3daceea1f6a57498a2dac48e8479cb21b34ee307
|
/inScribble/getpatientprescriptions/__init__.py
|
32c2ac99b9f6c6801c63fe2ca676301328602e5e
|
[
"MIT"
] |
permissive
|
shreybatra/inScribble
|
3e5452e3c23f5203f714206fa986c7ef0ceb2e50
|
c22446fb4a74fc2e6914ec3e3be60669fb5e12e1
|
refs/heads/master
| 2022-12-27T04:12:43.470749
| 2019-11-17T02:01:01
| 2019-11-17T02:01:01
| 222,119,968
| 0
| 1
|
MIT
| 2022-12-10T07:11:43
| 2019-11-16T15:35:57
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
import logging
import azure.functions as func
from ..common.response import make_response
from pymongo import MongoClient
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info("Python HTTP trigger function processed a request.")
email = req.params.get("email")
if not email:
return make_response({"msg": "Email not sent."}, 400)
client = MongoClient(
"mongodb://admin:admin@cluster0-shard-00-00-cty3m.azure.mongodb.net:27017,cluster0-shard-00-01-cty3m.azure.mongodb.net:27017,cluster0-shard-00-02-cty3m.azure.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true&w=majority"
)
db = client["inscribble"]
prescriptions = db["prescriptions"]
result = list(
prescriptions.aggregate(
[
{"$match": {"patientEmail": email}},
{
"$lookup": {
"from": "users",
"localField": "doctorEmail",
"foreignField": "email",
"as": "member",
}
},
{
"$project": {
"_id": 1,
"name": "$member.name",
"createdOn": 1,
}
},
{"$unwind": "$name"},
]
)
)
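    # Flatten each document for the JSON response: stringify the ObjectId and
    # convert the datetime to epoch milliseconds.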
for doc in result:
doc["id"] = str(doc.pop("_id"))
doc["createdOn"] = int(doc["createdOn"].timestamp() * 1000)
return make_response({"data": result}, 200)
|
[
"shrey.batra@innovaccer.com"
] |
shrey.batra@innovaccer.com
|
e713fe8dc1c287ce74088bc73a770941fc8ed2b3
|
d5eb97923a877fca8dc195d3c5a5b12bac784f04
|
/PythonImageAnalysis/GeometricalAnalysisForSUN/FullGeometricalFeaturesAnalysisForSUN.py
|
1fc99c16af51d2cac9db29d16a0267026c8d9b66
|
[] |
no_license
|
manf1984/RemotePythonImageAnalysis
|
60acc65ecf66b96d57a655b88d2949e8911421aa
|
737af0dc68f0ede999fab14ad8b87fdae05f387f
|
refs/heads/master
| 2022-11-20T13:04:43.269068
| 2020-07-05T18:18:48
| 2020-07-05T18:18:48
| 256,361,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,226
|
py
|
'''
Created on 5 Jul 2020
@author: Andrea Manfrin
'''
#Import of necessary Modules and Packages:
import os
import re
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import skimage
from skimage.measure import label, regionprops
import skimage.filters as skf
import skimage.morphology as skm
from skimage.measure._regionprops import RegionProperties
from cellpose.models import Cellpose
import math
from scipy import stats
from tifffile import TiffFile
from tifffile import TiffWriter
from scipy.ndimage.morphology import binary_fill_holes as fillHoles
import timeit
import pandas as pd
#This is the folder that contains all the folders with the files of the extracted microwells and segmented aggregates:
path = "/path/to/your/folder"
#Calculate the conversion factor pixel/micrometers:
pixelLength = 9723.41/13442 #Length in micrometers of a pixel (along one dimension). Total width in micrometers/total width in pixels
pixelArea = pixelLength**2 #Area, in square micrometers, of a pixel.
#This function takes a "mask" file path and returns the geometrical properties (only if the mask is composed of
#exactly 1 particle, otherwise it returns a None type object). It returns a tuple containing the file name (e.g "01")
#as first element and a "skimage.measure._regionprops.RegionProperties" object as second element of the tuple!
def extractGeometryFromFile(file : str = ""):
myFile = file
mask = None
with TiffFile(myFile) as tif:
mask = tif.asarray()
fileName = None
listOfParticleProps = []
sepPatt = re.compile(os.sep)
tifPatt = re.compile(r".tif+$")
mo1 = re.split(sepPatt, myFile)
mo2 = re.split(tifPatt, mo1[-1])
fileName = mo2[0]
maskParticles = label(mask)
props = regionprops(maskParticles)
if len(props) == 1:
return (fileName, props[0])
else:
return None
#This function takes the path to a "/Masks" folder and applies the function "extractGeometryFromFile" to all the
#"mask" files contained in it. It then returns a list of (file name, RegionProperties) tuples,
#one per file that contains exactly one particle.
def processAWellFolder(folder : str = "") -> list:
myFolder = Path(folder)
filePatt = re.compile(r"^.*\.tif+$")
fileList = [file.as_posix() for file in myFolder.iterdir() if file.is_file() and re.search(filePatt, file.as_posix())]
fileList.sort()
listOfParticles = []
for file in fileList:
        result = extractGeometryFromFile(file)
        if result is not None:
            listOfParticles.append(result)
return listOfParticles
#This function takes the path of the folder containing all the folders of each well. It makes use of the function
#"processAWellFolder" to process all the files contained in each folder (remember that you have to add "/Masks"
#to the path for "processAWellFolder"). It returns a dictionary where the "key" is a string corresponding to the
#"folder/well name" and it contains as unique value (for each key) a list of all the (file name, RegionProperties)
#tuples associated with that folder/well:
def processAllFolders(path : str = "") -> dict:
mainPath = Path(path)
folderList = [folder.as_posix() for folder in mainPath.iterdir() if folder.is_dir()]
folderList.sort()
dictionaryAllWells = dict()
for folder in folderList:
maskPath = folder + "/Masks"
separator = re.compile(os.sep)
mo = re.split(separator, folder)
wellName = mo[-1]
regionsList = processAWellFolder(maskPath)
dictionaryAllWells[wellName] = regionsList
return dictionaryAllWells
#It will be cool now to have a function that takes the dictionary and builds out of it a Pandas.DataFrame with the
#values of all the geometrical parameters I want to analyze and a label corresponding to the name of the well/folder
#in which the original file was stored. The function returns this DataFrame.
def createDataFrame(dictionary : dict = dict()):
myDict = dictionary
tempDict = {"Sample" : [], "Date" : [], "File_name" : [], "Area" : [], "Eccentricity" : [], "Major_Axis" : [],
"Minor_Axis" : [], "Perimeter" : [], "Solidity" : [], "Plate_format" : [], "Treatment" : [],
"Microwell_type" : [], "Microwell_diameter" : [], "Staining" : [], "Diameter_Of_Circle" : [],
"Repetition" : []}
for label in myDict:
sepPatt = re.compile(r"_")
mo = re.split(sepPatt, label)
Date = mo[0]
Plate_format = mo[1]
Microwell_type = mo[2]
Microwell_diameter = mo[3]
Staining = mo[4]
Treatment = mo[5]
Repetition = mo[6]
for elems in myDict[label]:
tempDict["Sample"].append(label)
tempDict["Date"].append(Date)
tempDict["Plate_format"].append(Plate_format)
tempDict["Microwell_type"].append(Microwell_type)
tempDict["Microwell_diameter"].append(Microwell_diameter)
tempDict["Staining"].append(Staining)
tempDict["Treatment"].append(Treatment)
tempDict["Repetition"].append(Repetition)
#Extract from the tuple created by "extractGeometryFromFile" function the file name (= first element
#of the tuple):
tempDict["File_name"].append(elems[0])
#Extract all the geometrical parameters that are part of the "RegionProperties" object, which is the
#second element of the tuple:
tempDict["Area"].append(elems[1].area)
tempDict["Eccentricity"].append(elems[1].eccentricity)
tempDict["Major_Axis"].append(elems[1].major_axis_length)
tempDict["Minor_Axis"].append(elems[1].minor_axis_length)
tempDict["Perimeter"].append(elems[1].perimeter)
tempDict["Solidity"].append(elems[1].solidity)
tempDict["Diameter_Of_Circle"].append(elems[1].equivalent_diameter)
#Create the Pandas.DataFrame from "tempDict":
myData = pd.DataFrame(tempDict)
#Process the values in "myData" DataFrame:
#Calculate the "Circularity" parameter ((4*pi*Area)/(Perimeter^2)):
myData["Circularity"] = (4*math.pi*myData["Area"]) / (myData["Perimeter"]**2)
#Calculate the "Proper_Roundness", the one defined by this formula ( (4*Area)/(pi*Major_Axis^2) ):
myData["Roundness"] = (4*myData["Area"]) / (math.pi*(myData["Major_Axis"]**2))
#Convert all the values that are in pixel-dimensions to micrometer-dimensions:
myData["Area"] = myData["Area"]*pixelArea
myData["Major_Axis"] = myData["Major_Axis"]*pixelLength
myData["Minor_Axis"] = myData["Minor_Axis"]*pixelLength
myData["Perimeter"] = myData["Perimeter"]*pixelLength
myData["Diameter_Of_Circle"] = myData["Diameter_Of_Circle"]*pixelLength
myData = myData[["Sample", "File_name", "Date", "Plate_format", "Microwell_type", "Microwell_diameter", "Staining", "Treatment",
"Repetition", "Area", "Perimeter", "Diameter_Of_Circle", "Roundness", "Major_Axis", "Minor_Axis", "Circularity", "Eccentricity",
"Solidity"]]
return myData
#EXECUTE THE CODE:
#Create the DataFrame:
myDictionary = processAllFolders(path)
myData = createDataFrame(myDictionary)
print(myData)
#This function takes a proper Pandas.DataFrame and represents the data in it in the form of Seaborn violin plots:
def violinPlotGeometry(df, xName : str = "Sample", yName : str = "", hueStr : str = None, fileName : str = "ViolinPlot"):
fig, ax = plt.subplots(1)
fig.suptitle(yName)
sns.violinplot(x = xName, y = yName, data = df, ax = ax, hue = hueStr)
ax.set_xticklabels(ax.get_xticklabels(), rotation = 45, horizontalalignment = "right")
fig.set_size_inches(12, 6)
plt.show();
fig.savefig(path + os.sep + fileName + ".pdf", dpi = 300, format = "pdf", bbox_inches = "tight")
return fig, ax
#This function takes a proper Pandas.DataFrame and represents the data in it in the form of Seaborn bar plots
#(average as height of bars and standard deviation reported as a line):
def barPlotGeometry(df, xName : str = "Sample", yName : str = "", hueStr : str = None, fileName : str = "BarPlot"):
fig, ax = plt.subplots(1)
fig.suptitle(yName)
sns.barplot(x = xName, y = yName, data = df, ci = "sd", hue = hueStr)
ax.set_xticklabels(ax.get_xticklabels(), rotation = 45, horizontalalignment = "right")
ax.set_ylabel(ax.get_ylabel() + " (Mean and SD)")
fig.set_size_inches(12, 6)
plt.show();
fig.savefig(path + os.sep + fileName + ".pdf", dpi = 300, format = "pdf", bbox_inches = "tight")
return fig, ax
#Strip plot function:
def stripPlotGeometry(df, xName : str = "Sample", yName : str = "", hueStr : str = None, fileName : str = "StripPlot"):
fig, ax = plt.subplots(1)
fig.suptitle(yName)
sns.stripplot(x = xName, y = yName, hue = hueStr, data = df, jitter = True, ax = ax)
ax.set_xticklabels(ax.get_xticklabels(), rotation = 45, horizontalalignment = "right")
fig.set_size_inches(12, 6)
plt.show();
fig.savefig(path + os.sep + fileName + ".pdf", dpi = 300, format = "pdf", bbox_inches = "tight")
return fig, ax
#Swarm plot function:
def swarmPlotGeometry(df, xName : str = "Sample", yName : str = "", hueStr : str = None, fileName : str = "SwarmPlot"):
fig, ax = plt.subplots(1)
fig.suptitle(yName)
sns.swarmplot(x = xName, y = yName, hue = hueStr, data = df, ax = ax)
ax.set_xticklabels(ax.get_xticklabels(), rotation = 45, horizontalalignment = "right")
fig.set_size_inches(12, 6)
plt.show();
fig.savefig(path + os.sep + fileName + ".pdf", dpi = 300, format = "pdf", bbox_inches = "tight")
return fig, ax
def boxPlotGeometry(df, xName : str = "Sample", yName : str = "", hueStr : str = None, fileName : str = "BoxPlot"):
fig, ax = plt.subplots(1)
fig.suptitle(yName)
sns.boxplot(x = xName, y = yName, hue = hueStr, data = df, ax = ax)
ax.set_xticklabels(ax.get_xticklabels(), rotation = 45, horizontalalignment = "right")
fig.set_size_inches(12, 6)
plt.show();
fig.savefig(path + os.sep + fileName + ".pdf", dpi = 300, format = "pdf", bbox_inches = "tight")
return fig, ax
#CREATE THE PLOTS:
#Violin plot:
violinPlotGeometry(myData, yName = "Area")
#Various Bar plots:
barPlotGeometry(myData, yName = "Area")
barPlotGeometry(myData, yName = "Area", hueStr = "Plate_format", fileName = "BarPlotByPlateFormat")
barPlotGeometry(myData, xName = "Microwell_diameter", yName = "Area", hueStr = "Plate_format", fileName = "BarPlotByPlateFormatAndMicrowellDiameter")
#Strip plot:
stripPlotGeometry(myData, yName = "Area")
#Swarm plot:
swarmPlotGeometry(myData, yName = "Area")
#Box plot:
boxPlotGeometry(myData, yName = "Area")
#Here you can calculate various parameters from "myData" DataFrame:
#Average Area per Sample:
sampleGroup = myData.groupby("Sample")
sampleGroup.agg(["mean", "std"])
summary = sampleGroup.agg(["mean", "std"])
myData.to_csv(path + os.sep + "Data.csv", index = False)
summary.to_csv(path + os.sep + "DataSummary.csv", index = True)
|
[
"andrea@localhost"
] |
andrea@localhost
|
dbe0942ebb3c4ee44c4c49ccca899603e66decd1
|
d453b2914128b4d8d83eb5e7a5c01a383a3b0299
|
/django_project/blog/forms.py
|
a4a544470c340d40647fcf26a60770a2d497398f
|
[] |
no_license
|
geegatomar/Django-DBMS
|
441ca47ee0462e03870677b087055bc93bd13cac
|
132e124a9a8218bbff50a52d4154d950a3d2ad9f
|
refs/heads/master
| 2023-03-31T01:56:24.592722
| 2021-03-31T09:20:30
| 2021-03-31T09:20:30
| 344,864,384
| 0
| 1
| null | 2021-03-31T08:43:45
| 2021-03-05T16:12:06
|
Python
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
from django import forms
from .models import Items, ItemsCart
class ItemsForm(forms.ModelForm):
class Meta:
model = Items
fields = [
"item_name",
"item_type",
"item_details",
"item_price",
]
class ItemsCartForm(forms.ModelForm):
class Meta:
model = ItemsCart
fields = [
]
|
[
"djoshithareddy@gmail.com"
] |
djoshithareddy@gmail.com
|
492d20da99257954825f5906068e71391ad96673
|
1b9f547ccc303f5505f1471980329d71c226ce49
|
/Proba/wsgi.py
|
9e80bea2ed132c11d2deed990770069cfd4e52a4
|
[] |
no_license
|
Sergikv/my-first-blog
|
e581cf54f1033544f0739a421071068b97d41a3a
|
04ced896cdb12d8ebe2184a9272c21a18a6d3d02
|
refs/heads/master
| 2020-04-05T04:07:50.638330
| 2018-11-07T11:13:40
| 2018-11-07T11:13:40
| 156,538,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
WSGI config for Proba project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Proba.settings')
application = get_wsgi_application()
|
[
"sergikv@rambler.ru"
] |
sergikv@rambler.ru
|
b825071f64c76fbd90187acb619a61e11ec0eb88
|
2cb133bca85eb5e8d1a224ec72e8783f62b31f3b
|
/72_inheritance.py
|
886d46a2661b206d7398ae4ccb8eb8d28757b725
|
[] |
no_license
|
KarmanyaT28/Python-Code-Yourself
|
9739e424eef64b14742a959140a51d5b0e4508ff
|
84081354232456842ee5fddba4d6c417cc94a75d
|
refs/heads/main
| 2023-08-15T03:08:30.192210
| 2021-10-12T11:40:38
| 2021-10-12T11:40:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
class Person(object):
def __init__(self,name):
self.name = name
def getName(self):
return self.name
def isEmployee(self):
return False
#Inherited class
class Employee(Person):
# Here we return true
def isEmployee(self):
return True
emp = Person("anyone") # An Object of Person
print(emp.getName(), emp.isEmployee())
emp = Employee("karmanya") # An Object of Employee
print(emp.getName(), emp.isEmployee())
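# Expected output:
#   anyone False
#   karmanya True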
|
[
"noreply@github.com"
] |
KarmanyaT28.noreply@github.com
|
6d688a3c14986fda73b9177746945be47f60e40b
|
e60c2fd49adaac61de0f5df658927b3b050cc917
|
/hw4/matrix_topdown.py
|
d09849d2d5f9434118620aa03bba449a2bd89000
|
[] |
no_license
|
scordata/spring2014
|
5c799d033dbd60b2deea2b64c9522e063e305d44
|
7d2977ffc503d514fd42547726e673ae8e34480a
|
refs/heads/master
| 2020-06-12T20:44:20.328796
| 2014-05-25T01:42:08
| 2014-05-25T01:42:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
__author__ = "Adam Najman"
#__sources__ == "CLRS"
"""
Adam Najman
Python implementation of top-down (recursive)
Matrix-chain Multiplication
Adapted from CLRS
"""
import re
import sys
def print_best(s, i, j):
if i == j:
sys.stdout.write("A%i" % i)
else:
sys.stdout.write("(")
print_best(s, i, s[i][j])
print_best(s, s[i][j]+1, j)
sys.stdout.write(")")
def rec_matrix_order(p):
n = len(p) - 1
m = [[float("inf") for y in xrange(n + 1)] \
for x in xrange(n + 1)]
#print m
return lookup(m, p, 1, n)
def lookup(m, p, i, j):
if m[i][j] < float("inf"):
return m[i][j]
elif i == j:
m[i][j] = 0
else:
        for k in range(i, j):
q = ( lookup(m, p, i, k) + \
lookup(m, p, k + 1, j) + \
( p[i-1]*p[k]*p[j] ) )
if q < m[i][j]:
m[i][j] = q
return m[i][j]
f = sys.stdin  # open('input.txt', 'rb')
times = int(f.readline())  # number of matrix chains that follow (read but unused)
delim = " ", "x", "\n"
pattern = '|'.join(map(re.escape, delim))
#print("pattern is: ", pattern)
for x in f:
    foo = re.split(pattern, x)
    foo = foo[:-1]
    #print(foo)
    matrix = []
    for y in range(len(foo)):
        if y % 2 == 0:
            bar = (int(foo[y]), int(foo[y + 1]))
            matrix.append(bar)
    print("matrix is: ")
    print(matrix)
    param = []
    for e in matrix:
        param.append(e[0])
        if e == matrix[-1]:
            param.append(e[1])
    print("\n")
    print("rec: ")
    print(rec_matrix_order(param))
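# Illustrative run (input values assumed): with stdin
#   1
#   10x20 20x5 5x30
# the dimension list becomes param = [10, 20, 5, 30] and the minimum cost
# printed is 2500, i.e. (A1 A2) A3 = 10*20*5 + 10*5*30.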
|
[
"najman@gmail.com"
] |
najman@gmail.com
|
126ba0d7d136420efb359711f3b449175e9aee3b
|
8004831758776360a421b6cb458b48a120d1586e
|
/chapter_3/your_own_list.py
|
360b62f50d4d4a392f2b592c3873c2ae7afa70b0
|
[] |
no_license
|
scott-gordon72/python_crash_course
|
025d15952d7372c2a40780b7038008f9b39c42d2
|
605f1f7e8d90534bd9cb63f44098d95dec739e50
|
refs/heads/master
| 2022-06-04T19:29:40.142291
| 2020-04-25T01:13:48
| 2020-04-25T01:13:48
| 255,947,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
automobiles = ['lexus', 'bmw', 'tesla', 'acura']
print(f"I would like to own a {automobiles[0].title()} automobile.")
print(f"I would like to own a {automobiles[1].title()} automobile.")
print(f"I would like to own a {automobiles[2].title()} automobile.")
print(f"I would like to own a {automobiles[3].title()} automobile.")
|
[
"scott.gordon72@outlook.com"
] |
scott.gordon72@outlook.com
|
72218b87d882051997aef5524f192134bc7d09f1
|
c500eab7d2d7504e337eb9dc6ebb12fb0765321e
|
/werewolf/commands/base.py
|
456f59f611b167e864b929aa2fb7d39c0c0b2e92
|
[
"Apache-2.0"
] |
permissive
|
jan-g/slackwolf
|
7316ad02b2817923145afd5524d49d5578f0699e
|
7da027b87d79d663fe88e2e8301a748b8fcf6951
|
refs/heads/master
| 2020-06-08T15:22:29.428811
| 2019-08-23T14:56:40
| 2019-08-23T14:57:40
| 193,251,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,049
|
py
|
import logging
from ..service import Agent
LOG = logging.getLogger(__name__)
def match_agent(agents=None, any_agent=False):
def match(item):
if isinstance(item, Agent) and (any_agent or item in agents):
return item
elif isinstance(item, str):
matches = {agent
for agent in agents
if agent.name.lower().startswith(item.lower())
or agent.real_name.lower().startswith(item.lower())}
LOG.debug("matching agents for %s amongst %r", item, agents)
if len(matches) == 1:
return matches.pop()
return match
class BaseCommand:
def welcome(self, srv=None, game=None, role=None):
"""Send an introductory message, if appropriate"""
pass
def on_message(self, srv=None, game=None, role=None, channel=None, text=None):
"""When a message is received by a player in a particular channel, handle it
Text turns up as already split ready for matching."""
pass
def ready(self, srv=None, game=None):
"""As a phase starts, these steps are called in order
They should ready the scratchpad if required."""
def resolve(self, srv=None, game=None):
"""As a phase exits, these resolution phases are called in order
They should update the scratchpad if required.
Then they should make any reports required.
Return None if there is no winner; otherwise, return the winner's side.
The first resolution mechanism to declare a winner will be the value taken."""
return None
def is_relevant(self, game=None):
return game.current_phase['handler'].action_relevant(game=game, command=self)
def notice(self, game=None, running=True):
"""Return any text to add to the day's notice
`running` will be True if this phase is still going,
or False if the phase is concluded.
Return None - or Text to be appended to the game notice."""
return None
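# A minimal sketch of a concrete command (illustrative only; `VoteCommand`,
# the `game.players` attribute and the message format are assumptions, not
# part of this module):
# class VoteCommand(BaseCommand):
#     def on_message(self, srv=None, game=None, role=None, channel=None, text=None):
#         if text and text[0] == "vote":
#             target = match_agent(agents=game.players)(text[1])
#             ...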
|
[
"jan.grant@oracle.com"
] |
jan.grant@oracle.com
|
9facc7b7965e5fd143564b4f208b64108527bd39
|
f074fe742b1b242c75872e64a1196ba539066609
|
/verres/data/event/__init__.py
|
289584da344e726d716467aa82a5d63b4dae8cf5
|
[
"MIT"
] |
permissive
|
csxeba/Verres
|
63f31d82b4e12b4da8b5fa5cc930eb874f1aa60d
|
04230d22b7791f84d86b9eb2272a6314a27580ed
|
refs/heads/master
| 2023-04-07T02:07:26.863612
| 2021-12-09T20:15:12
| 2021-12-09T20:15:12
| 176,881,974
| 0
| 0
|
MIT
| 2023-03-18T19:56:38
| 2019-03-21T06:17:58
|
Python
|
UTF-8
|
Python
| false
| false
| 37
|
py
|
from .video2events import DiffStream
|
[
"noreply@github.com"
] |
csxeba.noreply@github.com
|