blob_id: stringlengths 40-40 | directory_id: stringlengths 40-40 | path: stringlengths 3-281 | content_id: stringlengths 40-40 | detected_licenses: listlengths 0-57 | license_type: stringclasses 2 values | repo_name: stringlengths 6-116 | snapshot_id: stringlengths 40-40 | revision_id: stringlengths 40-40 | branch_name: stringclasses 313 values | visit_date: timestamp[us] | revision_date: timestamp[us] | committer_date: timestamp[us] | github_id: int64 18.2k-668M ⌀ | star_events_count: int64 0-102k | fork_events_count: int64 0-38.2k | gha_license_id: stringclasses 17 values | gha_event_created_at: timestamp[us] | gha_created_at: timestamp[us] | gha_language: stringclasses 107 values | src_encoding: stringclasses 20 values | language: stringclasses 1 value | is_vendor: bool 2 classes | is_generated: bool 2 classes | length_bytes: int64 4-6.02M | extension: stringclasses 78 values | content: stringlengths 2-6.02M | authors: listlengths 1-1 | author: stringlengths 0-175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a1df8914c35f5e949416165a0782c85926e4e9f7
|
2cf9f165cb4d6e8e9009d74b43020fe2d5c1964f
|
/chat/migrations/0001_initial.py
|
a2db3bb2cfb40b358f72ef4113b611bc648018b0
|
[] |
no_license
|
jimy1824/chat
|
29f5039c6284083b8328502932795bee586dec14
|
627ad4678c6215d37322737b38b3e5eb6d69696f
|
refs/heads/master
| 2023-04-27T15:10:51.316824
| 2019-11-08T05:00:12
| 2019-11-08T05:00:12
| 220,081,959
| 1
| 0
| null | 2023-04-21T20:40:47
| 2019-11-06T20:17:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
# Generated by Django 2.2.6 on 2019-11-06 19:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('message', models.TextField()),
('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_receiver', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_sender', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
[
"jamshaid1824@gmail.com"
] |
jamshaid1824@gmail.com
|
53ff9198dfe47a214fb9a2f6bd7b7738da26a9cf
|
dfe78d0296c2f2497d323bc6d0516e3d03f6e5c3
|
/learning_templates/app1/urls.py
|
ec7eefe116f7b638f3fae7bd3beb5c4ebd257c51
|
[] |
no_license
|
tekam47/django-deployement-trial
|
49ac22a6683d0156d694ec67432bdc8fa21e2514
|
c96b4bb8ef7104b6dbab2bd33b4fce65c627925a
|
refs/heads/master
| 2022-11-21T14:47:16.097166
| 2020-07-22T10:54:03
| 2020-07-22T10:54:03
| 281,641,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
from app1 import views
from django.conf.urls import url
#TEMPLATE TAGGING
app_name = "app1"
urlpatterns = [
url(r'^other',views.other,name="other"),
url(r'^relative',views.relative,name="relative"),
]
|
[
"kounatzepi@gmail.com"
] |
kounatzepi@gmail.com
|
3720c2cfb59920028d138cfe49a9a780696b3a31
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03804/s226430328.py
|
40ea2fbbe5322fb1e9b734e857d574fcafee112b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
N, M = map(int, input().split())
A = [input() for _ in range(N)]
B = [input() for _ in range(M)]
for i in range(N - M + 1):
for j in range(N - M + 1):
check = True
count = 0
for k in range(M):
    if A[i + k][j: j + M] == B[k]:
        # print(A[i + k][j:j + M], B[k])
        count += 1
    else:
        check = False
        break
if (check and count == M):
print('Yes')
exit()
print('No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3b930951163c7e85c90c1df23c7a762af58fc623
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/pyinstaller/tests/unit/test_modulegraph/testpkg-relimport/pkg/relimport.py
|
0e76e39fedf777016e011f2daa0a233827236b57
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:b1c982ab98ef0425093ad8bafe4c9deb7aad009260b384b762696e8fc4c0e375
size 22
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
a6db9d0ebe8e9a8f0ab0a1cacff578441a2234ba
|
4fcfc6834f598954e069e9481e4f69d6f7205f3b
|
/Week1/day_3/Flask_Intro/first_flask_app/server.py
|
a7693151c6981a66648352d4cf8857f87de47de7
|
[] |
no_license
|
sadieBoBadie/jan_2020_python_stack
|
dadc77a8a76fd4b900bc31ad83ed4680a0802aa1
|
b91da3da23ea57a27c086075f4b86c5bfec413d0
|
refs/heads/main
| 2023-03-02T22:27:46.877290
| 2021-01-26T19:04:57
| 2021-01-26T19:04:57
| 326,852,429
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def puppies():
return "<h1 style='color: red'>Puppies are cute!</h1>"
@app.route('/<animal>/<color>')
@app.route('/<animal>')
@app.route('/<animal>/<color>/<int:num>')
def display_animal(animal, color="blue", num=5):
print(f"Animal: {animal}")
print(f"Color: {color}")
print("Type of the num var: ", type(num))
return render_template('index.html', animal=animal, color=color, num=num)
if __name__ == "__main__":
app.run(debug=True)
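A minimal sketch of how the stacked routes above bind their arguments, using Flask's built-in test client (assumes the file is saved as server.py; the display_animal routes also need templates/index.html on disk to render):
from server import app
with app.test_client() as client:
    print(client.get('/').data)   # b"<h1 style='color: red'>Puppies are cute!</h1>"
    client.get('/dog')            # animal='dog', color='blue' (default), num=5 (default)
    client.get('/dog/red/3')      # the <int:num> converter yields num == 3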
|
[
"sflick@codingdojo.com"
] |
sflick@codingdojo.com
|
3a7c1d7adfb59f00b11ae77e1d37b1885d33f881
|
d1ad7bfeb3f9e3724f91458277284f7d0fbe4b2d
|
/react/003-react-django-justdjango/backend/env/bin/sqlformat
|
b08eaac3345a9fc3b0a7dbb48e6607276b57395a
|
[] |
no_license
|
qu4ku/tutorials
|
01d2d5a3e8740477d896476d02497d729a833a2b
|
ced479c5f81c8aff0c4c89d2a572227824445a38
|
refs/heads/master
| 2023-03-10T20:21:50.590017
| 2023-03-04T21:57:08
| 2023-03-04T21:57:08
| 94,262,493
| 0
| 0
| null | 2023-01-04T21:37:16
| 2017-06-13T22:07:54
|
PHP
|
UTF-8
|
Python
| false
| false
| 307
|
#!/Users/kamilwroniewicz/_code/_github/_tutorials/react/003-react-django-justdjango/backend/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"qu4ku@hotmail.com"
] |
qu4ku@hotmail.com
|
|
dc3f8793d740e0cf8d825bacb6e97764c8c288b2
|
be20ff4fe04864c6f48317e9bbebdf6546358caa
|
/Enthought/exercises/python_language/roman_dictionary/roman_dictionary_solution.py
|
9e3df15d356eb328498ddce6f8d12cd904c5c386
|
[] |
no_license
|
cardsrock10/Python-Training
|
3267e20ee9e70683b0daba0007e87aaf4acf5022
|
7bc83cdd6955cb1498e6f391ce9274d4c75a0a3b
|
refs/heads/master
| 2021-04-15T11:56:52.197773
| 2018-03-23T23:09:27
| 2018-03-23T23:09:27
| 126,546,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,311
|
py
|
"""
Roman Dictionary
----------------
Mark Antony keeps a list of the people he knows in several dictionaries
based on their relationship to him::
friends = {'julius': '100 via apian', 'cleopatra': '000 pyramid parkway'}
romans = dict(brutus='234 via tratorium', cassius='111 aqueduct lane')
countrymen = dict([('plebius','786 via bunius'),
('plebia', '786 via bunius')])
1. Print out the names for all of Antony's friends.
2. Now all of their addresses.
3. Now print them as "pairs".
4. Hmmm. Something unfortunate befell Julius. Remove him from the
friends list.
5. Antony needs to mail everyone for his second-triumvirate party. Make
a single dictionary containing everyone.
6. Antony's stopping over in Egypt and wants to swing by Cleopatra's
place while he is there. Get her address.
7. The barbarian hordes have invaded and destroyed all of Rome.
Clear out everyone from the dictionary you created in step 5.
"""
friends = {'julius': '100 via apian', 'cleopatra': '000 pyramid parkway'}
romans = dict(brutus='234 via tratorium', cassius='111 aqueduct lane')
countrymen = dict([('plebius','786 via bunius'), ('plebia', '786 via bunius')])
# Print out the names for all of Antony's friends:
print('friend names:', list(friends.keys()))
print()
# Now all of their addresses:
print('friend addresses:', list(friends.values()))
print()
# Now print them as "pairs":
print('friend (name, address) pairs:', list(friends.items()))
print()
# Hmmm. Something unfortunate befell Julius. Remove him from the friends
# list:
del friends['julius']
# Antony needs to mail everyone for his second-triumvirate party. Make a
# single dictionary containing everyone:
mailing_list = {}
mailing_list.update(friends)
mailing_list.update(romans)
mailing_list.update(countrymen)
print('party mailing list:')
print(mailing_list)
print()
# Or, using a loop (which we haven't learned about yet...):
print('party mailing list:')
for name, address in mailing_list.items():
    print(name, ':\t', address)
print()
# Antony's stopping over in Egypt and wants to swing by Cleopatra's place
# while he is there. Get her address:
print "Cleopatra's address:", friends['cleopatra']
# The barbarian hordes have invaded and destroyed all of Rome. Clear out
# everyone from the dictionary:
mailing_list.clear()
|
[
"rmbirmi@srn.sandia.gov"
] |
rmbirmi@srn.sandia.gov
|
edd4a21019c7cc1c1b9b7eb3abe2e8acb32f929b
|
d2b0c67b919783cceb58bc25ae0b18dc7d4ce892
|
/ExoCTK/tor/contam_tool/f_visibilityPeriods.py
|
48b7d3824ab4feb7b5673f3b13fa18fcd8eadd15
|
[] |
no_license
|
natashabatalha/ExoCTK
|
7cff16184bd999e5eb50e1c935e12020594c8e50
|
7b996f77fd7b87eac381ca396877bda4121f18a8
|
refs/heads/master
| 2021-07-01T01:47:51.028537
| 2017-09-07T18:32:53
| 2017-09-07T18:32:53
| 106,414,418
| 2
| 0
| null | 2017-10-10T12:30:31
| 2017-10-10T12:30:30
| null |
UTF-8
|
Python
| false
| false
| 17,229
|
py
|
# =====================================================================================
# Series of functions to compute the visibility periods for a given (RA,DEC) with
# in some cases the possibility to select a PA value.
#
# Functions derived from the code of Wayne Kinzel provided by Jeff Valenti
# Extract from the e-mail of Wayne Kinzel:
# As before, the code is not officially tested, nor is it an official STScI product.
# Users should be warned that the apparent position of the Sun changes ~+/-0.2 degrees
# depending upon where JWST is in its orbit. So do not rely strongly on these results
# if the target is within ~0.2 degrees of |ecliptic latitude| 45 degrees or 85 degrees.
# For example if a target is at 84.9 degrees latitude and the tool says it is CVZ, it
# may not be with the operational orbit.
#
# =====================================================================================
import sys
import math
import ephemeris_old2x
D2R = math.pi / 180. #degrees to radians
R2D = 180. / math.pi #radians to degrees
PI2 = 2. * math.pi # 2 pi
def f_computeVisibilityPeriods(ephemeris, mjdmin, mjdmax, ra, dec):
'''
# -----------------------------------------------------------
# METHOD f_computeVisibilityPeriods()
# TYPE function
#
# DESCRIPTION function that will compute the visibility
# periods for a given (RA,DEC) over a given
# time period.
#
# SYNTAX f_computeVisibilityPeriods(ephemeris, mjdmin,
# mjdmax, ra, dec)
#
# ephemeris: input ephemeris object
# mjdmin: beginning of the search interval (modified
# Julian date). It must be covered by the ephemeris.
# mjdmax: end of the search interval (modified
# Julian date). It must be covered by the ephemeris.
# ra: input RA coordinate (equatorial coordinate, in rad)
# dec: input DEC coordinate (equatorial coordinate, in rad)
#
# Returns two lists containing the start and end of each
# visibility period and a list of status flags:
# flag = 0 visibility period fully in the search interval
# flag = -1 start of the visibility period truncated by
# the start of the search interval
# flag = -2 end of the visibility period truncated by
# the end of the search interval
# flag = +1 the search interval is fully included in
# the visibility period
#
# -----------------------------------------------------------
'''
# ===========================================================
# Paranoid checks
# ===========================================================
# print "# RA = {:12.8f} rad = {:12.8f} deg".format(ra, ra / D2R)
# print "# DEC = {:12.8f} rad = {:12.8f} deg".format(dec, dec / D2R)
# print"# No constraint on the PA."
if (ephemeris.amin > mjdmin):
print("f_computeVisibilityPeriods(): the start of the search interval is not covered by the ephemeris.")
print("Ephemeris start date (modified Julian date): {:8.5f}".format(ephemeris.amin))
print("Search interval start date (modified Julian date): {:8.5f}".format(mjdmin))
raise ValueError
if (ephemeris.amax < mjdmax):
print("f_computeVisibilityPeriods(): the end of the search interval is not covered by the ephemeris.")
print("Ephemeris end date (modified Julian date): {:8.5f}".format(ephemeris.amax))
print("Search interval end date (modified Julian date): {:8.5f}".format(mjdmax))
raise ValueError
# ===========================================================
# Scanning the search period
# ===========================================================
# Flag used to track the beginning and the end of a
# visibility period
iflip = False
wstart = mjdmin
startList = []
endList = []
statusList = []
# Scanning step size (must be small enough to make sure that
# it cannot contain a full visibility period, which we would
# otherwise miss)
scanningStepSize = 0.1
span = int((mjdmax - mjdmin) / scanningStepSize)
# Initialisation (the first step of the scan is outside of the
# loop)
iflag_old = ephemeris.in_FOR(mjdmin,ra,dec)
for i in range(span):
# Current date (the last step may be partial to remain
# within the search interval)
currentdate = mjdmin + (i + 1) * scanningStepSize
if (currentdate >= mjdmax):
currentdate = mjdmax
iflag = ephemeris.in_FOR(currentdate, ra, dec)
# Checking if we are reaching the beginning or the end of a visibility period
# (in which case the iflag value will change)
if iflag != iflag_old:
# Setting the iflip flag to True to keep track of the change (in order to
# detect CVZ objects which are permanently visible)
# If iflag = True we are starting a visibility period and use a bisection method
# to find the exact transition date. This assumes that there is a single
# transition in the interval => it looks like a step size of 0.1 day is
# sufficient to ensure that.
if (iflag):
wstart = ephemeris.bisect_by_FOR(currentdate, currentdate-scanningStepSize, ra, dec)
# If iflag = False we are reaching the end of a visibility period.
# Like for the previous case a bisection method is used to locate
# accurately the end of the visibility period.
else:
wend = ephemeris.bisect_by_FOR(currentdate-scanningStepSize, currentdate, ra, dec)
startList.append(wstart)
endList.append(wend)
if (iflip):
statusList.append(0)
else:
statusList.append(-1)
iflip = True
iflag_old = iflag
# If there was a transition and we end up with a valid date, we close the interval with the
# end of the search interval
if (iflag and iflip):
startList.append(wstart)
endList.append(currentdate)
statusList.append(-2)
# There is also the case where the visibility period covers the complete search interval
if (iflag and (not iflip)):
startList.append(mjdmin)
endList.append(mjdmax)
statusList.append(1)
# End of the function
return startList, endList, statusList
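A self-contained smoke test for f_computeVisibilityPeriods using a stub ephemeris (the real ephemeris_old2x object is assumed to expose only the four members used above: amin, amax, in_FOR and bisect_by_FOR):
class _StubEphemeris:
    amin, amax = 0.0, 100.0
    def in_FOR(self, mjd, ra, dec):
        return 20.0 <= mjd <= 60.0          # a single visibility window
    def bisect_by_FOR(self, t1, t2, ra, dec):
        return (t1 + t2) / 2.0              # crude midpoint "bisection"
starts, ends, flags = f_computeVisibilityPeriods(_StubEphemeris(), 0.0, 100.0, 1.3, -0.25)
print(starts, ends, flags)                  # expect flags == [0]: window fully inside the interval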
def f_computeVisibilityPeriodsWithPA(ephemeris, mjdmin, mjdmax, ra, dec, pa):
'''
# -----------------------------------------------------------
# METHOD f_computeVisibilityPeriodsWithPA()
# TYPE function
#
# DESCRIPTION function that will compute the visibility
# periods for a given (RA,DEC), a given PA and
# over a given time period.
#
# SYNTAX f_computeVisibilityPeriodsWithPA(ephemeris, mjdmin,
# mjdmax, ra, dec, pa)
#
# ephemeris: input ephemeris object
# mjdmin: beginning of the search interval (modified
# Julian date). It must be covered by the ephemeris.
# mjdmax: end of the search interval (modified
# Julian date). It must be covered by the ephemeris.
# ra: input RA coordinate (equatorial coordinate, in rad)
# dec: input DEC coordinate (equatorial coordinate, in rad)
# pa: input PA (in rad)
#
# Returns two lists containing the start and end of each
# visibility period and a list of status flags:
# flag = 0 visibility period fully in the search interval
# flag = -1 start of the visibility period truncated by
# the start of the search interval
# flag = -2 end of the visibility period truncated by
# the end of the search interval
# flag = +1 the search interval is fully included in
# the visibility period
#
# -----------------------------------------------------------
'''
# ===========================================================
# Paranoid checks
# ===========================================================
# print "# RA = {:12.8f} rad = {:12.8f} deg".format(ra, ra / D2R)
# print "# DEC = {:12.8f} rad = {:12.8f} deg".format(dec, dec / D2R)
# print"# No constraint on the PA."
if (ephemeris.amin > mjdmin):
print("f_computeVisibilityPeriodsWithPA(): the start of the search interval is not covered by the ephemeris.")
print("Ephemeris start date (modified Julian date): {:8.5f}".format(ephemeris.amin))
print("Search interval start date (modified Julian date): {:8.5f}".format(mjdmin))
raise ValueError
if (ephemeris.amax < mjdmax):
print("f_computeVisibilityPeriodsWithPA(): the end of the search interval is not covered by the ephemeris.")
print("Ephemeris end date (modified Julian date): {:8.5f}".format(ephemeris.amax))
print("Search interval end date (modified Julian date): {:8.5f}".format(mjdmax))
raise ValueError
# ===========================================================
# Scanning the search period
# ===========================================================
# Flag used to track the beginning and the end of a
# visibility period
iflip = False
wstart = mjdmin
startList = []
endList = []
statusList = []
# Scanning step size (must be small enough to make sure that
# it cannot contain a full visibility period, which we would
# otherwise miss)
scanningStepSize = 0.1
span = int((mjdmax - mjdmin) / scanningStepSize)
# Initialisation (the first step of the scan is outside of the
# loop)
iflag_old = ephemeris.is_valid(mjdmin, ra, dec, pa)
for i in range(span):
# Current date (the last step may be partial to remain
# within the search interval)
currentdate = mjdmin + (i + 1) * scanningStepSize
if (currentdate >= mjdmax):
currentdate = mjdmax
iflag = ephemeris.is_valid(currentdate, ra, dec, pa)
# Checking if we are reaching the beginning or the end of a visibility period
# (in which case the iflag value will change)
if iflag != iflag_old:
# Setting the iflip flag to True to keep track of the change (in order to
# detect CVZ objects which are permanently visible)
# If iflag = True we are starting a visibility period and use a bisection method
# to find the exact transition date. This assumes that there is a single
# transition in the interval => it looks like a step size of 0.1 day is
# sufficient to ensure that.
if (iflag):
wstart = ephemeris.bisect_by_attitude(currentdate, currentdate-scanningStepSize, ra, dec, pa)
# If iflag = False we are reaching the end of a visibility period.
# Like for the previous case a bisection method is used to locate
# accurately the end of the visibility period.
else:
wend = ephemeris.bisect_by_attitude(currentdate-scanningStepSize, currentdate, ra, dec, pa)
startList.append(wstart)
endList.append(wend)
if (iflip):
statusList.append(0)
else:
statusList.append(-1)
iflip = True
iflag_old = iflag
# If there was a transition and we end up with a valid date, we close the interval with the
# end of the search interval
if (iflag and iflip):
startList.append(wstart)
endList.append(currentdate)
statusList.append(-2)
# There is also the case where the visibility period covers the complete search interval
if (iflag and (not iflip)):
startList.append(mjdmin)
endList.append(mjdmax)
statusList.append(1)
# End of the function
return startList, endList, statusList
def f_computeDurationOfVisibilityPeriodWithPA(ephemeris, mjdmin, mjdmax, ra, dec, pa, mjdc):
'''
# -----------------------------------------------------------
# METHOD f_computeDurationOfVisibilityPeriodWithPA()
# TYPE function
#
# DESCRIPTION function that will compute the duration of
# a specific visibility period associated to
# a given (RA,DEC), a given PA and given
# date.
#
# SYNTAX f_computeDurationOfVisibilityPeriodWithPA(ephemeris,
# mjdmin, mjdmax, ra, dec, pa, mjdc)
#
# ephemeris: input ephemeris object
# mjdmin: beginning of the search interval (modified
# Julian date). It must be covered by the ephemeris.
# mjdmax: end of the search interval (modified
# Julian date). It must be covered by the ephemeris.
# ra: input RA coordinate (equatorial coordinate, in rad)
# dec: input DEC coordinate (equatorial coordinate, in rad)
# pa: input PA (in rad)
# mjdc: date within the visibility period (i.e. compatible
# with (RA,DEC) and PA).
#
# Returns start,end,status
# Status flag:
# flag = 0 visibility period fully in the search interval
# flag = -1 start of the visibility period truncated by
# the start of the search interval
# flag = -2 end of the visibility period truncated by
# the end of the search interval
# flag = +1 the search interval is fully included in
# the visibility period
#
# -----------------------------------------------------------
'''
# ===========================================================
# Paranoid checks
# ===========================================================
# print "# RA = {:12.8f} rad = {:12.8f} deg".format(ra, ra / D2R)
# print "# DEC = {:12.8f} rad = {:12.8f} deg".format(dec, dec / D2R)
# print"# No constraint on the PA."
if (ephemeris.amin > mjdmin):
print("f_computeDurationOfVisibilityPeriodWithPA(): the start of the search interval is not covered by the ephemeris.")
print("Ephemeris start date (modified Julian date): {:8.5f}".format(ephemeris.amin))
print("Search interval start date (modified Julian date): {:8.5f}".format(mjdmin))
raise ValueError
if (ephemeris.amax < mjdmax):
print("f_computeDurationOfVisibilityPeriodWithPA(): the end of the search interval is not covered by the ephemeris.")
print("Ephemeris end date (modified Julian date): {:8.5f}".format(ephemeris.amax))
print("Search interval end date (modified Julian date): {:8.5f}".format(mjdmax))
raise ValueError
if (mjdmin > mjdc):
print("f_computeDurationOfVisibilityPeriodWithPA(): initial date is not included in the search interval.")
print("Search interval start date (modified Julian date): {:8.5f}".format(mjdmin))
print("Initial date (modified Julian date): {:8.5f}".format(mjdc))
raise ValueError
if (mjdmax < mjdc):
print("f_computeDurationOfVisibilityPeriodWithPA(): initial date is not included in the search interval.")
print("Search interval end date (modified Julian date): {:8.5f}".format(mjdmax))
print("Initial date (modified Julian date): {:8.5f}".format(mjdc))
raise ValueError
iflag = ephemeris.is_valid(mjdc, ra, dec, pa)
if (not iflag):
print("f_computeDurationOfVisibilityPeriodWithPA(): invalid date (not in a vsibility period).")
print("Date (modified Julian date): {:8.5f}".format(mjdc))
raise ValueError
# ===========================================================
# Looking for the start of the visibility period
# ===========================================================
scanningStepSize = 0.1
iflipLeft = False
currentmjd = mjdc
continueFlag = True
boundaryFlag = False
while (continueFlag):
currentmjd -= scanningStepSize
if (currentmjd < mjdmin):
currentmjd = mjdmin
boundaryFlag = True
continueFlag = False
iflag = ephemeris.is_valid(currentmjd, ra, dec, pa)
if (not iflag):
wstart = ephemeris.bisect_by_attitude(currentmjd, currentmjd+scanningStepSize, ra, dec, pa)
iflipLeft = True
continueFlag = False
elif (boundaryFlag):
wstart = mjdmin
iflipRight = False
currentmjd = mjdc
boundaryFlag = False
continueFlag = True
while (continueFlag):
currentmjd += scanningStepSize
if (currentmjd > mjdmax):
currentmjd = mjdmax
boundaryFlag = True
continueFlag = False
iflag = ephemeris.is_valid(currentmjd, ra, dec, pa)
if (not iflag):
wend = ephemeris.bisect_by_attitude(currentmjd-scanningStepSize, currentmjd, ra, dec, pa)
iflipRight = True
continueFlag = False
elif (boundaryFlag):
wend = mjdmax
if ((not iflipLeft) and (not iflipRight)):
status = 1
elif (not iflipLeft):
status = -1
elif (not iflipRight):
status = -2
else:
status = 0
# End of the function
return wstart, wend, status
|
[
"rafia0037@gmail.com"
] |
rafia0037@gmail.com
|
1b13c4cc42e2710f2aa2c196f9030cf2e7b9ae1d
|
7a2573f5539fe6de33fe4e355fde5df366cedb1f
|
/pl3.py
|
11eb70e73311b185b4cecea5fcc5e87fa406b57f
|
[] |
no_license
|
anujasubbarao/anuja
|
508c2d5e777d854cdc1387560590f7c345f25134
|
c840cdcf99986f3e2d842ee24045178e72da6e9c
|
refs/heads/master
| 2021-07-06T01:52:17.420722
| 2019-03-06T14:26:53
| 2019-03-06T14:26:53
| 142,555,136
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
n = int(input())
rev = 0
while n > 0:
    rem = n % 10
    rev = (rev * 10) + rem
    n = n // 10
print(rev)
|
[
"noreply@github.com"
] |
noreply@github.com
|
5ac18cbdef9d19e5b20538cf4607c5551ca81f13
|
750269b63dedbf2d3b402a15346681d9abcb991b
|
/crops/views.py
|
5d9d0cc6dc69a8be2bd48bd24dc0e293c9b7efe2
|
[] |
no_license
|
SRaiz/Krishi-Karma
|
36f67a45d4f9306ed10a3ced633a808b6ccd7a5b
|
7eb7348575cf9152b006eb0328dc8138fddd2d3b
|
refs/heads/master
| 2022-12-12T15:57:08.809116
| 2020-07-17T19:20:31
| 2020-07-17T19:20:31
| 243,632,577
| 0
| 1
| null | 2022-12-08T07:22:40
| 2020-02-27T22:48:33
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,587
|
py
|
import pandas as pd
from django.http import HttpResponse
from django.shortcuts import render
from apps.ml.cropsyield_classifier import random_forest
from .models import Crop, Yield
crops = Crop.objects.all()
yields = Yield.objects.all()
yield_df = pd.DataFrame.from_records(yields.values())
crops_df = pd.DataFrame.from_records(crops.values())
def index(request):
data_sd = values_for_homepage(yield_df)
return render(request, 'index.html', {
'crops': crops,
'states': data_sd
})
def values_for_homepage(yield_df):
    '''Returns the unique states to be shown on the homepage.'''
    return yield_df['state_name'].unique()
def filter_districts(request):
if request.method == 'POST':
state = request.POST['state']
filtered_df = yield_df[yield_df.state_name == state]
uniq_dist = filtered_df['district_name'].unique()
districts_string = ','.join(map(str, uniq_dist))
return HttpResponse(districts_string)
def filter_crops(request):
if request.method == 'POST':
state = request.POST['state']
district = request.POST['district']
filtered_df = yield_df[ (yield_df.state_name == state) & (yield_df.district_name == district) ]
uniq_crops = filtered_df['crop'].unique()
crops_string = ','.join(map(str, uniq_crops))
# Get all crops and also send it for comparison and hiding
all_crops = crops_df['name'].unique()
all_crops_string = ','.join(map(str, all_crops))
string_to_send = all_crops_string + '====' + crops_string
return HttpResponse(string_to_send)
def predict_yield(request):
if request.method == 'POST':
state = request.POST.get('state', False)
district = request.POST.get('district', False)
year = request.POST.get('year', False)
season = request.POST.get('season', False)
landArea = request.POST.get('landArea', False)
crop = request.POST.get('crop', False)
# Filter the dataframe on basis of district state and year to get the rainfall data
filtered_df = yield_df[ (yield_df.state_name == state) & (yield_df.district_name == district) & (yield_df.crop_year == int(year)) ]
filtered_df_prod = yield_df[ (yield_df.state_name == state) & (yield_df.district_name == district) & (yield_df.crop_year == int(year)) & (yield_df.crop == crop) ]
minimum_rainfall = filtered_df['min_rainfall'].unique()[0]
maximum_rainfall = filtered_df['max_rainfall'].unique()[0]
average_rainfall = filtered_df['mean_rainfall'].unique()[0]
total_annual_rainfall = filtered_df['annual_rainfall'].unique()[0]
production = filtered_df_prod['production'].unique()[0].max()
crop_yield = (production / float(landArea)).round(3)
# Get the prediction and show it on screen
input_data = {
"state_name": state,
"district_name": district,
"crop_year": int(year),
"season": season,
"crop": crop,
"area": float(landArea),
"min_rainfall": minimum_rainfall,
"max_rainfall": maximum_rainfall,
"mean_rainfall": average_rainfall,
"annual_rainfall": total_annual_rainfall,
"production": production,
"yield": crop_yield
}
rf_alg = random_forest.RandomForestClassifier()
response = rf_alg.compute_prediction(input_data)
return HttpResponse(response.get('label'))
|
[
"sidharthraizada07@gmail.com"
] |
sidharthraizada07@gmail.com
|
250bdd434c3ca14198d3b1dc4817974af8a02c58
|
36f91525be7418d90f77687e31554c86561013be
|
/venv/bin/easy_install
|
27b20fdc7f13220c7148e3122347c50d4db02d9a
|
[] |
no_license
|
rizkimn/attendance-system
|
a2bfafa0470ca76e25bd64d2beee1abb77a510b7
|
16e90d397dee2036d8183f06d635daab48f55645
|
refs/heads/main
| 2023-04-22T19:38:40.630024
| 2021-05-04T08:41:16
| 2021-05-04T08:41:16
| 359,022,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
#!/home/rizkimnur/Documents/python/attendance-system/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"rizkimnur0@gmail.com"
] |
rizkimnur0@gmail.com
|
|
66ff77da4530f172e873831475c4198c3aa8c691
|
94e54c1e885808cab39fc6de3aca906b72d09d7b
|
/src/day5/d5part1.py
|
56170599cfebed4876b3137ea39f5bbeeed1737d
|
[] |
no_license
|
jondarrer/advent-of-code-2020
|
d7a54f9d6fb1869796cc972ec0ffd47bfa743e83
|
c942f950231d5a5585acf1357eb578776d7bf2e9
|
refs/heads/main
| 2023-01-31T15:45:34.052160
| 2020-12-06T11:00:58
| 2020-12-06T11:00:58
| 317,469,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,466
|
py
|
import read_input_file
def convert_boarding_pass_to_seat(boarding_pass):
'''Converts a boarding pass, e.g. FBFBBFFRLR, to seat, e.g. row 44 column 5'''
row = convert_binary_to_decimal(boarding_pass[0:7], 'B')
column = convert_binary_to_decimal(boarding_pass[7:10], 'R')
return {'row': row, 'column': column}
def convert_seat_to_seat_id(seat):
'''The seat as a seat id, e.g. row 44 column 5 is seat id 357 ((row * 8) + column)'''
return seat['row'] * 8 + seat['column']
def convert_binary_to_decimal(binary, one_char):
'''Converts a binary number to decimal, with a specified character for 1, so FBF with B as 1 gives 2'''
decimal = 0
# https://www.w3schools.com/python/python_howto_reverse_string.asp
for index, bit in enumerate(binary[::-1]):
if (bit == one_char):
decimal += 2 ** index
return decimal
def highest_seat_id_from_boarding_passes(boarding_passes):
'''The highest seat id from a list of boarding passes'''
boarding_pass_with_highest_seat_id = max(boarding_passes, key=lambda boarding_pass: convert_seat_to_seat_id(
convert_boarding_pass_to_seat(boarding_pass)))
return convert_seat_to_seat_id(convert_boarding_pass_to_seat(boarding_pass_with_highest_seat_id))
if __name__ == '__main__':
lines = read_input_file.read(
'/Users/jondarrer/Code/advent-of-code-2020/src/input/day5.txt')
print(highest_seat_id_from_boarding_passes(lines))
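A quick check of the documented example (FBFBBFFRLR decodes to row 44, column 5, seat id 357), using only the functions defined above:
seat = convert_boarding_pass_to_seat('FBFBBFFRLR')
assert seat == {'row': 44, 'column': 5}
assert convert_seat_to_seat_id(seat) == 357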
|
[
"jonny@jondarrer.me.uk"
] |
jonny@jondarrer.me.uk
|
035504981c5c4ce873430a3439ea302e21244885
|
53c3462ff265b6273f4a4fa17f6d59688f69def0
|
/剑指offer/65_hasPath.py
|
fc4f3bf5633e263578be82adeacb409feea73759
|
[] |
no_license
|
17764591637/jianzhi_offer
|
b76e69a3ecb2174676da2c8d8d3372a3fc27b5c4
|
27e420ee302d5ab6512ecfdb8d469b043fb7102d
|
refs/heads/master
| 2023-08-03T01:32:51.588472
| 2019-10-13T07:56:21
| 2019-10-13T07:56:21
| 197,692,548
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,300
|
py
|
'''
Design a function that determines whether a matrix contains a path covering all the characters of a given string.
The path may start from any cell in the matrix, and each step may move one cell left, right, up or down.
Once the path has passed through a cell, it may not enter that cell again.
For example, the 3 x 4 matrix a b c e / s f c s / a d e e contains a path for the string "bcced", but not for "abcb":
after the first character 'b' occupies the cell in row 1, column 2, the path cannot re-enter that cell.
Analysis: backtracking
This is a classic problem solvable with backtracking. First, pick any cell in the matrix as the start of the path. If the i-th character on the path is not ch,
this cell cannot be the i-th position of the path. If the i-th character is ch, search the neighbouring cells for the (i+1)-th character.
Except for cells on the matrix boundary, every cell has 4 neighbours. Repeat this process until every character of the string has been located in the matrix.
Because of the recursive nature of backtracking, the path can be viewed as a stack. When the first n characters have been placed but none of the neighbours
of the n-th cell matches the (n+1)-th character, backtrack to the (n-1)-th character and relocate the n-th.
Since the path may not revisit cells of the matrix, a boolean matrix of the same size is also needed to mark which cells the path has already entered.
When the cell at (row, col) matches the current character of the path string, the next character is sought in the four neighbours (row, col-1), (row-1, col),
(row, col+1) and (row+1, col). If none of the four neighbours matches the next character, the current placement of the string in the matrix is wrong:
go back one step and relocate. Repeat this process until all characters of the path string have found a proper position in the matrix.
'''
class Solution:
def hasPath(self, matrix, rows, cols, path):
# write code here
for i in range(rows):
for j in range(cols):
if matrix[i*cols+j] == path[0]:
#print(i,j)
if self.find(list(matrix),rows,cols,path[1:],i,j):
return True
return False
def find(self,matrix,rows,cols,path,i,j):
    if not path:
        return True
    ch = matrix[i*cols+j]
    matrix[i*cols+j] = '0'  # mark the cell as visited ('0' means already used)
    # try right, left, down, up; recurse into any neighbour matching path[0]
    for ni, nj in ((i, j+1), (i, j-1), (i+1, j), (i-1, j)):
        if 0 <= ni < rows and 0 <= nj < cols and matrix[ni*cols+nj] == path[0]:
            if self.find(matrix, rows, cols, path[1:], ni, nj):
                return True
    matrix[i*cols+j] = ch  # backtrack: unmark the cell so other paths may use it
    return False
s = Solution()
res = s.hasPath(['a','b','c', 'e', 's' ,'f' ,'c', 's', 'a', 'd', 'e', 'e'],3,4,'bcced')
print(res)
|
[
"17764591637@163.com"
] |
17764591637@163.com
|
c672da054ce83d887363f44a14b4d22240729758
|
131d941a4d1df110afe09733fe010da816597bb4
|
/PASA/alameda/input/motion-detection-camera/mmal/motion-detected.py
|
5cf1a72a7454179e2769cc7877f1faf7f8313954
|
[] |
no_license
|
onebeartoe/electronic-signs
|
386cfb7288c211634a3afad55d334d4602178a95
|
621099b7d81995a9c7e977ca683bb3c3d02c9d17
|
refs/heads/master
| 2020-12-24T11:06:57.754906
| 2018-11-01T16:40:35
| 2018-11-01T16:40:35
| 13,177,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
# import sendint
print("nothing happened in motion-detected.py")
# print("motion detected")
# sendint.sendInt(ser, '2')
|
[
"onebeartoe@gmail.com"
] |
onebeartoe@gmail.com
|
f64c6ffb584cd043d80268c613a23fadf9f3d960
|
d0e268862f359bbeec426b00a0c45788f6fb0b4e
|
/lesson22-优化小实例/main.py
|
10fefdf9bbed196814d0bc20e94497752dbfa13d
|
[] |
no_license
|
jpegbert/PyTorch
|
f87c2e38572c51842785de5ed1b39bb641402ac6
|
482421c76a093312ffeff7e5af4ecd3ab0cdcf30
|
refs/heads/master
| 2023-08-27T03:56:26.883297
| 2021-11-08T06:03:30
| 2021-11-08T06:03:30
| 326,677,679
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
import numpy as np
from matplotlib import pyplot as plt
import torch
def himmelblau(x):
return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2
x = np.arange(-6, 6, 0.1)
y = np.arange(-6, 6, 0.1)
print('x,y range:', x.shape, y.shape)
X, Y = np.meshgrid(x, y)
print('X,Y maps:', X.shape, Y.shape)
Z = himmelblau([X, Y])
fig = plt.figure('himmelblau')
ax = fig.add_subplot(projection='3d')
ax.plot_surface(X, Y, Z)
ax.view_init(60, -30)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.show()
# [1., 0.], [-4, 0.], [4, 0.]
x = torch.tensor([-4., 0.], requires_grad=True)
print(x)
optimizer = torch.optim.Adam([x], lr=1e-3)
for step in range(20000):
pred = himmelblau(x)
optimizer.zero_grad()
pred.backward()
optimizer.step()
if step % 2000 == 0:
print('step {}: x = {}, f(x) = {}'.format(step, x.tolist(), pred.item()))
|
[
"jiangpeng.jiang@zhaopin.com.cn"
] |
jiangpeng.jiang@zhaopin.com.cn
|
4357f72ea71e855c96174c243f3d51a84d1b6c8e
|
57a8986e7622e471f392ffefa1e1d959c6a1bb7c
|
/mazeSolver.py
|
1e522304e60f9ee925eee57e47874b7821dff42c
|
[] |
no_license
|
pandyakaa/MazeProblem
|
394b5f99eef5ee61e0f4f206c29cc393ced2fa64
|
8ca7d6b5dd96697632326322ff1e3272d9d6fbea
|
refs/heads/master
| 2020-05-01T16:38:48.006105
| 2019-03-29T06:45:58
| 2019-03-29T06:45:58
| 177,577,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,601
|
py
|
import sys
from collections import deque
from point import Point
from priorityQueue import PriorityQueue
# Function inputMaze, taking a filename as parameter
# reads the matrix representing the maze
# from the external file named filename
# It also finds the entry and exit points, stored in startb, startk, finishb and finishk
def inputMaze(filename) :
arr = []
f = open("{}.txt".format(filename),"r")
for line in f :
arr.append([int(c) for c in line.strip()])
baris = len(arr)
kolom = len(arr[0])
f.close()
startb = -1
startk = -1
finishb = -1
finishk = -1
# Search for the start and finish points (case: left and right edges)
for i in range(baris) :
if (arr[i][0] == 0) :
startb = i
startk = 0
if (arr[i][kolom-1] == 0) :
finishb = i
finishk = kolom-1
# Search for the start and finish points (case: top and bottom edges)
for i in range(kolom) :
if (arr[0][i] == 0) :
startb = 0
startk = i
if (arr[baris-1][i] == 0) :
finishb = baris-1
finishk = i
# Validate whether the maze is playable or not
if ( startb != -1 and startk != -1 and finishb != -1 and finishk != -1 ) :
valid = True
else :
valid = False
return arr,startb,startk,finishb,finishk,valid
# Function printSolution, taking a matrix m as parameter
# prints a maze that has already been solved
def printSolution(m) :
for i in m :
for j in i :
if (j == 1 ) :
print("# ",end = '')
elif (j == 3 or j == 2) :
print(" ",end = '')
elif ( j == 4 ) :
print("X ", end = '')
else :
print(" ", end = '')
print()
# Function copy, taking a matrix m1 as parameter
# performs a DEEP COPY of the matrix
# so the external file does not have to be read again
def copy(m1) :
m2 = []
for i in range(len(m1)) :
temp = []
for j in range(len(m1[0])) :
temp.append(m1[i][j])
m2.append(temp)
return m2
# Function isFeasible, taking a matrix m, int x and int y as parameters
# validates whether the coordinate (x,y) is valid or not
# DEFINITION OF VALID: greater than or equal to 0, and less than the number of rows or columns of the matrix
def isFeasible(m,x,y) :
    # bounds must be checked before indexing to avoid an IndexError
    if ( 0 <= x < len(m) and 0 <= y < len(m[0]) and m[x][y] == 0 ) :
        return True
    return False
# Function BFS, taking a maze maze, int x, int y and point fp as parameters
# is one of the two main functions in this program
# It uses a DEQUE data type and performs Breadth-First Searching
# If a solution exists, it returns a point p
def BFS(maze,x,y,fp) :
de = deque()
de.append(Point(x,y,None))
while ( not(len(de) == 0) ) :
p = de.popleft()
maze[p.x][p.y] = 3
if (p.isEqual(fp)) :
return p
if(isFeasible(maze,p.x-1,p.y)) :
nextP = Point(p.x-1,p.y,p)
de.append(nextP)
if (isFeasible(maze,p.x+1,p.y)) :
nextP = Point(p.x+1,p.y,p)
de.append(nextP)
if(isFeasible(maze,p.x,p.y+1)) :
nextP = Point(p.x,p.y+1,p)
de.append(nextP)
if(isFeasible(maze,p.x,p.y-1)) :
nextP = Point(p.x,p.y-1,p)
de.append(nextP)
# Function manhattanDist, taking point point_start and point point_finish as parameters
# computes the h(n) value for the A* algorithm
# Manhattan distance is used because movement is restricted to four directions
def manhattanDist(point_start,point_finish) :
return (abs(point_start.x - point_finish.x) + abs(point_start.y - point_finish.y))
# Function AStar, taking a maze maze, int x, int y and point fpoint as parameters
# is one of the two main functions in this program
# It uses a Priority Queue data type, implemented earlier as its own class
# It searches with the A* algorithm using:
# f(n) = g(n) + h(n)
# where g(n) is the actual distance travelled from the start to a point
# and h(n) is the heuristic distance from a point to the finish, computed with manhattanDist
def AStar(maze,x,y,fpoint) :
startPoint = Point(x,y,None)
startPoint.f = startPoint.g = startPoint.h = 0
openList = PriorityQueue()
openList.insert(startPoint)
while ( not(openList.isEmpty()) ) :
current_node = openList.delete()
maze[current_node.x][current_node.y] = 3
if (current_node.isEqual(fpoint) ) :
return current_node
children = []
for pos in [(0, -1), (0, 1), (-1, 0), (1, 0)]:
curr_x = current_node.x + pos[0]
curr_y = current_node.y + pos[1]
if (not(isFeasible(maze,curr_x,curr_y))) :
continue
child = Point(curr_x,curr_y,current_node)
children.append(child)
for child in children :
child.g = current_node.g + 1
child.h = manhattanDist(child,fpoint)
child.f = child.g + child.h
openList.insert(child)
# Main function, called when this program is run
if __name__ == "__main__":
# Read the file name from the user and call inputMaze to
# load it into the maze
file = input("Enter file name : ")
maze, start_baris , start_kolom, finish_baris , finish_kolom , valid = inputMaze(file)
maze2 = copy(maze)
# Util needed by the searching functions
fp = Point(finish_baris,finish_kolom,None)
if ( valid ) :
# Call the Searching functions
p = BFS(maze,start_baris,start_kolom,fp)
q = AStar(maze2,start_baris,start_kolom,fp)
# Print the output of the BFS algorithm
maze[start_baris][start_kolom] = 4
while (p.getParent() != None ) :
maze[p.x][p.y] = 4
p = p.getParent()
print("\n \t \t Solution with BFS : \n")
printSolution(maze)
# Print the output of the A* algorithm
maze2[start_baris][start_kolom] = 4
while (q.getParent() != None ) :
maze2[q.x][q.y] = 4
q = q.getParent()
print("\n \t \t Solution with A-Star : \n")
printSolution(maze2)
else :
print("NOT FOUND")
|
[
"pandyaka.aptanagi@gmail.com"
] |
pandyaka.aptanagi@gmail.com
|
853ea408b40f36263a1ac3f6bb6b89829d2bbd39
|
a639206e5849432d6613c9af093b14f97e9dd794
|
/manage.py
|
3cda76ad753a5eb70246fda28e6126466f0d11c0
|
[
"MIT"
] |
permissive
|
Bazulenkov/primorsk-cup
|
6a37139b56b3b3df605f9fa49b1ef652a08d8020
|
13c227f9fb8289eb3e25d21e1de7a0fde8cb5f87
|
refs/heads/main
| 2023-06-10T00:21:27.939260
| 2021-06-30T12:36:04
| 2021-06-30T12:36:04
| 336,052,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "primorsk.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
[
"anton@bazul.ru"
] |
anton@bazul.ru
|
e06e1100601a1bacb795bb1f1efe4a2e96a3d781
|
221d5405763d1a6ab3c6755583e557c14b9f3742
|
/gusregon/gus.py
|
ca34d58334fd1bd2383ff69dfbb77899f224cc07
|
[
"BSD-2-Clause"
] |
permissive
|
tpro/django-gusregon
|
4bd7253be9d43345376e36312d763d4653d0bbcd
|
75d4f291ae805bd986e1b4cb03b3b94e52a48076
|
refs/heads/master
| 2021-01-18T18:11:50.746453
| 2015-04-19T11:00:49
| 2015-04-19T11:00:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,656
|
py
|
import requests
import json
GUS_API_URL = 'https://wyszukiwarkaregon.stat.gov.pl/wsBIR/UslugaBIRzewnPubl.svc/ajaxEndpoint/'
LOGIN_ENDPOINT = 'Zaloguj'
CAPTCHA_ENDPOINT = 'PobierzCaptcha'
CHECK_CAPTCHA_ENDPOINT = 'SprawdzCaptcha'
SEARCH_ENDPOINT = 'daneSzukaj'
COMPANY_DETAILS_ENDPOINT = 'DanePobierzPelnyRaport'
class GUS(object):
sid = None
report_type = {
'F': 'DaneRaportFizycznaPubl',
'P': 'DaneRaportPrawnaPubl'}
prefix_data = {
'F': 'fiz_',
'P': 'praw_'}
def __init__(self, sid=None):
self.sid = sid
def login(self):
data = {'pKluczUzytkownika': 'aaaaaabbbbbcccccdddd'}
self.sid = self._post(LOGIN_ENDPOINT, data=json.dumps(data))
return self.sid
def get_captcha(self):
return self._post(CAPTCHA_ENDPOINT)
def check_captcha(self, captcha):
data = {'pCaptcha': captcha}
return self._post(
CHECK_CAPTCHA_ENDPOINT, data=json.dumps(data))
def search(self, nip=None, regon=None, krs=None,
detailed=True, no_prefix=True):
if not any([nip, regon, krs]):
raise AttributeError(
'At least one parameter (nip, regon, krs) is required.')
if nip:
search_params = {'Nip': nip}
elif regon:
search_params = {'Regon': regon}
else:
search_params = {'Krs': krs}
data = {'pParametryWyszukiwania': search_params}
basic_info = self._post(
SEARCH_ENDPOINT, data=json.dumps(data))
if not detailed or not basic_info:
return basic_info
basic_info = json.loads(basic_info)[0]
data = {
'pNazwaRaportu': self.report_type.get(basic_info['Typ']),
'pRegon': basic_info['Regon'],
'pSilosID': 1,
}
details = json.loads(self._post(
COMPANY_DETAILS_ENDPOINT, data=json.dumps(data)))[0]
if no_prefix:
return self._remove_prefix(details)
return details
def _post(self, url, **kwargs):
headers = {'Content-Type': 'application/json'}
if self.sid:
headers.update({'sid': self.sid})
url = '%s%s' % (GUS_API_URL, url)
response = requests.post(url, headers=headers, **kwargs)
return json.loads(response.content)['d']
def _remove_prefix(self, data):
data_without_prefix = {}
for key, value in data.items():
if key.startswith(tuple(self.prefix_data.values())):
key = key[key.find('_') + 1:]
data_without_prefix[key] = value
return data_without_prefix
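A hypothetical usage sketch for the class above (the hard-coded pKluczUzytkownika in login() is a placeholder, so a real GUS API key and network access are required; the NIP below is a dummy value):
gus = GUS()
gus.login()                              # obtains and stores a session id (sid)
company = gus.search(nip='1234567890')   # full report, key prefixes stripped by default
print(company)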
|
[
"adam@bogdal.pl"
] |
adam@bogdal.pl
|
0843c71fc383feb3fad8512aa89bc2e6a0434ce9
|
760b47b46574552c5371270036b391a6a3b7dc2f
|
/src/loggedfs/_core/fs.py
|
ced81b475209403f3dcd7fbc2e7e3a9c1a7796b4
|
[
"Apache-2.0"
] |
permissive
|
pleiszenburg/loggedfs-python
|
7ae2ba90780f700b3ec5153b8b7447452bf643c7
|
1c0740f6f38e7795d6f834d08d9bab59d911b454
|
refs/heads/master
| 2023-04-06T05:00:50.457663
| 2020-07-11T08:33:41
| 2020-07-11T08:33:41
| 113,670,055
| 31
| 3
|
Apache-2.0
| 2023-03-31T02:14:20
| 2017-12-09T13:07:44
|
Python
|
UTF-8
|
Python
| false
| false
| 14,726
|
py
|
# -*- coding: utf-8 -*-
"""
LoggedFS-python
Filesystem monitoring with Fuse and Python
https://github.com/pleiszenburg/loggedfs-python
src/loggedfs/_core/fs.py: File system core
Copyright (C) 2017-2020 Sebastian M. Ernst <ernst@pleiszenburg.de>
<LICENSE_BLOCK>
The contents of this file are subject to the Apache License
Version 2 ("License"). You may not use this file except in
compliance with the License. You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
https://github.com/pleiszenburg/loggedfs-python/blob/master/LICENSE
Software distributed under the License is distributed on an "AS IS" basis,
WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the
specific language governing rights and limitations under the License.
</LICENSE_BLOCK>
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# IMPORT
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import errno
import os
import stat
from refuse.high import (
FUSE,
fuse_get_context,
FuseOSError,
Operations
)
from .defaults import (
FUSE_ALLOWOTHER_DEFAULT,
FUSE_FOREGROUND_DEFAULT,
LIB_MODE_DEFAULT,
LOG_BUFFERS_DEFAULT,
LOG_ENABLED_DEFAULT,
LOG_JSON_DEFAULT,
LOG_ONLYMODIFYOPERATIONS_DEFAULT,
LOG_PRINTPROCESSNAME_DEFAULT,
LOG_SYSLOG_DEFAULT
)
from .filter import filter_pipeline_class
from .log import get_logger, log_msg
from .out import event
from .timing import time
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ROUTINES
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def loggedfs_factory(directory, **kwargs):
if not isinstance(directory, str):
raise TypeError('directory must be of type string')
if not os.path.isdir(directory):
raise ValueError('directory must be a path to an existing directory')
if not isinstance(kwargs.get('fuse_foreground', FUSE_FOREGROUND_DEFAULT), bool):
raise TypeError('fuse_foreground must be of type bool')
if not isinstance(kwargs.get('fuse_allowother', FUSE_ALLOWOTHER_DEFAULT), bool):
raise TypeError('fuse_allowother must be of type bool')
return FUSE(
_loggedfs(
directory,
**kwargs
),
directory,
raw_fi = True,
nothreads = True,
foreground = kwargs.get('fuse_foreground', FUSE_FOREGROUND_DEFAULT),
allow_other = kwargs.get('fuse_allowother', FUSE_ALLOWOTHER_DEFAULT),
default_permissions = kwargs.get('fuse_allowother', FUSE_ALLOWOTHER_DEFAULT),
attr_timeout = 0,
entry_timeout = 0,
negative_timeout = 0,
sync_read = False, # relying on fuse.Operations class defaults?
# max_readahead = 0, # relying on fuse.Operations class defaults?
# direct_io = True, # relying on fuse.Operations class defaults?
nonempty = True, # common options taken from LoggedFS
use_ino = True # common options taken from LoggedFS
)
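# Usage sketch (not part of the original module): mount an existing directory
# in the foreground; the keyword names mirror the checks performed above.
#
#   from loggedfs._core.fs import loggedfs_factory
#   loggedfs_factory('/tmp/monitored', fuse_foreground=True)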
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CORE CLASS: Init and internal routines
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class _loggedfs(Operations):
flag_utime_omit_ok = 1
use_ns = True
_ST_FIELDS = tuple(i for i in dir(os.stat_result) if i.startswith('st_'))
_STVFS_FIELDS = tuple(i for i in dir(os.statvfs_result) if i.startswith('f_'))
def __init__(self,
directory,
fuse_foreground = FUSE_FOREGROUND_DEFAULT,
fuse_allowother = FUSE_ALLOWOTHER_DEFAULT,
lib_mode = LIB_MODE_DEFAULT,
log_buffers = LOG_BUFFERS_DEFAULT,
log_enabled = LOG_ENABLED_DEFAULT,
log_file = None,
log_filter = None,
log_json = LOG_JSON_DEFAULT,
log_only_modify_operations = LOG_ONLYMODIFYOPERATIONS_DEFAULT,
log_printprocessname = LOG_PRINTPROCESSNAME_DEFAULT,
log_syslog = LOG_SYSLOG_DEFAULT,
**kwargs
):
if log_filter is None:
log_filter = filter_pipeline_class()
if not isinstance(directory, str):
raise TypeError('directory must be of type string')
if not os.path.isdir(directory):
raise ValueError('directory must be a path to an existing directory')
if not os.access(directory, os.W_OK | os.R_OK):
raise ValueError('not sufficient permissions on "directory"')
if not isinstance(log_filter, filter_pipeline_class):
raise TypeError('log_filter must either be None or of type filter_pipeline_class')
if log_file is not None:
if not os.path.isdir(os.path.dirname(log_file)):
raise ValueError('path to logfile directory does not exist')
if os.path.exists(log_file) and not os.path.isfile(log_file):
raise ValueError('logfile exists and is not a file')
if os.path.isfile(log_file) and not os.access(log_file, os.W_OK):
raise ValueError('logfile exists and is not writeable')
if not os.path.exists(log_file) and not os.access(directory, os.W_OK):
raise ValueError('path to logfile directory is not writeable')
if not isinstance(log_syslog, bool):
raise TypeError('log_syslog must be of type bool')
if not isinstance(log_enabled, bool):
raise TypeError('log_enabled must be of type bool')
if not isinstance(log_printprocessname, bool):
raise TypeError('log_printprocessname must be of type bool')
if not isinstance(log_json, bool):
raise TypeError('log_json must be of type bool')
if not isinstance(log_buffers, bool):
raise TypeError('log_buffers must be of type bool')
if not isinstance(lib_mode, bool):
raise TypeError('lib_mode must be of type bool')
if not isinstance(log_only_modify_operations, bool):
raise TypeError('log_only_modify_operations must be of type bool')
if not isinstance(fuse_foreground, bool):
raise TypeError('fuse_foreground must be of type bool')
if not isinstance(fuse_allowother, bool):
raise TypeError('fuse_allowother must be of type bool')
self._root_path = directory
self._log_printprocessname = log_printprocessname
self._log_json = log_json
self._log_buffers = log_buffers
self._log_filter = log_filter
self._lib_mode = lib_mode
self._log_only_modify_operations = log_only_modify_operations
self._logger = get_logger('LoggedFS-python', log_enabled, log_file, log_syslog, self._log_json)
if fuse_foreground:
self._logger.info(log_msg(self._log_json, 'LoggedFS-python not running as a daemon'))
if fuse_allowother:
self._logger.info(log_msg(self._log_json, 'LoggedFS-python running as a public filesystem'))
if log_file is not None:
self._logger.info(log_msg(self._log_json, 'LoggedFS-python log file: %s' % log_file))
self._logger.info(log_msg(self._log_json, 'LoggedFS-python starting at %s' % directory))
try:
self._root_path_fd = os.open(directory, os.O_RDONLY)
except Exception as e:
self._logger.exception('Directory access failed.')
raise e
log_configfile = kwargs.pop('_log_configfile', None)
if log_configfile is not None:
self._logger.info(log_msg(self._log_json,
'LoggedFS-python using configuration file %s' % log_configfile
))
if len(kwargs) > 0:
raise ValueError('unknown keyword argument(s)')
def _full_path(self, partial_path):
if partial_path.startswith('/'):
partial_path = partial_path[1:]
path = os.path.join(self._root_path, partial_path)
return path
@staticmethod
def _rel_path(partial_path):
if len(partial_path) == 0:
return '.'
elif partial_path == '/':
return '.'
elif partial_path.startswith('/'):
return partial_path[1:]
elif partial_path.startswith('./'):
return partial_path[2:]
else:
return partial_path
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CORE CLASS: Filesystem & file methods - STUBS
# ... addressing https://github.com/fusepy/fusepy/issues/81
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def create(self, path, mode, fi = None):
raise FuseOSError(errno.ENOSYS)
def flush(self, path, fip):
raise FuseOSError(errno.ENOSYS)
def fsync(self, path, datasync, fip):
raise FuseOSError(errno.ENOSYS) # the original loggedfs just returns 0
def ioctl(self, path, cmd, arg, fh, flags, data):
raise FuseOSError(errno.ENOSYS)
def lock(self, path, fh, cmd, lock):
raise FuseOSError(errno.ENOSYS)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CORE CLASS: Filesystem & file methods - IMPLEMENTATION
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@event(format_pattern = '{param_path}')
def access(self, path, mode):
if not os.access(self._rel_path(path), mode, dir_fd = self._root_path_fd):
raise FuseOSError(errno.EACCES)
@event(format_pattern = '{param_path} to {param_mode}')
def chmod(self, path, mode):
os.chmod(self._rel_path(path), mode, dir_fd = self._root_path_fd)
@event(format_pattern = '{param_path} to {param_uid_name}({param_uid}):{param_gid_name}({param_gid})')
def chown(self, path, uid, gid):
os.chown(self._rel_path(path), uid, gid, dir_fd = self._root_path_fd, follow_symlinks = False)
@event(format_pattern = '{param_path}')
def destroy(self, path):
os.close(self._root_path_fd)
@event(format_pattern = '{param_path} (fh={param_fip})')
def getattr(self, path, fip):
if not fip:
try:
st = os.lstat(self._rel_path(path), dir_fd = self._root_path_fd)
except FileNotFoundError:
raise FuseOSError(errno.ENOENT)
else:
st = os.fstat(fip.fh)
ret_dict = {key: getattr(st, key) for key in self._ST_FIELDS}
for key in ['st_atime', 'st_ctime', 'st_mtime']:
ret_dict[key] = ret_dict.pop(key + '_ns')
return ret_dict
@event(format_pattern = '{param_path}')
def init(self, path):
pass
@event(format_pattern = '{param_source_path} to {param_target_path}')
def link(self, target_path, source_path):
target_rel_path = self._rel_path(target_path)
os.link(
self._rel_path(source_path), target_rel_path,
src_dir_fd = self._root_path_fd, dst_dir_fd = self._root_path_fd
)
uid, gid, pid = fuse_get_context()
os.chown(target_rel_path, uid, gid, dir_fd = self._root_path_fd, follow_symlinks = False)
@event(format_pattern = '{param_path} {param_mode}')
def mkdir(self, path, mode):
rel_path = self._rel_path(path)
os.mkdir(rel_path, mode, dir_fd = self._root_path_fd)
uid, gid, pid = fuse_get_context()
os.chown(rel_path, uid, gid, dir_fd = self._root_path_fd, follow_symlinks = False)
os.chmod(rel_path, mode, dir_fd = self._root_path_fd) # follow_symlinks = False
@event(format_pattern = '{param_path} {param_mode}')
def mknod(self, path, mode, dev):
rel_path = self._rel_path(path)
if stat.S_ISREG(mode):
res = os.open(
rel_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, mode,
dir_fd = self._root_path_fd
) # TODO broken, applies umask to mode no matter what ...
if res >= 0:
os.close(res)
elif stat.S_ISFIFO(mode):
os.mkfifo(rel_path, mode, dir_fd = self._root_path_fd)
else:
os.mknod(rel_path, mode, dev, dir_fd = self._root_path_fd)
uid, gid, pid = fuse_get_context()
os.chown(rel_path, uid, gid, dir_fd = self._root_path_fd, follow_symlinks = False)
os.chmod(rel_path, mode, dir_fd = self._root_path_fd) # follow_symlinks = False
@event(format_pattern = '({param_fip}) {param_path} (fh={param_fip})')
def open(self, path, fip):
fip.fh = os.open(self._rel_path(path), fip.flags, dir_fd = self._root_path_fd)
return 0
@event(format_pattern = '{param_length} bytes from {param_path} at offset {param_offset} (fh={param_fip})')
def read(self, path, length, offset, fip):
ret = os.pread(fip.fh, length, offset)
return ret
@event(format_pattern = '{param_path}')
def readdir(self, path, fh):
rel_path = self._rel_path(path)
dirents = ['.', '..']
if stat.S_ISDIR(os.lstat(rel_path, dir_fd = self._root_path_fd).st_mode):
dir_fd = os.open(rel_path, os.O_RDONLY, dir_fd = self._root_path_fd)
dirents.extend(os.listdir(dir_fd))
os.close(dir_fd)
return dirents
@event(format_pattern = '{param_path}')
def readlink(self, path):
pathname = os.readlink(self._rel_path(path), dir_fd = self._root_path_fd)
if pathname.startswith('/'): # TODO check this ... actually required?
return os.path.relpath(pathname, self._root_path)
else:
return pathname
@event(format_pattern = '{param_path} (fh={param_fip})')
def release(self, path, fip):
os.close(fip.fh)
@event(format_pattern = '{param_old_path} to {param_new_path}')
def rename(self, old_path, new_path):
os.rename(
self._rel_path(old_path), self._rel_path(new_path),
src_dir_fd = self._root_path_fd, dst_dir_fd = self._root_path_fd
)
@event(format_pattern = '{param_path}')
def rmdir(self, path):
os.rmdir(self._rel_path(path), dir_fd = self._root_path_fd)
@event(format_pattern = '{param_path}')
def statfs(self, path):
fd = os.open(self._rel_path(path), os.O_RDONLY, dir_fd = self._root_path_fd)
stv = os.statvfs(fd)
os.close(fd)
return {key: getattr(stv, key) for key in self._STVFS_FIELDS}
@event(format_pattern = 'from {param_source_path} to {param_target_path_}')
def symlink(self, target_path_, source_path):
target_rel_path = self._rel_path(target_path_)
os.symlink(source_path, target_rel_path, dir_fd = self._root_path_fd)
uid, gid, pid = fuse_get_context()
os.chown(target_rel_path, uid, gid, dir_fd = self._root_path_fd, follow_symlinks = False)
@event(format_pattern = '{param_path} to {param_length} bytes (fh={param_fip})')
def truncate(self, path, length, fip = None):
if fip is None:
fd = os.open(self._rel_path(path), flags = os.O_WRONLY, dir_fd = self._root_path_fd)
ret = os.ftruncate(fd, length)
os.close(fd)
return ret
else:
return os.ftruncate(fip.fh, length)
@event(format_pattern = '{param_path}')
def unlink(self, path):
os.unlink(self._rel_path(path), dir_fd = self._root_path_fd)
@event(format_pattern = '{param_path}')
def utimens(self, path, times = None):
def _fix_time_(atime, mtime):
if None in (atime, mtime):
st = os.lstat(relpath, dir_fd = self._root_path_fd)
if atime is None:
atime = st.st_atime_ns
if mtime is None:
mtime = st.st_mtime_ns
return (atime, mtime)
relpath = self._rel_path(path)
os.utime(relpath, ns = _fix_time_(*times), dir_fd = self._root_path_fd, follow_symlinks = False)
@event(format_pattern = '{param_buf_len} bytes to {param_path} at offset {param_offset} (fh={param_fip})')
def write(self, path, buf, offset, fip):
res = os.pwrite(fip.fh, buf, offset)
return res
|
[
"ernst@pleiszenburg.de"
] |
ernst@pleiszenburg.de
|
25724b55aa4d29b7d7541545d4b7a257d6a3af48
|
a2accf55b7b57a376344689e1f8fa4d64acdd6be
|
/salaray_to_csv.py
|
692da5634ca9d92a5b99a5746186df6cf7f8d942
|
[] |
no_license
|
Thybat/EulerProject
|
8e590455f2fbebceeec031e667ac77a97fd5d35c
|
54ee77fac34de4c13b33bf5459f6b6258d6cca1f
|
refs/heads/main
| 2022-12-27T23:19:25.058389
| 2020-10-09T09:57:54
| 2020-10-09T09:57:54
| 302,599,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
df = pd.read_csv("data_reg_age_salary.csv")
x = df.values  # returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)  # scale each column to [0, 1]
df = pd.DataFrame(x_scaled, columns=df.columns)
df.to_csv("data_reg_age_salary.csv", index=False)
|
[
"noreply@github.com"
] |
noreply@github.com
|
26c0479d73b4773d979b4de73b5383b4ceeb2883
|
ff5705d2813486da67b8aca48cfd5bf2c6cce068
|
/2_text_classification.py
|
0fa709be5b6d1b79dc77f3609155e88c78f26369
|
[] |
no_license
|
m-nasiruddin/text_blob
|
61934763586413115054f1557c62e0301ab70f87
|
1f95f7ee0632ddc6bc09cc619a0510114a6a93c6
|
refs/heads/master
| 2022-07-15T03:48:56.591056
| 2018-07-20T10:48:13
| 2018-07-20T10:48:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
from textblob.classifiers import NaiveBayesClassifier
from textblob import TextBlob
# creating a custom sentiment analyzer
# loading data and creating a classifier
train = [('I love this sandwich.', 'pos'),
('this is an amazing place!', 'pos'),
('I feel very good about these beers.', 'pos'),
('this is my best work.', 'pos'),
("what an awesome view", 'pos'),
('I do not like this restaurant', 'neg'),
('I am tired of this stuff.', 'neg'),
("I can't deal with this", 'neg'),
('he is my sworn enemy!', 'neg'),
('my boss is horrible.', 'neg')]
test = [('the beer was good.', 'pos'),
('I do not enjoy my job', 'neg'),
("I ain't feeling dandy today.", 'neg'),
("I feel amazing!", 'pos'),
('Gary is a friend of mine.', 'pos'),
("I can't believe I'm doing this.", 'neg')]
cl = NaiveBayesClassifier(train) # creating a naive bayes classifier
# or, open from a file
# with open('data/input/train.json', 'r') as fp:
# cl = NaiveBayesClassifier(fp, format="json")
# classifying text
print(cl.classify("This is an amazing library!"))
# get the label probability distribution
prob_dist = cl.prob_classify("This one's a doozy.")
print(prob_dist.max())
print(round(prob_dist.prob("pos"), 2))
print(round(prob_dist.prob("neg"), 2))
# classifying textblob
blob = TextBlob("The beer is good. But the hangover is horrible.", classifier=cl)
print(blob.classify())
for s in blob.sentences:
print(s)
print(s.classify())
# evaluating classifiers
print(cl.accuracy(test))
print(cl.show_informative_features(5)) # displaying a listing of the most informative features
# updating classifiers wth new data
new_data = [('She is my best friend.', 'pos'),
("I'm happy to have a new friend.", 'pos'),
("Stay thirsty, my friend.", 'pos'),
("He ain't from around here.", 'neg')]
print(cl.update(new_data))
print(cl.accuracy(test))
# feature extractors
# creating a feature extractor that just uses the first and last words of a document as its features
def end_word_extractor(document):
tokens = document.split()
first_word, last_word = tokens[0], tokens[-1]
feats = {}
feats["first({0})".format(first_word)] = True
feats["last({0})".format(last_word)] = False
return feats
features = end_word_extractor("I feel happy")
assert features == {'last(happy)': False, 'first(I)': True}
# using the feature extractor in a classifier by passing it as the second argument of the constructor
cl2 = NaiveBayesClassifier(test, feature_extractor=end_word_extractor)
blob = TextBlob("I'm excited to try my new classifier.", classifier=cl2)
print(blob.classify())
|
[
"mohammad.nasiruddin@gmail.com"
] |
mohammad.nasiruddin@gmail.com
|
2272af86ec47659657698bd4b83b445ace287269
|
4b4ff2c0d135d3615caaeb80735c2ad6ee987914
|
/venv/bin/pip
|
f5f3cb0efa2f979b5fd036420c2b2dc08b10a062
|
[] |
no_license
|
Nicolas-Turck/Tuto-deployement-heroku
|
23060837b47f195d9af2eb280a85836d1a8f8efd
|
54d104054c06070420ae36b6bbb45089492da286
|
refs/heads/master
| 2023-08-01T07:18:13.563988
| 2021-05-20T16:07:56
| 2021-05-20T16:07:56
| 257,563,781
| 0
| 0
| null | 2021-09-22T18:54:33
| 2020-04-21T10:46:23
|
Python
|
UTF-8
|
Python
| false
| false
| 263
|
#!/home/nicos/PycharmProjects/Tuto-heroku/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"nicolas.turck@gmail.com"
] |
nicolas.turck@gmail.com
|
|
4b35f9e5c7f11f607452a11c9cd445ed2278b7b9
|
54e8ac0398bfa33d9a1d40e5a8d6477e3806bb17
|
/RaspberryPiCode/getHSV.py
|
f25d56d310c28ed15cd468d7255e72f710e71a4d
|
[] |
no_license
|
RoboLions/frc2016-vision
|
ac46a15ba3c85f713f2d86619bce8b27aa996174
|
c32e559485e956a33794fa5a453c7202d042c27c
|
refs/heads/master
| 2021-04-30T22:59:30.402487
| 2016-03-30T03:26:17
| 2016-03-30T03:26:17
| 50,622,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
import cv2
import numpy as np
# move u* and l* sliders to find upper and lower end of hsv range respectively.
# hit q to quit
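# Illustrative starting values (an assumption -- tune with the sliders): a
# saturated green object often lands near lh,ls,lv = 40,100,100 and
# uh,us,uv = 90,255,255. Note OpenCV stores 8-bit hue in [0, 179], so the
# upper half of these 0-255 trackbars is unused for H.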
cap=cv2.VideoCapture(0)
def nothing(x):
pass
cv2.namedWindow("result")
h,s,v=100,100,100
cv2.createTrackbar('lh', 'result', 0, 255, nothing)
cv2.createTrackbar('ls', 'result', 0, 255, nothing)
cv2.createTrackbar('lv', 'result', 0, 255, nothing)
cv2.createTrackbar('uh', 'result', 0, 255, nothing)
cv2.createTrackbar('us', 'result', 0, 255, nothing)
cv2.createTrackbar('uv', 'result', 0, 255, nothing)
while True:
_, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV )
lh=cv2.getTrackbarPos('lh', 'result')
ls=cv2.getTrackbarPos('ls', 'result')
lv=cv2.getTrackbarPos('lv', 'result')
uh=cv2.getTrackbarPos('uh', 'result')
us=cv2.getTrackbarPos('us', 'result')
uv=cv2.getTrackbarPos('uv', 'result')
lower = np.array([lh,ls,lv])
upper = np.array([uh,us,uv])
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(frame, frame, mask = mask )
cv2.imshow('result', result )
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
|
[
"alan.glaser@gnail.com"
] |
alan.glaser@gnail.com
|
a19137e2bc295d4d4e9c77c15d61e3a9e4d708f9
|
ff20661ef00b2db927c78f95a08cd6c40f950ee0
|
/inputmorethanone.py
|
81994128fa875ec38b52ef7cf8ec19866fc7810f
|
[] |
no_license
|
Techsrijan/mppython2021
|
57ca26e1acdf5adad2afa692dd5ae23336273603
|
583a991f85e2414c6b8ffe0405f727f3f5d38eee
|
refs/heads/main
| 2023-06-18T22:05:44.602220
| 2021-07-16T00:42:26
| 2021-07-16T00:42:26
| 374,290,977
| 0
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
'''f=int(input("Enter the first number"))
s=int(input("Enter the Second number"))
'''
f,s=input("Enter two numbers").split(',')
print("F=",f,"S=",s)
j,k=input("Enter two numbers").split(' ')
print("j=",j,"k=",k)
print("add=", int(j) + int(k))
|
[
"aswanibtech@gmail.com"
] |
aswanibtech@gmail.com
|
3ec809e921148f10c35e5d7afb4098a0d5cfa51e
|
3a107debc014817103fd6cf18a346ba87cd4a896
|
/indexapp/views.py
|
19013ae175afb5a1629f37931d307a8c04c3468e
|
[] |
no_license
|
shuang3322/itsm_new_hj
|
20c979bb682af084d347a10f773591dc090e3a8d
|
a99384243ec0dc667e04edda30754e9cead96bbf
|
refs/heads/master
| 2021-08-22T22:30:20.032674
| 2018-11-27T06:17:27
| 2018-11-27T06:17:27
| 147,472,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import HttpResponse
from indexapp.models import IPdata
def visit(request):
ip = request.META.get('REMOTE_ADDR')
print(ip)
all = IPdata.objects.all()
# for item in request.META:
# print(item,request.META.get(item))
return render(request, "test.html", {'current_user': all,'re_ip':ip})
#
# def add_IP(request):
# for
|
[
"shuang0528@hotmail.com"
] |
shuang0528@hotmail.com
|
87e04c086ca8fcfe065781aaefdf79add1f73023
|
14a93d1114c26f1483b711793297cd00c1abd37f
|
/data/data_dav/export.py
|
2cc46ba94df22917591e14f70aef822629e3ec10
|
[] |
no_license
|
csmfindling/bci_eeg
|
7c50eca0c150e760a0e70892c1182f8862df3902
|
a5a4ab8456f0354904cae8d3817017861ebd192d
|
refs/heads/master
| 2021-05-09T21:34:35.513477
| 2018-01-29T16:10:54
| 2018-01-29T16:10:54
| 118,730,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,089
|
py
|
import pickle
import sys
import numpy as np
from scipy.signal import butter, lfilter, detrend
from numpy.fft import fft, fftfreq
import time as timelib
# functions
def butter_bandpass(lowcut, highcut, fs, order=9):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=9):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
def butter_lowpass(lowcut, fs, order=9):
nyq = 0.5 * fs
low = lowcut / nyq
b, a = butter(order, low, btype='low')
return b, a
def butter_lowpass_filter(data, lowcut, fs, order=9):
b, a = butter_lowpass(lowcut, fs, order=order)
y = lfilter(b, a, data)
return y
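# Minimal sanity check for the filters above (hypothetical helper, not part of
# the original pipeline): a 5 Hz sine sampled at 250 Hz should pass through the
# 30 Hz low-pass nearly unchanged. A lower order is numerically safer with
# transfer-function (b, a) coefficients than the default order=9.
def _demo_lowpass():
    fs = 250.0
    t = np.arange(0, 1, 1.0 / fs)
    sig = np.sin(2 * np.pi * 5 * t)
    return butter_lowpass_filter(sig, 30.0, fs, order=4)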
# data
ref = pickle.load(open('python_data/rawref.pkl', 'rb'))
close = pickle.load(open('python_data/rawclose.pkl', 'rb'))
eye = pickle.load(open('python_data/rawblink.pkl', 'rb'))
# parameters
dt = 0.004 # sampling frequency is of 1/0.004 = 250Hz
# write in csv for reference = 30sec
nb_points = int(30 * 1./0.004)
data = ref[:, -nb_points:]
y = butter_lowpass_filter(data, 30., 1./dt) # low pass filter
y = (y - np.mean(y))/np.std(y) # normalize
concat = np.concatenate((data[:-1], y[:-1]), axis=0)
import csv
with open('csv_data/reference.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(['eye', 'channel 2', 'channel 3', 'alpha', 'eye filtered', 'channel 2 filtered', 'channel 3 filtered', 'alpha filtered'])
for idx in range(concat.shape[-1]):
spamwriter.writerow(concat[:,idx])
# write in csv for close_eye = 60 sec
nb_points = int(60 * 1./0.004)
data = close[:, -nb_points:]
y = butter_lowpass_filter(data, 30., 1./dt) # low pass filter
y = (y - np.mean(y))/np.std(y) # normalize
concat = np.concatenate((data[:-1], y[:-1]), axis=0)
with open('csv_data/close_eye.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(['eye', 'channel 2', 'channel 3', 'alpha', 'eye filtered', 'channel 2 filtered', 'channel 3 filtered', 'alpha filtered'])
for idx in range(concat.shape[-1]):
spamwriter.writerow(concat[:,idx])
# write in csv for blinks = 30 sec
nb_points = int(30 * 1./0.004)
data = eye[:, -nb_points:]
y = butter_lowpass_filter(data, 30., 1./dt) # low pass filter
y = (y - np.mean(y))/np.std(y) # normalize
concat = np.concatenate((data[:-1], y[:-1]), axis=0)
with open('csv_data/blinks.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(['eye', 'channel 2', 'channel 3', 'alpha', 'eye filtered', 'channel 2 filtered', 'channel 3 filtered', 'alpha filtered'])
for idx in range(concat.shape[-1]):
spamwriter.writerow(concat[:,idx])
|
[
"charles.findling@gmail.com"
] |
charles.findling@gmail.com
|
eb66983747fd37d5bad2b03c62aa2cb5b9820300
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/A_Primer_on_Scientific_Programming_with_Python/input/c2f_cml_v3.py
|
9d13a354db2390ff9b57049d4ab8c67135d83538
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
import sys
try:
C = float(sys.argv[1])
except:
print 'You failed to provide Celsius degrees as input '\
'on the command line!'
sys.exit(1) # abort
F = 9.0*C/5 + 32
print '%gC is %.1fF' % (C, F)
|
[
"bb@b.om"
] |
bb@b.om
|
88a1436030cb0e0d641a755da900e1e5e1086035
|
d123e83108ffb222a2bceb57e4dcd5d7fd2b5b41
|
/api/controllers/jobscheduling/build/config.gypi
|
b4e5770be03aa67be34c86283b682796d7fe765f
|
[] |
no_license
|
ajaxtream/smart2
|
0aede425b17f8666750427e4a89e3817995e43b5
|
7701b98d66f3da201e096eee43a0724c9a494dd1
|
refs/heads/master
| 2021-01-10T14:58:08.529403
| 2015-12-16T04:42:33
| 2015-12-16T04:42:33
| 48,087,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,517
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/stream/.node-gyp/0.12.7",
"copy_dev_lib": "true",
"standalone_static_library": 1
}
}
|
[
"noEmail@anonymous.com"
] |
noEmail@anonymous.com
|
c8217fd5718d9e8ce56aa8984ec31a0f48ea85a3
|
d95cd6f18f9f83df625362dd28ab6b733fa98c90
|
/extract_bboxes.py
|
ca82fd055ca9f70678b1b8d0c449e0586a5e3a0a
|
[] |
no_license
|
hassony2/yolo2-pytorch
|
477196349ee1d44ad09376b8f02cb08cced559a2
|
b9e1e905115694df5dd33b8a7485977263e19caf
|
refs/heads/master
| 2021-01-20T21:41:48.529189
| 2017-09-21T13:24:54
| 2017-09-21T13:24:54
| 101,783,945
| 1
| 0
| null | 2017-09-21T13:24:56
| 2017-08-29T16:40:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,011
|
py
|
import numpy as np
import os
from PIL import Image
import torch
from torch.autograd import Variable
from torchvision import transforms
from tqdm import tqdm
import cfgs.config as cfg
from darknet import Darknet19
from datasets.gteagazeplusimage import GTEAGazePlusImage
from datasets.smthgimage import SmthgImage
from datasets.utils.visualize import plot_bboxes
import utils.network as net_utils
import utils.yolo as yolo_utils
def get_crop_params(bbox, img_shape, increase_ratio=2.2):
"""
returns x_min, y_min, x_max ,y_max crop coordinates according to rule
2.2 times max dimension of the bounding box
Args:
bbox(numpy.ndarray): x_min, y_min, x_max, y_max
img_shape(tuple): original image shape
increase_ratio(float): final bbox size / tight bbox size
"""
width = bbox[2] - bbox[0]
height = bbox[3] - bbox[1]
square_dim = max(width, height)
final_dim = square_dim * increase_ratio
center_x = bbox[0] + width / 2
center_y = bbox[1] + height / 2
new_x_min = int(center_x - final_dim / 2)
new_x_max = int(center_x + final_dim / 2)
new_y_min = int(center_y - final_dim / 2)
new_y_max = int(center_y + final_dim / 2)
if new_x_min >= 0 and new_y_min >= 0 and\
new_x_max <= img_shape[0] and new_y_max <= img_shape[1]:
success = True
else:
success = False
return success, (new_x_min, new_y_min, new_x_max, new_y_max)
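# Worked example (illustrative numbers): a tight hand box (100, 100, 140, 180)
# has max dimension 80, so with increase_ratio=2.2 the crop is a 176-pixel
# square centred on (120, 140), i.e. (32, 52, 208, 228) -- accepted only when
# it lies fully inside the image.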
def test_net(net, dataset, transform=None, max_per_image=300, thresh=0.5,
num_classes=1, vis=False, crop_folder=None):
# Initialize counter for number of cropped hands
extracted_hands = 0
# Run through dataset
for i, (img, annots) in tqdm(enumerate(dataset)):
original_img = img
np_original_img = np.array(original_img)
if transform is not None:
img = transform(img)
# Add batch dimension
img = img.unsqueeze(0)
# Create GPU variable
img_var = Variable(img.type(torch.FloatTensor))
img_var = img_var.cuda()
# Detect hands
bbox_pred, iou_pred, prob_pred = net(img_var)
# to numpy
bbox_pred = bbox_pred.data.cpu().numpy()
iou_pred = iou_pred.data.cpu().numpy()
prob_pred = prob_pred.data.cpu().numpy()
bboxes, scores, cls_inds = yolo_utils.postprocess(
bbox_pred, iou_pred, prob_pred,
np_original_img.shape[0:2], cfg, thresh)
for class_idx in range(num_classes):
# Extract class
inds = np.where(cls_inds == class_idx)[0]
class_bboxes = bboxes[inds]
class_scores = scores[inds]
class_scores = class_scores[:, np.newaxis]
# Create class detections in format
# [[x_min, y_min, x_max, y_max, score], ...]
if vis:
fig = plot_bboxes(np_original_img, class_bboxes,
class_scores)
fig.savefig('bboxes_{:03}.jpg'.format(i), bbox_inches='tight')
# Save crops to (368x368) images
if crop_folder is not None:
            for box_idx, bbox in enumerate(class_bboxes):
crop_success, crop_params = get_crop_params(bbox,
(original_img.width,
original_img.height))
if crop_success:
                    crop = original_img.crop(crop_params)
crop_name = 'rendered_{:03d}.jpg'.format(
extracted_hands)
crop = crop.resize((368, 368))
# if bbox[2] - bbox[0] > 100 and bbox[3] - bbox[1] > 100:
                    # Mirror left hands (class index 0); index cls_inds through
                    # inds so it refers to this crop's own detection
                    if cls_inds[inds[box_idx]] == 0:
                        crop = crop.transpose(Image.FLIP_LEFT_RIGHT)
print('saving image')
crop.save(os.path.join(crop_folder, crop_name))
extracted_hands += 1
if __name__ == "__main__":
vis = True
crop_folder = 'results/crops'
# Initialize dataset
dataset = GTEAGazePlusImage()
# dataset = SmthgImage()
# Initialize test image transform
test_transform = transforms.Compose([
transforms.Scale(cfg.inp_size),
transforms.ToTensor()])
# Initialise network
# trained_model = 'models/training/darknet19_all_exp1/darknet19_all_exp1_64.h5'
trained_model = 'models/training/darknet19_all_exp1/darknet19_all_exp1_15.h5'
net = Darknet19()
net_utils.load_net(trained_model, net)
net.cuda()
net.eval()
# Extract bounding boxes
test_net(net, dataset, transform=test_transform, vis=vis, thresh=0.5,
crop_folder=None)
|
[
"yana.hasson@inria.fr"
] |
yana.hasson@inria.fr
|
842f5307feedb014785782c8f52774517fee51a9
|
969be1a2865722374293a286fa0923d6247d54d5
|
/language/bert/bert_code/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py
|
1732e2e495edd65fe93ea60644562119ae0b2e80
|
[
"MIT"
] |
permissive
|
Relwayg/Differentially-Private-Deep-Learning
|
36935e882cd37482f690b7f5888835065aa5e127
|
55eb9a1b75472895588d388de7567f012c9bc9ed
|
refs/heads/main
| 2023-09-06T01:23:44.915967
| 2021-11-23T06:53:29
| 2021-11-23T06:53:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .. import FairseqOptimizer
class FairseqLRScheduler(object):
def __init__(self, args, optimizer):
super().__init__()
if not isinstance(optimizer, FairseqOptimizer):
raise ValueError('optimizer must be an instance of FairseqOptimizer')
self.args = args
self.optimizer = optimizer
self.best = None
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
pass
def state_dict(self):
"""Return the LR scheduler state dict."""
return {'best': self.best}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.best = state_dict['best']
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
if val_loss is not None:
if self.best is None:
self.best = val_loss
else:
self.best = min(self.best, val_loss)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.optimizer.get_lr()
def reinit(self, total_num_update, num_updates):
pass
|
[
"yudakuai18@163.com"
] |
yudakuai18@163.com
|
ac8d8c76de9def04f8aad137f4e5827afd1ca93c
|
1d70ee049c5198b75567e0000c45ef879f6d39be
|
/JobMatchPonos/server/utils/word2vec/wordEmbeddings.py
|
7f5247842cca39719da0f18965c455959fc9dc9c
|
[] |
no_license
|
alexawl/Job-Match-Ponos-Back
|
95f28185f71c38733973bc6d730947455c2e6c93
|
c48b4bfddfbf2f4f5aa95409fd2c6ee4f469d9dd
|
refs/heads/master
| 2022-09-30T15:07:19.875016
| 2020-02-01T06:50:34
| 2020-02-01T06:50:34
| 237,575,072
| 0
| 0
| null | 2021-05-06T20:07:41
| 2020-02-01T06:49:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,009
|
py
|
from gensim.models import Word2Vec, KeyedVectors
from os import listdir
from os.path import isfile, join
import numpy as np
from scipy import spatial
from sklearn import decomposition
import matplotlib.pyplot as plt
from jobmatcher.server.utils.pattern import text
# def read_All_CV(filename):
# text = textract.process(filename)
# return text.decode('utf-8')
allText = " Chris Ware 789 E 901 N , Salt Lake City, UT 11111 E: cwse@fastmail.com P: 555-234-2345" \
"Professional Summary" \
"Experienced software engineer with a passion for developing innovative programs that expedite the efficiency and effectiveness of organizational success. Well-versed in technology and writing code to create systems that are reliable and user-friendly. Skilled leader who has the proven ability to motivate, educate, and manage a team of professionals to build software programs and effectively track changes. Confident communicator, strategic thinker, and innovative creator to develop software that is customized to meet a company’s organizational needs, highlight their core competencies, and further their success. " \
"Skills" \
"-Well-versed in software tools including HTML, JavaScript, CSS, BackBone and JQuery, among others. -Skilled at reading and writing code using viable inputs and outputs after accurate assessment of pre- and post-conditions. -Experienced at designing unit tests to measure the effectiveness of software programs, backend services, and user interfaces. -Confident problem-solving abilities to overcome glitches with creative solutions that are strategically designed to last long-term. -Strong communication skills and the ability to listen carefully to user feedback to determine modifications for optimal user-function." \
"Work Experience" \
"Software Engineer-April 2013 – present Rav Industries" \
"Developed and designed three critical software programs for financial tracking and reporting." \
"Optimized user effectiveness by creating a detailed feedback queue for users to discuss functionality, convenience, and effectiveness." \
"Oversee a team of four software developers and lead weekly discussions to brainstorm ideas in software development and to track changes made in existing programs." \
"Software Developer-February 2008 – April 2013 Brac Inc." \
"Participated in creating scalable systems for three primary departments, including human resources, marketing, and supply chain." \
"Ran monthly unit tests to determine software effectiveness and mend broken links or glitches in the system." \
"Gave quarterly reports to executive management regarding current developments, and tracked changes in existing software." \
"Education Internship2010-2011"\
"Estes Corp. Salt Lake City Utah Bachelor of Science 2010 in Computer Engineering 2010" \
"University of Utah Salt Lake City Utah"
def preprocess_training_data1():
s = text.parsetree('The cat sat on the mat.', relations=True, lemmata=True)
print(s)
# result = es.parsetree('The cat sat on the mat.', relations=True, lemmata=True)
#
# s = en.parse('The cat sat on the mat.', relations=True, lemmata=True)
#
#
#
# print(s)
# dircvs = [join(dir_cvs, f) for f in listdir(dir_cvs) if isfile(join(dir_cvs, f))]
# alltext = ' '
# for cv in dircvs:
# yd = read_All_CV(cv)
# alltext += yd + " "
# alltext = allText.lower()
# vector = []
# for sentence in es.parsetree(alltext, tokenize=True, lemmata=True, tags=True):
# temp = []
# for chunk in sentence.chunks:
# for word in chunk.words:
# if word.tag == 'NN' or word.tag == 'VB':
# temp.append(word.lemma)
# vector.append(temp)
# global model
# model = Word2Vec(vector, size=200, window=5, min_count=3, workers=4)
# # model.save(dir_model_name)
#
# print("model:")
# print(model)
|
[
"alexawl@bellsouth.net"
] |
alexawl@bellsouth.net
|
2d46fb8bfbd693468cd059acdc41ca93221da9c6
|
c90b3ac3e5ad11cb93d4e6b76b9b9c4a19d0f512
|
/.history/copytest_20200502124009.py
|
413d6f42f34cf44c8b5ee71050cdd80e9dccb60d
|
[] |
no_license
|
rbafna6507/passwordstorageproject
|
6465585e36c81075856af8d565fe83e358b4a40a
|
480c30e358f7902ac0ef5c4e8d9556cb1d6d33f4
|
refs/heads/master
| 2022-11-25T12:05:02.625968
| 2020-07-27T21:33:38
| 2020-07-27T21:33:38
| 283,021,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
import pickle
import cryptography
from cryptography.fernet import Fernet
def encrypt(message: bytes, key: bytes) -> bytes:
return Fernet(key).encrypt(message)
def decrypt(token: bytes, key: bytes) -> bytes:
return Fernet(key).decrypt(token)
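# Round-trip sanity check (illustrative): decrypt(encrypt(b"hunter2", key), key)
# returns b"hunter2". Note that the key below is regenerated on every run, so
# tokens written by a previous run cannot be decrypted with the current key.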
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
key = Fernet.generate_key()
e_userpass = z
username = input("Username: ")
password = input("password: ")
website = input("Website: ")
e_username = encrypt(username.encode(), key)
e_password = encrypt(password.encode(), key)
e_list = [b"Username: " + e_username, b"Password: " + e_password]
e_userpass["Website: " + website] = e_list
outfile = open("jeff.pkl", "wb")
pickle.dump(e_userpass, outfile)
outfile.close()
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
e_userpass = z
# Decrypt and display what we can. Entries written by earlier runs were
# encrypted with a different key and are skipped.
for site, creds in e_userpass.items():
    print(site)
    for entry in creds:
        label, token = entry.split(b": ", 1)
        try:
            print(label.decode(), ":", decrypt(token, key).decode())
        except cryptography.fernet.InvalidToken:
            pass
|
[
"35872545+rbafna6507@users.noreply.github.com"
] |
35872545+rbafna6507@users.noreply.github.com
|
5c65745999dcd0d78e74ff5c2c30969e2bf57ef8
|
b75aedc25188b059189df7654dacd32818a86de2
|
/Facial Emotions/emotions.py
|
ef7d2650e5198354441345cbe25e5092c2c92730
|
[] |
no_license
|
scorpiocodes/ComputerVision
|
2308c9dcfb07fc85fdb8c46f45891ae0e4b106fa
|
a346ce69c81ae1a74cbd94f1ad8749a50aa44fbd
|
refs/heads/master
| 2020-03-22T01:53:54.861885
| 2019-06-11T18:39:34
| 2019-06-11T18:39:34
| 139,337,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,457
|
py
|
import cv2
import numpy as np
from keras.models import load_model
from statistics import mode
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
USE_WEBCAM = True # If false, loads video file source
# parameters for loading data and images
emotion_model_path = './models/emotion_model.hdf5'
emotion_labels = get_labels('fer2013')
# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)
# loading models
face_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')
emotion_classifier = load_model(emotion_model_path)
# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
# starting lists for calculating modes
emotion_window = []
# starting video streaming
cv2.namedWindow('window_frame')
# Select video or webcam feed
cap = None
if (USE_WEBCAM == True):
cap = cv2.VideoCapture(0) # Webcam source
else:
cap = cv2.VideoCapture('./demo/dinner.mp4') # Video file source
while cap.isOpened():
    ret, bgr_image = cap.read()
    if not ret:  # end of stream or camera read failure
        break
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,
minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
for face_coordinates in faces:
x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
gray_face = gray_image[y1:y2, x1:x2]
try:
gray_face = cv2.resize(gray_face, (emotion_target_size))
except:
continue
gray_face = preprocess_input(gray_face, True)
gray_face = np.expand_dims(gray_face, 0)
gray_face = np.expand_dims(gray_face, -1)
emotion_prediction = emotion_classifier.predict(gray_face)
emotion_probability = np.max(emotion_prediction)
emotion_label_arg = np.argmax(emotion_prediction)
emotion_text = emotion_labels[emotion_label_arg]
emotion_window.append(emotion_text)
if len(emotion_window) > frame_window:
emotion_window.pop(0)
try:
emotion_mode = mode(emotion_window)
except:
continue
if emotion_text == 'angry':
color = emotion_probability * np.asarray((255, 0, 0))
elif emotion_text == 'sad':
color = emotion_probability * np.asarray((0, 0, 255))
elif emotion_text == 'happy':
color = emotion_probability * np.asarray((255, 255, 0))
elif emotion_text == 'surprise':
color = emotion_probability * np.asarray((0, 255, 255))
else:
color = emotion_probability * np.asarray((0, 255, 0))
color = color.astype(int)
color = color.tolist()
draw_bounding_box(face_coordinates, rgb_image, color)
draw_text(face_coordinates, rgb_image, emotion_mode,
color, 0, -45, 1, 1)
bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
cv2.imshow('window_frame', bgr_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
noreply@github.com
|
924236479976a262f093bcadbe5e2db823bb810b
|
a6aada9cb93cc23bd17e4213c7a0b5114905a34a
|
/introduction/lab6.py
|
fed0db889f02668cb3f0910318040399a0aece3a
|
[] |
no_license
|
SethKwashie/PythonLabs
|
90caf6a4fc5159536a892c84245b26bab4e48fbd
|
0a9578ce2f09fbec5ab0f1accb1e0d10f7792117
|
refs/heads/main
| 2023-01-04T12:57:54.497264
| 2020-11-02T14:46:18
| 2020-11-02T14:46:18
| 307,773,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
age = int(input("Enter your age >>> "))
total = 0
start = 1
while start <= age:
total += start
start +=1
month = total * 12
days = total * 365
hours = total * 8760
print(f"{total} years ----> {month} months ----> {days} days ----> {hours} hours")
|
[
"seth.kwashie@turntabl.io"
] |
seth.kwashie@turntabl.io
|
625ab94e1d6b0ee32774ffd6931f538b4fdb913f
|
6298ff0f597ec09633696622c42321dd6bbaab6d
|
/znachkov_oleksandr/03/task3.1.py
|
061a601d2277c930b0c29708f2bee2eb8ab3078c
|
[] |
no_license
|
Neckmus/itea_python_basics_3
|
94db7ea756cbb164b8705445b1ab027bb5ff4d93
|
9d76fc3d081fa407805e98d12f7f9960c8976366
|
refs/heads/master
| 2022-05-28T08:28:00.415625
| 2020-04-22T11:03:25
| 2020-04-22T11:03:25
| 254,841,391
| 0
| 0
| null | 2020-04-11T10:14:57
| 2020-04-11T10:14:57
| null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from random import randint
ELEMENTS = 5
HIGHLIMIT = 10
exclusion_list = [4,2]
def array_diff(source_list):
print ("souerce list:", source_list)
print ("exclusion list:", exclusion_list)
#print (exclusion_list)
return [ elem for elem in source_list if not elem in exclusion_list]
source_list = lambda listsize, upper : [randint(0, upper) for index in range (listsize)]
print ("result:", array_diff(source_list(ELEMENTS, HIGHLIMIT)))
|
[
"alx.inferno@gmail.com"
] |
alx.inferno@gmail.com
|
ba3177e820dd8a4793158eb218326d48229866ef
|
bb372428bb90fa80f2e87820b3c8c5ba305dcd4c
|
/python/bot/eups.py
|
0e4812155219126d211b7dcd779287ab6d1ce9ec
|
[] |
no_license
|
TallJimbo/lsst-bot
|
7eb9b7a71a87a1ed416397c193931c80639bd746
|
0843afb2fdd5cc9ba62cf424a7dd73672b10e28f
|
refs/heads/master
| 2021-01-19T05:43:55.451321
| 2016-06-04T00:07:25
| 2016-06-04T00:07:25
| 60,484,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
#!/usr/bin/env python
from __future__ import absolute_import
import eups.table
import os
import logging
__all__ = ["get_dependencies"]
def get_dependencies(config, path, pkg, recursive=False):
"""Return immediate dependencies from inspecting a table file.
NOTE: recursive=True has not been tested.
"""
e = eups.Eups()
t = eups.table.Table(os.path.join(path, "ups", pkg + ".table"))
dependencies = t.dependencies(e, recursive=recursive)
if recursive:
dependencies.sort(key=lambda x: x[2])
for product, optional, depth in dependencies:
yield product.name, optional
def declare(config, path, pkg, version, tag_only=False):
e = eups.Eups()
if not tag_only:
logging.debug("Declaring {pkg} {version}.".format(pkg=pkg, version=version))
e.declare(productName=pkg, versionName=version, productDir=path)
for tmp in config.eups.tags:
tag = tmp.format(eups=config.eups)
logging.debug("Assigning tag {tag} to {pkg}.".format(pkg=pkg, tag=tag))
e.assignTag(tag, productName=pkg, versionName=version)
def undeclare(config, pkg, version):
e = eups.Eups()
e.undeclare(productName=pkg, versionName=version)
def setup(pkg, version, nodepend=False):
e = eups.Eups(max_depth=(0 if nodepend else -1))
e.setup(productName=pkg, versionName=version)
def tag(pkg, version, tag):
e = eups.Eups()
logging.debug("Assigning tag {tag} to {pkg}.".format(pkg=pkg, tag=tag))
e.assignTag(tag, productName=pkg, versionName=version)
|
[
"jbosch@astro.princeton.edu"
] |
jbosch@astro.princeton.edu
|
389f4af608abd72b7016e5c50c3de3b32f56bf4e
|
394e0b00b35c61f7f9c2d34b11cb6aa29f6a1eb5
|
/Used_Others/CRT-Button/g.log.py
|
0946926cce66bb3bd59dc504aa9555cffe710af9
|
[] |
no_license
|
congzheng-git/myProject
|
ecca28eafb129e014981b6fc74d4f8362ea3386b
|
da4cac92d87520d20036513616609bbffe9d9bef
|
refs/heads/master
| 2022-12-18T02:40:21.425117
| 2020-07-01T07:33:38
| 2020-07-01T07:33:38
| 207,499,868
| 0
| 0
| null | 2022-12-08T01:49:32
| 2019-09-10T08:03:20
|
Python
|
UTF-8
|
Python
| false
| false
| 311
|
py
|
# $language = "Python"
# $interface = "1.0"
import signal
def main():
crt.Screen.Send('\x03')
crt.Sleep(200)
crt.Screen.Send('cd ../../../opt/jelly_current/logs/' + '\r')
	id = crt.Dialog.Prompt("Enter the ID to look up")
crt.Screen.Send('tail -f out.log |grep ' + id + '\r')
main()
|
[
"cczz1226@sina.com"
] |
cczz1226@sina.com
|
4ff5f832176e95271a6ca6a4df8efc6d8ebb25b5
|
2dba100f7ebbbb7b6f1850371b8640e0d3c0fc04
|
/ScientificProgramming/mu.py
|
1df2355ea7dc71793588406a3bf9cbbd0ecb49f6
|
[] |
no_license
|
lukasrieger-dev/python-beginner-projects
|
ea88fca0dc818ed2a63145305e49481dfa5b11af
|
d7cb2970a4067d150db57985e67e3cb6d3da7009
|
refs/heads/master
| 2023-01-04T12:19:34.881748
| 2020-11-06T18:10:52
| 2020-11-06T18:10:52
| 255,638,385
| 10
| 1
| null | 2020-10-03T07:25:23
| 2020-04-14T14:51:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
# %matplotlib inline  (IPython magic; only valid inside a Jupyter notebook)
import re
import matplotlib.pyplot as plt
def load_mu_data(path):
mu_data = dict()
with open(path, 'r') as file:
for line in file:
if re.match(r'(^ *$|^#.*$)', line):
# omit empty lines and comments
continue
*gas, C, T_0, mu_0 = line.split()
gas = ''.join(gas) # for names with more than one word
data = {'C':float(C), 'T_0':float(T_0), 'mu_0':float(mu_0)}
mu_data[gas] = data
return mu_data
def mu(T, gas, mu_data):
    if gas not in mu_data:
        raise ValueError('no viscosity data for gas %r' % gas)
data = mu_data[gas]
T_0 = data['T_0']
C = data['C']
mu_0 = data['mu_0']
    # Sutherland's formula: mu(T) = mu_0 * (T_0 + C)/(T + C) * (T/T_0)**(3/2)
    mu_T = mu_0 * (T_0 + C)/(T + C) * (T/T_0)**1.5
return mu_T
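# Quick sanity check (assumed reference values for air: C = 110.4 K,
# T_0 = 273.15 K, mu_0 = 1.716e-5 Pa*s): mu(300, ...) evaluates to roughly
# 1.85e-5 Pa*s, confirming that gas viscosity rises with temperature.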
path = 'viscosity_of_gases.dat'
mu_data = load_mu_data(path)
Ts = list(range(223, 374))
for gas in mu_data:
mu_values = [mu(T, gas, mu_data) for T in Ts]
plt.plot(Ts, mu_values)
plt.legend(list(mu_data))
plt.xlabel('temperatures')
plt.ylabel('viscosity')
plt.show()
|
[
"rieger.lks@gmail.com"
] |
rieger.lks@gmail.com
|
0fa20eaa5571bbbc09e2573aff18620152ed29dc
|
8f8d426ee581545cd3c58f9879305f6536a02426
|
/extractFramesEverySecond.py
|
d6cc95318f833a855b945a48b44f163a61dd46f9
|
[] |
no_license
|
tomtillo/video
|
bb0c0fabf93f804cf2e9bd7a6b2f781684a840d7
|
8be7eee7400fc3c5648e8eef90f14c667b7fc9c0
|
refs/heads/master
| 2020-06-11T20:32:44.092864
| 2019-06-27T11:35:43
| 2019-06-27T11:35:43
| 194,077,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
# ---------------------------- Function to get frames every second ( and not all the frames)
# Using tqdm to show progressbar
# Usage :extract_frames_every_second(videos1,OUT_DIR)
# Returns: times taken
import cv2
import math
import time
def extract_frames_every_second(video_file, output_folder):
import tqdm
t1= time.time()
cap = cv2.VideoCapture(video_file)
duration_seconds= cap.get(7) / cap.get(5)
pbar = tqdm.tqdm(total=duration_seconds)
    frame_number = 1
    frameRate = cap.get(5)  # frame rate
    while True:
        ret, frame = cap.read()
        if not ret:  # end of video or read failure
            break
        frame_number += 1
        frameId = cap.get(1)  # current frame number
        if (frameId % math.floor(frameRate) == 0):
            pbar.update(1)
            out_k2 = cv2.imwrite(output_folder + "im_" + str(frame_number) + ".jpg", frame)
    pbar.close()
    cap.release()
t2 = time.time()
time_taken= t2-t1
return(time_taken)
# ----------------------------------- End Function ------------
videos1 = "./videos/video1.mp4"
OUT_DIR = "./output/"
t1 = extract_frames_every_second(videos1,OUT_DIR)
print("Time taken = %s " %(t1))
|
[
"noreply@github.com"
] |
noreply@github.com
|
f5762a0a3bcd4f5ba6d65173ea4d994aece629be
|
1eb73b6d3801156620cad8707ab9fe70baca3e7c
|
/library/writer.py
|
edf9aa2fa31712f34a5cb3209693bb4a227c47d2
|
[
"MIT"
] |
permissive
|
santiago26/vk4xmpp
|
732b75ef0a2a4c86d7d704da888e9be76953ec68
|
369f2550f2b422e6d27f06679ddda8ecd7cae026
|
refs/heads/master
| 2021-01-16T19:22:26.318690
| 2013-09-28T11:37:05
| 2013-09-28T11:37:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
# /* encoding: utf-8 */
# © simpleApps, 2010
import os, sys, time, logging, traceback
logger = logging.getLogger("vk4xmpp")
fixme = lambda msg: Print("\n#! fixme: \"%s\"." % msg)
lastErrorBody = None
def wFile(filename, data, mode = "w"):
with open(filename, mode, 0) as file:
file.write(data)
def rFile(filename):
with open(filename, "r") as file:
return file.read()
def crashLog(name, text = 0, fixMe = True):
global lastErrorBody
logger.error("writing crashlog %s" % name)
if fixMe:
fixme(name)
try:
File = "crash/%s.txt" % name
if not os.path.exists("crash"):
os.makedirs("crash")
exception = wException(True)
if exception not in ("None", lastErrorBody):
Timestamp = time.strftime("| %d.%m.%Y (%H:%M:%S) |\n")
wFile(File, Timestamp + exception + "\n", "a")
lastErrorBody = exception
except:
fixme("crashlog")
wException()
def Print(text, line = True):
try:
if line:
print text
else:
sys.stdout.write(text)
sys.stdout.flush()
except (IOError, OSError):
pass
def wException(File = False):
try:
exception = traceback.format_exc().strip()
if not File:
Print(exception)
return exception
except (IOError, OSError):
pass
|
[
"mrdoctorwho@gmail.com"
] |
mrdoctorwho@gmail.com
|
444b7127cd68eeab661ca9a41c8296181217f564
|
2a527f6fdee469bf1c3d76c2519149d916fa51d2
|
/python-client/victorops_client/models/on_call_log.py
|
870c6428c3097ccdca27122e1b5ff2514712c456
|
[] |
no_license
|
ArjunSangitrao/python-victorops
|
e261971800adb337da36b716f2c0fd0fc5c5973c
|
a9de3c93d141ad19213bdef6aa3790988bf03c66
|
refs/heads/master
| 2022-03-26T19:19:06.556731
| 2017-02-26T12:39:08
| 2017-02-26T12:39:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,018
|
py
|
# coding: utf-8
"""
VictorOps API
This API allows you to interact with the VictorOps platform in various ways. Your account may be limited to a total number of API calls per month. Also, some of these API calls have rate limits. NOTE: In this documentation when creating a sample curl request (clicking the TRY IT OUT! button), in some API viewing interfaces, the '@' in an email address may be encoded. Please note that the REST endpoints will not process the encoded version. Make sure that the encoded character '%40' is changed to its unencoded form before submitting the curl request.
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OnCallLog(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, team_slug=None, start=None, end=None, user_logs=None):
"""
OnCallLog - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'team_slug': 'str',
'start': 'datetime',
'end': 'datetime',
'user_logs': 'list[UserLog]'
}
self.attribute_map = {
'team_slug': 'teamSlug',
'start': 'start',
'end': 'end',
'user_logs': 'userLogs'
}
self._team_slug = team_slug
self._start = start
self._end = end
self._user_logs = user_logs
@property
def team_slug(self):
"""
Gets the team_slug of this OnCallLog.
:return: The team_slug of this OnCallLog.
:rtype: str
"""
return self._team_slug
@team_slug.setter
def team_slug(self, team_slug):
"""
Sets the team_slug of this OnCallLog.
:param team_slug: The team_slug of this OnCallLog.
:type: str
"""
self._team_slug = team_slug
@property
def start(self):
"""
Gets the start of this OnCallLog.
:return: The start of this OnCallLog.
:rtype: datetime
"""
return self._start
@start.setter
def start(self, start):
"""
Sets the start of this OnCallLog.
:param start: The start of this OnCallLog.
:type: datetime
"""
self._start = start
@property
def end(self):
"""
Gets the end of this OnCallLog.
:return: The end of this OnCallLog.
:rtype: datetime
"""
return self._end
@end.setter
def end(self, end):
"""
Sets the end of this OnCallLog.
:param end: The end of this OnCallLog.
:type: datetime
"""
self._end = end
@property
def user_logs(self):
"""
Gets the user_logs of this OnCallLog.
:return: The user_logs of this OnCallLog.
:rtype: list[UserLog]
"""
return self._user_logs
@user_logs.setter
def user_logs(self, user_logs):
"""
Sets the user_logs of this OnCallLog.
:param user_logs: The user_logs of this OnCallLog.
:type: list[UserLog]
"""
self._user_logs = user_logs
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, OnCallLog):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"vincent.desmet@honestbee.com"
] |
vincent.desmet@honestbee.com
|
9436483bd9dce7f9bd3c1c1c7e5b4682ce2f8b80
|
e357f337ef80487aa6fafacb3e61cd5b56c78b60
|
/src/verification/__init__.py
|
9c7d02b050afd632c953e658bd2ee2c42fdd8642
|
[] |
no_license
|
kaleissin/django-verification
|
1537b7e78a8ca07a091bcad5fd014a4544809ba5
|
33f7d0b7d9cc1494269f16b6f07f6077ca974fff
|
refs/heads/master
| 2021-07-15T20:12:43.628199
| 2021-07-05T11:43:39
| 2021-07-05T13:08:50
| 18,205,426
| 12
| 2
| null | 2021-07-05T13:13:16
| 2014-03-28T08:19:41
|
Python
|
UTF-8
|
Python
| false
| false
| 60
|
py
|
default_app_config = 'verification.apps.VerificationConfig'
|
[
"kaleissin@gmail.com"
] |
kaleissin@gmail.com
|
362e1c5d6c3bd62bae910a57c6d2f267d85924f0
|
6cf9b92459bb6bc5d9941728f9b0d697bd5db29f
|
/task.py
|
121bdbcc4864c0ce62df6fde1bfd892db04b2cf0
|
[] |
no_license
|
mehar13hamza13/competetive-learning-using-python
|
6eb2da2ab6ccdc17bc47e432a27e5582ddd5186c
|
f62f079c1facd7536c51fb4dad3e8edf867482c3
|
refs/heads/master
| 2023-06-24T22:22:41.173546
| 2021-07-27T15:55:52
| 2021-07-27T15:55:52
| 390,043,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,303
|
py
|
import numpy as np
class CompetetiveLearning():
def __init__(self, input_nodes, output_nodes):
self.input_nodes = input_nodes
self.output_nodes = output_nodes
#initializing random weights
self.weights = np.random.uniform(low=0.0, high=1.0, size=(output_nodes, input_nodes)).round(1)
def train(self, train_x):
print("----training for "+str(len(train_x))+" samples------")
clustering = {"A": [], "B": [], "C": []}
count = 1
for i in train_x:
print("Iteration "+str(count))
x = i.reshape((6, 1)) # reshaping the ith input value so matrix multiplication can be applied
            result = np.matmul(self.weights, x) #multiplying weights with input nodes (w11X1 + w21X2 + ....)
winning_unit = result.argmax() # index with maximum value will be the winning unit (only row with these weights will be updated)
print("Output Values for Iteration "+str(count)+": ")
print(result)
print("Winning Unit: "+str(winning_unit+1))
print("Adjusting the weight for only row "+str(winning_unit+1))
self.adjust_weights(0.5, winning_unit, x)
clustering[list(clustering.keys())[winning_unit]].append("R"+str(count))
count+=1
self.print_final_weights()
print("\nFinal Cluster Results: ")
print(clustering)
def print_final_weights(self):
print("\nFinal Weights for Output P: ")
print(self.weights[0])
print("Final Weights for Output Q: ")
print(self.weights[1])
print("Final Weights for Output R: ")
print(self.weights[2])
def adjust_weights(self, learning_rate, row_no, inputs):
for i in range(len(self.weights[row_no])):
#adjusting the weights
self.weights[row_no][i] = self.weights[row_no][i] + learning_rate*inputs[i]
#normalizing the weights
self.weights[row_no][i]/=2
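        # Worked example (illustrative numbers): with learning_rate=0.5, a
        # weight of 0.4 and input 1 becomes (0.4 + 0.5*1)/2 = 0.45, while with
        # input 0 it decays to 0.4/2 = 0.2, so rows drift toward the patterns they win.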
def test(self, test_x):
print()
print("----testing for " + str(len(test_x)) + " samples------")
print()
count = 1
classes = ["Class A", "Class B", "Class C"]
for i in test_x:
print("Iteration " + str(count))
x = i.reshape((6, 1)) # reshaping the ith input value so matrix multiplication can be applied
            result = np.matmul(self.weights, x)  # multiplying weights with input nodes (w11X1 + w21X2 + ....)
winning_unit = result.argmax() # index with maximum value will be the winning unit (only row with these weights will be updated)
print("Output Values for t" + str(count) + ": ")
print(result)
print("Winning Unit: " + str(winning_unit + 1))
print("t"+str(count)+" belongs to "+classes[winning_unit])
count += 1
cl = CompetetiveLearning(6, 3)
train_x = np.array([
[1, 0, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0],
[1, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 1],
[0, 0, 1, 1, 0, 1],
[0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0]
])
test_x = np.array([
[0, 0, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1]
])
cl.train(train_x)
cl.test(test_x)
|
[
"hassaanfarooq71@gmail.com"
] |
hassaanfarooq71@gmail.com
|
7dc8d99db40e13c4050036693a90c23d2732f5d1
|
6af90d24f1f081c1f1046346dc1c7cdb98bcaddb
|
/tests/django/run_unit_tests.py
|
9270e1793369a22bae93f3dd891a9bc5c60bb34c
|
[
"Apache-2.0"
] |
permissive
|
aolabNeuro/brain-python-interface
|
e4d8fd4682ed63611d44f83e7889f35a71a80b59
|
bc215d97fe95765c2bf40d1be793d6ffa83586d1
|
refs/heads/master
| 2023-08-18T09:53:24.212536
| 2023-08-17T21:22:01
| 2023-08-17T21:22:01
| 250,614,460
| 3
| 3
|
NOASSERTION
| 2023-09-08T16:44:08
| 2020-03-27T18:31:13
|
HTML
|
UTF-8
|
Python
| false
| false
| 240
|
py
|
from django.test.utils import get_runner
from django.conf import settings
from db.boot_django import boot_django
boot_django()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["tests/django"])
|
[
"leo.scholl@gmail.com"
] |
leo.scholl@gmail.com
|
efc58d0b9b812e8507cfe77bd976ff7d1d82c672
|
342b0835ad165d8fcb647792e13440ed8d0579c0
|
/posts/views.py
|
e7bd842707ebd9559bd68fbdda4e6b4983a21463
|
[] |
no_license
|
Str8jckn/mb
|
a2d1df745840a7654bef179e4088b2964f0608bc
|
d64fd91397eb979ca85e7cabefe7c290c04434e2
|
refs/heads/master
| 2023-02-23T11:37:24.684042
| 2021-01-25T05:10:35
| 2021-01-25T05:10:35
| 332,100,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
from django.shortcuts import render
from django.views.generic import ListView
from .models import Post
from django.contrib import admin
from django.urls import path, include
class HomePageView(ListView):
model = Post
template_name = 'home.html'
context_object_name = 'all_posts_list'
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('posts.urls')), # new
]
# Create your views here.
|
[
"jaebrownjr@gmail.com"
] |
jaebrownjr@gmail.com
|
68870acb9c8c92e4ebbaf015abbd0b6109358e24
|
97aa1181a8305fab0cfc635954c92880460ba189
|
/torch/__init__.py
|
4442d7e952cc12a7e75937c35b091330e7c69570
|
[
"BSD-2-Clause"
] |
permissive
|
zhujiang73/pytorch_mingw
|
64973a4ef29cc10b96e5d3f8d294ad2a721ccacb
|
b0134a0acc937f875b7c4b5f3cef6529711ad336
|
refs/heads/master
| 2022-11-05T12:10:59.045925
| 2020-08-22T12:10:32
| 2020-08-22T12:10:32
| 123,688,924
| 8
| 4
|
NOASSERTION
| 2022-10-17T12:30:52
| 2018-03-03T12:15:16
|
C++
|
UTF-8
|
Python
| false
| false
| 14,830
|
py
|
r"""
The torch package contains data structures for multi-dimensional
tensors and mathematical operations over these are defined.
Additionally, it provides many utilities for efficient serializing of
Tensors and arbitrary types, and other useful utilities.
It has a CUDA counterpart, that enables you to run your tensor computations
on an NVIDIA GPU with compute capability >= 3.0.
"""
import os
import sys
import platform
import ctypes
if sys.version_info < (3,):
raise Exception("Python 2 has reached end-of-life and is no longer supported by PyTorch.")
from ._utils import _import_dotted_name
from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
#from .version import __version__
from ._six import string_classes as _string_classes
from typing import Set, Type
__all__ = [
'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
'no_grad', 'enable_grad', 'rand', 'randn',
'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
'lobpcg', '_set_deterministic', '_is_deterministic'
]
################################################################################
# Load the extension module
################################################################################
# See Note [Global dependencies]
def _load_global_deps():
if platform.system() == 'Windows':
return
lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
here = os.path.abspath(__file__)
lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)
ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
platform.system() != 'Windows':
# Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
# few circumstances:
#
# 1. You're in a build environment (e.g., fbcode) where
# libtorch_global_deps is not available, but you still need
# to get mkl to link in with RTLD_GLOBAL or it will just
# not work.
#
# 2. You're trying to run PyTorch under UBSAN and you need
# to ensure that only one copy of libtorch is loaded, so
# vptr checks work properly
#
# If you're using this setting, you must verify that all the libraries
# you load consistently use the same libstdc++, or you may have
# mysterious segfaults.
#
import os as _dl_flags
if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_LAZY'):
try:
# next try if DLFCN exists
import DLFCN as _dl_flags # type: ignore
except ImportError:
# as a last attempt, use compile-time constants
import torch._dl as _dl_flags # type: ignore
old_flags = sys.getdlopenflags()
sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY)
from torch._C import *
sys.setdlopenflags(old_flags)
del old_flags
del _dl_flags
else:
# Easy way. You want this most of the time, because it will prevent
# C++ symbols from libtorch clobbering C++ symbols from other
# libraries, leading to mysterious segfaults.
#
# If building in an environment where libtorch_global_deps isn't available
# like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
# want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
#
# See Note [Global dependencies]
if USE_GLOBAL_DEPS:
_load_global_deps()
from torch._C import *
# Appease the type checker; ordinarily this binding is inserted by the
# torch._C module initialization code in C
if False:
import torch._C as _C
__all__ += [name for name in dir(_C)
if name[0] != '_' and
not name.endswith('Base')]
################################################################################
# Define basic utilities
################################################################################
def typename(o):
if isinstance(o, torch.Tensor):
return o.type()
module = ''
class_name = ''
if hasattr(o, '__module__') and o.__module__ != 'builtins' \
and o.__module__ != '__builtin__' and o.__module__ is not None:
module = o.__module__ + '.'
if hasattr(o, '__qualname__'):
class_name = o.__qualname__
elif hasattr(o, '__name__'):
class_name = o.__name__
else:
class_name = o.__class__.__name__
return module + class_name
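# Example (sketch of typical output; default dtype is float32):
#   >>> torch.typename(torch.rand(2, 2))
#   'torch.FloatTensor'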
def is_tensor(obj):
r"""Returns True if `obj` is a PyTorch tensor.
Note that this function is simply doing ``isinstance(obj, Tensor)``.
Using that ``isinstance`` check is better for typechecking with mypy,
and more explicit - so it's recommended to use that instead of
``is_tensor``.
Args:
obj (Object): Object to test
"""
return isinstance(obj, torch.Tensor)
def is_storage(obj):
r"""Returns True if `obj` is a PyTorch storage object.
Args:
obj (Object): Object to test
"""
return type(obj) in _storage_classes
def set_default_tensor_type(t):
r"""Sets the default ``torch.Tensor`` type to floating point tensor type
``t``. This type will also be used as default floating point type for
type inference in :func:`torch.tensor`.
The default floating point tensor type is initially ``torch.FloatTensor``.
Args:
t (type or string): the floating point tensor type or its name
Example::
>>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32
torch.float32
>>> torch.set_default_tensor_type(torch.DoubleTensor)
>>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
torch.float64
"""
if isinstance(t, _string_classes):
t = _import_dotted_name(t)
_C._set_default_tensor_type(t)
def set_default_dtype(d):
r"""Sets the default floating point dtype to :attr:`d`.
This dtype is:
1. The inferred dtype for python floats in :func:`torch.tensor`.
2. Used to infer dtype for python complex numbers. The default complex dtype is set to
``torch.complex128`` if default floating point dtype is ``torch.float64``,
otherwise it's set to ``torch.complex64``
The default floating point dtype is initially ``torch.float32``.
Args:
d (:class:`torch.dtype`): the floating point dtype to make the default
Example::
>>> # initial default for floating point is torch.float32
>>> torch.tensor([1.2, 3]).dtype
torch.float32
>>> # initial default for floating point is torch.complex64
>>> torch.tensor([1.2, 3j]).dtype
torch.complex64
>>> torch.set_default_dtype(torch.float64)
>>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
torch.float64
>>> torch.tensor([1.2, 3j]).dtype # a new complex tensor
torch.complex128
"""
_C._set_default_dtype(d)
def _set_deterministic(d):
r"""Sets a global flag to force all operations to use a deterministic
implementation if available. If an operation that does not have a
deterministic implementation is called while this setting is True, the
operation will throw a RuntimeError.
Note that deterministic operations tend to have worse performance than
non-deterministic operations.
Args:
d (:class:`bool`): If True, force operations to be deterministic.
If False, allow non-deterministic operations.
.. warning::
This feature is experimental and not complete. The above docstring
represents what the future behavior is intended to be. Right now,
`_set_deterministic` will only affect `torch.bmm` and convolution
operators.
"""
_C._set_deterministic(d)
def _is_deterministic():
r"""Returns True if the global deterministic flag is turned on and
operations are being forced to use a deterministic implementation.
.. warning::
This feature is experimental and not complete. The above docstring
represents what the future behavior is intended to be. Right now,
the global deterministic flag will only affect `torch.bmm` and
convolution operators.
"""
return _C._get_deterministic()
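# Usage sketch for the experimental pair above (per the docstrings, the effect
# is currently limited to torch.bmm and convolution operators):
#   >>> torch._set_deterministic(True)
#   >>> torch._is_deterministic()
#   True
#   >>> torch._set_deterministic(False)  # restore the default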
# If you edit these imports, please update torch/__init__.py.in as well
from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
from .serialization import save, load
from ._tensor_str import set_printoptions
################################################################################
# Define Storage and Tensor classes
################################################################################
from .tensor import Tensor
from .storage import _StorageBase
class DoubleStorage(_C.DoubleStorageBase, _StorageBase):
pass
class FloatStorage(_C.FloatStorageBase, _StorageBase):
pass
class HalfStorage(_C.HalfStorageBase, _StorageBase):
pass
class LongStorage(_C.LongStorageBase, _StorageBase):
pass
class IntStorage(_C.IntStorageBase, _StorageBase):
pass
class ShortStorage(_C.ShortStorageBase, _StorageBase):
pass
class CharStorage(_C.CharStorageBase, _StorageBase):
pass
class ByteStorage(_C.ByteStorageBase, _StorageBase):
pass
class BoolStorage(_C.BoolStorageBase, _StorageBase):
pass
class BFloat16Storage(_C.BFloat16StorageBase, _StorageBase):
pass
class ComplexDoubleStorage(_C.ComplexDoubleStorageBase, _StorageBase):
pass
class ComplexFloatStorage(_C.ComplexFloatStorageBase, _StorageBase):
pass
class QUInt8Storage(_C.QUInt8StorageBase, _StorageBase):
pass
class QInt8Storage(_C.QInt8StorageBase, _StorageBase):
pass
class QInt32Storage(_C.QInt32StorageBase, _StorageBase):
pass
_storage_classes = {
DoubleStorage, FloatStorage, LongStorage, IntStorage, ShortStorage,
CharStorage, ByteStorage, HalfStorage, BoolStorage, QUInt8Storage, QInt8Storage,
QInt32Storage, BFloat16Storage, ComplexFloatStorage, ComplexDoubleStorage
}
# The _tensor_classes set is initialized by the call to _C._initialize_tensor_type_bindings()
_tensor_classes: Set[Type] = set()
################################################################################
# Initialize extension
################################################################################
def manager_path():
if platform.system() == 'Windows':
return b""
path = get_file_path('torch', 'bin', 'torch_shm_manager')
prepare_multiprocessing_environment(get_file_path('torch'))
if not os.path.exists(path):
raise RuntimeError("Unable to find torch_shm_manager at " + path)
return path.encode('utf-8')
# Shared memory manager needs to know the exact location of manager executable
_C._initExtension(manager_path())
del manager_path
# Appease the type checker: it can't deal with direct setting of globals().
# Note that we will see "too many" functions when reexporting this way; there
# is not a good way to fix this problem. Perhaps, try to redesign VariableFunctions
# so that this import is good enough
if False:
from torch._C._VariableFunctions import *
for name in dir(_C._VariableFunctions):
if name.startswith('__'):
continue
globals()[name] = getattr(_C._VariableFunctions, name)
__all__.append(name)
################################################################################
# Import interface functions defined in Python
################################################################################
# needs to be after the above ATen bindings so we can overwrite from Python side
from .functional import *
################################################################################
# Remove unnecessary members
################################################################################
del DoubleStorageBase
del FloatStorageBase
del LongStorageBase
del IntStorageBase
del ShortStorageBase
del CharStorageBase
del ByteStorageBase
del BoolStorageBase
del QUInt8StorageBase
del QInt8StorageBase
del QInt32StorageBase
del BFloat16StorageBase
del ComplexDoubleStorageBase
del ComplexFloatStorageBase
################################################################################
# Import most common subpackages
################################################################################
import torch.cuda
import torch.autograd
from torch.autograd import no_grad, enable_grad, set_grad_enabled
import torch.futures
import torch.nn
import torch.nn.intrinsic
import torch.nn.quantized
import torch.optim
import torch.multiprocessing
import torch.sparse
import torch.utils.backcompat
import torch.onnx
import torch.jit
import torch.hub
import torch.random
import torch.distributions
import torch.testing
import torch.backends.cuda
import torch.backends.mkl
import torch.backends.mkldnn
import torch.backends.openmp
import torch.backends.quantized
import torch.quantization
import torch.utils.data
import torch.__config__
import torch.__future__
_C._init_names(list(torch._storage_classes))
# attach docstrings to torch and tensor functions
from . import _torch_docs, _tensor_docs, _storage_docs
del _torch_docs, _tensor_docs, _storage_docs
def compiled_with_cxx11_abi():
r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
return _C._GLIBCXX_USE_CXX11_ABI
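# Example (sketch): useful when building C++ extensions that must match
# libtorch's ABI; the result depends on how this binary was built:
#   >>> torch.compiled_with_cxx11_abi()
#   True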
# Import the ops "namespace"
from torch._ops import ops
from torch._classes import classes
# Import the quasi random sampler
import torch.quasirandom
# If you are seeing this, it means that this call site was not checked if
# the memory format could be preserved, and it was switched to old default
# behaviour of contiguous
legacy_contiguous_format = contiguous_format
# Register fork handler to initialize OpenMP in child processes (see gh-28389)
from torch.multiprocessing._atfork import register_after_fork
register_after_fork(torch.get_num_threads)
del register_after_fork
# Import tools that require fully imported torch (for applying
# torch.jit.script as a decorator, for instance):
from ._lobpcg import lobpcg
# These were previously defined in native_functions.yaml and appeared on the
# `torch` namespace, but we moved them to c10 dispatch to facilitate custom
# class usage. We add these lines here to preserve backward compatibility.
quantized_lstm = torch.ops.aten.quantized_lstm
quantized_gru = torch.ops.aten.quantized_gru
|
[
"zhujiangmail@hotmail.com"
] |
zhujiangmail@hotmail.com
|
fb935ebf7929bf8547c110f220afd9dd747ddc54
|
a90792aec007ab37cdd7d21dfb5340a88b87132f
|
/shorten/views.py
|
41aca3353713eacb4762171447c1d714b33da29e
|
[] |
no_license
|
chetangargnitd/url_shortener
|
fee4b837c79b118bf1cfea3f80582c05326a0c3d
|
b94c1f83ac56e623fedb3e2ca211a9f2f5b35ff2
|
refs/heads/master
| 2021-06-24T04:20:27.291801
| 2019-06-17T14:22:18
| 2019-06-17T14:22:18
| 191,707,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
from __future__ import unicode_literals
from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import get_object_or_404
from .models import URLs
import urllib
import hashlib
def home(request):
context = URLs.objects.order_by('-created')[:5]
return render(request, 'shorten/index.html', {'context' : context})
def shrink(request):
url = (request.GET["url"])
c_url = (request.GET["c_url"])
print(type(c_url))
encoded_url = url.encode('utf-8')
hashObject = hashlib.md5(encoded_url)
shrinked_url = hashObject.hexdigest()[:8]
context = URLs.objects.order_by('-created')[:5]
if(c_url == ""):
try:
check = URLs.objects.get(shrinked_url = shrinked_url)
except URLs.DoesNotExist:
entry = URLs(shrinked_url = shrinked_url, original_url = url)
entry.save()
return render(request, 'shorten/index.html', {'shrinked_url' : shrinked_url, 'context' : context})
else:
try:
check = URLs.objects.get(shrinked_url = c_url)
except URLs.DoesNotExist:
entry = URLs(shrinked_url = c_url, original_url = url)
entry.save()
return render(request, 'shorten/index.html', {'shrinked_url' : c_url, 'context' : context})
def retrieve(request, id):
target = get_object_or_404(URLs, shrinked_url = id)
targetURL = target.original_url
if(targetURL[:4] != 'http'):
targetURL = 'http://'+targetURL
return redirect(targetURL)
|
[
"chetangarg1102@gmail.com"
] |
chetangarg1102@gmail.com
|
71099406ad6fc49075c14031a16a80c20794895f
|
a5315e8edc48c5fabcf6aaaa56de737d9594cddf
|
/deepc/modules/resnet.py
|
6e109cf6e94138f8dae8fb9e4efd2080ec7f1fd4
|
[] |
no_license
|
elirshabat/deepc-pytorch
|
efead7622d09c0c14f2b2c941cc7202e0278bbc8
|
c946ad55c5a8fedff1055f34b92b1a8e9cb3df2a
|
refs/heads/master
| 2020-03-23T01:41:05.212437
| 2018-11-06T07:31:50
| 2018-11-06T07:31:50
| 140,931,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,955
|
py
|
import torch
from torchvision import models
import torch.nn.init as init
import numpy as np
def initialize_weights(method='kaiming', *models):
    for model in models:
        for module in model.modules():
            if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.ConvTranspose2d) or isinstance(module, torch.nn.Linear):
                # Use the in-place (underscore) initializers; the bare names are deprecated.
                if method == 'kaiming':
                    init.kaiming_normal_(module.weight.data, np.sqrt(2.0))
                elif method == 'xavier':
                    init.xavier_normal_(module.weight.data, np.sqrt(2.0))
                elif method == 'orthogonal':
                    init.orthogonal_(module.weight.data, np.sqrt(2.0))
                elif method == 'normal':
                    init.normal_(module.weight.data, mean=0, std=0.02)
                if module.bias is not None:
                    init.constant_(module.bias.data, 0)
class GlobalConvolutionBlock(torch.nn.Module):
def __init__(self, in_channels, out_channels, k):
        super(GlobalConvolutionBlock, self).__init__()
self.left = torch.nn.Sequential(torch.nn.Conv2d(in_channels, out_channels, kernel_size=(k[0],1), padding=(k[0]//2,0)),
torch.nn.Conv2d(out_channels, out_channels, kernel_size=(1,k[1]), padding=(0,k[1]//2)))
self.right = torch.nn.Sequential(torch.nn.Conv2d(in_channels, out_channels, kernel_size=(1,k[1]), padding=(0,k[1]//2)),
torch.nn.Conv2d(out_channels, out_channels, kernel_size=(k[0],1), padding=(k[0]//2,0)))
def forward(self, x):
left = self.left(x)
right = self.right(x)
return left + right
class BoundaryRefine(torch.nn.Module):
def __init__(self, in_channels):
super(BoundaryRefine, self).__init__()
self.layer = torch.nn.Sequential(torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(in_channels),
torch.nn.ReLU(inplace=True),
torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(in_channels))
def forward(self, x):
convs = self.layer(x)
        return x + convs  # residual connection; x and convs have the same shape
class ResnetMIS(torch.nn.Module):
def __init__(self, pretrained_resnet=True, out_channels=3):
super().__init__()
        resnet = models.resnet101(pretrained=pretrained_resnet)
        self.layer0 = torch.nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        self.layer1 = resnet.layer1
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4
# Assuming input of size 240x320
self.gcn256 = GlobalConvolutionBlock(256, out_channels, (59, 79))
self.br256 = BoundaryRefine(out_channels)
self.gcn512 = GlobalConvolutionBlock(512, out_channels, (29, 39))
self.br512 = BoundaryRefine(out_channels)
self.gcn1024 = GlobalConvolutionBlock(1024, out_channels, (13, 19))
self.br1024 = BoundaryRefine(out_channels)
self.gcn2048 = GlobalConvolutionBlock(2048, out_channels, (7, 9))
self.br2048 = BoundaryRefine(out_channels)
self.br1 = BoundaryRefine(out_channels)
self.br2 = BoundaryRefine(out_channels)
self.br3 = BoundaryRefine(out_channels)
self.br4 = BoundaryRefine(out_channels)
self.br5 = BoundaryRefine(out_channels)
self.activation = torch.nn.Sigmoid()
self.deconv1 = torch.nn.ConvTranspose2d(out_channels, out_channels, 2, stride=2)
self.deconv2 = torch.nn.ConvTranspose2d(out_channels, out_channels, 2, stride=2)
initialize_weights(self.gcn256, self.gcn512, self.gcn1024, self.gcn2048,
self.br5, self.br4, self.br3, self.br2, self.br1,
self.br256, self.br512, self.br1024, self.br2048,
self.deconv1, self.deconv2)
def forward(self, x):
x = self.layer0(x)
layer1 = self.layer1(x)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
enc1 = self.br256(self.gcn256(layer1))
enc2 = self.br512(self.gcn512(layer2))
enc3 = self.br1024(self.gcn1024(layer3))
enc4 = self.br2048(self.gcn2048(layer4))
dec1 = self.br1(torch.nn.functional.interpolate(enc4, size=enc3.size()[2:], mode='bilinear') + enc3)
dec2 = self.br2(torch.nn.functional.interpolate(dec1, size=enc2.size()[2:], mode='bilinear') + enc2)
dec3 = self.br3(torch.nn.functional.interpolate(dec2, size=enc1.size()[2:], mode='bilinear') + enc1)
dec4 = self.br4(self.deconv1(dec3))
score_map = self.br5(self.deconv2(dec4))
return self.activation(score_map)
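# Usage sketch (assumes 240x320 inputs, which the GCN kernel sizes in
# __init__ are tuned for; batch size and weights are illustrative):
#   model = ResnetMIS(pretrained_resnet=False, out_channels=3)
#   x = torch.randn(1, 3, 240, 320)
#   score_map = model(x)  # sigmoid scores, shape (1, 3, 240, 320)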
|
[
"shabat.eliran@gmail.com"
] |
shabat.eliran@gmail.com
|
31a96cf391d906b0d3d59fcd37437e16f21f474b
|
fd326562890d4f1987c384fc7c60374938231222
|
/OOP/ExamPrep/Exam10April21/project/decoration/ornament.py
|
90d7980c034613da08f4ee857bf726562ac89427
|
[] |
no_license
|
miro-lp/SoftUni
|
cc3b0ff742218c9ceaf93f05c319ccfeed5bc8a4
|
283d9328537919de49f7f6a301e58593bae9ca2a
|
refs/heads/main
| 2023-08-23T21:22:07.856226
| 2021-08-25T15:10:18
| 2021-08-25T15:10:18
| 318,134,101
| 2
| 1
| null | 2021-08-10T12:51:54
| 2020-12-03T09:03:08
|
Python
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
from project.decoration.base_decoration import BaseDecoration
class Ornament(BaseDecoration):
def __init__(self):
super().__init__(1, 5)
|
[
"miro_lp@abv.bg"
] |
miro_lp@abv.bg
|
aee5135a64857c7423190a714cc62b15b207d49f
|
9612da0b14b7e9f883a2ae50f00af87571bccabc
|
/Analyzer.py
|
2f1b4c07946a7828146daa3d16607d8aad4e94ea
|
[] |
no_license
|
andrewghaly/HandwrittenEstimation
|
e56bb5489b5833a515dff529e0f9172ed7c380db
|
97424d64a4b3ae784499d0bcf660797f056fc026
|
refs/heads/master
| 2021-01-23T03:27:17.421327
| 2017-03-24T14:45:44
| 2017-03-24T14:45:44
| 86,078,278
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
import cv2
import numpy as np
drawing_averages = []
for imageNumber in range(1,51):
img = cv2.imread('C:\\Users\\ghalya\\Pictures\\Hands\\Saunders_hand\\al_r_' + str(imageNumber) + '.png', 0)
if img is None:
print "Error loading image"
exit()
newImg = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
#cv2.imshow('Image', img)
# define the list of boundaries
boundaries = [
([151], [255]),
]
# loop over the boundaries
for (lower, upper) in boundaries:
# create NumPy arrays from the boundaries
lower = np.array(lower, dtype="uint8")
upper = np.array(upper, dtype="uint8")
#apply the mask
mask = cv2.inRange(img, lower, upper)
output = cv2.bitwise_and(img, img, mask=mask)
# show the images
#cv2.imshow("images", np.hstack([img, output]))
#cv2.imwrite('C:\Users\ghalya\Pictures\lol_testlulz.png', np.hstack([img, output]))
height, width = img.shape
points = 0
xSum = 0
ySum = 0
for i in range(0, width):
for j in range(0, height):
if img[j][i] <= 150:
points += 1
xSum += i
ySum += j
    xAvg = xSum // points  # integer division: cv2.circle needs int coordinates
    yAvg = ySum // points
drawing_averages.append([xAvg, yAvg])
#print("xAvg: ", xAvg, " yAvg: ", yAvg)
cv2.circle(newImg, (xAvg, yAvg), 5, (0, 0, 255), -1)
#cv2.imshow("image", newImg)
#cv2.imwrite("C:\Users\ghalya\Pictures\genLEL.png", newImg)
cv2.waitKey(0)
print(drawing_averages)
count = xTotal = yTotal = 0
for i, j in drawing_averages:
    xTotal += i
    yTotal += j
    count += 1
print("average:", xTotal / count, yTotal / count)
|
[
"ghalya@wit.edu"
] |
ghalya@wit.edu
|
b3aaed8088ac0f6dc6a87eed88943c27eea6fcb9
|
c9dd27f95f7a1a25963e0bd107a8fd72033c2168
|
/src/main.py
|
562ecf88d07a12c56bdfd8f9c24e342b1632865e
|
[] |
no_license
|
RamazanDemirci/pythonML
|
3f807c5ec8e8881fe04197c75c98010b2feb7095
|
86d7e9286f03f099f64bf39615a9ee8841e3c021
|
refs/heads/main
| 2023-02-02T02:55:40.429272
| 2020-12-27T13:42:00
| 2020-12-27T13:42:00
| 319,462,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
if __name__ == "__main__":
print("Program Entry Point")
|
[
"radem18@gmail.com"
] |
radem18@gmail.com
|
70f76d0921495935a8af0e489fc2de27af2fdd67
|
0fb2607ba5dff6c86e806dd67ba16b158c68b7a3
|
/bellorest/takeaway/apps.py
|
8530766b1735e6d11564a7d76bae23c34f085d6f
|
[] |
no_license
|
shashankiit/Restaraunt-Mangement-System
|
204259b4b7c0dbd984f4d38dcdbbab39bef2ee02
|
8a246ff56023a04c996e7fcf0ffb7d9093de1446
|
refs/heads/main
| 2023-04-23T06:14:30.348021
| 2021-05-07T18:14:15
| 2021-05-07T18:14:15
| 358,109,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
from django.apps import AppConfig
class TakeawayConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'takeaway'
|
[
"ambeloskar@iitb.ac.in"
] |
ambeloskar@iitb.ac.in
|
2725285e31284db439a824eacbd0d0ddf6681c04
|
7a6d30770cd56c2900aa7ef969b3ecfd679078a5
|
/WebGame/WebGame/game/classes/postgres.py
|
1dc642d5a3d31b85d596a939756c32fbda900e56
|
[] |
no_license
|
sherkd/zombiegame
|
519e1f7166e8e4ca192347682b2e55675757dff5
|
1438d2267ab2c615e14cf6e5d13525b38f7cb7a1
|
refs/heads/master
| 2020-06-14T12:54:40.043993
| 2017-01-05T12:24:31
| 2017-01-05T12:24:31
| 75,178,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,948
|
py
|
import psycopg2
import random
import sys
class Postgres(object):
def __init__(self):
conn_string = "host='localhost' dbname='zombiegamers' user='postgres' password='project3'"
print("Connecting to database\n ->%s" % (conn_string))
self.connection = psycopg2.connect(conn_string)
self.cursor = self.connection.cursor()
def getConnection(self):
return self.connection
def createDatabase(self):
self.cursor.execute("CREATE TABLE Account(userid int NOT NULL, password varchar(255), username varchar(255), gender varchar(1), email varchar(255), birthday date, PRIMARY KEY(userid))")
self.cursor.execute("CREATE TABLE Player(userid int NOT NULL, health int, attack int , luck int, accuracy int, speed int, skillpoints int, weaponid int, FOREIGN KEY(userid) REFERENCES Account(userid))")
self.cursor.execute("CREATE TABLE Weapon(weaponid int NOT NULL, name varchar(255), class varchar(255), description varchar(255), level int, damage int, PRIMARY KEY(weaponid))")
self.cursor.execute("CREATE TABLE Player_Weapon(userid int NOT NULL, weaponid int NOT NULL, FOREIGN KEY(userid) REFERENCES Account(userid), FOREIGN KEY(weaponid) REFERENCES Weapon(weaponid))")
def getAccount(self):
self.cursor.execute("SELECT * FROM Account WHERE userid='12'")
return self.cursor.fetchone()
def insertAccount(self):
try:
self.cursor.execute("INSERT INTO Account (userid, password, username, gender, email, birthday) Values(12, 'pass', 'user', 'm', 'user@gmail.com', '2000-10-10')")
self.connection.commit()
except:
self.connection.rollback()
def insertTestPlayer(self):
try:
self.cursor.execute("DELETE FROM Player WHERE userid = '12'")
self.connection.commit()
except:
self.connection.rollback()
try:
self.cursor.execute("INSERT INTO Player(userid, health, attack, luck, accuracy, speed, skillpoints, weaponid) VALUES(12, 100, 10, 5, 10, 10, 0, 12);")
self.connection.commit()
except:
self.connection.rollback()
def getTestPlayer(self):
self.cursor.execute("SELECT * FROM Player WHERE userid='12'")
return self.cursor.fetchone()
    def insertWeapon(self, weapon):
        id = random.randint(0, 1000)
        try:
            # Parameterized query avoids quoting bugs and SQL injection; the
            # schema above names the key column weaponid, not userid.
            self.cursor.execute(
                "INSERT INTO Weapon (weaponid, name, class, description, level, damage) VALUES (%s, %s, %s, %s, %s, %s)",
                (id, weapon.getName(), weapon.getClass(), weapon.getDescription(), weapon.getLevel(), weapon.getDamage()))
            self.connection.commit()
        except:
            self.connection.rollback()
    def getWeapon(self, id):
        # The Weapon table has no userid column; look up by primary key.
        # (The original file defined getWeapon twice; Python keeps only the
        # last definition, so the two are merged here.)
        self.cursor.execute("SELECT * FROM Weapon WHERE weaponid = %s", (id,))
        return self.cursor.fetchone()
def getWeapons(self, username):
pass
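# Usage sketch (assumes a local PostgreSQL server matching the credentials
# hard-coded in __init__):
#   db = Postgres()
#   db.insertAccount()
#   db.insertTestPlayer()
#   print(db.getTestPlayer())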
|
[
"sivarwerrie@hotmail.nl"
] |
sivarwerrie@hotmail.nl
|
48a149e36e40844f00d5d10b8710b2422b11ab35
|
4bbb4fca1829ec8b146f9c73d2358b897e28d4ae
|
/main/views.py
|
4070691290ea2909d585902532f0a46b40ad40be
|
[] |
no_license
|
richardwalkerdev/handofcards
|
e54e1bc4bf868b2a9e02c63a063706daf1573f98
|
312e77884695e5e1442bff40660b6be08623604b
|
refs/heads/master
| 2022-12-16T00:35:24.096302
| 2020-02-04T17:12:38
| 2020-02-04T17:12:38
| 234,924,220
| 0
| 0
| null | 2022-12-08T03:27:57
| 2020-01-19T15:49:12
|
Python
|
UTF-8
|
Python
| false
| false
| 959
|
py
|
from django.shortcuts import render
import requests
import os
from django.core.exceptions import ImproperlyConfigured
from .models import Hand
import datetime
def hand(request):
def get_env_value(env_variable):
try:
return os.environ[env_variable]
except KeyError:
error_msg = 'Set the {} environment variable'.format(env_variable)
raise ImproperlyConfigured(error_msg)
    # Get environment variable DECKOFCARDS_URL
DECKOFCARDS_URL = get_env_value('DECKOFCARDS_URL')
response = requests.get(DECKOFCARDS_URL)
deckdata = response.json()
hand = []
total = 0
for n in range(2):
hand.append(deckdata.pop(0))
for i in hand:
total = total + i.get("value")
handTotal = Hand()
handTotal.total = total
handTotal.created = datetime.datetime.now()
handTotal.save()
return render(request, 'main/index.html', {'array': hand, 'total': total})
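# Sketch: the view expects DECKOFCARDS_URL to point at a JSON endpoint that
# returns a list of cards with integer "value" fields, e.g. (hypothetical URL):
#   DECKOFCARDS_URL=https://example.com/api/deck python manage.py runserver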
|
[
"rwalker@rwalker.remote.csb"
] |
rwalker@rwalker.remote.csb
|
188c2472291601922fddbb95d9c6cdbe3ca24173
|
d12e13bab06ba7083a41aba7e7f74fa40926f0cc
|
/seq2seq-affect-attention/model/Decoder.py
|
4205fde03a3c06a698c12fa5da91565c6a50e86d
|
[] |
no_license
|
HengYangDS/seq2seq-affect
|
39ca15998f7824f29932832c880cba416a478682
|
e5c40651540fea258a2d683be0fe532763168853
|
refs/heads/master
| 2020-09-25T16:09:53.663929
| 2019-12-05T02:10:13
| 2019-12-05T02:10:13
| 226,040,899
| 1
| 0
| null | 2019-12-05T07:21:28
| 2019-12-05T07:21:27
| null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
import torch.nn as nn
# Decoder
class Decoder(nn.Module):
    def __init__(self, cell_type,  # RNN type: 'GRU' or 'LSTM'
                 input_size,  # input dimension
                 output_size,  # output dimension
                 num_layer,  # number of RNN layers
                 dropout=0):  # dropout
super(Decoder, self).__init__()
        assert cell_type in ['GRU', 'LSTM']  # restrict the RNN type
self.cell_type = cell_type
        self.rnncell = getattr(nn, cell_type)(  # RNN cell
input_size=input_size,
hidden_size=output_size,
num_layers=num_layer,
dropout=dropout)
    def forward(self, input,  # input [seq, batch, dim], or a single step [1, batch, dim]
                state):  # initial state [layers*directions, batch, dim]
        # output = [seq, batch, dim*directions], the output at every time step
        # final_state = [layers*directions, batch, dim], the final state of each layer
output, final_state = self.rnncell(input, state)
return output, final_state
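# Usage sketch (dimensions are illustrative; shapes follow the comments above):
#   decoder = Decoder('GRU', input_size=300, output_size=512, num_layer=2)
#   step_input = torch.randn(1, 32, 300)   # [1, batch, dim]
#   init_state = torch.randn(2, 32, 512)   # [layers, batch, dim]
#   output, final_state = decoder(step_input, init_state)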
|
[
"1759567121@qq.com"
] |
1759567121@qq.com
|
4e6eca2f88c65a14f8f7950765320058fffc7784
|
6cd24d192fe83e2d4a23b2d7f2fe0c038940a5d9
|
/trip/models.py
|
c05e3e881cdb57c13e1a086d6c7b5744615c8a64
|
[] |
no_license
|
nabeelakhtar20/trip_app_sample
|
d5370864ae0c872b0bc24bd9c47361c2fcae413c
|
ae6ab820d9a39fa4072267f09349b2c0d794b979
|
refs/heads/master
| 2022-05-18T03:30:03.315671
| 2019-10-13T12:06:58
| 2019-10-13T12:06:58
| 214,809,376
| 1
| 0
| null | 2022-04-22T22:32:11
| 2019-10-13T11:38:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
from datetime import datetime
from neomodel import StringProperty, StructuredNode, RelationshipTo, \
IntegerProperty, DateTimeProperty, UniqueIdProperty, JSONProperty, DateProperty
from auth.models import User
class Trip(StructuredNode):
uid = UniqueIdProperty()
destination = StringProperty()
start_date = DateProperty()
end_date = DateProperty()
adults = IntegerProperty()
infants = IntegerProperty()
estimated_budget_start = IntegerProperty()
estimated_budget_end = IntegerProperty()
events = JSONProperty()
    # Pass the callable (no parentheses) so the timestamp is evaluated per
    # save, not once at import time.
    creation_date = DateTimeProperty(default=datetime.now)
    last_updated = DateTimeProperty(default=datetime.now)
user = RelationshipTo(User, 'PLANNED_BY')
@property
def serialize(self):
return {
'node_properties': {
"id": self.uid,
"destination": self.destination,
"start_date": self.start_date,
"end_date": self.end_date,
"adults": self.adults,
"infants": self.infants,
"estimated_budget_start": self.estimated_budget_start,
"estimated_budget_end": self.estimated_budget_end,
"events": self.events,
},
}
@property
def serialize_connections(self):
return [
{
'nodes_type': 'User',
'nodes_related': self.serialize_relationships(self.user.all()),
},
]
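# Sketch (hypothetical values; assumes neomodel is configured against a
# running Neo4j instance, and some_user is an existing User node):
#   trip = Trip(destination="Lisbon", adults=2, infants=0).save()
#   trip.user.connect(some_user)
#   payload = trip.serialize  # {'node_properties': {...}} ready for JSON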
|
[
"nabeel_akhtar20@hotmail.com"
] |
nabeel_akhtar20@hotmail.com
|
fbcf2fca48d9207fd6d531d83f43f44da2312148
|
0120813c649236fcb4732723c4b25f6538de04fb
|
/Image Stitching/Source/main.py
|
a232db81366c8c7dc347f44a463ca2d9345047ab
|
[] |
no_license
|
shubh0906/Computer-Vision
|
47f1e8e55f54138acd070f5f39b722b17a5747b2
|
e83bd827f1ed9de9218af5e973e69510d826d100
|
refs/heads/master
| 2021-01-25T11:15:36.124527
| 2018-03-06T09:01:35
| 2018-03-06T09:01:35
| 123,387,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,840
|
py
|
# Instructions:
# Do not change the output file names, use the helper functions as you see fit
import os
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
import math
def help_message():
print("Usage: [Question_Number] [Input_Options] [Output_Options]")
print("[Question Number]")
print("1 Perspective warping")
print("2 Cylindrical warping")
print("3 Bonus perspective warping")
print("4 Bonus cylindrical warping")
print("[Input_Options]")
print("Path to the input images")
print("[Output_Options]")
print("Output directory")
print("Example usages:")
print(sys.argv[0] + " 1 " + "[path to input image1] " + "[path to input image2] " + "[path to input image3] " + "[output directory]")
'''
Detect, extract and match features between img1 and img2.
Using SIFT as the detector/extractor, but this is inconsequential to the user.
Returns: (pts1, pts2), where ptsN are points on image N.
The lists are "aligned", i.e. point i in pts1 matches with point i in pts2.
Usage example:
im1 = cv2.imread("image1.jpg", 0)
im2 = cv2.imread("image2.jpg", 0)
(pts1, pts2) = feature_matching(im1, im2)
plt.subplot(121)
plt.imshow(im1)
plt.scatter(pts1[:,:,0],pts1[:,:,1], 0.5, c='r', marker='x')
plt.subplot(122)
plt.imshow(im2)
plt.scatter(pts1[:,:,0],pts1[:,:,1], 0.5, c='r', marker='x')
'''
def feature_matching(img1, img2, savefig=False):
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50) # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches2to1 = flann.knnMatch(des2,des1,k=2)
    matchesMask_ratio = [[0, 0] for i in range(len(matches2to1))]
match_dict = {}
for i,(m,n) in enumerate(matches2to1):
if m.distance < 0.7*n.distance:
matchesMask_ratio[i]=[1,0]
match_dict[m.trainIdx] = m.queryIdx
good = []
recip_matches = flann.knnMatch(des1,des2,k=2)
    matchesMask_ratio_recip = [[0, 0] for i in range(len(recip_matches))]
for i,(m,n) in enumerate(recip_matches):
if m.distance < 0.7*n.distance: # ratio
if m.queryIdx in match_dict and match_dict[m.queryIdx] == m.trainIdx: #reciprocal
good.append(m)
matchesMask_ratio_recip[i]=[1,0]
if savefig:
draw_params = dict(matchColor = (0,255,0),
singlePointColor = (255,0,0),
matchesMask = matchesMask_ratio_recip,
flags = 0)
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,recip_matches,None,**draw_params)
plt.figure(),plt.xticks([]),plt.yticks([])
plt.imshow(img3,)
plt.savefig("feature_matching.png",bbox_inches='tight')
return ([ kp1[m.queryIdx].pt for m in good ],[ kp2[m.trainIdx].pt for m in good ])
'''
Warp an image from cartesian coordinates (x, y) into cylindrical coordinates (theta, h)
Returns: (image, mask)
Mask is [0,255], and has 255s wherever the cylindrical images has a valid value.
Masks are useful for stitching
Usage example:
im = cv2.imread("myimage.jpg",0) #grayscale
h,w = im.shape
f = 700
K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]]) # mock calibration matrix
imcyl = cylindricalWarpImage(im, K)
'''
def cylindricalWarpImage(img1, K, savefig=False):
f = K[0,0]
im_h,im_w = img1.shape
# go inverse from cylindrical coord to the image
# (this way there are no gaps)
cyl = np.zeros_like(img1)
cyl_mask = np.zeros_like(img1)
cyl_h,cyl_w = cyl.shape
x_c = float(cyl_w) / 2.0
y_c = float(cyl_h) / 2.0
for x_cyl in np.arange(0,cyl_w):
for y_cyl in np.arange(0,cyl_h):
theta = (x_cyl - x_c) / f
h = (y_cyl - y_c) / f
X = np.array([math.sin(theta), h, math.cos(theta)])
X = np.dot(K,X)
x_im = X[0] / X[2]
if x_im < 0 or x_im >= im_w:
continue
y_im = X[1] / X[2]
if y_im < 0 or y_im >= im_h:
continue
cyl[int(y_cyl),int(x_cyl)] = img1[int(y_im),int(x_im)]
cyl_mask[int(y_cyl),int(x_cyl)] = 255
if savefig:
plt.imshow(cyl, cmap='gray')
plt.savefig("cyl.png",bbox_inches='tight')
return (cyl,cyl_mask)
'''
Calculate the geometric transform (only affine or homography) between two images,
based on feature matching and alignment with a robust estimator (RANSAC).
Returns: (M, pts1, pts2, mask)
Where: M is the 3x3 transform matrix
pts1 are the matched feature points in image 1
pts2 are the matched feature points in image 2
mask is a binary mask over the lists of points that selects the transformation inliers
Usage example:
im1 = cv2.imread("image1.jpg", 0)
im2 = cv2.imread("image2.jpg", 0)
(M, pts1, pts2, mask) = getTransform(im1, im2)
# for example: transform im1 to im2's plane
# first, make some room around im2
im2 = cv2.copyMakeBorder(im2,200,200,500,500, cv2.BORDER_CONSTANT)
# then transform im1 with the 3x3 transformation matrix
out = cv2.warpPerspective(im1, M, (im1.shape[1],im2.shape[0]), dst=im2.copy(), borderMode=cv2.BORDER_TRANSPARENT)
plt.imshow(out, cmap='gray')
plt.show()
'''
def getTransform(src, dst, method='affine'):
pts1,pts2 = feature_matching(src,dst)
src_pts = np.float32(pts1).reshape(-1,1,2)
dst_pts = np.float32(pts2).reshape(-1,1,2)
if method == 'affine':
        # Pass method as a keyword: the third positional slot of
        # estimateAffine2D is the `inliers` output array, not the method.
        M, mask = cv2.estimateAffine2D(src_pts, dst_pts, method=cv2.RANSAC, ransacReprojThreshold=5.0)
        #M = np.append(M, [[0,0,1]], axis=0)
if method == 'homography':
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
matchesMask = mask.ravel().tolist()
return (M, pts1, pts2, mask)
# ===================================================
# ================ Perspective Warping ==============
# ===================================================
def Perspective_warping(im1, im2, im3):
im1 = cv2.copyMakeBorder(im1,200,200,500,500, cv2.BORDER_CONSTANT)
(M, pts1, pts2, mask) = getTransform(im2, im1, 'homography')
out = cv2.warpPerspective(im2, M, (im1.shape[1],im1.shape[0]), dst=im1.copy(), borderMode=cv2.BORDER_TRANSPARENT)
(M, pts1, pts2, mask) = getTransform(im3, out, 'homography')
out = cv2.warpPerspective(im3, M, (out.shape[1],out.shape[0]), dst=out.copy(), borderMode=cv2.BORDER_TRANSPARENT)
    output_image = out  # final stitched result
# Write out the result
output_name = sys.argv[5] + "output_homography.png"
cv2.imwrite(output_name, output_image)
imM = cv2.imread('example_output1.png', 0);
#print '---q1---', RMSD(1, out, imM);
return True
def Bonus_perspective_warping(img1, img2, img3):
# Write your codes here
output_image = img1 # This is dummy output, change it to your output
# Write out the result
output_name = sys.argv[5] + "output_homography_lpb.png"
cv2.imwrite(output_name, output_image)
return True
# ===================================================
# =============== Cylindrical Warping ===============
# ===================================================
def Cylindrical_warping(im1, im2, im3):
f = 500
h,w = im1.shape
K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]]) # mock calibration matrix
im1, k1 = cylindricalWarpImage(im1, K)
h,w = im2.shape
K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]]) # mock calibration matrix
im2, k2 = cylindricalWarpImage(im2, K)
h,w = im3.shape
K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]]) # mock calibration matrix
im3, k3 = cylindricalWarpImage(im3, K)
im1 = cv2.copyMakeBorder(im1, 50, 50, 300, 300, cv2.BORDER_CONSTANT)
(M1, pts1, pts2, mask) = getTransform(im2, im1)
(M2, pts1, pts2, mask) = getTransform(im3, im1)
out1 = cv2.warpAffine(im2, M1, (im1.shape[1],im1.shape[0]))
outM1 = cv2.warpAffine(k2, M1, (im1.shape[1],im1.shape[0]))
r,c = im1.shape
    for i in range(r):
        for j in range(c):
            if outM1[i, j] == 255:
                im1[i, j] = out1[i, j]
(M, pts1, pts2, mask) = getTransform(im3, im1)
out = cv2.warpAffine(im3, M2, (out1.shape[1],out1.shape[0]))
outM = cv2.warpAffine(k3, M2, (out1.shape[1],out1.shape[0]))
r,c = im1.shape
    for i in range(r):
        for j in range(c):
            if outM[i, j] == 255:
                im1[i, j] = out[i, j]
    output_image = im1  # final stitched result
# Write out the result
output_name = sys.argv[5] + "output_cylindrical.png"
cv2.imwrite(output_name, output_image)
imM = cv2.imread('example_output2.png', 0);
#print RMSD(2, im1, imM);
return True
'''# Write your codes here
output_image = img1 # This is dummy output, change it to your output
# Write out the result
output_name = sys.argv[5] + "output_cylindrical.png"
cv2.imwrite(output_name, output_image)
return True'''
def Bonus_cylindrical_warping(img1, img2, img3):
# Write your codes here
output_image = img1 # This is dummy output, change it to your output
# Write out the result
output_name = sys.argv[5] + "output_cylindrical_lpb.png"
cv2.imwrite(output_name, output_image)
return True
'''
This exact function will be used to evaluate your results for HW2
Compare your result with master image and get the difference, the grading
criteria is posted on Piazza
'''
'''def RMSD(target, master):
# Get width, height, and number of channels of the master image
master_height, master_width = master.shape[:2]
master_channel = len(master.shape)
# Get width, height, and number of channels of the target image
target_height, target_width = target.shape[:2]
target_channel = len(target.shape)
# Validate the height, width and channels of the input image
if (master_height != target_height or master_width != target_width or master_channel != target_channel):
return -1
else:
total_diff = 0.0;
master_channels = cv2.split(master);
target_channels = cv2.split(target);
for i in range(0, len(master_channels), 1):
dst = cv2.absdiff(master_channels[i], target_channels[i])
dst = cv2.pow(dst, 2)
mean = cv2.mean(dst)
total_diff = total_diff + mean[0]**(1/2.0)
return total_diff;'''
def RMSD(questionID, target, master):
# Get width, height, and number of channels of the master image
master_height, master_width = master.shape[:2]
master_channel = len(master.shape)
# Get width, height, and number of channels of the target image
target_height, target_width = target.shape[:2]
target_channel = len(target.shape)
# Validate the height, width and channels of the input image
if (master_height != target_height or master_width != target_width or master_channel != target_channel):
return -1
else:
nonZero_target = cv2.countNonZero(target)
nonZero_master = cv2.countNonZero(master)
if (questionID == 1):
if (nonZero_target < 1200000):
return -1
elif(questionID == 2):
if (nonZero_target < 700000):
return -1
else:
return -1
total_diff = 0.0;
master_channels = cv2.split(master);
target_channels = cv2.split(target);
for i in range(0, len(master_channels), 1):
dst = cv2.absdiff(master_channels[i], target_channels[i])
dst = cv2.pow(dst, 2)
mean = cv2.mean(dst)
total_diff = total_diff + mean[0]**(1/2.0)
return total_diff;
if __name__ == '__main__':
question_number = -1
# Validate the input arguments
if (len(sys.argv) != 6):
help_message()
sys.exit()
else:
question_number = int(sys.argv[1])
if (question_number > 4 or question_number < 1):
print("Input parameters out of bound ...")
sys.exit()
input_image1 = cv2.imread(sys.argv[2], 0)
input_image2 = cv2.imread(sys.argv[3], 0)
input_image3 = cv2.imread(sys.argv[4], 0)
    function_launch = {
        1: lambda: Perspective_warping(input_image1, input_image2, input_image3),
        2: lambda: Cylindrical_warping(input_image1, input_image2, input_image3),
        3: lambda: Bonus_perspective_warping(input_image1, input_image2, input_image3),
        4: lambda: Bonus_cylindrical_warping(input_image1, input_image2, input_image3),
    }

    # Call the selected function. The lambdas defer execution; the original
    # dict invoked all four functions immediately and stored the booleans.
    function_launch[question_number]()
|
[
"noreply@github.com"
] |
noreply@github.com
|
9cee7ee01989eaac94944331ddf1326936e6d652
|
b45df36da7d09b3170c503753dabf62ac28db8d8
|
/book/exercise/Key-valueOperations/Python/TweetService.py
|
da23adede52957c795737d066a9b02e35525edcb
|
[
"MIT"
] |
permissive
|
skatsuta/aerospike-training
|
42b9a6e9372bd396d3489ec73778df7c00f5829e
|
d08d1805e19277b08327df5670e4c335ee63b853
|
refs/heads/master
| 2023-08-16T13:03:09.930256
| 2014-12-12T17:18:02
| 2014-12-12T17:18:02
| 27,905,857
| 0
| 0
|
MIT
| 2023-08-03T19:46:21
| 2014-12-12T04:52:47
|
C#
|
UTF-8
|
Python
| false
| false
| 8,093
|
py
|
#!/usr/bin/env python
#
# * Copyright 2012-2014 by Aerospike.
# *
# * Permission is hereby granted, free of charge, to any person obtaining a copy
# * of this software and associated documentation files (the "Software"), to
# * deal in the Software without restriction, including without limitation the
# * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# * sell copies of the Software, and to permit persons to whom the Software is
# * furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included in
# * all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# * IN THE SOFTWARE.
#
from __future__ import print_function
import aerospike
import sys
import time
from aerospike import predicates as p
import random
AS_POLICY_W_EXISTS = "exists"
AS_POLICY_EXISTS_UNDEF = 0 # Use default value
AS_POLICY_EXISTS_IGNORE= 1 # Write the record, regardless of existence.
AS_POLICY_EXISTS_CREATE= 2 # Create a record, ONLY if it doesn't exist.
AS_POLICY_EXISTS_UPDATE= 3 # Update a record, ONLY if it exist (NOT YET IMPL).
class TweetService(object):
def __init__(self, client):
self.client = client
def createTweet(self):
print("\n********** Create Tweet **********\n")
# /*********************///
# /*****Data Model*****///
# Namespace: test
# Set: tweets
# Key: <username:<counter>>
# Bins:
# tweet - string
# ts - int (Stores epoch timestamp of the tweet)
# username - string
# Sample Key: dash:1
# Sample Record:
# { tweet: 'Put. A. Bird. On. It.',
# ts: 1408574221,
# username: 'dash'
# }
# /*********************///
userRecord = None
userKey = None
tweetKey = None
# Get username
username = str()
username = raw_input("Enter username: ")
if len(username) > 0:
# Check if username exists
# Exercise 2
print("\nTODO: Check if username exists")
meta = None
policy = None
record = {}
if userRecord:
# Set Tweet Count
if 'tweetcount' in userRecord:
nextTweetCount = int(userRecord['tweetcount']) + 1
else:
nextTweetCount = 1
# Get tweet
record['tweet'] = raw_input("Enter tweet for " + username + ":")
# Create timestamp to store along with the tweet so we can
# query, index and report on it
ts= self.getTimeStamp()
# TODO: Create WritePolicy instance
# Exercise 2
print("\nTODO: Create WritePolicy instance");
#TODO: Create Key and Bin instances for the tweet record. HINT: tweet key should be in username:nextTweetCount format
# Exercise 2
print("\nTODO: Create Key and Bin instances for the tweet record");
# TODO: Write tweet record
# Exercise 2
print("\nTODO: Write tweet record");
# TODO: Update tweet count and last tweeted timestamp in the user
# Exercise 2
print("\nINFO: Tweet record created!\n",record,tweetKey)
# Update tweet count and last tweet'd timestamp in the user record
else:
print("ERROR: User record not found!\n")
def scanAllTweetsForAllUsers(self):
# Initiate scan operation that invokes callback for outputting tweets on the console
# Exercise 4
print("\nTODO: Initiate scan operation that invokes callback for outputting tweets to the console")
def updateUser(self, client, userKey, policy, ts, tweetCount):
# TODO: Update tweet count and last tweeted timestamp in the user record
# Exercise 2
print("\nTODO: Update tweet count and last tweeted timestamp in the user record")
def updateUserUsingOperate(self, client, userKey, policy, ts):
""" operate not supported in Python Client """
print("\nINFO: The tweet count now is: ")
def queryTweetsByUsername(self):
print("\n********** Query Tweets By Username **********\n")
def queryUsersByTweetCount(self):
print("\n********** Query Users By Tweet Count Range **********\n")
def getTimeStamp(self):
return int(round(time.time() * 1000))
def createTweets(self):
randomTweets = ["For just $1 you get a half price download of half of the song and listen to it just once.", "People tell me my body looks like a melted candle", "Come on movie! Make it start!", "Byaaaayy", "Please, please, win! Meow, meow, meow!", "Put. A. Bird. On. It.", "A weekend wasted is a weekend well spent", "Would you like to super spike your meal?", "We have a mean no-no-bring-bag up here on aisle two.", "SEEK: See, Every, EVERY, Kind... of spot", "We can order that for you. It will take a year to get there.", "If you are pregnant, have a soda.", "Hear that snap? Hear that clap?", "Follow me and I may follow you", "Which is the best cafe in Portland? Discuss...", "Portland Coffee is for closers!", "Lets get this party started!", "How about them portland blazers!", "You got school'd, yo", "I love animals", "I love my dog", "What's up Portland", "Which is the best cafe in Portland? Discuss...", "I dont always tweet, but when I do it is on Tweetaspike"]
totalUsers = 10000
maxTweets = 20
username = str()
ts = 0
wr_policy = {
AS_POLICY_W_EXISTS: AS_POLICY_EXISTS_IGNORE
}
print("\nCreate up to " , maxTweets , " tweets each for " , totalUsers , " users. Press any key to continue...\n")
raw_input("..")
j = 0
while j < totalUsers:
username = "user" + str(random.randint(1,totalUsers))
userKey = ("test", "users", username)
meta = None
policy = None
ts = None
k = 0
(key, metadata,userRecord) = self.client.get(userKey,policy)
if userRecord:
totalTweets = random.randint(1,maxTweets)
while k <= totalTweets:
record = {}
ts = self.getTimeStamp()
tweetKey = ("test", "tweets", username + ":" + str(k))
record["tweet"] = random.choice(randomTweets)
record["ts"] = ts
record["username"]= username
self.client.put(tweetKey,record, meta, wr_policy)
k += 1
# Create timestamp to store along with the tweet so we can
# query, index and report on it
print("\nWrote " , totalTweets , " tweets for " , username , "!")
if totalTweets > 0:
# Update tweet count and last tweet'd timestamp in the user
# record
self.updateUser(self.client, userKey, wr_policy, ts, totalTweets)
j += 1
print("\n\nDone creating up to " , maxTweets , " tweets each for " , totalUsers , " users!\n")
|
[
"katsuta_soshi@cyberagent.co.jp"
] |
katsuta_soshi@cyberagent.co.jp
|
03295f3478f5d7e47e2204ea64855e9de3926e54
|
121ab18bfbdb690a3365e84b7da6d1b143fb8768
|
/tabTools/add_tag.py
|
aacad3786e6de8bf2b6c20cec752c44ac4347af1
|
[] |
no_license
|
zy041225/python_bin
|
1661ef120ce22d63f7f8929dfe637bf12e7b31b4
|
4b0219b4a99394b48c3dad19727cbfb2038d2e3e
|
refs/heads/master
| 2020-09-13T15:34:02.145545
| 2019-11-20T02:07:34
| 2019-11-20T02:07:34
| 222,830,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
#!/usr/bin/env python3
import sys
if len(sys.argv) != 6 or sys.argv[5] == '-':
sys.exit("python3 %s <tab> <tab_column> <tag_lst> <tag_column> <tag_name>\n<tag_name> should NOT be '-'"% (sys.argv[0]))
tags = {}  # renamed from 'dict' to avoid shadowing the builtin
tag_lst = open(sys.argv[3])
for line in tag_lst:
line = line.rstrip()
tmp = line.split('\t')
    tags[tmp[int(sys.argv[4])-1]] = 1
tab = open(sys.argv[1])
for line in tab:
line = line.rstrip()
tmp = line.split('\t')
tag = '-'
    if tmp[int(sys.argv[2])-1] in tags:
tag = sys.argv[5]
line += '\t%s' % (tag)
print(line)
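# Example invocation (hypothetical file names):
#   python3 add_tag.py table.tsv 1 tags.lst 1 MY_TAG
# appends MY_TAG to each row of table.tsv whose column 1 value appears in
# column 1 of tags.lst, and '-' otherwise.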
|
[
"zy041225@gmail.com"
] |
zy041225@gmail.com
|
9e8faea111d72f8b2e5ec2a6e79cd03f082cad11
|
8f24e443e42315a81028b648e753c50967c51c78
|
/python/ray/train/examples/horovod/horovod_pytorch_example.py
|
f4d15ae0515b7fad3547711c35c685f4407f2ced
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
simon-mo/ray
|
d07efdada8d05c6e10417f96e8dfc35f9ad33397
|
1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8
|
refs/heads/master
| 2023-03-06T00:09:35.758834
| 2022-12-23T18:46:48
| 2022-12-23T18:46:48
| 122,156,396
| 4
| 2
|
Apache-2.0
| 2023-03-04T08:56:56
| 2018-02-20T04:47:06
|
Python
|
UTF-8
|
Python
| false
| false
| 8,402
|
py
|
import argparse
from filelock import FileLock
import horovod.torch as hvd
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torchvision import datasets, transforms
from ray.air import session
from ray.air.config import ScalingConfig
from ray.train.horovod import HorovodTrainer
from ray.train.torch.torch_checkpoint import TorchCheckpoint
import ray.train.torch
def metric_average(val, name):
tensor = torch.tensor(val)
avg_tensor = hvd.allreduce(tensor, name=name)
return avg_tensor.item()
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)  # explicit dim avoids the deprecation warning
def setup(config):
data_dir = config.get("data_dir", None)
seed = config.get("seed", 42)
batch_size = config.get("batch_size", 64)
use_adasum = config.get("use_adasum", False)
lr = config.get("lr", 0.01)
momentum = config.get("momentum", 0.5)
use_cuda = config.get("use_cuda", False)
# Horovod: initialize library.
hvd.init()
torch.manual_seed(seed)
if use_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
kwargs = {"pin_memory": True} if use_cuda else {}
data_dir = data_dir or "~/data"
with FileLock(os.path.expanduser("~/.horovod_lock")):
train_dataset = datasets.MNIST(
data_dir,
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
# Horovod: use DistributedSampler to partition the training data.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank()
)
# Note, don't set `num_workers` in DataLoader (not even 1),
# as that will separately start multiple processes (each corresponding to 1 worker)
# to load the data. This is known to cause issues with Ray.
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler, **kwargs
)
model = Net()
# By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not use_adasum else 1
if use_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(model.parameters(), lr=lr * lr_scaler, momentum=momentum)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
op=hvd.Adasum if use_adasum else hvd.Average,
)
return model, optimizer, train_loader, train_sampler
def train_epoch(
model, optimizer, train_sampler, train_loader, epoch, log_interval, use_cuda
):
loss = None
model.train()
# Horovod: set epoch to sampler for shuffling.
train_sampler.set_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_loader):
if use_cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
# Horovod: use train_sampler to determine the number of
# examples in this worker's partition.
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_sampler),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
return loss.item() if loss else None
def train_func(config):
num_epochs = config.get("num_epochs", 10)
log_interval = config.get("log_interval", 10)
use_cuda = config.get("use_cuda", False)
save_model_as_dict = config.get("save_model_as_dict", False)
model, optimizer, train_loader, train_sampler = setup(config)
results = []
for epoch in range(num_epochs):
loss = train_epoch(
model, optimizer, train_sampler, train_loader, epoch, log_interval, use_cuda
)
if save_model_as_dict:
checkpoint = TorchCheckpoint.from_state_dict(model.state_dict())
else:
checkpoint = TorchCheckpoint.from_model(model)
results.append(loss)
session.report(dict(loss=loss), checkpoint=checkpoint)
# Only used for testing.
return results
def main(num_workers, use_gpu, kwargs):
trainer = HorovodTrainer(
train_loop_per_worker=train_func,
train_loop_config={
"num_epochs": kwargs["num_epochs"],
"log_interval": kwargs["log_interval"],
"use_cuda": kwargs["use_cuda"],
},
scaling_config=ScalingConfig(num_workers=num_workers, use_gpu=use_gpu),
)
result = trainer.fit()
print(result)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(
description="PyTorch MNIST Example",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--num-epochs",
type=int,
default=5,
metavar="N",
help="number of epochs to train (default: 10)",
)
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)",
)
parser.add_argument(
"--use-gpu", action="store_true", default=False, help="enables CUDA training"
)
parser.add_argument(
"--seed", type=int, default=42, metavar="S", help="random seed (default: 42)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction",
)
parser.add_argument(
"--num-workers",
type=int,
default=2,
help="Number of Ray workers to use for training.",
)
parser.add_argument(
"--data-dir",
help="location of the training dataset in the local filesystem ("
"will be downloaded if needed)",
)
parser.add_argument(
"--address",
required=False,
type=str,
default=None,
help="Address of Ray cluster.",
)
args = parser.parse_args()
if args.address:
ray.init(args.address)
else:
ray.init()
use_cuda = args.use_gpu if args.use_gpu is not None else False
kwargs = {
"data_dir": args.data_dir,
"seed": args.seed,
"use_cuda": use_cuda,
"batch_size": args.batch_size,
"use_adasum": args.use_adasum if args.use_adasum else False,
"lr": args.lr,
"momentum": args.momentum,
"num_epochs": args.num_epochs,
"log_interval": args.log_interval,
}
main(num_workers=args.num_workers, use_gpu=use_cuda, kwargs=kwargs)
|
[
"noreply@github.com"
] |
noreply@github.com
|
5a4ce30950656174d6f9d7daeedd4c6f6fb567c7
|
7e83e6b4e1f6c461e8b77eb620a9a1c851b611e8
|
/hello_django/hello_django/urls.py
|
f5dc4fe5f9d8a8d42a862b0098131600a0887911
|
[] |
no_license
|
guess3233qa/opendata
|
fa64e6331b1c1f08972d44a339966444dc45ff96
|
b3d95ce69ce6cfd23947a2f32b7ab2f058eb7ed2
|
refs/heads/master
| 2021-01-17T05:17:38.871841
| 2017-06-28T01:06:27
| 2017-06-28T01:06:27
| 95,610,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
"""hello_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url , include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
    url(r'', include('hello.urls')),
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
5db923fb9cbe244877c30b08690665cc6022a117
|
2b98c555e146487a08b0f9c2e94f2564977623dd
|
/main.py
|
14aeb66d5b362c719da26bf8648a29d9ed64f39f
|
[] |
no_license
|
RossT18/Binary-Tree
|
400eb9efc79f3c3072a46d6e5c7ae2e37be4940b
|
5f22c8d76208cfe6283f0bfef573b481002c209e
|
refs/heads/main
| 2023-03-27T00:25:06.151084
| 2021-03-28T17:57:01
| 2021-03-28T17:57:01
| 331,387,644
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
import BinaryTree as bt
# https://upload.wikimedia.org/wikipedia/commons/d/da/Binary_search_tree.svg
root = bt.Node(8)
tree = bt.BinaryTree(root)
tree.add(3)
tree.add(1)
tree.add(6)
tree.add(4)
tree.add(7)
tree.add(10)
tree.add(14)
tree.add(13)
# Expected output order:
# 1,3,4,6,7,8,10,13,14
print(tree)
print("Length: " + str(len(tree)))
|
[
"rtinsley1199@gmail.com"
] |
rtinsley1199@gmail.com
|
fad1fce49448f8fa4f4b57f21e6b48648aff90b7
|
282234bf6467224f769b67c923b111add740c775
|
/Course_2_Python_Data_Structures/ex_06_05/ex_06_05.py
|
82c9334e74a4ad3f1c8c105755813523c36f1ccb
|
[] |
no_license
|
gravitonGB/python_for_everybody
|
73d0767db1cf0c705144e287fba3fe66bc42ec43
|
8b3d197247bf52c66551867f2851d3006f7da484
|
refs/heads/master
| 2020-03-09T11:45:37.238749
| 2018-04-09T12:35:03
| 2018-04-09T12:35:03
| 128,765,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
#Exercise 6.5
#print("Excercise 6.5")
text = "X-DSPAM-Confidence: 0.8475";
pos = text.find(':')
t2 = text[pos+1:len(text)]
last_text = t2.strip()
last_value = float(last_text)
print(last_value)
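# Illustrative check (not part of the original exercise): the same value can
# be recovered in one step, since float() tolerates surrounding whitespace.
assert float(text.split(':')[1]) == last_value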
|
[
"gokhanbegce@gmail.com"
] |
gokhanbegce@gmail.com
|
cce85590e54ae1deb81edc2cb58d4281f38508e8
|
2d27f9735aa104cffb0701683d70021d61729f57
|
/template.py
|
8d2e3a024ba2f3223f9cdd13efe292b2c6176d09
|
[] |
no_license
|
indexample/AOC2020
|
2b84f07fd479c4fc7df0b1b3c2b65176fcf6a7de
|
300363a693e933173f3a5edfc3bc1797428f0346
|
refs/heads/main
| 2023-02-06T03:19:43.667864
| 2020-12-22T13:04:18
| 2020-12-22T13:04:18
| 318,496,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
import os
import timeit
#import numpy as np
#from itertools import count
# ideas
# part1:
# part2:
def log(*args):
if LOGGING:
for i in args:
print( str(i) )
def timefunc(iter, function, *args):
def wrap():
function(*args)
t = timeit.Timer(wrap)
return t.timeit(iter) / iter #average
def solve1(d):
pass
LOGGING = 1
f_loc = 'D:/GIT/AOC2020-1/day14/input.txt'
#set = {}, list = [], generator = ()
#data = [int(x) for line in open(f_loc, 'r').read().rstrip().split("\n") for x in line.split(',') if x != 'x' ] #or read().splitlines()
#data = [x for line in open(f_loc, 'r').read().rstrip().split("\n") for x in line.split(',') ]
data = [line for line in open(f_loc, 'r').read().rstrip().split("\n") ]
#data = list(map(char_to_int, open(f_loc, 'r').readlines()))
#i = dict(enumerate(data))
print('\n---- part 1 ----')
print(f': {solve1(data)}')
print('\n---- part 2 ----')
#print(f': {solve2(data)}')
# timeit
#print(f'timeit: {timefunc(10, solve1, data)}' )
# part 1:
# part 2:
|
[
"3052927+devale@users.noreply.github.com"
] |
3052927+devale@users.noreply.github.com
|
a06a1754d3ec0bbd190006a04d477e5c50c00b20
|
4e3f83be95d2225fd18047fc6eaf09992d4491ea
|
/src/utils/Datetimes.py
|
b01224589683651301f55b46dc3ec0fa6b2561c3
|
[
"Apache-2.0"
] |
permissive
|
Gabvaztor/TFBoost
|
7f1d56e44498a23f2e9d884fcd66942cb9e6d622
|
a37b906f5cb47becc3275def8282ff395d06ef45
|
refs/heads/master
| 2021-06-06T07:56:05.995559
| 2019-10-14T15:27:29
| 2019-10-14T15:36:43
| 83,965,500
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
from datetime import datetime
def date_from_format(date, format="%Y-%m-%d %H:%M:%S"):
    """Return the given datetime rendered as a string in the given format."""
    return date.strftime(format)
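# Illustrative usage (a minimal sketch, not part of the original module):
if __name__ == "__main__":
    print(date_from_format(datetime(2019, 10, 14, 15, 27, 29)))  # 2019-10-14 15:27:29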
|
[
"gabvaztor@gmail.com"
] |
gabvaztor@gmail.com
|
73f8ddeef35504d0ce12496657651f487254073c
|
9edff345ec24494058d5ce01b4fb46012dc8bf29
|
/中级算法/回溯算法/单词搜索.py
|
180c8e196a24a8a9cd990344a4e486c18e38aac9
|
[] |
no_license
|
gump1368/leetcode-python
|
65909bb9931e3840d29743222d92af3a77902f33
|
2d4f47e21cc105700feccb0d525408bdc54c663c
|
refs/heads/master
| 2023-01-24T03:50:19.447898
| 2023-01-12T07:03:12
| 2023-01-12T07:03:12
| 235,063,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,073
|
py
|
#! -*- coding: utf-8 -*-
"""
@Author: gump
@Create Time: 20220714
@Info: Word Search
"""
from typing import List
# def exist(board: List[List[str]], word: str) -> bool:
# """
#     Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCB"
#     Output: false
#
# :param board:
# :param word:
# :return:
# """
# row_length = len(board)
# col_length = len(board[0])
# size = row_length * col_length
#
# def trace_back(value, k, last_position):
# if k >= len(word) or value < 0:
# return k
#
# while value < size:
# row = value // col_length
# col = value % col_length
# if board[row][col] == word[k]:
# left = value - 1
# right = value + 1
# up = value - row_length
# down = value + row_length
# k += 1
# k = trace_back(left, k, 'left') if last_position != 'right' else k
# k = trace_back(right, k, 'right') if last_position != 'left' else k
# k = trace_back(up, k, 'up') if last_position != 'down' else k
# k = trace_back(down, k, 'down') if last_position != 'up' else k
# else:
# value += 1
#
# if k >= len(word):
# break
#
# return k
#
# pos = trace_back(0, 0, 'None')
# return True if pos == len(word) else False
def exist(board: List[List[str]], word: str) -> bool:
"""
    Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCB"
    Output: false
:param board:
:param word:
:return:
"""
row_length = len(board)
col_length = len(board[0])
def trace_back(i, j, k, temp_path):
if (i, j) in temp_path:
return k - 1
if k >= len(word):
return k
temp_path.append((i, j))
depth = k
if j-1 >= 0 and board[i][j-1] == word[k]:
temp_1 = trace_back(i, j-1, k+1, temp_path)
depth = temp_1 if temp_1 > depth else depth
if j+1 < col_length and board[i][j+1] == word[k]:
temp_1 = trace_back(i, j+1, k + 1, temp_path)
depth = temp_1 if temp_1 > depth else depth
if i-1 >= 0 and board[i-1][j] == word[k]:
temp_1 = trace_back(i-1, j, k+1, temp_path)
depth = temp_1 if temp_1 > depth else depth
if i+1 < row_length and board[i+1][j] == word[k]:
temp_1 = trace_back(i+1, j, k+1, temp_path)
depth = temp_1 if temp_1 > depth else depth
temp_path.pop(-1)
return depth
for row in range(row_length):
for col in range(col_length):
if board[row][col] == word[0]:
pos = trace_back(row, col, 1, [])
if pos >= len(word):
return True
return False
if __name__ == '__main__':
board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]]
word = "ABCCED"
print(exist(board, word))
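    # Illustrative extra check, taken from the commented-out docstring above:
    # "ABCB" cannot be formed without reusing a cell, so this should be False.
    print(exist(board, "ABCB"))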
|
[
"guan_dongpu@gowild.cn"
] |
guan_dongpu@gowild.cn
|
da4d9970097abb9879bdaf10f8d859c5287053b0
|
5b8fcb1bf82a7c1ef5b6c2a939b1d1597bc7a24b
|
/create_json_for_airtable_operator.py
|
e00238b2f39eae43c6d55eae4974dcf2d194d262
|
[] |
no_license
|
katerinekhh/airflow_custom_stuff
|
2420c3ee95dab01e5eeeb8248500e253126e5b48
|
43ba78d96770a575ba7ab11a691b101e6d6604af
|
refs/heads/master
| 2022-10-12T13:55:01.916266
| 2020-06-12T13:17:06
| 2020-06-12T13:17:06
| 271,645,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,245
|
py
|
from datetime import datetime
import json
from airflow.utils.decorators import apply_defaults
from airflow.models.baseoperator import BaseOperator
from airflow.hooks.http_hook import HttpHook
class CreateJsonForAirtableOperator(BaseOperator):
@apply_defaults
def __init__(
self,
endpoint: str,
http_conn_id: str,
message_id_filepath: str,
update_filepath: str,
method='GET',
request_params=None,
*args, **kwargs,
):
super().__init__(*args, **kwargs)
self.http_conn_id = http_conn_id
self.request_params = request_params or {}
self.endpoint = endpoint
self.message_id_filepath = message_id_filepath
self.update_filepath = update_filepath
self.hook = HttpHook(
method=method,
http_conn_id=http_conn_id)
def execute(self, context):
response = self.hook.run(self.endpoint, data=self.request_params)
with open(self.message_id_filepath, 'r') as id_file:
message_id = id_file.read()
json_response = json.loads(response.text)
airtable_updates_data = {}
airtable_updates_data['records'] = []
for update in json_response['result']:
update_data_fields = {}
update_data = {}
if update['callback_query']['message']['message_id'] == int(message_id):
chat_id = update['callback_query']['message']['chat']['id']
username = update['callback_query']['from']['username']
triggered_at = datetime.fromtimestamp(
update['callback_query']['message']['date']).isoformat()[:-3] + "Z"
update_data['chat_id'] = chat_id
update_data['username'] = username
update_data['triggered_at'] = triggered_at
update_data['event_type'] = 'push_go_button'
update_data['reporter_name'] = 'khkaterina'
update_data_fields['fields'] = update_data
airtable_updates_data['records'].append(update_data_fields)
with open(self.update_filepath, 'w') as file:
json.dump(airtable_updates_data, file)
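# Illustrative DAG wiring (a sketch; the connection id, endpoint and file
# paths below are assumptions, not part of this module):
#
# from airflow import DAG
# from datetime import datetime as dt
#
# with DAG("telegram_to_airtable", start_date=dt(2020, 6, 1),
#          schedule_interval=None) as dag:
#     create_json = CreateJsonForAirtableOperator(
#         task_id="create_json_for_airtable",
#         endpoint="getUpdates",
#         http_conn_id="telegram_bot",
#         message_id_filepath="/tmp/message_id.txt",
#         update_filepath="/tmp/airtable_updates.json",
#     )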
|
[
"you@example.com"
] |
you@example.com
|
7713fd10c64850e9770370122883e5b6ea01086f
|
e2ae96b74289a04a2386294bf51bacad92e2a830
|
/city_scrapers_core/spiders/legistar.py
|
29c3176db02b4b0851fd939f9f79845a629163c5
|
[
"MIT"
] |
permissive
|
will-snavely/city-scrapers-core
|
6afa9d78fb1c325420baaae030633b01111f11bb
|
cb865069e49d09ab251b7f99247df5e13c5d0241
|
refs/heads/main
| 2022-12-11T21:39:03.307347
| 2020-09-09T13:29:53
| 2020-09-09T13:29:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,265
|
py
|
from datetime import datetime
from typing import Iterable, List, Mapping, Optional, Tuple
from urllib.parse import urlparse
import scrapy
from legistar.events import LegistarEventsScraper
from ..items import Meeting
from .spider import CityScrapersSpider
LINK_TYPES = ["Agenda", "Minutes", "Video", "Summary", "Captions"]
class LegistarSpider(CityScrapersSpider):
"""Subclass of :class:`CityScrapersSpider` that handles processing Legistar sites,
which almost always share the same components and general structure.
Uses the `Legistar events scraper <https://github.com/opencivicdata/python-legistar-scraper/blob/master/legistar/events.py>`_
    from the `python-legistar-scraper <https://github.com/opencivicdata/python-legistar-scraper>`_ library.
Any methods that don't pull the correct values can be replaced.
""" # noqa
link_types = []
def parse(self, response: scrapy.http.Response) -> Iterable[Meeting]:
"""Parse response from the :class:`LegistarEventsScraper`. Ignores the ``scrapy``
:class:`Response` which is still requested to be able to hook into ``scrapy``
broadly.
:param response: Scrapy response to be ignored
:return: Iterable of processed meetings
"""
events = self._call_legistar()
return self.parse_legistar(events)
def parse_legistar(
self, events: Iterable[Tuple[Mapping, Optional[str]]]
) -> Iterable[Meeting]:
"""Method to be implemented by Spider classes that will handle the response from
Legistar. Functions similar to ``parse`` for other Spider classes.
:param events: Iterable consisting of tuples of a dict-like object of scraped
results from legistar and an agenda URL (if available)
:raises NotImplementedError: Must be implemented in subclasses
:return: [description]
"""
raise NotImplementedError("Must implement parse_legistar")
def _call_legistar(
self, since: int = None
) -> Iterable[Tuple[Mapping, Optional[str]]]:
les = LegistarEventsScraper()
les.BASE_URL = self.base_url
les.EVENTSPAGE = f"{self.base_url}/Calendar.aspx"
if not since:
since = datetime.today().year
return les.events(since=since)
def legistar_start(self, item: Mapping) -> datetime:
"""Pulls the start time from a Legistar item
:param item: Scraped item from Legistar
:return: Meeting start datetime
"""
start_date = item.get("Meeting Date")
start_time = item.get("Meeting Time")
if start_date and start_time:
try:
return datetime.strptime(
f"{start_date} {start_time}", "%m/%d/%Y %I:%M %p"
)
except ValueError:
return datetime.strptime(start_date, "%m/%d/%Y")
def legistar_links(self, item: Mapping) -> List[Mapping[str, str]]:
"""Pulls relevant links from a Legistar item
:param item: Scraped item from Legistar
:return: List of meeting links
"""
links = []
for link_type in LINK_TYPES + self.link_types:
if isinstance(item.get(link_type), dict) and item[link_type].get("url"):
links.append({"href": item[link_type]["url"], "title": link_type})
return links
def legistar_source(self, item: Mapping) -> str:
"""Pulls the source URL from a Legistar item. Pulls a specific meeting URL if
available, otherwise defaults to the general Legistar calendar page.
:param item: Scraped item from Legistar
:return: Source URL
"""
default_url = f"{self.base_url}/Calendar.aspx"
if isinstance(item.get("Name"), dict):
return item["Name"].get("url", default_url)
if isinstance(item.get("Meeting Details"), dict):
return item["Meeting Details"].get("url", default_url)
return default_url
@property
def base_url(self) -> str:
"""Property with the Legistar site's base URL
:return: Legistar base URL
"""
parsed_url = urlparse(self.start_urls[0])
return f"{parsed_url.scheme}://{parsed_url.netloc}"
|
[
"pjsier@gmail.com"
] |
pjsier@gmail.com
|
b3d7305f178eead8c1316698c2989f3d44540d31
|
71b3715408330a42c62c7176a0f8fb1901d3ba6c
|
/src/day01/HelloWorld.py
|
3767e64837fc2b4f8dab04a81c60ccf1cac9530c
|
[] |
no_license
|
hubin9218/LearnPython
|
41fac40b4d883fde62c4a0f0405da76da0bda5df
|
a9fd0e55bddc5f1a2a07212a7f80c603ea2dc735
|
refs/heads/master
| 2021-02-17T06:13:48.945425
| 2020-03-23T09:49:39
| 2020-03-23T09:49:39
| 245,076,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,489
|
py
|
"""
Draw Peppa Pig...
"""
from turtle import *
def nose(x,y):
"""画鼻子"""
penup()
    # Move the turtle to the given coordinates
goto(x,y)
pendown()
    # Set the turtle's heading (0 = east, 90 = north, 180 = west, 270 = south)
setheading(-30)
begin_fill()
a = 0.4
for i in range(120):
if 0 <= i < 30 or 60 <= i <90:
a = a + 0.08
            # Turn left 3 degrees
left(3)
            # Move forward
forward(a)
else:
a = a - 0.08
left(3)
forward(a)
end_fill()
penup()
setheading(90)
forward(25)
setheading(0)
forward(10)
pendown()
    # Set the pen color (red, green, blue)
pencolor(255, 155, 192)
setheading(10)
begin_fill()
circle(5)
color(160, 82, 45)
end_fill()
penup()
setheading(0)
forward(20)
pendown()
pencolor(255, 155, 192)
setheading(10)
begin_fill()
circle(5)
color(160, 82, 45)
end_fill()
def head(x, y):
"""画头"""
color((255, 155, 192), "pink")
penup()
goto(x,y)
setheading(0)
pendown()
begin_fill()
setheading(180)
circle(300, -30)
circle(100, -60)
circle(80, -100)
circle(150, -20)
circle(60, -95)
setheading(161)
circle(-300, 15)
penup()
goto(-100, 100)
pendown()
setheading(-30)
a = 0.4
for i in range(60):
if 0<= i < 30 or 60 <= i < 90:
a = a + 0.08
            lt(3)  # turn left 3 degrees
            fd(a)  # move forward by step a
else:
a = a - 0.08
lt(3)
fd(a)
end_fill()
def ears(x,y):
"""画耳朵"""
color((255, 155, 192), "pink")
penup()
goto(x, y)
pendown()
begin_fill()
setheading(100)
circle(-50, 50)
circle(-10, 120)
circle(-50, 54)
end_fill()
penup()
setheading(90)
forward(-12)
setheading(0)
forward(30)
pendown()
begin_fill()
setheading(100)
circle(-50, 50)
circle(-10, 120)
circle(-50, 56)
end_fill()
def eyes(x,y):
"""画眼睛"""
color((255, 155, 192), "white")
penup()
setheading(90)
forward(-20)
setheading(0)
forward(-95)
pendown()
begin_fill()
circle(15)
end_fill()
color("black")
penup()
setheading(90)
forward(12)
setheading(0)
forward(-3)
pendown()
begin_fill()
circle(3)
end_fill()
color((255, 155, 192), "white")
penup()
seth(90)
forward(-25)
seth(0)
forward(40)
pendown()
begin_fill()
circle(15)
end_fill()
color("black")
penup()
setheading(90)
forward(12)
setheading(0)
forward(-3)
pendown()
begin_fill()
circle(3)
end_fill()
def cheek(x,y):
"""画脸颊"""
color((255, 155, 192))
penup()
goto(x,y)
pendown()
setheading(0)
begin_fill()
circle(30)
end_fill()
def mouth(x,y):
"""画嘴巴"""
color(239, 69, 19)
penup()
goto(x, y)
pendown()
setheading(-80)
circle(30, 40)
circle(40, 80)
def setting():
"""设置参数"""
pensize(4)
    # Hide the turtle cursor
hideturtle()
colormode(255)
color((255, 155, 192), "pink")
setup(840, 500)
speed(10)
def main():
"""主函数"""
setting()
nose(-100, 100)
head(-69, 167)
ears(0, 160)
eyes(0, 140)
cheek(80, 10)
mouth(-20, 30)
done()
if __name__ == '__main__':
main()
|
[
"hubin9218@163.com"
] |
hubin9218@163.com
|
7adb46a971ce547c265474004b96ae65283904dc
|
a0cfde6971fbe3b2c1de726a7bfc1c60fba3a137
|
/userbot/plugins/git.py
|
8d3857c9917303b94e1f9cebb82f052502dce43e
|
[
"MIT"
] |
permissive
|
No-OnE-Kn0wS-Me/FridayUserbot
|
6efb25dfd2eb06674b99bc158a6bbddcd128012c
|
9ef60066b72fa085300408855010ea05b9026f82
|
refs/heads/master
| 2022-11-19T23:10:49.342679
| 2020-06-22T13:49:56
| 2020-06-22T13:49:56
| 274,447,417
| 2
| 1
|
MIT
| 2020-06-23T15:52:27
| 2020-06-23T15:52:26
| null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(0, 101)
input_str = event.pattern_match.group(1)
if input_str == "githubs":
await event.edit(input_str)
animation_chars = [
"https://github.com/midhunkm1294-bit/friday",
"https://github.com/midhunkm1294-bit/friday"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 2])
|
[
"66051049+StarkGang@users.noreply.github.com"
] |
66051049+StarkGang@users.noreply.github.com
|
93e2cb9162dfaedfe3a58c9892ccb9936f9405c9
|
9e7d7b4d029554eed0f760a027cd94558b919ae2
|
/CHAPTER15/overlaying.py
|
e320bf396d4410f1a0cc189810fc886ac93deca0
|
[] |
no_license
|
pooja1506/AutomateTheBoringStuff_2e
|
8247b68a195d5e1976c6474f0e97d947906ffd35
|
5bab9ccdcdb22ee10fe1272c91042be40fd67c17
|
refs/heads/master
| 2022-04-10T19:21:44.402829
| 2020-04-05T12:10:32
| 2020-04-05T12:10:32
| 249,620,282
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
import PyPDF2
minutesFile = open('meetingminutes.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(minutesFile)
minutesFirstPage = pdfReader.getPage(0)
pdfWatermarkReader = PyPDF2.PdfFileReader(open('watermark.pdf', 'rb'))
minutesFirstPage.mergePage(pdfWatermarkReader.getPage(0))
pdfWriter = PyPDF2.PdfFileWriter()
pdfWriter.addPage(minutesFirstPage)
for pageNum in range(1, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
resultPdfFile = open('watermarkedCover.pdf', 'wb')
pdfWriter.write(resultPdfFile)
minutesFile.close()
resultPdfFile.close()
|
[
"pooja.dmehta15@gmail.com"
] |
pooja.dmehta15@gmail.com
|
ef2911b4133217bc48dbf92e02a62bd1d9b5d171
|
e168a16fdd43d3023d16d8a643ccca318a44c327
|
/evm/logic/call.py
|
42acedd0f1791f1cebd63438077524bdee541b46
|
[] |
no_license
|
DavidKnott/py-evm
|
c589c88af55c121ea375bfdb0a53ecc6a4836119
|
66c47f58a62e995b5ce89e47007c8b03796c80b9
|
refs/heads/master
| 2021-01-01T04:08:39.921768
| 2017-07-18T13:03:45
| 2017-07-18T13:03:45
| 97,128,228
| 1
| 0
| null | 2017-07-13T13:54:57
| 2017-07-13T13:54:56
| null |
UTF-8
|
Python
| false
| false
| 7,349
|
py
|
from evm import constants
from evm.opcode import (
Opcode,
)
from evm.utils.address import (
force_bytes_to_address,
)
class BaseCall(Opcode):
def compute_msg_gas(self, computation, gas, to, value):
raise NotImplementedError("Must be implemented by subclasses")
def get_call_params(self, computation):
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, computation):
computation.gas_meter.consume_gas(
self.gas_cost,
reason=self.mnemonic,
)
(
gas,
value,
to,
sender,
code_address,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
should_transfer_value,
) = self.get_call_params(computation)
computation.extend_memory(memory_input_start_position, memory_input_size)
computation.extend_memory(memory_output_start_position, memory_output_size)
call_data = computation.memory.read(memory_input_start_position, memory_input_size)
#
# Message gas allocation and fees
#
child_msg_gas, child_msg_gas_fee = self.compute_msg_gas(computation, gas, to, value)
computation.gas_meter.consume_gas(child_msg_gas_fee, reason=self.mnemonic)
# Pre-call checks
sender_balance = computation.state_db.get_balance(
computation.msg.storage_address,
)
insufficient_funds = should_transfer_value and sender_balance < value
stack_too_deep = computation.msg.depth + 1 > constants.STACK_DEPTH_LIMIT
if insufficient_funds or stack_too_deep:
if self.logger:
if insufficient_funds:
err_message = "Insufficient Funds: have: {0} | need: {1}".format(
sender_balance,
value,
)
elif stack_too_deep:
err_message = "Stack Limit Reached"
else:
raise Exception("Invariant: Unreachable code path")
self.logger.debug(
"%s failure: %s",
self.mnemonic,
err_message,
)
computation.gas_meter.return_gas(child_msg_gas)
computation.stack.push(0)
else:
if code_address:
code = computation.state_db.get_code(code_address)
else:
code = computation.state_db.get_code(to)
child_msg_kwargs = {
'gas': child_msg_gas,
'value': value,
'to': to,
'data': call_data,
'code': code,
'code_address': code_address,
'should_transfer_value': should_transfer_value,
}
if sender is not None:
child_msg_kwargs['sender'] = sender
child_msg = computation.prepare_child_message(**child_msg_kwargs)
if child_msg.is_create:
child_computation = computation.vm.apply_create_message(child_msg)
else:
child_computation = computation.vm.apply_message(child_msg)
computation.children.append(child_computation)
if child_computation.error:
computation.stack.push(0)
else:
actual_output_size = min(memory_output_size, len(child_computation.output))
computation.gas_meter.return_gas(child_computation.gas_meter.gas_remaining)
computation.memory.write(
memory_output_start_position,
actual_output_size,
child_computation.output[:actual_output_size],
)
computation.stack.push(1)
class Call(BaseCall):
def compute_msg_gas(self, computation, gas, to, value):
account_exists = computation.state_db.account_exists(to)
transfer_gas_fee = constants.GAS_CALLVALUE if value else 0
create_gas_fee = constants.GAS_NEWACCOUNT if not account_exists else 0
total_fee = gas + transfer_gas_fee + create_gas_fee
child_msg_gas = gas + (constants.GAS_CALLSTIPEND if value else 0)
return child_msg_gas, total_fee
def get_call_params(self, computation):
gas = computation.stack.pop(type_hint=constants.UINT256)
to = force_bytes_to_address(computation.stack.pop(type_hint=constants.BYTES))
(
value,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
) = computation.stack.pop(num_items=5, type_hint=constants.UINT256)
return (
gas,
value,
to,
None, # sender
None, # code_address
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
True, # should_transfer_value,
)
class CallCode(BaseCall):
def compute_msg_gas(self, computation, gas, to, value):
transfer_gas_cost = constants.GAS_CALLVALUE if value else 0
total_fee = transfer_gas_cost + gas
child_msg_gas = gas + (constants.GAS_CALLSTIPEND if value else 0)
return child_msg_gas, total_fee
def get_call_params(self, computation):
gas = computation.stack.pop(type_hint=constants.UINT256)
code_address = force_bytes_to_address(computation.stack.pop(type_hint=constants.BYTES))
(
value,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
) = computation.stack.pop(num_items=5, type_hint=constants.UINT256)
to = computation.msg.storage_address
sender = computation.msg.storage_address
return (
gas,
value,
to,
sender,
code_address,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
True, # should_transfer_value,
)
class DelegateCall(CallCode):
def compute_msg_gas(self, computation, gas, to, value):
return gas, gas
def get_call_params(self, computation):
gas = computation.stack.pop(type_hint=constants.UINT256)
code_address = force_bytes_to_address(computation.stack.pop(type_hint=constants.BYTES))
(
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
) = computation.stack.pop(num_items=4, type_hint=constants.UINT256)
to = computation.msg.storage_address
sender = computation.msg.sender
value = computation.msg.value
return (
gas,
value,
to,
sender,
code_address,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
False, # should_transfer_value,
)
|
[
"pipermerriam@gmail.com"
] |
pipermerriam@gmail.com
|
9dce78213c77274a834e67aa526b49d3187883d8
|
767318c4ddf2713a8a035aa3bf68cd8260409aa0
|
/travellow/urls.py
|
a644dc789f9aa6062a14454cf7171e52bf44b7fb
|
[] |
no_license
|
sag-coder/travelbooking
|
704573b145ca04587bbaf2415f4bbdb6ad50b26f
|
dfc482ca01d1be324aba900075b2a64dc2fd1d88
|
refs/heads/master
| 2023-06-11T23:22:44.114545
| 2021-07-10T23:47:37
| 2021-07-10T23:47:37
| 384,562,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
path('', views.index, name='index'),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"sagar@sagars-MacBook-Pro.local"
] |
sagar@sagars-MacBook-Pro.local
|
0757a5984a57b6a43f6f9c8dbc15fe28c3b58c96
|
9f77118ac2fdfbdc3174c1fdfaa32bcf2b5ead40
|
/ALL_FILES/poxresources.py
|
71b122023b8723299b5cc25a607861109073ad60
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
gilneidp/FinalProject
|
309979cb65a115a980c58433decc921e295147bf
|
ec4f35d154bc4383ccde113126e493c1521ad21a
|
refs/heads/master
| 2021-01-01T18:42:30.789219
| 2015-10-30T10:40:47
| 2015-10-30T10:40:47
| 41,107,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
import os
import sys
import datetime
from datetime import timedelta
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "madapp.settings")
from django.core.management import execute_from_command_line
from django.db.models import Count, Avg
from madapp import settings
from madapp.mad.models import *
import psutil
cpu = psutil.cpu_times()
memory_used = psutil.virtual_memory()
memory_free = (memory_used.free/1024)/1024
#oxstats = UsageTable.objects.get(servername = 'POX')
#oxstats.cpu_usage = cpu
#oxstats.save()
print(cpu)
#print (((memory_used.used/1024)/1024)*100)/(((memory_used.free/1024)/1024) + ((memory_used.used/1024)/1024))
#print (memory_used.used/1024)/1024
#print ((memory_used.free/1024)/1024) + ((memory_used.used/1024)/1024)
#print cpu
#print memory_used.percent
|
[
"gilnei@gilnei.com.br"
] |
gilnei@gilnei.com.br
|
588948095f2db1f4d431c649e77a76b72ecf54b8
|
68f57fd1dd274be72af6d85762b67bbf8d2ef6d6
|
/tests/test_cosine.py
|
3ac719652f889a7529befb8bcbf87a328c003cfa
|
[] |
no_license
|
afcarl/simplecosine
|
287cbf40ef8aa2251ea538b7b3c2d28c5b6f2488
|
1ba869198ab3211dd4b0412e80e670308007f687
|
refs/heads/master
| 2020-03-17T23:56:28.854494
| 2017-06-15T21:33:36
| 2017-06-15T21:33:36
| 134,069,251
| 1
| 0
| null | 2018-05-19T14:29:05
| 2018-05-19T14:29:05
| null |
UTF-8
|
Python
| false
| false
| 2,909
|
py
|
import unittest
from simplecosine.cosine import CosineSetSimilarity, CosineTextSimilarity
import numpy
import pickle
class TestSetCosineClass(unittest.TestCase):
def setUp(self):
self.ilist = [('a', 'b', 'c'),
['b', 'c', 'd k'],
('d k', 'e', 'f')
]
def test_cosine(self):
cosine = CosineSetSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
cosine_sim = cosine(('g', 'h', 'd k', 'd k'), s2)
self.assertAlmostEqual(cosine_sim, 0.267, places=3)
def test_cosine_na(self):
cosine = CosineSetSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], ())
assert numpy.isnan(cosine_sim)
def test_cosine_identical(self):
cosine = CosineSetSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], self.ilist[0])
self.assertAlmostEqual(cosine_sim, 1, places=5)
def test_cosine_cache(self):
cosine = CosineSetSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
def test_cosine_no_corpus(self):
cosine = CosineSetSimilarity([])
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.667, places=3)
cosine_sim = cosine(('g', 'h', 'd k'), s2)
self.assertAlmostEqual(cosine_sim, 0.333, places=3)
    def test_cosine_pickle(self):
cosine = CosineSetSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
pickle.dumps(cosine)
cosine = CosineSetSimilarity([])
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
pickle.dumps(cosine)
class TestTextCosineClass(unittest.TestCase):
def setUp(self):
self.ilist = ['a b c',
'b c d',
'd e f']
def test_cosine(self):
cosine = CosineTextSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
def test_cosine_na(self):
cosine = CosineTextSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], '')
assert numpy.isnan(cosine_sim)
def test_cosine_identical(self):
cosine = CosineTextSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], self.ilist[0])
self.assertAlmostEqual(cosine_sim, 1, places=5)
if __name__ == '__main__':
unittest.main()
|
[
"fgregg@uchicago.edu"
] |
fgregg@uchicago.edu
|
73fb980e24519d85b6a114bf1263927c71ea7335
|
88deebcc2251f406c0c34325025cd0cbccb142e7
|
/Drosophila/ABC/melano_simulans_simulate_ms_ABC.py
|
0c443a1bf2620a4e48345ce53a52e4623d284f5e
|
[] |
no_license
|
cllong112s/CNN_spDelimitation_Piloso
|
27dab9269253c4ca360e6f8c4f1d70560bf15d84
|
5a5cae2498e89291357733f8614c5399558be7c0
|
refs/heads/master
| 2023-08-15T20:15:52.292721
| 2021-09-16T15:08:42
| 2021-09-16T15:08:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,287
|
py
|
#!/usr/bin/python
## in order to use this code you have to have ms installed on your computer
## ms can be freely downloaded from:
## http://home.uchicago.edu/rhudson1/source/mksamples.html
import random
import os
import math
### variable declarations
#define the number of simulations
Priorsize = 10000
## nDNA sample size of Dmelanogaster.
nDNADme = 3
## nDNA sample size of Dsimulans.
nDNADsi = 3
## nDNA sample sizes (number of alleles).
nDNANsam = nDNADme + nDNADsi
#number of segregating sites for each marker
segsites = [36,36]
## create a file to store parameters and one to store the models
parameters = open("parameters.txt","w")
models = open("models.txt","w")
### One Species Model
for i in range(Priorsize):
### Define parameters
    ## Theta values from 5 to 300
Theta = random.uniform(5,300)
## divergence time prior set to 0.
coalRootDivTime = 0
## ms commands
for s in range(len(segsites)):
## nDNA markers
com=os.system("./ms %d 1 -s %d -t %f -I 2 %d %d -ej %f 1 2 | sed '/prob/d' | perl msSS.pl >> simModel1.txt" % (nDNANsam, segsites[s],Theta, nDNADme, nDNADsi, coalRootDivTime))
## mtDNA marker
com=os.system("./ms %d 1 -s 36 -t %f -I 2 %d %d -ej %f 1 2 | sed '/prob/d' | perl msSS.pl >> simModel1.txt" % (nDNANsam, Theta/4, nDNADme, nDNADsi, coalRootDivTime/2))
## save parameter values and models
parameters.write("%f\t%f\n" % (Theta, coalRootDivTime))
models.write("1\n")
### Two Species Model
for i in range(Priorsize):
### Define parameters
## Theta values from 5 to 300
Theta = random.uniform(5,300)
## divergence time prior following an uniform distribution from 0.01 to 0.5.
coalRootDivTime = random.uniform(0.01,0.5)
## ms commands
for s in range(len(segsites)):
## nDNA markers
com=os.system("./ms %d 1 -s %d -t %f -I 2 %d %d -ej %f 1 2 | sed '/prob/d' | perl msSS.pl >> simModel2.txt" % (nDNANsam, segsites[s],Theta, nDNADme, nDNADsi, coalRootDivTime))
## mtDNA marker
com=os.system("./ms %d 1 -s 36 -t %f -I 2 %d %d -ej %f 1 2 | sed '/prob/d' | perl msSS.pl >> simModel2.txt" % (nDNANsam, Theta/4, nDNADme, nDNADsi, coalRootDivTime/2))
## save parameter values and models
parameters.write("%f\t%f\n" % (Theta, coalRootDivTime))
models.write("2\n")
com=os.system("cat simModel* > SuSt_melano_simulans.txt")
|
[
"manolo@macbook-pro-de-manolo.home"
] |
manolo@macbook-pro-de-manolo.home
|
ff8cefcde863d2211483e1d36ab5250a2795db09
|
667e52d9501e04ad9d301c405a1ffc57dabc439e
|
/checkio/find_friends.py
|
dcb04ca3265d5c32614ffd7993960e1e3624939c
|
[] |
no_license
|
merryjane/python
|
c9ab15f09b83a0f0069b49fe5f2f7ed6e601831c
|
423aa3cdf226404b2bf9f5958b5d03fe84b93f74
|
refs/heads/master
| 2021-01-23T16:37:05.387564
| 2014-07-25T14:17:13
| 2014-07-25T14:17:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
#!/usr/bin/env python3
def look_for_neighbors(network,node):
neighbors = set()
for couples in network:
if couples.split('-')[0] == node:
neighbors.add(couples.split('-')[1])
elif couples.split('-')[1] == node:
neighbors.add(couples.split('-')[0])
return neighbors
def check_connection(network, first, second):
    visited = set()
    frontier = {first}
    while frontier:
        node = frontier.pop()
        if node == second:
            return True
        visited.add(node)
        # only enqueue neighbors that have not been explored yet
        frontier.update(look_for_neighbors(network, node) - visited)
    return False
print(check_connection(
    ("dr101-mr99", "mr99-out00", "dr101-out00", "scout1-scout2",
     "scout3-scout1", "scout1-scout4", "scout4-sscout", "sscout-super"),
    "scout2", "scout3"))  # expected: True
'''
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert check_connection(
("dr101-mr99", "mr99-out00", "dr101-out00", "scout1-scout2",
"scout3-scout1", "scout1-scout4", "scout4-sscout", "sscout-super"),
"scout2", "scout3") == True, "Scout Brotherhood"
assert check_connection(
("dr101-mr99", "mr99-out00", "dr101-out00", "scout1-scout2",
"scout3-scout1", "scout1-scout4", "scout4-sscout", "sscout-super"),
"super", "scout2") == True, "Super Scout"
assert check_connection(
("dr101-mr99", "mr99-out00", "dr101-out00", "scout1-scout2",
"scout3-scout1", "scout1-scout4", "scout4-sscout", "sscout-super"),
"dr101", "sscout") == False, "I don't know any scouts."
'''
|
[
"ibiryulin@qsoft.ru"
] |
ibiryulin@qsoft.ru
|
2bff7ce472c638cc2952ee313e844673778ab37c
|
5faecec9b20d262150e48ac9f31c396f840b1f2f
|
/migrations/0010_auto_20200804_0913.py
|
f175b678b5857527caa863cd6db136e7bc3d803b
|
[] |
no_license
|
binkesi/blogsgn
|
fb767b0d22e3eb1c32ea7ee8fd0796766e3a8600
|
579b374f802a5651d20c3b3f85d8ff6a22476bdd
|
refs/heads/master
| 2022-11-27T23:24:45.574601
| 2020-08-04T10:06:28
| 2020-08-04T10:06:28
| 283,161,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
# Generated by Django 3.0.6 on 2020-08-04 01:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogsgn', '0009_auto_20200804_0653'),
]
operations = [
migrations.AlterField(
model_name='author',
name='nation',
field=models.CharField(choices=[('CH', 'China'), ('US', 'America'), ('UK', 'England'), ('GE', 'German'), ('CA', 'Canada')], max_length=80, verbose_name='Nationality'),
),
]
|
[
"sjtusgn@163.com"
] |
sjtusgn@163.com
|
848257d62f49ecdcc747c38384d79aa0afb7700b
|
8db1ab4f9a2e47f7e8d69a685837d7e747bf9442
|
/cocos2d-x-tool/py_tool/syncResToProject.py
|
0773ceebfd0313dd7ab2c0df0f04cec7b688b661
|
[] |
no_license
|
tanzuoliang/python
|
051d6e46cebd7fdb74a0173aca0ca7a2b3ef5986
|
70f782cf3c72d2b7043727910509eb2d2f2fe065
|
refs/heads/master
| 2021-10-20T05:36:03.732738
| 2019-02-26T02:37:18
| 2019-02-26T02:37:18
| 111,288,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,595
|
py
|
#!/usr/bin/python
#encoding=utf-8
from myutil.utils import syncDir,checkSVNStatus
import os
fromRoot = '/Users/tanzuoliang/art_resource'
toRoot = "../res/new_ui"
toLanguage = "../res"
ll = [
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ icon','天天坦克/UI 效果图+输出 20170214 优化版/00 icon','icon'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ button','天天坦克/UI 效果图+输出 20170214 优化版/00 button','button'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ wenzi','天天坦克/UI 效果图+输出 20170214 优化版/00 wenzi','wenzi'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 通用','天天坦克/UI 效果图+输出 20170214 优化版/00 通用','common'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 字体','天天坦克/UI 效果图+输出 20170214 优化版/00 字体','fnt'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ BG','天天坦克/UI 效果图+输出 20170214 优化版/00 BG','bg')
]
"""
Language-specific resources
"""
lll = [
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 英文翻译','天天坦克/UI 效果图+输出 20170214 优化版/00 英文翻译','lang_en'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 翻译原版','天天坦克/UI 效果图+输出 20170214 优化版/00 翻译原版','lang_chs')
]
from myutil.utils import getDirsize
import os
if os.path.exists('../res-new') and getDirsize('../res') < getDirsize('../res-new'):
print "当前res是压缩后的"
else:
os.system('svn up %s'%toLanguage)
for tu in ll:
fromDir = os.path.join(fromRoot, tu[0])
toDir = os.path.join(toRoot, tu[2])
os.system("svn up %s"%fromDir)
fromDir = os.path.join(fromRoot, tu[1])
syncDir(fromDir, toDir,False)
checkSVNStatus(toRoot,[tu[2]])
for tu in lll:
fromDir = os.path.join(fromRoot, tu[0])
toDir = os.path.join(toLanguage, "language_img", tu[2],"res","new_ui")
os.system("svn up %s"%fromDir)
fromDir = os.path.join(fromRoot, tu[1])
if not os.path.exists(toDir):
        os.makedirs(toDir)
syncDir(fromDir, toDir,False)
checkSVNStatus(os.path.join(toLanguage, "language_img"),[tu[2]])
"""
English-language novice guide images
"""
# os.system("cp %s %s"%(os.path.join(fromRoot,"天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 英文翻译/Novice\ guide/controlexplain.jpg"),os.path.join(toRoot, "bg/lang_en_controlexplain.jpg")))
os.system("rm -rf %s"%(os.path.join(toLanguage,"language_img/lang_en/res/new_ui/Novice\ guide")))
os.system('svn ci %s -m "sync resources"'%toLanguage)
|
[
"ysjwdaypm@163.com"
] |
ysjwdaypm@163.com
|
cf4cd861229bcd84028e5670c4292b18a9ce0692
|
e0cf5219ff9ad4eab2000516739ee651d7aa4c8f
|
/models/nets.py
|
ca62b8e0f15aec8c1b12dff278ba3f28a6d8c6c6
|
[
"MIT"
] |
permissive
|
Perfec-Yu/Lifelong-ED
|
cbf32f6e2d3ccf393eec08e5dbfb29e5e3c1b28b
|
f1af49129dd6ed4ff545f84e680565cccdb5b55a
|
refs/heads/main
| 2023-07-19T23:52:41.932196
| 2021-09-02T15:26:36
| 2021-09-02T15:26:36
| 402,468,387
| 13
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,092
|
py
|
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
import math
from typing import Any, Dict, Tuple, List, Union, Set
import warnings
from collections import OrderedDict
from torch.nn.modules.linear import Linear
from torchmeta.modules import MetaLinear, MetaSequential, MetaModule
from tqdm import tqdm
class LInEx(MetaModule):
def __init__(self,input_dim:int,hidden_dim:int,max_slots:int,init_slots:int,device:Union[torch.device, None]=None,**kwargs)->None:
super().__init__()
if input_dim != hidden_dim:
self.input_map = MetaSequential(OrderedDict({
"linear_0": MetaLinear(input_dim, hidden_dim),
"relu_0": nn.ReLU(),
"dropout_0": nn.Dropout(0.2),
"linear_1": MetaLinear(hidden_dim, hidden_dim),
"relu_1": nn.ReLU()
}))
else:
            self.input_map = lambda x, params=None: x  # accept (and ignore) the params kwarg used at call sites
self.classes = MetaLinear(hidden_dim, max_slots, bias=False)
_mask = torch.zeros(1, max_slots, dtype=torch.float, device=device)
_mask[:, init_slots:] = float("-inf")
self.register_buffer(name="_mask", tensor=_mask)
self.crit = nn.CrossEntropyLoss()
self.device = device
self.to(device=device)
self.nslots = init_slots
self.max_slots = max_slots
self.maml = True
self.outputs = {}
self.history = None
self.exemplar_features = None
self.exemplar_labels = None
self.dev_exemplar_features = None
self.dev_exemplar_labels = None
@property
def mask(self,):
self._mask[:, :self.nslots] = 0
self._mask[:, self.nslots:] = float("-inf")
return self._mask
def idx_mask(self, idx:Union[torch.LongTensor, int, List[int], None]=None, max_idx:Union[torch.LongTensor, int, None]=None):
assert (idx is not None) or (max_idx is not None)
assert (idx is None) or (max_idx is None)
mask = torch.zeros_like(self._mask) + float("-inf")
if idx is not None:
mask[:, idx] = 0
if max_idx is not None:
if isinstance(max_idx, torch.LongTensor):
max_idx = max_idx.item()
mask[:, :max_idx] = 0
return mask
@property
def features(self):
return self.classes.weight[:self.nslots]
def forward(self, batch, nslots:int=-1, exemplar:bool=False, exemplar_distill:bool=False, feature_distill:bool=False, mul_distill=False, distill:bool=False, return_loss:bool=True, return_feature:bool=False, tau:float=1.0, log_outputs:bool=True, params=None):
if isinstance(batch, (tuple, list)) and len(batch) == 2:
features, labels = batch
else:
features, labels = batch.features, batch.labels
inputs = self.input_map(features, params=self.get_subdict(params, "input_map"))
scores = self.classes(inputs, params=self.get_subdict(params, "classes"))
if torch.any(torch.isnan(scores)):
print(scores[0])
input('a')
if nslots == -1:
scores += self.mask
if torch.any(torch.isnan(scores)):
print(scores[0])
input()
nslots = self.nslots
else:
scores += self.idx_mask(max_idx=nslots)
scores[:, 0] = 0
if scores.size(0) != labels.size(0):
assert scores.size(0) % labels.size(0) == 0
labels = labels.repeat_interleave(scores.size(0) // labels.size(0), dim=0)
else:
labels = labels
if log_outputs:
pred = torch.argmax(scores, dim=1)
acc = torch.mean((pred == labels).float())
self.outputs["accuracy"] = acc.item()
self.outputs["prediction"] = pred.detach().cpu()
self.outputs["label"] = labels.detach().cpu()
self.outputs["input_features"] = features.detach().cpu()
self.outputs["encoded_features"] = inputs.detach().cpu()
if return_loss:
labels.masked_fill_(labels >= nslots, 0)
valid = labels < nslots
nvalid = torch.sum(valid.float())
if nvalid == 0:
loss = 0
else:
loss = self.crit(scores[valid], labels[valid])
if torch.isnan(loss):
print(labels, nslots, scores[:, :nslots])
input()
if distill and self.history is not None:
old_scores, old_inputs = self.forward(batch, nslots=self.history["nslots"], return_loss=False, log_outputs=False, return_feature=True, params=self.history["params"])
old_scores = old_scores.detach()
old_inputs = old_inputs.detach()
new_scores = scores[:, :self.history["nslots"]]
if mul_distill:
loss_distill = - torch.sum(torch.softmax(old_scores*tau, dim=1) * torch.log_softmax(new_scores*tau, dim=1), dim=1).mean()
old_dist = torch.softmax(old_scores/tau, dim=1)
old_valid = (old_dist[:, 0] < 0.9)
old_num = torch.sum(old_valid.float())
if old_num > 0:
# print(old_dist[old_valid].topk(5, dim=1), batch.labels[old_valid])
# input()
loss_mul_distill = - torch.sum(old_dist[old_valid] * torch.log_softmax(new_scores[old_valid], dim=1), dim=1).sum()
loss_distill = (loss_distill * old_dist.size(0) + loss_mul_distill) / (old_dist.size(0) + old_num)
else:
loss_distill = - torch.sum(torch.softmax(old_scores*tau, dim=1) * torch.log_softmax(new_scores*tau, dim=1), dim=1).mean()
if feature_distill:
loss_f_distill = (1 - (old_inputs / old_inputs.norm(dim=-1, keepdim=True) * inputs / inputs.norm(dim=-1, keepdim=True)).sum(dim=-1)).mean(dim=0)
loss_distill += loss_f_distill
d_weight = self.history["nslots"]
c_weight = (self.nslots - self.history["nslots"])
loss = ( d_weight * loss_distill+ c_weight* loss) / (d_weight+c_weight)
if torch.isnan(loss):
print(old_scores, new_scores)
input()
if exemplar and self.exemplar_features is not None:
if self.exemplar_features.size(0) < 128:
exemplar_inputs = self.input_map(self.exemplar_features.to(self.device), params=self.get_subdict(params, "input_map"))
exemplar_scores = self.classes(exemplar_inputs, params=self.get_subdict(params, "classes"))
else:
exemplar_scores = []
exemplar_inputs = []
for _beg in range(0, self.exemplar_features.size(0), 128):
_features = self.exemplar_features[_beg:_beg+128, :]
_inputs = self.input_map(_features.to(self.device), params=self.get_subdict(params, "input_map"))
exemplar_inputs.append(_inputs)
exemplar_scores.append(self.classes(_inputs, params=self.get_subdict(params, "classes")))
exemplar_inputs = torch.cat(exemplar_inputs, dim=0)
exemplar_scores = torch.cat(exemplar_scores, dim=0)
exemplar_scores[:, 0] = 0.
loss_exemplar = self.crit(exemplar_scores+self.mask, self.exemplar_labels.to(self.device))
if torch.isnan(loss_exemplar):
print(self.exemplar_labels, nslots)
input()
if exemplar_distill:
if self.exemplar_features.size(0) < 128:
exemplar_old_inputs = self.input_map(self.exemplar_features.to(self.device), params=self.get_subdict(self.history["params"], "input_map"))
exemplar_old_scores = self.classes(exemplar_old_inputs, params=self.get_subdict(self.history["params"], "classes"))
else:
exemplar_old_scores = []
exemplar_old_inputs = []
for _beg in range(0, self.exemplar_features.size(0), 128):
_features = self.exemplar_features[_beg:_beg+128, :]
_inputs = self.input_map(_features.to(self.device), params=self.get_subdict(self.history["params"], "input_map"))
exemplar_old_inputs.append(_inputs)
exemplar_old_scores.append(self.classes(_inputs, params=self.get_subdict(self.history["params"], "classes")))
exemplar_old_inputs = torch.cat(exemplar_old_inputs, dim=0)
exemplar_old_scores = torch.cat(exemplar_old_scores, dim=0)
exemplar_old_scores[:, 0] = 0.
                    exemplar_old_scores = exemplar_old_scores[:, :self.history["nslots"]]
                    loss_exemplar_distill = - torch.sum(torch.softmax(exemplar_old_scores*tau, dim=1) * torch.log_softmax(exemplar_scores[:, :self.history["nslots"]], dim=1), dim=1).mean()
if feature_distill:
loss_exemplar_feat_distill = (1 - (exemplar_old_inputs / exemplar_old_inputs.norm(dim=-1, keepdim=True) * exemplar_inputs / exemplar_inputs.norm(dim=-1, keepdim=True)).sum(dim=-1)).mean(dim=0)
loss_exemplar_distill += loss_exemplar_feat_distill
d_weight = self.history["nslots"]
c_weight = (self.nslots - self.history["nslots"])
loss_exemplar = (d_weight * loss_exemplar_distill+ c_weight* loss_exemplar) / (d_weight+c_weight)
e_weight = self.exemplar_features.size(0)
loss = (nvalid * loss + e_weight * loss_exemplar) / (nvalid + e_weight)
if torch.isnan(loss):
print(loss, loss_exemplar)
return loss
else:
if return_feature:
return scores[:, :nslots], inputs
else:
return scores[:, :nslots]
def score(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def clone_params(self,):
return OrderedDict({k:v.clone().detach() for k,v in self.meta_named_parameters()})
def set_history(self,):
self.history = {"params": self.clone_params(), "nslots": self.nslots}
def set_exemplar(self, dataloader, q:int=20, params=None, label_sets:Union[List, Set, None]=None, collect_none:bool=False, use_input:bool=False, output_only:bool=False, output:Union[str, None]=None):
self.eval()
with torch.no_grad():
ifeat = []; ofeat = []; label = []
num_batches = len(dataloader)
for batch in tqdm(dataloader, "collecting exemplar", ncols=128):
batch = batch.to(self.device)
loss = self.forward(batch, params=params)
ifeat.append(self.outputs["input_features"])
if use_input:
ofeat.append(self.outputs["input_features"])
else:
ofeat.append(self.outputs["encoded_features"])
label.append(self.outputs["label"])
ifeat = torch.cat(ifeat, dim=0)
ofeat = torch.cat(ofeat, dim=0)
label = torch.cat(label, dim=0)
nslots = max(self.nslots, torch.max(label).item()+1)
exemplar = {}
if label_sets is None:
if collect_none:
label_sets = range(nslots)
else:
label_sets = range(1, nslots)
else:
if collect_none:
if 0 not in label_sets:
label_sets = sorted([0] + list(label_sets))
else:
label_sets = sorted(list(label_sets))
else:
label_sets = sorted([t for t in label_sets if t != 0])
for i in label_sets:
idx = (label == i)
if i == 0:
# random sample for none type
nidx = torch.nonzero(idx, as_tuple=True)[0].tolist()
exemplar[i] = numpy.random.choice(nidx, q, replace=False).tolist()
continue
if torch.any(idx):
exemplar[i] = []
nidx = torch.nonzero(idx, as_tuple=True)[0].tolist()
mfeat = torch.mean(ofeat[idx], dim=0, keepdims=True)
if len(nidx) < q:
exemplar[i].extend(nidx * (q // len(nidx)) + nidx[:(q % len(nidx))])
else:
for j in range(q):
if j == 0:
dfeat = torch.sum((ofeat[nidx] - mfeat)**2, dim=1)
else:
cfeat = ofeat[exemplar[i]].sum(dim=0, keepdims=True)
cnum = len(exemplar[i])
                                dfeat = torch.sum((mfeat * (cnum + 1) - ofeat[nidx] - cfeat)**2, dim=1)  # squared distance per remaining candidate
tfeat = torch.argmin(dfeat)
exemplar[i].append(nidx[tfeat])
nidx.pop(tfeat.item())
exemplar = {i: ifeat[v] for i,v in exemplar.items()}
exemplar_features = []
exemplar_labels = []
for label, features in exemplar.items():
exemplar_features.append(features)
exemplar_labels.extend([label]*features.size(0))
exemplar_features = torch.cat(exemplar_features, dim=0).cpu()
exemplar_labels = torch.LongTensor(exemplar_labels).cpu()
if not output_only or output is not None:
if output == "train" or output is None:
if self.exemplar_features is None:
self.exemplar_features = exemplar_features
self.exemplar_labels = exemplar_labels
else:
self.exemplar_features = torch.cat((self.exemplar_features, exemplar_features), dim=0)
self.exemplar_labels = torch.cat((self.exemplar_labels, exemplar_labels), dim=0)
elif output == "dev":
if self.dev_exemplar_features is None:
self.dev_exemplar_features = exemplar_features
self.dev_exemplar_labels = exemplar_labels
else:
self.dev_exemplar_features = torch.cat((self.dev_exemplar_features, exemplar_features), dim=0)
self.dev_exemplar_labels = torch.cat((self.dev_exemplar_labels, exemplar_labels), dim=0)
return {i: v.cpu() for i,v in exemplar.items()}
def initialize(self, exemplar, ninstances:Dict[int, int], gamma:float=1.0, tau:float=1.0, alpha:float=0.5, params=None):
self.eval()
with torch.no_grad():
weight_norm = torch.norm(self.classes.weight[1:self.nslots], dim=1).mean(dim=0)
label_inits = []
label_kt = {}
for label, feats in exemplar.items():
exemplar_inputs = self.input_map(feats.to(self.device), params=self.get_subdict(params, "input_map"))
exemplar_scores = self.classes(exemplar_inputs, params=self.get_subdict(params, "classes"))
exemplar_scores = exemplar_scores + self.mask
exemplar_scores[:, 0] = 0
exemplar_weights = torch.softmax(exemplar_scores * tau, dim=1)
normalized_inputs = exemplar_inputs / torch.norm(exemplar_inputs, dim=1, keepdim=True) * weight_norm
proto = (exemplar_weights[:, :1] * normalized_inputs).mean(dim=0)
knowledge = torch.matmul(exemplar_weights[:, 1:self.nslots], self.classes.weight[1:self.nslots]).mean(dim=0)
gate = alpha * math.exp(- ninstances[label] * gamma)
# gate = 1 / (1 + ninstances[label] * gamma)
rnd = torch.randn_like(proto) * weight_norm / math.sqrt(self.classes.weight.size(1))
initvec = proto * gate + knowledge * gate + (1 - gate) * rnd
label_inits.append((label, initvec.cpu()))
label_kt[label] = exemplar_weights.mean(dim=0).cpu()
label_inits.sort(key=lambda t:t[0])
inits = []
for i, (label, init) in enumerate(label_inits):
assert label == self.nslots + i
inits.append(init)
inits = torch.stack(inits, dim=0)
self.outputs["new2old"] = label_kt
return inits.detach()
def initialize2(self, exemplar, ninstances:Dict[int, int], gamma:float=1.0, tau:float=1.0, alpha:float=0.5, delta:float=0.5, params=None):
self.eval()
def top_p(probs, p=0.9):
_val, _idx = torch.sort(probs, descending=True, dim=1)
top_mask = torch.zeros_like(probs).float() - float("inf")
for _type in range(probs.size(0)):
accumulated = 0
_n = 0
while accumulated < p or _n <= 1:
top_mask[_type, _idx[_type, _n]] = 0
accumulated += _val[_type, _n]
_n += 1
return top_mask
with torch.no_grad():
weight_norm = torch.norm(self.classes.weight[1:self.nslots], dim=1).mean(dim=0)
label_inits = []
label_kt = {}
for label, feats in exemplar.items():
exemplar_inputs = self.input_map(feats.to(self.device), params=self.get_subdict(params, "input_map"))
exemplar_scores = self.classes(exemplar_inputs, params=self.get_subdict(params, "classes"))
exemplar_scores = exemplar_scores + self.mask
exemplar_scores[:, 0] = 0
top_mask = top_p(torch.softmax(exemplar_scores, dim=1))
exemplar_scores = exemplar_scores + top_mask
exemplar_scores[:, 0] = 0
exemplar_weights = torch.softmax(exemplar_scores * tau, dim=1)
normalized_inputs = exemplar_inputs / torch.norm(exemplar_inputs, dim=1, keepdim=True) * weight_norm
proto = delta * (exemplar_weights[:, :1] * normalized_inputs).mean(dim=0)
kweight = (1 - exemplar_weights[:, :1])
knowledge = torch.matmul((1-delta*exemplar_weights[:, :1]) * (exemplar_weights[:, 1:self.nslots] + 1e-8) / torch.clamp(1 - exemplar_weights[:, :1], 1e-8), self.classes.weight[1:self.nslots]).mean(dim=0)
gate = alpha * math.exp(- ninstances[label] * gamma)
rnd = torch.randn_like(proto) * weight_norm / math.sqrt(self.classes.weight.size(1))
initvec = proto * gate + knowledge * gate + (1 - gate) * rnd
if torch.any(torch.isnan(initvec)):
print(proto, knowledge, rnd, gate, exemplar_weights[:, :1], exemplar_scores[-1, :self.nslots])
input()
label_inits.append((label, initvec.cpu()))
label_kt[label] = exemplar_weights.mean(dim=0).cpu()
label_inits.sort(key=lambda t:t[0])
inits = []
for i, (label, init) in enumerate(label_inits):
assert label == self.nslots + i
inits.append(init)
inits = torch.stack(inits, dim=0)
self.outputs["new2old"] = label_kt
return inits.detach()
    def set(self, features:torch.Tensor, ids:Union[int, torch.Tensor, List, None]=None, max_id:int=-1):
with torch.no_grad():
if isinstance(ids, (torch.Tensor, list)):
if torch.any(ids > self.nslots):
warnings.warn("Setting features to new classes. Using 'extend' or 'append' is preferred for new classes")
self.classes.weight[ids] = features
elif isinstance(ids, int):
self.classes.weight[ids] = features
else:
if max_id == -1:
raise ValueError(f"Need input for either ids or max_id")
self.classes.weight[:max_id] = features
def append(self, feature):
with torch.no_grad():
self.classes.weight[self.nslots] = feature
self.nslots += 1
def extend(self, features):
with torch.no_grad():
features = features.to(self.device)
if len(features.size()) == 1:
warnings.warn("Extending 1-dim feature vector. Using 'append' instead is preferred.")
self.append(features)
else:
nclasses = features.size(0)
self.classes.weight[self.nslots:self.nslots+nclasses] = features
self.nslots += nclasses
class BIC(LInEx):
def __init__(self,input_dim:int,hidden_dim:int,max_slots:int,init_slots:int,device:Union[torch.device, None]=None, **kwargs)->None:
super().__init__(input_dim,hidden_dim,max_slots,init_slots,device,**kwargs)
self.correction_weight = nn.Parameter(torch.ones(1, dtype=torch.float, device=self.device, requires_grad=True))
self.correction_bias = nn.Parameter(torch.zeros(1, dtype=torch.float, device=self.device, requires_grad=True))
self.correction_stream = [init_slots]
def add_stream(self, num_classes):
self.correction_stream.append(self.correction_stream[-1]+num_classes)
def forward(self, batch, nslots:int=-1, bias_correction:str="none", exemplar:bool=False, exemplar_distill:bool=False, distill:bool=False, return_loss:bool=True, tau:float=1.0, log_outputs:bool=True, params=None):
assert bias_correction in ["none", "last", "current"]
if distill:
assert bias_correction != "current"
if isinstance(batch, (tuple, list)) and len(batch) == 2:
features, labels = batch
else:
features, labels = batch.features, batch.labels
inputs = self.input_map(features, params=self.get_subdict(params, "input_map"))
scores = self.classes(inputs, params=self.get_subdict(params, "classes"))
if nslots == -1:
scores += self.mask
nslots = self.nslots
else:
scores += self.idx_mask(max_idx=nslots)
scores[:, 0] = 0
if bias_correction == "current":
assert len(self.correction_stream) >= 2
scores[:, self.correction_stream[-2]:self.correction_stream[-1]] *= self.correction_weight
scores[:, self.correction_stream[-2]:self.correction_stream[-1]] += self.correction_bias
if scores.size(0) != labels.size(0):
assert scores.size(0) % labels.size(0) == 0
labels = labels.repeat_interleave(scores.size(0) // labels.size(0), dim=0)
else:
labels = labels
if log_outputs:
pred = torch.argmax(scores, dim=1)
acc = torch.mean((pred == labels).float())
self.outputs["accuracy"] = acc.item()
self.outputs["prediction"] = pred.detach().cpu()
self.outputs["label"] = labels.detach().cpu()
self.outputs["input_features"] = features.detach().cpu()
self.outputs["encoded_features"] = inputs.detach().cpu()
if return_loss:
labels.masked_fill_(labels >= nslots, 0)
valid = labels < nslots
nvalid = torch.sum(valid.float())
if nvalid == 0:
loss = 0
else:
loss = self.crit(scores[valid], labels[valid])
if distill and self.history is not None:
old_scores = self.forward(batch, nslots=self.history["nslots"], return_loss=False, log_outputs=False, params=self.history["params"]).detach()
if bias_correction == "last":
old_scores[:, self.correction_stream[-2]:self.correction_stream[-1]] *= self.history['correction_weight']
old_scores[:, self.correction_stream[-2]:self.correction_stream[-1]] += self.history['correction_bias']
new_scores = scores[:, :self.history["nslots"]]
loss_distill = - torch.sum(torch.softmax(old_scores*tau, dim=1) * torch.log_softmax(new_scores*tau, dim=1), dim=1).mean()
d_weight = self.history["nslots"]
c_weight = (self.nslots - self.history["nslots"])
loss = ( d_weight * loss_distill+ c_weight* loss) / (d_weight+c_weight)
if exemplar and self.exemplar_features is not None:
if self.exemplar_features.size(0) < 128:
exemplar_inputs = self.input_map(self.exemplar_features.to(self.device), params=self.get_subdict(params, "input_map"))
exemplar_scores = self.classes(exemplar_inputs, params=self.get_subdict(params, "classes"))
else:
exemplar_scores = []
for _beg in range(0, self.exemplar_features.size(0), 128):
_features = self.exemplar_features[_beg:_beg+128, :]
_inputs = self.input_map(_features.to(self.device), params=self.get_subdict(params, "input_map"))
exemplar_scores.append(self.classes(_inputs, params=self.get_subdict(params, "classes")))
exemplar_scores = torch.cat(exemplar_scores, dim=0)
exemplar_scores[:, 0] = 0.
loss_exemplar = self.crit(exemplar_scores+self.mask, self.exemplar_labels.to(self.device))
if exemplar_distill:
if self.exemplar_features.size(0) < 128:
exemplar_old_inputs = self.input_map(self.exemplar_features.to(self.device), params=self.get_subdict(self.history["params"], "input_map"))
exemplar_old_scores = self.classes(exemplar_old_inputs, params=self.get_subdict(self.history["params"], "classes"))
else:
exemplar_old_scores = []
for _beg in range(0, self.exemplar_features.size(0), 128):
_features = self.exemplar_features[_beg:_beg+128, :]
_inputs = self.input_map(_features.to(self.device), params=self.get_subdict(self.history["params"], "input_map"))
exemplar_old_scores.append(self.classes(_inputs, params=self.get_subdict(self.history["params"], "classes")))
exemplar_old_scores = torch.cat(exemplar_old_scores, dim=0)
exemplar_old_scores[:, 0] = 0.
if bias_correction == "last":
exemplar_old_scores[:, self.correction_stream[-2]:self.correction_stream[-1]] *= self.history['correction_weight']
exemplar_old_scores[:, self.correction_stream[-2]:self.correction_stream[-1]] += self.history['correction_bias']
                    # Mirror the batch distillation path above: keep only the old head's
                    # class *columns* (the original sliced rows here, a likely bug) and
                    # apply the same temperature to both sides.
                    exemplar_old_scores = exemplar_old_scores[:, :self.history["nslots"]]
                    loss_exemplar_distill = - torch.sum(torch.softmax(exemplar_old_scores*tau, dim=1) * torch.log_softmax(exemplar_scores[:, :self.history["nslots"]]*tau, dim=1), dim=1).mean()
d_weight = self.history["nslots"]
c_weight = (self.nslots - self.history["nslots"])
loss_exemplar = (d_weight * loss_exemplar_distill+ c_weight* loss_exemplar) / (d_weight+c_weight)
e_weight = self.exemplar_features.size(0)
loss = (nvalid * loss + e_weight * loss_exemplar) / (nvalid + e_weight)
if torch.isnan(loss):
print(loss, loss_exemplar)
return loss
else:
return scores[:, :nslots]
def forward_correction(self, *args, **kwargs):
        '''
        Forward pass with bias_correction="current": the newest class block's
        scores are transformed as F * correction_weight + correction_bias before
        the usual cross-entropy loss. (Distillation instead applies the stored
        "last" correction to the old head's scores inside forward().)
        '''
        if len(args) >= 3:
            args = list(args)  # args arrives as a tuple; convert before item assignment
            args[2] = "current"
        else:
            kwargs["bias_correction"] = "current"
        return self.forward(*args, **kwargs)
def set_history(self):
super().set_history()
self.history["correction_weight"] = self.correction_weight.item()
self.history["correction_bias"] = self.correction_bias.item()
def score(self, *args, **kwargs):
if len(self.correction_stream) >= 2:
return self.forward_correction(*args, **kwargs)
else:
            if len(args) >= 3:
                args = list(args)  # tuple -> list before item assignment
                args[2] = "none"
            else:
                kwargs["bias_correction"] = "none"
return self.forward(*args, **kwargs)
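# A hedged usage sketch of the two-phase bias-correction schedule (the loop and
# names below are assumptions for illustration, not code from this repo):
#   model.add_stream(num_classes)              # open correction slots for the new task
#   loss = model.forward(batch, distill=True)  # phase 1: train encoder + classifier
#   loss = model.forward_correction(batch)     # phase 2: fit correction_weight/bias
#   model.set_history()                        # snapshot the old head for later distillation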
class ICARL(LInEx):
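    # iCaRL-style classification: at test time, predict by nearest class mean in the
    # encoded feature space, using stored exemplars (plus a learned "none"-class mean).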
def __init__(self,input_dim:int,hidden_dim:int,max_slots:int,init_slots:int,device:Union[torch.device, None]=None, **kwargs)->None:
super().__init__(input_dim,hidden_dim,max_slots,init_slots,device,**kwargs)
self.none_feat = None
def set_none_feat(self, dataloader, params=None):
self.eval()
with torch.no_grad():
ifeat = []; ofeat = []; label = []
num_batches = len(dataloader)
for batch in tqdm(dataloader, "collecting exemplar"):
batch = batch.to(self.device)
loss = self.forward(batch, params=params)
ifeat.append(self.outputs["input_features"])
ofeat.append(self.outputs["encoded_features"])
label.append(self.outputs["label"])
ifeat = torch.cat(ifeat, dim=0)
ofeat = torch.cat(ofeat, dim=0)
label = torch.cat(label, dim=0)
nslots = max(self.nslots, torch.max(label).item()+1)
idx = (label == 0)
self.none_feat = ofeat[idx].mean(dim=0).cpu()
return self.none_feat
def score(self, batch, exemplar=None, params=None):
if exemplar is None:
exemplar_labels, exemplar_features = self.exemplar_labels, self.exemplar_features
else:
exemplar_labels, exemplar_features = exemplar
inputs = self.input_map(batch.features, params=self.get_subdict(params, "input_map"))
scores = []
scores.append(- torch.sum((inputs - self.none_feat.to(inputs.device).unsqueeze(0))**2, dim=1))
for i in range(1, self.nslots):
label_idx = (exemplar_labels == i)
label_features = exemplar_features[label_idx]
label_inputs = self.input_map(label_features.to(inputs.device), params=self.get_subdict(params, "input_map")).mean(dim=0, keepdim=True)
scores.append(- torch.sum((inputs - label_inputs)**2, dim=1))
scores = torch.stack(scores, dim=0).transpose(0, 1)
labels = batch.labels
if scores.size(0) != labels.size(0):
assert scores.size(0) % labels.size(0) == 0
labels = labels.repeat_interleave(scores.size(0) // labels.size(0), dim=0)
pred = torch.argmax(scores, dim=1)
acc = torch.mean((pred == labels).float())
labels.masked_fill_(labels >= self.nslots, 0)
valid = labels < self.nslots
nvalid = torch.sum(valid.float())
if nvalid == 0:
loss = 0
else:
loss = self.crit(scores[valid], labels[valid])
self.outputs["accuracy"] = acc.item()
self.outputs["prediction"] = pred.detach().cpu()
self.outputs["label"] = labels.detach().cpu()
self.outputs["input_features"] = batch.features.detach().cpu()
self.outputs["encoded_features"] = inputs.detach().cpu()
return loss
def test(): # sanity check
m = LInEx(nhead=8,nlayers=3,hidden_dim=512,input_dim=2048,max_slots=30,init_slots=9,device=torch.device("cpu"))
if __name__ == "__main__":
test()
|
[
"life4pf@163.com"
] |
life4pf@163.com
|
f0a82e1bd0914b8007e6b425025033a3628ec23c
|
3badd0d2ea861e56748f5c16beba67e1685dc7c3
|
/functional_tests.py
|
75114b2d2c52b96f38a980fbc54472aef4cb095d
|
[] |
no_license
|
chris-seals/tdd-book
|
a8b72c0ba83e1a27088783225a3fd30fc32bdb28
|
28d9742e19d510fd07fe21928ede7c316958f4fa
|
refs/heads/master
| 2022-11-22T03:34:23.940099
| 2020-07-19T14:27:00
| 2020-07-19T14:27:00
| 258,937,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,246
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import unittest
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_can_start_a_list_and_retrieve_it_later(self):
# Edith has heard about a cool new online to-do app.
# She goes to check out the homepage.
self.browser.get('http://localhost:8000')
# She notices the page title and header mention to-do lists
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# She is invited to enter a to-do item straight away
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
# She types "Buy peacock feathers" into a text box (Edith's hobby is
# tying fly-fishing lures)
inputbox.send_keys('Buy peacock feathers')
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an item in a to-do lists
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertTrue(
any(row.text == '1: Buy peacock feathers' for row in rows),
"New to-do item did not appear in table"
)
# There is still a text box inviting her to add another item. She enters
# "Use peacock feathers to make a fly" (Edith is very methodical)
self.fail('Finish the test!')
# The page updates again, and now shows both items on her lists
# Edith wonders whether the site will remember her list. Then she sees
# that the site has generated a unique URL for her -- there is some
# explanatory text to that effect.
# She visits that URL - her to-do list is still there.
# Satisfied, she goes back to sleep
if __name__ == '__main__':
unittest.main(warnings='ignore')
|
[
"christopher.seals@gmail.com"
] |
christopher.seals@gmail.com
|
3f60cafc9d44646bf3475d2b1f730a3648b8e27b
|
863c2fcfd5ebed9153c43a298488abeb6e96d627
|
/time_series_classification.py
|
ee0ba8dbea36985a0524ff0d3e4a63708bed7170
|
[] |
no_license
|
liang112233/time-series-classification
|
6d51dd7e80b044f6fbc7e64c2dd4e4bf6f1ae8f5
|
d5e847b302d855bb9dc975e888b2c0e50be32f8e
|
refs/heads/master
| 2022-11-25T13:18:35.772054
| 2020-07-30T15:16:58
| 2020-07-30T15:16:58
| 280,761,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,092
|
py
|
# https://www.analyticsvidhya.com/blog/2019/01/introduction-time-series-classification/
import os
import pandas as pd
import numpy as np
# matplotlib inline
import matplotlib.pyplot as plt
from os import listdir
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
df1 = pd.read_csv('/home/liang/PycharmProjects/time-series-classification/MovementAAL/dataset/MovementAAL_RSS_1.csv')
df2 = pd.read_csv('/home/liang/PycharmProjects/time-series-classification/MovementAAL/dataset/MovementAAL_RSS_2.csv')
path = '/home/liang/PycharmProjects/time-series-classification/MovementAAL/dataset/MovementAAL_RSS_'
sequences = list()
for i in range(1,315):
file_path = path + str(i) + '.csv'
print(file_path)
df = pd.read_csv(file_path, header=0)
values = df.values
sequences.append(values)
targets = pd.read_csv('/home/liang/PycharmProjects/time-series-classification/MovementAAL/dataset/MovementAAL_target.csv')
targets = targets.values[:,1]
groups = pd.read_csv('MovementAAL/groups/MovementAAL_DatasetGroup.csv', header=0)
groups = groups.values[:,1]
len_sequences = []
for one_seq in sequences:
len_sequences.append(len(one_seq))
pd.Series(len_sequences).describe()
# Padding the sequence with the values in last row to max length
to_pad = 129
new_seq = []
for one_seq in sequences:
len_one_seq = len(one_seq)
last_val = one_seq[-1]
n = to_pad - len_one_seq
    to_concat = np.repeat(last_val, n).reshape(4, n).transpose()
new_one_seq = np.concatenate([one_seq, to_concat])
new_seq.append(new_one_seq)
final_seq = np.stack(new_seq)
# truncate the sequence to length 60
seq_len = 60
final_seq = sequence.pad_sequences(final_seq, maxlen=seq_len, padding='post', dtype='float', truncating='post')
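# final_seq should now have shape (314, 60, 4): one fixed-length, 4-channel window per CSV file.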
train = [final_seq[i] for i in range(len(groups)) if (groups[i]==2)]
validation = [final_seq[i] for i in range(len(groups)) if groups[i]==1]
test = [final_seq[i] for i in range(len(groups)) if groups[i]==3]
train_target = [targets[i] for i in range(len(groups)) if (groups[i]==2)]
validation_target = [targets[i] for i in range(len(groups)) if groups[i]==1]
test_target = [targets[i] for i in range(len(groups)) if groups[i]==3]
train = np.array(train)
validation = np.array(validation)
test = np.array(test)
train_target = np.array(train_target)
train_target = (train_target+1)/2
validation_target = np.array(validation_target)
validation_target = (validation_target+1)/2
test_target = np.array(test_target)
test_target = (test_target+1)/2
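# targets arrive as -1/+1; the (x+1)/2 remap converts them to 0/1 for binary cross-entropy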
model = Sequential()
model.add(LSTM(256, input_shape=(seq_len, 4)))
model.add(Dense(1, activation='sigmoid'))
model.summary()
adam = Adam(lr=0.001)
# model_filename = "test-Epoch-{epoch:02d}"
# checkpoint_path = os.path.join('models/', model_filename)
chk = ModelCheckpoint(
'best_model.pkl',
monitor='val_accuracy',
verbose=1,
save_best_only=False,
save_weights_only=False,
mode='max')
# chk = ModelCheckpoint('best_model.pkl', monitor='val_acc', save_best_only=True, mode='max', verbose=1)
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
model.fit(train, train_target, epochs=5, batch_size=128, callbacks=[chk], validation_data=(validation,validation_target))
#
# # #loading the model and checking accuracy on the test data
model = load_model('best_model.pkl')
#
from sklearn.metrics import accuracy_score
test_preds = model.predict_classes(test)
accuracy_score(test_target, test_preds)
print("score",accuracy_score(test_target, test_preds))
|
[
"sisheng.liang@ttu.edu"
] |
sisheng.liang@ttu.edu
|
64c56cb8b06a7ea97fa9eaa40bc7a4d99d330d48
|
11b0c124262ac40de87d756389082462d8452e4d
|
/keras/keras44_3_cancer_conv1d.py
|
7aaef711d66a0e504dbd364159570a7de778fe5d
|
[] |
no_license
|
Hyunwoo29/keras01
|
164b519f9bb70d55f7bfa91c66529cee4d012b24
|
494611a94420f8e268c37085ccf0b0de4aa71048
|
refs/heads/main
| 2023-08-06T07:19:13.288634
| 2021-10-05T14:03:56
| 2021-10-05T14:03:56
| 383,744,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,188
|
py
|
from sklearn.preprocessing import MaxAbsScaler, RobustScaler, QuantileTransformer, PowerTransformer, StandardScaler
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np
from sklearn.datasets import load_breast_cancer
from icecream import ic
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, Conv1D, Flatten
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import time
from sklearn.metrics import r2_score
#1. Data
datasets = load_breast_cancer()
x = datasets.data
y = datasets.target
ic(x.shape, y.shape) # ic| x.shape: (569, 30), y.shape: (569,)
x_train, x_test, y_train, y_test = train_test_split(x,y, train_size=0.7, random_state=60) # train 398, test 171
# scaler = QuantileTransformer()
scaler = StandardScaler()
# scaler = PowerTransformer()
# scaler = RobustScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
# ic(x_train.shape, x_test.shape)
# ic(np.unique(y))
model = Sequential()
model.add(LSTM(40, activation='relu', input_shape=(30,1), return_sequences=True))
model.add(Conv1D(128, 2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(16, activation='relu'))
model.add(Flatten())
model.add(Dense(1,activation='sigmoid'))
model.summary()
#3. Compile, train
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
es = EarlyStopping(monitor='acc', patience=20, mode='auto', verbose=1)
start = time.time()
model.fit(x_train, y_train, epochs=100, verbose=1, validation_split=0.2, batch_size=32, shuffle=True, callbacks=[es])
걸린시간 = round((time.time() - start) /60,1)
#4. Evaluate, predict
y_predict = model.predict(x_test)
loss = model.evaluate(x_test, y_test)
ic(loss[0])
ic(loss[1])
ic(f'{걸린시간}분')
# ic| loss[0]: 0.028651483356952667
# ic| loss[1]: 0.988304078578949
# ic| f'{걸린시간}분': '0.8분'
|
[
"nbaksa3@gamil.com"
] |
nbaksa3@gamil.com
|
fb82d01ee42c4f74d1b66246033ac584b40173c8
|
9bb28ccac08cbb00b3b973ed1283a4e63db045de
|
/venv/Scripts/pip3.8-script.py
|
6f8e9a2a91deacf7a08089af0bac8dcaf76cb0fa
|
[] |
no_license
|
sanghee5/polls_example2
|
762202280519efdddff7015cb4399cf80fac94d4
|
6f0511a6bf0994f46168902c54266ef8f9107519
|
refs/heads/master
| 2022-08-21T03:59:38.140488
| 2020-05-26T11:47:10
| 2020-05-26T11:47:10
| 267,026,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
#!C:\Users\woo\PycharmProjects\polls_example2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"dnstks12345@naver.com"
] |
dnstks12345@naver.com
|
4b77666f51cdd6605d73087ff62fc22b273bc31e
|
0da0173a046bc8f2ea67e553b2e4cf52619ae8b6
|
/puq/adaptive.py
|
cdce1f14264c94239b9692dafc4b84b69b293067
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
dalg24/puq
|
7cb6e9ba487ad867a9ce1a5c9b1bc7986aedfd7b
|
ea547cd80205f65d6227049868153b6ca154334b
|
refs/heads/master
| 2020-12-26T02:32:08.149124
| 2016-06-29T06:41:28
| 2016-06-29T06:41:28
| 64,855,996
| 0
| 0
| null | 2016-08-03T15:13:17
| 2016-08-03T15:13:17
| null |
UTF-8
|
Python
| false
| false
| 15,541
|
py
|
"""
h-Adaptive Stochastic Collocation
"""
import numpy as np
from puq.hdf import get_result, get_params, get_param_names
from puq.options import options
from puq.psweep import APSweep
from adap import uqsolver
from logging import info, debug, exception, warning, critical
import h5py, sys
from puq.util import process_data
from puq.pdf import PDF
import matplotlib
from puq.meshgridn import meshgridn
from puq.response import SampledFunc
class AdapStocColl(APSweep):
"""
Class implementing h-Adaptive Stochastic Collocation.
- **params** : Input list of :class:`Parameter`\s
- **tol** : Tolerance. Try 0.1 first, then decrease if further\
accuracy is needed.
- **max_iterations** : Maximum number of iterations to perform.\
The method will loop, performaning additional calculations and\
refining its results until either the specified tolerance is met,\
or the number of iterations is *max_iterations*. Default\
is None.
- **level** : Interpolation level. Default is 2
- **sel** : Dimensional Selectivity. Default is 0.5.
- **callback** : Optional function that is called every iteration.
"""
def __init__(self, params, tol, max_iterations=None, level=2, sel=0.5, callback=None):
APSweep.__init__(self)
self.params = params
self.level = level
self.tol = tol
self.sel = sel
self.max_iter = max_iterations
self._callback = callback
self._uqsolver = uqsolver(params, level, tol, sel)
def reinit(self):
print "REINIT %s %s %s %s" % (self.params, self.level, self.tol, self.sel)
APSweep.reinit(self)
self._callback = None # FIXME
self._uqsolver = uqsolver(self.params, self.level, self.tol, self.sel)
for p in self.params:
del p.values
return True
def extend(self, h5, args):
from optparse import OptionParser
debug(args)
usage = "Usage: sweep extend [keyword args] hdf5_filename.\n"
parser = OptionParser(usage)
parser.add_option("--tol", type='float', default = self.tol)
parser.add_option("--max_iter", type='int', default = self.max_iter)
(opt, ar) = parser.parse_args(args=list(args))
if opt.tol > self.tol:
print "Error: Previous tolerance was %s. You cannot" % self.tol
print "increase the tolerance."
sys.exit(1)
if opt.max_iter == self.max_iter and opt.tol == self.tol:
print "Error: Tolerance and Iterations are unchanged."
print "Nothing to do here."
sys.exit(0)
if opt.max_iter and self.max_iter and opt.max_iter < self.max_iter \
and opt.tol == self.tol:
print "Error: Previous iterations was %s. You cannot" % self.iter_max
print "decrease the iterations."
sys.exit(1)
if opt.tol != self.tol:
print "Changing tol from %s to %s" % (self.tol, opt.tol)
if opt.max_iter != self.max_iter:
print "Changing max_iter from %s to %s" % (self.max_iter, opt.max_iter)
self.tol = opt.tol
self.max_iter = opt.max_iter
self._sweep._reinit = True
self.reinit()
# Remove old results
try:
del h5['output/data']
except:
pass
self._sweep.host.reinit()
# Returns a list of name,value tuples
# For example, [('t', 1.0), ('freq', 133862.0)]
def get_args(self):
par = self._uqsolver.iadaptiveparams()
plist = par.tolist()
if plist == []:
return
for i, p in enumerate(self.params):
pcol = par[:,i]
try:
p.values.append(pcol)
except AttributeError:
p.values = [pcol]
for row in plist:
yield zip([p.name for p in self.params], row)
def analyze(self, hf):
process_data(hf, 'AdapStocColl', self._do_pdf)
def iteration_cb(self, sw, iter):
"""
Callback for each iteration. The sweep method calls this for
every iteration. This method then calls its registered callback.
"""
z = sw.get_result(iteration=iter)
# fixme: z must be floats
m,v,e = self._uqsolver.doiadaptive(z)
"""
put mean, var, std, err, pdf in /AdapStocColl
These will be indexed for each iteration, so
/AdapStocColl/mean/1 will be the mean after iteration 1.
"""
hf = h5py.File(sw._fname)
try:
hf['/AdapStocColl/mean/%d' % iter] = m
hf['/AdapStocColl/variance/%d' % iter] = v
hf['/AdapStocColl/std/%d' % iter] = np.sqrt(v)
hf['/AdapStocColl/error/%d' % iter] = e
except:
pass
# Call the callback, if defined
if self._callback:
finished = self._callback(iter, hf, z, m, v, e)
else:
finished = False
if iter == 0:
print "Iter mean var dev errind points cached"
print "%d: %.4e %.4e %.4e %.4e %5d %5d" \
% (iter, m, v, np.sqrt(v), e, self._num_jobs, self._num_jobs_cached)
hf.close()
if self.max_iter and iter >= self.max_iter:
finished = True
return finished
# plot types:
# surface - resampled using interpolate()
# scatter - all points
# scatter - for each iteration
def plot_response(self, h5, ivars=''):
fmt = options['plot']['format']
if fmt == 'png' or fmt == 'i':
load = options['plot']['iformat']
else:
load = fmt
matplotlib.use(load, warn=False)
import matplotlib.pyplot as plt
if ivars:
num_params = len(ivars)
else:
ivars = get_param_names(h5)
num_params = len(ivars)
if num_params > 2:
print "Error: Cannot plot in more than three dimensions."
print "Use '-v' to select a subset of input parameters."
raise ValueError
if num_params > 1:
self.scatter3(h5, ivars)
self.scatter3(h5, ivars, iteration='sum')
else:
self.scatter2(h5, ivars[0])
self.scatter2(h5, ivars[0], iteration='sum')
if fmt == 'i':
try:
plt.show()
except KeyboardInterrupt :
pass
def _do_pdf(self, hf, data):
num = 10000
params = get_params(hf['/'])
ndims = len(params)
pts = np.empty((num, ndims + 1))
for i, p in enumerate(params):
pts[:,i] = p.pdf.ds(num)
self._uqsolver.interpolate(pts)
rs = self.response_func()
last_iter = self.iteration_num-1
mean = hf['/AdapStocColl/mean/%d' % last_iter].value
var = hf['/AdapStocColl/variance/%d' % last_iter].value
std = hf['/AdapStocColl/std/%d' % last_iter].value
error = hf['/AdapStocColl/error/%d' % last_iter].value
return [('sampled_pdf', pts[:,-1]),
('mean', mean),
('dev', std),
('var', var),
('error', error),
('response_func', rs)]
def response_func(self):
iters = self.iteration_num
ndims = len(self.params)
# calculate the optimal flat grid based on the hierarchal grid
vecs = []
for p in self.params:
x = []
for iteration in range(0, iters):
x = np.concatenate((x, p.values[iteration]))
last = None
mindist = 1e309
for v in sorted(x):
if v != last:
if last != None:
mindist = min(mindist, v-last)
last = v
debug("%s: %s %s grids" % (p.name, mindist,
(p.pdf.range[1] - p.pdf.range[0])/mindist))
vecs.append(np.arange(p.pdf.range[0], p.pdf.range[1] + mindist, mindist))
xx = meshgridn(*vecs)
pts = np.vstack(map(np.ndarray.flatten, xx)).T
# add column for results
pts = np.append(pts, np.zeros((len(pts),1)), axis=1)
# interpolate function requires array in contiguous memory
if pts.flags['C_CONTIGUOUS'] == False:
pts = np.ascontiguousarray(pts)
self._uqsolver.interpolate(pts)
return SampledFunc(pts, params=self.params)
"""
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
fig = plot_figure()
ax = Axes3D(fig, azim = 30, elev = 30)
X = pts[:,0].reshape(xx[0].shape)
Y = pts[:,1].reshape(xx[0].shape)
try:
Z = pts[:,2].reshape(xx[0].shape)
ax.plot_surface(X,Y,Z, rstride = 1, cstride = 1, cmap=cm.jet, alpha = 0.5)
except:
plt.plot(X, Y, color='green')
plt.show()
"""
"""
def scatter2(self, hf, input_var='', output_var='', iteration='all'):
import matplotlib.pyplot as plt
from matplotlib import cm
fmt = options['plot']['format']
parameters = hdf5_get_params(hf)
parameter_names = [p.name for p in parameters]
if input_var:
ivar = [p for p in parameters if p.name == input_var][0]
else:
ivar = parameters[0]
if not ivar:
print "Error: Unrecognized input variable: %s" % input_var
raise ValueError
num_iterations = hdf5_get_iterations(hf)
if iteration == 'all':
for iteration in range(0, num_iterations):
fig = plot_figure()
plt.xlabel(ivar.description)
data = hdf5_get_result(hf, var=output_var, iteration=iteration)
plt.scatter(ivar.values[iteration], data)
plt.suptitle("Iteration %s" % iteration)
fig.canvas.manager.set_window_title("Iteration %s" % iteration)
elif iteration == 'sum':
fig = plot_figure()
plt.xlabel(ivar.description)
x = []
y = []
iters = []
for iteration in range(0, num_iterations):
x = np.concatenate((x, ivar.values[iteration]))
tmp = np.empty((len(ivar.values[iteration])))
tmp[:] = float(iteration)
iters = np.concatenate((iters, tmp))
data = hdf5_get_result(hf, var=output_var, iteration='sum')
plt.scatter(x, data, c=iters, cmap=cm.jet)
plt.suptitle("All %s Iterations" % num_iterations)
fig.canvas.manager.set_window_title("All %s Iterations" % num_iterations)
else:
fig = plot_figure()
plt.xlabel(ivar.description)
plt.suptitle("Iteration %s" % iteration)
fig.canvas.manager.set_window_title("Iteration %s" % iteration)
data = hdf5_get_result(hf, var=output_var, iteration=iteration)
plt.scatter(ivar.values[iteration], data, color='blue', alpha=.5)
#plot_customize()
if fmt != 'i':
plt.savefig("%s-scatter[%s].%s" % (output_var, input_var, fmt))
# 3D scatter plot
# iteration='all', 'last', 'sum' or number
def scatter3(self, hf, input_vars=[], output_var='', iteration='all'):
print "scatter %s" % (input_vars)
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
input_vars = hdf5_get_params(hf, input_vars)
outvars = hdf5_get_output_names(hf)
outdesc = hdf5_prog_description(hf)
if output_var and not output_var in outvars:
print "Error: Unrecognized output variable: %s" % output_var
return
if not output_var:
output_var = outvars[0]
fmt = options['plot']['format']
num_iterations = hdf5_get_iterations(hf)
if iteration == 'all':
for iteration in range(0, num_iterations):
print "iteration: %s" % iteration
fig = plot_figure()
ax = Axes3D(fig, azim = 30, elev = 30)
plt.xlabel(param_description(input_vars[0]))
plt.ylabel(param_description(input_vars[1]))
plt.suptitle("Iteration %s" % iteration)
fig.canvas.manager.set_window_title("Iteration %s" % iteration)
x = np.array(input_vars[0].values[iteration])
y = np.array(input_vars[1].values[iteration])
odata = hdf5_get_result(hf, var=output_var, iteration=iteration)
ax.scatter(x, y, odata, linewidths=(2.,))
ax.set_zlabel(hdf5_data_description(hf, output_var))
elif iteration == 'sum':
fig = plot_figure()
ax = Axes3D(fig, azim = 30, elev = 30)
ax.set_zlabel(hdf5_data_description(hf, output_var))
x = []
y = []
iters = []
for iteration in range(0, num_iterations):
x = np.concatenate((x, input_vars[0].values[iteration]))
y = np.concatenate((y, input_vars[1].values[iteration]))
tmp = np.empty((len(input_vars[0].values[iteration])))
tmp[:] = float(iteration)
iters = np.concatenate((iters, tmp))
odata = hdf5_get_result(hf, var=output_var, iteration='sum')
ax.scatter(x, y, odata, c=iters, cmap=cm.jet)
plt.xlabel(param_description(input_vars[0]))
plt.ylabel(param_description(input_vars[1]))
plt.suptitle("All %s Iterations" % num_iterations)
fig.canvas.manager.set_window_title("All %s Iterations" % num_iterations)
else:
print "iteration: %s" % iteration
fig = plot_figure()
ax = Axes3D(fig, azim = 30, elev = 30)
plt.xlabel(param_description(input_vars[0]))
plt.ylabel(param_description(input_vars[1]))
plt.suptitle("Iteration %s" % iteration)
fig.canvas.manager.set_window_title("Iteration %s" % iteration)
x = np.array(input_vars[0].values[iteration])
y = np.array(input_vars[1].values[iteration])
odata = hdf5_get_result(hf, var=output_var, iteration=iteration)
ax.scatter(x, y, odata, linewidths=(2.,))
ax.set_zlabel(hdf5_data_description(hf, output_var))
#plot_customize()
if fmt != 'i':
plt.savefig("%s-scatter.%s" % ('test', fmt))
def plot_pdfs(self, h5, kde, hist, vars):
from plot import plot_pdf
fmt = options['plot']['format']
if fmt == 'png' or fmt == 'i':
load = options['plot']['iformat']
else:
load = fmt
matplotlib.use(load, warn=False)
import matplotlib.pyplot as plt
if vars:
print "Plotting PDFs with a subset of variables"
print "is not implemented yet."
return
title = hdf5_prog_description(h5)
var = hdf5_get_output_names(h5)[0]
xlabel = hdf5_data_description(h5, var)
data = h5['AdapStocColl/%s/sampled_pdf' % var].value
plot_pdf(data, kde, hist, title, xlabel, var)
if fmt == 'i':
try:
plt.show()
except KeyboardInterrupt :
pass
"""
|
[
"huntmartinm@gmail.com"
] |
huntmartinm@gmail.com
|
f6d96854d2e988c2c0d5f5a1574b5cc6c67840af
|
f31630adc1e677065975a4b57902db5e0700e4e9
|
/Exploratory Analysis.py
|
a0e87bf56446b76e8896020163885234c2d39435
|
[] |
no_license
|
kkoehncke/ML-Cheat-Sheets
|
8a5847a88b49e8d6b92ce5168a637347453fb3c5
|
2e85180a432a3b4b9aa591f6ac65dcbfe7721d2c
|
refs/heads/master
| 2020-03-11T20:08:33.822196
| 2018-04-19T14:40:55
| 2018-04-19T14:40:55
| 130,228,713
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
# Exploratory Analysis cheat sheet
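# Setup assumed by the snippets below (the aliases and the sample DataFrame are
# assumptions added here, not part of the original notes):
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('data.csv')   # hypothetical input file
correlations = df.corr()       # consumed by the heatmap snippet further down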
# Dataframe dimensions
df.shape
# Column datatypes
df.dtypes
# Summarize numerical features
df.describe()
# Summarize categorical features
df.describe(include=['object'])
# Display first 5 rows; can specify a number inside to show n rows
df.head()
# Display last 5 rows of data
df.tail()
# Filter and display only df.dtypes that are 'object'
df.dtypes[df.dtypes == 'object']
# Segment by <> and display the means within each class; can do the same with .std()
df.groupby('<>').mean()
# Segment by <> and display the means and standard deviations within each class
df.groupby('<>').agg([np.mean, np.std])
# Loop through categorical feature names and print each one
for feature_names in df.dtypes[df.dtypes == 'object'].index:
print (feature_names)
# Plot bar plot for each categorical feature
for feature_names in df.dtypes[df.dtypes == 'object'].index:
sns.countplot(y = feature_names, data=df)
plt.show()
# Plot histogram grid
df.hist(figsize=(14,14), xrot=-45)
# Clear the text "residue"
plt.show()
# Bar plot for '<insert column name>'
sns.countplot(y = '<>', data=df)
# Boxplot of <> and <>
sns.boxplot(x = '<>', y = '<>', data = df)
# Violinplot of <> and <>
sns.violinplot(y = '<>', x = '<>', data = df)
# Make the figure 9 x 8
plt.figure(figsize=(9,8))
# Plot heatmap of annotated correlations
sns.heatmap(correlations*100,annot = True ,fmt='.0f', cbar=False)
#For classification problems (bivariate)
sns.lmplot(x='<>', y='<>', hue='<binary target variable>', data=df, fit_reg=False)
# If we want scatter of only one of the target variables
sns.lmplot(x='<>', y='<>', data=df[df.<target column> == '<target value>'], fit_reg=False)
|
[
"kkoehncke@captechventures.com"
] |
kkoehncke@captechventures.com
|
3230d5448ef48ac2a50e98f9791b15a0ed770f9f
|
0147677b611e40ac695ba07f914264b3470a7401
|
/src/mac_address_info.py
|
4ad9f15c04cbbf5909df457315f089a8c5f1a0cb
|
[] |
no_license
|
mblomdahl/sniffer
|
a2aed3ee37bb9a39d3c13ad8455ce7c7a2fc58c7
|
9101c59f958bb94fe1443fd90e95d333a02b785f
|
refs/heads/master
| 2021-01-24T00:23:30.318623
| 2015-08-14T12:56:33
| 2015-08-14T12:56:33
| 41,627,533
| 0
| 0
| null | 2015-08-30T12:11:01
| 2015-08-30T12:11:01
| null |
UTF-8
|
Python
| false
| false
| 5,190
|
py
|
import json
import urllib2
import os
class MacAddressInfo:
def __init__(self):
self.mac_address = ""
self.company = ""
self.address1 = ""
self.address2 = ""
self.address3 = ""
self.country = ""
class MacAddressStorage:
def __init__(self):
self.data = [] # creates a new empty list
def mac_address_lookup_from_internet(self, mac_address):
try:
print "Load from Internet %s" % mac_address
# Set the request URL http://www.macvendorlookup.com/api/v2/08-86-3B-D4-90-C0
url = 'http://www.macvendorlookup.com/api/v2/' + mac_address
# Send the GET request
response = urllib2.urlopen(url)
resp = response.read()
            mac_object = MacAddressInfo()  # instantiate; binding the bare class would mutate class attributes
data = []
if resp:
# Interpret the JSON response
#data = json.loads(resp.decode('utf8'))
data = json.loads(resp)
mac_object.mac_address = mac_address
for company in data:
mac_object.company = company['company']
for address1 in data:
mac_object.address1 = address1['addressL1']
for address2 in data:
mac_object.address2 = address2['addressL2']
for address3 in data:
mac_object.address3 = address3['addressL3']
for country in data:
mac_object.country = country['country']
else:
mac_object.mac_address = mac_address
mac_object.company = ""
mac_object.address1 = ""
mac_object.address2 = ""
mac_object.address3 = ""
mac_object.country = ""
return mac_object
        except Exception:
            print "Unexpected error looking up %s via %s" % (mac_address, url)
return None
def mac_address_lookup_from_cache(self, mac_address):
try:
self.load_data_from_file()
count = len( self.data["mac addresses"] )
for index in range(count):
if self.data["mac addresses"][index]["macaddress"] == mac_address:
                    mac_object = MacAddressInfo()  # instantiate; binding the bare class would mutate class attributes
mac_object.mac_address = mac_address
mac_object.company = self.data["mac addresses"][index]["company"]
mac_object.address1 = self.data["mac addresses"][index]["address1"]
mac_object.address2 = self.data["mac addresses"][index]["address2"]
mac_object.address3 = self.data["mac addresses"][index]["address3"]
mac_object.country = self.data["mac addresses"][index]["country"]
return mac_object
return None
except :
print "mac_address_lookup_from_cache error:"
return None
def mac_address_lookup(self, mac_address):
try:
mac_object = self.mac_address_lookup_from_cache(mac_address)
if mac_object is None :
mac_object = self.mac_address_lookup_from_internet(mac_address)
if mac_object is not None :
#self.load_data_from_file()
print mac_address
self.data["mac addresses"].append( {"macaddress":mac_address, "company":mac_object.company, "address1":mac_object.address1, "address2":mac_object.address2, "address3":mac_object.address3, "country":mac_object.country} )
self.store_data_to_file()
else :
return None
return mac_object
except :
print "mac_address_lookup error:"
return None
def load_data_from_file(self):
if len( self.data ) == 0:
if os.path.exists("/home/pi/sniffer/mac_addresses.json"):
file_handel = open('/home/pi/sniffer/mac_addresses.json', 'r')
self.data = json.load(file_handel)
#print "Load"
else:
#file_handel = open('/home/pi/sniffer/mac_addresses.json', 'w')
self.data.append( {"mac addresses":[]} )
#print "new"
def store_data_to_file(self):
file_handel = open('/home/pi/sniffer/mac_addresses.json', 'w')
json.dump(self.data, file_handel, sort_keys=True, indent=2)
#file_handel.write('\n')
if __name__ == '__main__':
storage = MacAddressStorage()
mac_object = MacAddressInfo()
#mac_object = storage.mac_address_lookup("08:86:3B:D4:90:C0")
#mac_object = storage.mac_address_lookup("6C:F3:73:E6:0A:11")
mac_object = storage.mac_address_lookup("9C:6C:15:97:76:04")
#print storage.mac_address_lookup("08-86-3B-D4-90-C0").mac_address
if mac_object :
print mac_object.mac_address
print mac_object.company
print mac_object.address1
print mac_object.address2
print mac_object.address3
print mac_object.country
else :
print "Error"
|
[
"="
] |
=
|
da60ebdb0c9f8856df41a22fbe7a5925c0a77927
|
a4d250ce393012dc251cb955096cb8f284c57439
|
/gunion/data/battle.py
|
2e86c5bc5fb1e0344791a01e941d7330c554ed7e
|
[] |
no_license
|
daxingyou/test-2
|
b02af312784d06a46e29acd42e756e92afee6ed9
|
a16c872ba781855a8c891eff41e8e651cd565ebf
|
refs/heads/master
| 2023-03-16T04:21:23.704482
| 2018-09-28T00:51:23
| 2018-09-28T00:51:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,188
|
py
|
#coding:utf8
"""
Created on 2016-07-28
@Author: jiangtaoran(jiangtaoran@ice-time.cn)
@Brief : union (guild) war data model
"""
import base64
from utils import logger
from utils import utils
from datalib.data_loader import data_loader
class UnionBattleInfo(object):
"""一场联盟战争
"""
    BATTLE_STAGE_INVALID = 0 # invalid
    BATTLE_STAGE_IDLE = 1 # no battle underway
    BATTLE_STAGE_PREPARE = 2 # preparation stage
    BATTLE_STAGE_FIGHT = 3 # fighting stage
    BATTLE_STAGE_CLOSE = 4 # closed
__slots__ = [
"id",
"union_id",
"index",
"stage",
"rival_union_id",
"rival_battle_id",
"is_initiator", #是否战争发起方
"launch_time", #战争发起时间
"fight_time", #战争开战时间
"close_time", #战争结束时间
"finish_time", #战争生命周期终止时间,可以开始下一场战争
"is_deployed", #是否已经完成防御部署
"battle_count", #战斗的数量
"score", #胜场积分
"individuals_score", #成员战功之和
"drum",
"attack_level",
"attack_win_count_this_level", #本轮战斗中攻击胜利次数
"attack_lose_count_this_level", #本轮战斗中攻击失败次数
"defend_nodes_level", #防守方的节点level
"record_index",
"accepted_members", #大宝箱领取的记录
"accepted_names",
"accepted_icons",
"reward_items",
"reward_nums",
"accept_times"
]
def __init__(self):
self.id = 0
self.union_id = 0
self.index = 0
self.stage = UnionBattleInfo.BATTLE_STAGE_INVALID
self.rival_union_id = 0
self.rival_battle_id = 0
self.is_initiator = False
self.launch_time = 0
self.fight_time = 0
self.close_time = 0
self.finish_time = 0
self.is_deployed = False
self.battle_count = 0
self.score = 0
self.individuals_score = 0
self.drum = 0
self.attack_level = 1
self.attack_win_count_this_level = 0
self.attack_lose_count_this_level = 0
self.defend_nodes_level = ""
self.record_index = 0
@staticmethod
def generate_id(union_id, index):
id = union_id << 32 | index
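        # e.g. union_id=5, index=3 -> (5 << 32) | 3 == 21474836483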
return id
@staticmethod
def create(union_id, index, invalid):
battle = UnionBattleInfo()
battle.id = UnionBattleInfo.generate_id(union_id, index)
battle.union_id = union_id
battle.index = index
if invalid:
            battle.stage = UnionBattleInfo.BATTLE_STAGE_INVALID # cannot launch a war
else:
battle.stage = UnionBattleInfo.BATTLE_STAGE_IDLE
battle.rival_union_id = 0
battle.rival_battle_id = 0
battle.is_initiator = False
battle.launch_time = 0
battle.fight_time = 0
battle.close_time = 0
battle.finish_time = 0
battle.is_deployed = False
battle.battle_count = 0
battle.score = 0
battle.individuals_score = 0
battle.drum = 0
battle.attack_level = 1
battle.attack_win_count_this_level = 0
battle.attack_lose_count_this_level = 0
battle.defend_nodes_level = ""
battle.record_index = 0
battle.accepted_members = "" #奖励箱领取
battle.accepted_names = ""
battle.accepted_icons = ""
battle.reward_items = ""
battle.reward_nums = ""
battle.accept_times = ""
return battle
def force_update_fight_time(self, time):
"""强制改变开战时间(内部接口)
"""
assert self.stage == self.BATTLE_STAGE_PREPARE
self.fight_time = time
def force_update_close_time(self, time):
"""强制改变结束战斗时间(内部接口)
"""
assert self.stage == self.BATTLE_STAGE_FIGHT
self.close_time = time
def force_update_finish_time(self, time):
"""强制改变结束时间(内部接口)
"""
assert self.stage == self.BATTLE_STAGE_CLOSE
self.finish_time = time
def is_able_to_join(self):
"""是否可以参战
"""
return self.stage == self.BATTLE_STAGE_IDLE
def is_able_to_deploy(self):
"""是否可以部署防御
"""
return self.stage == self.BATTLE_STAGE_PREPARE
def is_able_to_drum(self):
"""是否可以擂鼓
"""
return self.stage == self.BATTLE_STAGE_FIGHT
def is_at_war(self):
"""是否在交战中
"""
return (self.stage == self.BATTLE_STAGE_PREPARE or
self.stage == self.BATTLE_STAGE_FIGHT or
self.stage == self.BATTLE_STAGE_CLOSE)
def launch(self, now, rival_union_id, rival_battle_id, initiative = True):
"""发起战争
"""
assert self.stage == self.BATTLE_STAGE_IDLE
self.stage = self.BATTLE_STAGE_PREPARE
self.rival_union_id = rival_union_id
self.rival_battle_id = rival_battle_id
self.is_initiator = initiative
self.launch_time = now
        #self.fight_time = utils.get_spec_second(
        #    self.launch_time, "22:30") + utils.SECONDS_OF_DAY # fight time: 22:30 the day after launch
        #self.close_time = self.fight_time + utils.SECONDS_OF_DAY # close time: 22:30 the day after fight time
        #self.finish_time = utils.get_spec_second(
        #    self.close_time, "05:00" ) + utils.SECONDS_OF_DAY # finish time: 05:00 the day after close
        self.fight_time = utils.get_spec_second(self.launch_time, "21:00") # fight time: 21:00 on launch day
        self.close_time = utils.get_spec_second(self.launch_time, "23:00") # close time: 23:00 on launch day
        self.finish_time = utils.get_spec_second(self.launch_time, "05:00"
            ) + utils.SECONDS_OF_DAY # finish time: 05:00 the next day
self.is_deployed = False
self.accepted_members = ""
self.accepted_names = ""
self.accepted_icons = ""
self.reward_items = ""
self.reward_nums = ""
self.accept_times = ""
def start_fight(self, now):
"""进入开战阶段
"""
assert self.stage == self.BATTLE_STAGE_PREPARE
self.stage = self.BATTLE_STAGE_FIGHT
self.is_deployed = True
self.attack_level = 1
def is_fight_closed(self, now):
"""战斗结算是否结束
"""
return self.launch_time != 0 and now >= self.close_time
def close_fight(self):
"""战争结束
"""
#assert self.stage == self.BATTLE_STAGE_FIGHT
self.stage = self.BATTLE_STAGE_CLOSE
def is_finished(self, now):
"""战争是否结束
"""
return self.launch_time != 0 and now >= self.finish_time and now >= self.close_time
def is_able_to_start(self):
"""是否可以开战
"""
return self.stage == self.BATTLE_STAGE_FIGHT
def beat_drum(self, value = 1):
"""擂鼓
"""
assert value >= 0
self.drum += value
def get_attack_buff_count(self):
"""获取当前攻击 buff 加成
"""
drum_ratio = int(float(
data_loader.UnionConfInfo_dict["attack_buff_count_per_drum"].value))
lose_ratio = int(float(
data_loader.UnionConfInfo_dict["attack_buff_count_per_lose"].value))
return self.drum * drum_ratio + self.attack_lose_count_this_level * lose_ratio
def get_attack_buff_temporary_count(self):
"""获取当前轮次临时攻击 buff 加成
"""
lose_ratio = int(float(
data_loader.UnionConfInfo_dict["attack_buff_count_per_lose"].value))
return self.attack_lose_count_this_level * lose_ratio
def mark_attack_result(self, win):
"""记录攻击结果
"""
if win:
self.attack_win_count_this_level += 1
#else:
# self.attack_lose_count_this_level += 1
        # attack advances to the next round
count = int(float(data_loader.UnionConfInfo_dict["battle_map_node_count"].value))
if self.attack_win_count_this_level >= count:
self.attack_level += 1
self.attack_win_count_this_level = 0
self.attack_lose_count_this_level = 0
def gain_union_score(self, value = 1):
"""增加联盟胜场积分
"""
assert value >= 0
self.score += value
def gain_individuals_score(self, value):
"""增加成员战功点数
"""
assert value >= 0
self.individuals_score += value
def get_next_record_index(self):
"""获取下一个战斗记录 index
"""
self.record_index += 1
return self.record_index
def is_able_to_accept_box(self):
"""是否可以领取大宝箱/
"""
if self.stage != self.BATTLE_STAGE_CLOSE:
return False
level = int(float(data_loader.UnionConfInfo_dict["battle_map_total_level"].value))
count = int(float(data_loader.UnionConfInfo_dict["battle_map_node_count"].value))
if level * count > self.score:
return False
return True
def get_accepted_members(self):
"""获取领取过奖励的成员"""
return utils.split_to_int(self.accepted_members)
def get_reward_record(self):
"""获取奖励领取记录"""
members = utils.split_to_int(self.accepted_members)
names = utils.split_to_string(self.accepted_names)
icons = utils.split_to_int(self.accepted_icons)
items_id = utils.split_to_int(self.reward_items)
items_num = utils.split_to_int(self.reward_nums)
times = utils.split_to_int(self.accept_times)
names = [base64.b64decode(name) for name in names]
return map(None, members, names, icons, items_id, items_num, times)
def add_reward_record(self, user_id, user_name, icon_id, item_id, item_num, now):
"""添加领奖记录"""
members = utils.split_to_int(self.accepted_members)
names = utils.split_to_string(self.accepted_names)
icons = utils.split_to_int(self.accepted_icons)
items_id = utils.split_to_int(self.reward_items)
items_num = utils.split_to_int(self.reward_nums)
times = utils.split_to_int(self.accept_times)
members.append(user_id)
names.append(user_name)
icons.append(icon_id)
items_id.append(item_id)
items_num.append(item_num)
times.append(now)
self.accepted_members = utils.join_to_string(members)
self.accepted_names = utils.join_to_string(names)
self.accepted_icons = utils.join_to_string(icons)
self.reward_items = utils.join_to_string(items_id)
self.reward_nums = utils.join_to_string(items_num)
self.accept_times = utils.join_to_string(times)
|
[
"luhongwei1@ice-time.cn"
] |
luhongwei1@ice-time.cn
|
d8db374595ea9b2c375ef52a5364a9fa9f258336
|
626a08cf643775368dff313e1049ba55d559e986
|
/libs/metrics/type_composite_index.py
|
01973da142755b0460a52ac3eddbd4907af9cfba
|
[] |
no_license
|
NashLea/SecuritiesAnalysisTools
|
3fd995a4d4d714cff81cd60cb6f885880c175d19
|
3fd5ae12714f56efd5dc395ae7a1e5acc7778aba
|
refs/heads/master
| 2022-11-06T06:43:09.399530
| 2020-06-20T03:27:22
| 2020-06-20T03:27:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,056
|
py
|
import os
import json
import pandas as pd
import numpy as np
from libs.tools import cluster_oscs
from libs.tools import windowed_moving_avg
from libs.utils import download_data_indexes
from libs.utils import dual_plotting, generic_plotting
from libs.utils import ProgressBar, index_appender
from libs.utils import STANDARD_COLORS
ERROR_COLOR = STANDARD_COLORS["error"]
WARNING = STANDARD_COLORS["warning"]
NORMAL = STANDARD_COLORS["normal"]
def type_composite_index(**kwargs) -> list:
"""Type Composite Index (MCI)
Similar to MCI, TCI compares broader market types (sensitive, cyclical, and defensive)
Optional Args:
config {dict} -- controlling config dictionary (default: {None})
plot_output {bool} -- True to render plot in realtime (default: {True})
period {str / list} -- time period for data (e.g. '2y') (default: {None})
clock {float} -- time for prog_bar (default: {None})
data {pd.DataFrame} -- fund datasets (default: {None})
sectors {list} -- list of sectors (default: {None})
returns:
list -- dict contains all tci information, data, sectors
"""
config = kwargs.get('config')
period = kwargs.get('period')
plot_output = kwargs.get('plot_output', True)
clock = kwargs.get('clock')
data = kwargs.get('data')
sectors = kwargs.get('sectors')
if config is not None:
period = config['period']
properties = config['properties']
elif period is None:
        print(
            f"{ERROR_COLOR}ERROR: neither config nor period provided " +
            f"for type_composite_index{NORMAL}")
return {}
else:
        # Support for release 1 versions, where period was passed in directly
properties = dict()
properties['Indexes'] = {}
properties['Indexes']['Type Sector'] = True
# Validate each index key is set to True in the --core file
if properties is not None:
if 'Indexes' in properties.keys():
props = properties['Indexes']
if 'Type Sector' in props.keys():
                if props['Type Sector'] is True:
m_data = get_metrics_content()
if data is None or sectors is None:
data, sectors = metrics_initializer(
m_data, period='2y')
if data:
p = ProgressBar(
19, name='Type Composite Index', offset=clock)
p.start()
tci = dict()
composite = {}
for sect in sectors:
cluster = cluster_oscs(
data[sect],
plot_output=False,
function='market',
wma=False,
progress_bar=p
)
graph = cluster['tabular']
composite[sect] = graph
defensive = type_composites(
composite, m_data, type_type='Defensive')
p.uptick()
sensitive = type_composites(
composite, m_data, type_type='Sensitive')
p.uptick()
cyclical = type_composites(
composite, m_data, type_type='Cyclical')
p.uptick()
d_val = weighted_signals(
data, m_data, type_type='Defensive')
p.uptick()
s_val = weighted_signals(
data, m_data, type_type='Sensitive')
p.uptick()
c_val = weighted_signals(
data, m_data, type_type='Cyclical')
p.uptick()
d_val = windowed_moving_avg(d_val, 3, data_type='list')
c_val = windowed_moving_avg(c_val, 3, data_type='list')
s_val = windowed_moving_avg(s_val, 3, data_type='list')
p.uptick()
tci['defensive'] = {
"tabular": d_val,
"clusters": defensive
}
tci['sensitive'] = {
"tabular": s_val,
"clusters": sensitive
}
tci['cyclical'] = {
"tabular": c_val,
"clusters": cyclical
}
dates = data['VGT'].index
if plot_output:
dual_plotting(y1=d_val, y2=defensive, y1_label='Defensive Index',
y2_label='Clustered Osc', title='Defensive Index', x=dates)
dual_plotting(y1=s_val, y2=sensitive, y1_label='Sensitive Index',
y2_label='Clustered Osc', title='Sensitive Index', x=dates)
dual_plotting(y1=c_val, y2=cyclical, y1_label='Cyclical Index',
y2_label='Clustered Osc', title='Cyclical Index', x=dates)
generic_plotting([d_val, s_val, c_val], legend=[
'Defensive', 'Sensitive', 'Cyclical'], title='Type Indexes', x=dates)
else:
generic_plotting(
[d_val, s_val, c_val],
legend=['Defensive', 'Sensitive', 'Cyclical'],
title='Type Indexes',
x=dates,
saveFig=True,
ylabel='Normalized "Price"',
filename='tci.png'
)
p.end()
return tci, data, sectors
return {}, None, None
def metrics_initializer(m_data: dict, period='2y'):
"""Metrics Initializer
Keyword Arguments:
period {str} -- (default: {'2y'})
Returns:
        list -- downloaded_data, sector_list
"""
sectors = m_data['Components']
tickers = " ".join(sectors)
tickers = index_appender(tickers)
all_tickers = tickers.split(' ')
if isinstance(period, (list)):
period = period[0]
# tickers = index_appender(tickers)
print(" ")
print(f'Fetching Type Composite Index funds for {period}...')
data, _ = download_data_indexes(
indexes=sectors, tickers=all_tickers, period=period, interval='1d')
print(" ")
return data, sectors
def get_metrics_content() -> dict:
"""Get Metrics Content
Returns:
dict -- metrics file data
"""
metrics_file = os.path.join("resources", "sectors.json")
    if not os.path.exists(metrics_file):
        print(
            f"{WARNING}WARNING: '{metrics_file}' not found for " +
            f"'metrics_initializer'. Failed.{NORMAL}")
        return None  # match the documented dict-or-None contract (was a stray 3-tuple)
with open(metrics_file) as m_file:
m_data = json.load(m_file)
m_file.close()
m_data = m_data.get("Type_Composite")
return m_data
def type_composites(composite: dict, m_data: dict, type_type='Defensive') -> list:
"""Type Composites
Create the summed clustered composites
Arguments:
composite {dict} -- composite dictionary
m_data {dict} -- data from sectors.json
Keyword Arguments:
type_type {str} -- key for each m_data (default: {'Defensive'})
Returns:
list -- summed list of composites
"""
sector_data = m_data[type_type]
start_key = list(sector_data.keys())[0]
new_composite = []
for i in range(len(composite[start_key])):
value = 0.0
for fund in sector_data:
value += float(composite[fund][i]) * sector_data[fund]
new_composite.append(value)
return new_composite
def weighted_signals(data: dict, m_data: dict, type_type='Defensive') -> list:
"""Weighted Signals
Arguments:
data {dict} -- tci data object
m_data {dict} -- sectors.json content
Keyword Arguments:
type_type {str} -- (default: {'Defensive'})
Returns:
list -- weighted signal
"""
sector_data = m_data[type_type]
start_key = list(sector_data.keys())[0]
new_composite = [25.0]
for i in range(1, len(data[start_key]['Close'])):
value = 0.0
for fund in sector_data:
value += (data[fund]['Close'][i] - data[fund]['Close'][i-1]) /\
data[fund]['Close'][i-1] * sector_data[fund]
value = new_composite[-1] * (1.0 + value)
new_composite.append(value)
return new_composite
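# Worked example of the update rule above (hedged illustration): with weights
# {'A': 0.6, 'B': 0.4} and one-day returns of +1% and -2%, the weighted return is
# 0.6*0.01 + 0.4*(-0.02) = -0.002, so the index moves 25.0 -> 25.0*0.998 = 24.95.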
|
[
"ngamell@mmm.com"
] |
ngamell@mmm.com
|
87a0d04e73c54c1e0daef6dcf0e338c6af43be21
|
ef187d259d33e97c7b9ed07dfbf065cec3e41f59
|
/work/atcoder/abc/abc024/B/answers/111654_Gale.py
|
b31d17de7f8e5d4c0d019d4cbf95c0c6f7e11513
|
[] |
no_license
|
kjnh10/pcw
|
847f7295ea3174490485ffe14ce4cdea0931c032
|
8f677701bce15517fb9362cc5b596644da62dca8
|
refs/heads/master
| 2020-03-18T09:54:23.442772
| 2018-07-19T00:26:09
| 2018-07-19T00:26:09
| 134,586,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
# Each of the n events keeps a gate open for t seconds; sum the union of the
# open intervals so overlapping stretches are not double-counted.
n, t = map(int, input().split())
a = [int(input()) for i in range(n)]
ans = t
for i in range(1, n):
    ans += t
    if a[i] <= a[i - 1] + t:
        # intervals overlap: subtract the double-counted portion
        ans = ans - (a[i - 1] + t - a[i])
print(ans)
|
[
"kojinho10@gmail.com"
] |
kojinho10@gmail.com
|
0b583e86f97c1a537be2b27d6980f3a3dd93df1a
|
528c811306faa4a34bf51fca7955b7a24ac2e30c
|
/Python/Valid Anagram.py
|
263508830b33b30fd769bcad02fa5dbf91901f61
|
[] |
no_license
|
ganjingcatherine/LeetCode-1
|
1addbd7e4d9254a146601f9d5e28b8becb8235a6
|
488782d3f1e759da2d32b4e82dbf55b96c431244
|
refs/heads/master
| 2021-05-11T03:15:16.810035
| 2016-02-06T06:19:18
| 2016-02-06T06:19:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
"""
Given two strings s and t, write a function to determine if t is an anagram of s.
For example,
s = "anagram", t = "nagaram", return true.
s = "rat", t = "car", return false.
Note:
You may assume the string contains only lowercase alphabets.
"""
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) != len(t): return False
table = {}
for i in xrange(len(s)):
if s[i] not in table:
table[s[i]] = 1
else:
table[s[i]] += 1
for i in xrange(len(t)):
if t[i] in table and table[t[i]] > 0:
table[t[i]] -= 1
else:
return False
return True
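# Minimal usage sketch (assumes Python 2, matching the xrange calls above):
if __name__ == '__main__':
    sol = Solution()
    print sol.isAnagram("anagram", "nagaram")  # expect True
    print sol.isAnagram("rat", "car")          # expect False
# An equivalent O(n) alternative: collections.Counter(s) == collections.Counter(t)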
|
[
"anthonyjin0619@gmail.com"
] |
anthonyjin0619@gmail.com
|
9d101de5dc8616d67f19ec37db6ac2e7ed86d8b1
|
fde2a3a4858b37cafcd02cf917d3bd69680084b3
|
/Spacegame/Spacegame/scoreboard.py
|
7cb820bd2744867f4d0c0ffc58127b56a0c6463a
|
[] |
no_license
|
CaptainBlowFish/AlienInvasion
|
34b1e013e97c8f2a69aa82e9e51786ffc5e2f78f
|
6ec7f399bbaa92901ad8b015047bc6ebe6dc708e
|
refs/heads/main
| 2023-08-29T20:04:38.130623
| 2021-11-12T13:45:19
| 2021-11-12T13:45:19
| 422,559,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,082
|
py
|
from pygame import font
from pygame.sprite import Group
from ship import Ship
class Scoreboard:
"""A class to report scoreings information"""
def __init__(self, ai_game):
"""Initialize scorekeeping attributes"""
self.ai_game = ai_game
self.screen = ai_game.screen
self.screen_rect = self.screen.get_rect()
self.settings = ai_game.settings
self.stats = ai_game.stats
# Font settings for scoring information
self.text_color = (30,30,30)
self.font = font.SysFont(None, 48)
# Prepare the initial score image
self.prep_score()
self.prep_high_score()
self.prep_level()
self.ships = Group()
def prep_score(self):
"""Turn the score into a rendered image"""
rounded_score = round(self.stats.score, -1)
score_str = "{:,}".format(rounded_score)
self.score_image = self.font.render(score_str, True, self.text_color,None)
        # Display the score at the top right of the screen
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.score_rect.top = 20
def prep_level(self):
"""Turn the level into a rendered image."""
level_str = str(self.stats.level)
self.level_image = self.font.render(level_str, True, self.text_color, None)
# Position the level below the score
self.level_rect = self.level_image.get_rect()
self.level_rect.right = self.score_rect.right
self.level_rect.top = self.score_rect.bottom + 10
def prep_high_score(self):
"""Turn the high score into a rendered image"""
rounded_high_score = round(self.stats.high_score, -1)
high_score_str = "{:,}".format(rounded_high_score)
self.high_score_image = self.font.render(high_score_str, True, self.text_color,None)
        # Display the high score near the top center of the screen
self.high_score_rect = self.high_score_image.get_rect()
self.high_score_rect.right = self.screen_rect.centerx
self.high_score_rect.top = self.score_rect.top
def prep_ships(self):
"""Show howmany ships are left"""
self.ships.empty()
for ship_number in range(self.stats.ships_left):
ship = Ship(self.ai_game)
ship.rect.x = 10 + ship_number * ship.rect.width
ship.rect.y = 10
self.ships.add(ship)
def show_score(self):
"""Draw the score to the screen."""
self.screen.blit(self.score_image, self.score_rect)
self.screen.blit(self.high_score_image, self.high_score_rect)
self.screen.blit(self.level_image, self.level_rect)
self.ships.draw(self.screen)
def check_high_score(self):
"""Check to see if there's a new high score."""
if self.stats.score > self.stats.high_score:
self.stats.high_score = self.stats.score
self.prep_high_score()
|
[
"noreply@github.com"
] |
noreply@github.com
|
626be54fe2c402a3a685abc6d8479c10ea8a75aa
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CalibMuon/RPCCalibration/python/l1MuonOutputModule_cfi.py
|
6dbdc357f06e53ed7641a5fc49576123b5f1a25e
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 419
|
py
|
import FWCore.ParameterSet.Config as cms
from CalibMuon.RPCCalibration.l1Muon_EventContent_cff import *
L1MuonEventContent = cms.OutputModule("PoolOutputModule",
l1Muon_EventContent,
l1MuonEventSelection,
datasets = cms.untracked.PSet(
filterName = cms.untracked.string('l1Muon_Filter'),
dataTier = cms.untracked.string('USER')
),
fileName = cms.untracked.string('l1Muon.root')
)
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
b54fd0bc290b3f5a82c4cad6ff829f7b399573f4
|
ded81a7568fe04f3227562cc5f67ffc675617cc0
|
/cheer_app/migrations/0002_comment.py
|
a7803e53c60185ed5d941b24bfcce9f91293cac8
|
[] |
no_license
|
shin04/cheer
|
3e220afc1fb0a4329ff7c16bd4823da1c09ee0a9
|
da39bbc584350c0ac89c23dbbfaf1c96ab9148fd
|
refs/heads/master
| 2020-07-02T16:07:44.280390
| 2020-05-20T11:13:03
| 2020-05-20T11:13:03
| 183,242,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
# Generated by Django 2.2 on 2019-08-05 04:29
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cheer_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='cheer_app.Post')),
],
),
]
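# A hedged reconstruction (not part of this migration) of the model that
# would generate the CreateModel operation above; the real
# cheer_app/models.py may differ in details:
#
#   class Comment(models.Model):
#       post = models.ForeignKey('cheer_app.Post', on_delete=models.CASCADE,
#                                related_name='comments')
#       author = models.CharField(max_length=200)
#       text = models.TextField()
#       created_date = models.DateTimeField(default=timezone.now)
#       approved_comment = models.BooleanField(default=False)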
|
[
"daikon0413@gmail.com"
] |
daikon0413@gmail.com
|
e64030f4bfdc9f2ecd066eaf1ad8e5e2b067c849
|
e0eb81aef84ee0929aa3dfc166f29a343251c35b
|
/seafile-pro-server-7.0.10/pro/python/seafevents/tests/conftest.py
|
eadb20a149347ce0520fe528caab3c2e5768a7bc
|
[] |
no_license
|
Sandra-Z/filesharing
|
414fc56abe2f87b80ea390e0814a0bf86148a2bf
|
0e4e637f0c78f96949796b480b51df72d859c4ff
|
refs/heads/master
| 2022-12-11T12:28:07.155281
| 2019-11-19T08:39:51
| 2019-11-19T08:39:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,559
|
py
|
import os
import sys
import ConfigParser
import subprocess
from sqlalchemy import create_engine, text
from sqlalchemy.exc import DisconnectionError
from sqlalchemy.event import contains as has_event_listener, listen as add_event_listener
from urllib import quote_plus
from pytest import yield_fixture
from sqlalchemy.pool import Pool
from sqlalchemy.orm import sessionmaker
SEAHUB_DBNAME = ''
SEAFEVENTS_DBNAME = ''
TEST_DBNAME = ''
@yield_fixture(scope="module")
def test_db():
delete_all_table_if_exists()
# copy_db_from_seahub_with_no_data()
# copy_db_from_seafevent_with_no_data()
apply_tables()
yield None
# delete_all_table_if_exists()
def generate_tables_sql():
seahub_db = read_db_conf('SEAHUBDB')
seafevents_db = read_db_conf('SEAFEVENTSDB')
connection_data = [seahub_db[0]]
connection_data.extend(seahub_db[2:])
connection_data = tuple(connection_data)
cmd = "mysqldump -h%s -u%s -p%s --skip-add-locks --no-data --skip-add-drop-table --skip-comments %s > seahub.sql" % connection_data
cwd = ["bash", "-c", cmd]
subprocess.check_call(cwd, stdout=None, stderr=None)
connection_data = [seafevents_db[0]]
connection_data.extend(seafevents_db[2:])
connection_data = tuple(connection_data)
cmd = "mysqldump -h%s -u%s -p%s --skip-add-locks --no-data --skip-add-drop-table --skip-comments %s > seafevents.sql" % connection_data
cwd = ["bash", "-c", cmd]
subprocess.check_call(cwd, stdout=None, stderr=None)
merge_sql_file('raw_table_sql.sql')
def merge_sql_file(filename):
with open(filename, 'w') as fp:
for fname in ['seahub.sql', 'seafevents.sql']:
with open(fname) as tfp:
fp.write(tfp.read())
fp.write('\n')
def apply_tables():
seafevents_db = read_db_conf('TESTDB')
full_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'raw_table_sql.sql')
cmd = "mysql -h %s -u%s -p%s %s < %s" % (seafevents_db[0], seafevents_db[2], seafevents_db[3], seafevents_db[4], full_path)
cwd = ["bash", "-c", cmd]
try:
subprocess.check_call(cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except Exception as e:
        sys.stdout.write(str(e))
def delete_all_table_if_exists():
session = None
try:
        session = get_db_session('TESTDB')
sql = text('SET FOREIGN_KEY_CHECKS = 0;')
session.execute(sql)
sql = text('SELECT table_name FROM information_schema.tables where table_schema= :db;')
tables = session.execute(sql, {'db': TEST_DBNAME}).fetchall()
if tables:
for tablename in tables:
del_sql = text('drop table %s' % tablename[0])
session.execute(del_sql)
sql = text('SET FOREIGN_KEY_CHECKS = 1;')
session.execute(sql)
except Exception as e:
sys.stdout.write(str(e))
finally:
if session:
session.close()
def copy_db_from_seahub_with_no_data():
test_session = None
seahub_session = None
try:
test_session = get_db_session('TESTDB')
seahub_session = get_db_session('SEAHUBDB')
sql = text('SELECT table_name FROM information_schema.tables where table_schema= :db')
tables = seahub_session.execute(sql, {'db': SEAHUB_DBNAME}).fetchall()
if tables:
for t_name in tables:
create_sql = text('create table %s like %s' % (t_name[0], "{0}.{1}".format(SEAHUB_DBNAME, t_name[0])))
test_session.execute(create_sql)
except Exception as e:
sys.stdout.write(str(e))
finally:
if seahub_session:
seahub_session.close()
if test_session:
test_session.close()
def copy_db_from_seafevent_with_no_data():
test_session = None
seahub_session = None
try:
test_session = get_db_session('TESTDB')
seahub_session = get_db_session('SEAFEVENTSDB')
sql = text('SELECT table_name FROM information_schema.tables where table_schema= :db')
tables = seahub_session.execute(sql, {'db': SEAFEVENTS_DBNAME}).fetchall()
if tables:
for t_name in tables:
create_sql = text('create table %s like %s' % (t_name[0], "{0}.{1}".format(SEAFEVENTS_DBNAME, t_name[0])))
test_session.execute(create_sql)
except Exception as e:
sys.stdout.write(str(e))
finally:
if seahub_session:
seahub_session.close()
if test_session:
test_session.close()
def get_db_session(section):
config = ConfigParser.ConfigParser()
config.read('./db.cnf')
if not config.has_section(section):
sys.stdout.write("no section: %s" % section)
return
host, port, username, passwd, dbname = read_db_conf(section)
db_url = "mysql+mysqldb://%s:%s@%s:%s/%s?charset=utf8" % (username, quote_plus(passwd), host, port, dbname)
global SEAHUB_DBNAME, SEAFEVENTS_DBNAME, TEST_DBNAME
if section == 'TESTDB':
TEST_DBNAME = dbname
elif section == 'SEAFEVENTSDB':
SEAFEVENTS_DBNAME = dbname
elif section == 'SEAHUBDB':
SEAHUB_DBNAME = dbname
kwargs = dict(pool_recycle=300, echo=False, echo_pool=False)
engine = create_engine(db_url, **kwargs)
if not has_event_listener(Pool, 'checkout', ping_connection):
add_event_listener(Pool, 'checkout', ping_connection)
Session = sessionmaker(bind=engine)
return Session()
def read_db_conf(section):
config = ConfigParser.ConfigParser()
config.read('./db.cnf')
if not config.has_section(section):
sys.stdout.write("no section: %s" % section)
return
if config.has_option(section, 'host'):
host = config.get(section, 'host').lower()
else:
host = 'localhost'
if config.has_option(section, 'port'):
port = config.getint(section, 'port')
else:
port = 3306
username = config.get(section, 'username')
passwd = config.get(section, 'password')
dbname = config.get(section, 'name')
return (host, port, username, passwd, dbname)
def ping_connection(dbapi_connection, connection_record, connection_proxy): # pylint: disable=unused-argument
cursor = dbapi_connection.cursor()
try:
cursor.execute("SELECT 1")
cursor.close()
except:
connection_proxy._pool.dispose() # pylint: disable=protected-access
# Raise DisconnectionError so the pool would create a new connection
raise DisconnectionError()
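# A hedged sketch of the ./db.cnf layout these fixtures expect, inferred
# from read_db_conf(); the section names come from the calls above and
# the credential values are placeholders, not values from the repo:
#
#   [TESTDB]
#   host = localhost
#   port = 3306
#   username = seafile_test
#   password = change_me
#   name = seafevents_test
#
# [SEAHUBDB] and [SEAFEVENTSDB] follow the same five-key shape.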
|
[
"deutschland.gray@gmail.com"
] |
deutschland.gray@gmail.com
|
4d6b4640228f33cec47a54841e46353e8e26d85d
|
94f4b8a12cc09e3056cfc8d5304e5937b33ea6ec
|
/StemmingGUI.py
|
df3b3186cf7d4574b4fdb63f3d24f57296236b3b
|
[] |
no_license
|
Ehtisham09/Machine-Learning-Projects
|
8f1aaa7489bd84333491dbab8432cc76ed62915a
|
dd09308e555cc7aee0db74d91af6f5140e41d689
|
refs/heads/master
| 2020-07-13T03:12:27.513696
| 2019-10-20T18:25:43
| 2019-10-20T18:25:43
| 204,975,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,634
|
py
|
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import tkinter.scrolledtext as tkscrolled
root = Tk()
root.title("Steming")
root.geometry("1160x600")
root.resizable(0,0)
def showOriginalText():
pass
def showStopWords():
pass
def showUniqueWords():
pass
def showInfix():
pass
def showPrefix():
pass
def showPostfix():
pass
def customization():
pass
def showPostProcessing():
pass
tabbuttons = Frame(root)
b1 = Button(tabbuttons,text="Original Text", command=showOriginalText, height=1, width=20)
b1.grid(row=1, column=0)
b2 = Button(tabbuttons,text="Stop Words", command=showStopWords, height=1, width=20)
b2.grid(row=1, column=1)
b3 = Button(tabbuttons,text="Unique Words", command=showUniqueWords, height=1, width=20)
b3.grid(row=1, column=2)
b4 = Button(tabbuttons,text="Prefix", command=showPrefix, height=1, width=20)
b4.grid(row=1, column=3)
b5 = Button(tabbuttons,text="Postfix", command=showPostfix, height=1, width=20)
b5.grid(row=1, column=4)
b6 = Button(tabbuttons,text="Post-Processing", command=showPostProcessing, height=1, width=20)
b6.grid(row=1, column=5)
b7 = Button(tabbuttons,text="Infix", command=showInfix, height=1, width=20)
b7.grid(row=1, column=6)
tabbuttons.grid(row=1, pady=(30,0))
textbox = tkscrolled.ScrolledText(root, height=20, width=132)
textbox.grid(row=2, pady=(0,20), padx=50)
def InputFile():  # Browse for a text file and load it into the textbox
    root.filename = filedialog.askopenfilename(title="Select File", filetypes=[('Text files', '*.txt')])
    with open(root.filename, encoding="utf8") as f:
        content = f.read()
    textbox.insert(INSERT, content)
    # data = content.split("۔")  # possible Urdu sentence split, kept from the original
def stemData():
pass
buttons = Frame(root) # Three Main Buttons Frame
clear = Button(buttons, text= "Clear" , command= lambda: textbox.delete(1.0,END), height=2, width=20) # Clear Button
browsebutton = Button(buttons, text ="Browse From Computer",command = InputFile, height=2) # Browse Button
browsebutton.grid(row=3,column=1, padx=4)
clear.grid(row=3,column=2, padx=4)
buttons.grid()
submitButton = Button(root, text="Stemming", command= stemData, width= 20, height= 2, bg = "yellow", font='bold') # Submit Button
submitButton.config(font=("Calibri", 15))
submitButton.grid(pady=(20,15))
root.mainloop()
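# A hedged sketch of what the empty stemData() handler could do: pull the
# loaded text back out of the ScrolledText widget and tokenize it. The
# helper name is hypothetical, not from the repo.
def stem_data_sketch():
    raw = textbox.get("1.0", END)   # tkinter text indices start at "1.0"
    tokens = raw.split()
    print(len(tokens), "tokens ready for stemming")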
|
[
"noreply@github.com"
] |
noreply@github.com
|
85b871b51e585a914eea3800d452e2101a966e14
|
e211fdfc6fe8b79840409f7e2a2ee5e738bf9393
|
/main/migrations/0002_wishlist.py
|
4fcac7a1a83bc649d9aa74f9fdd14ab4a171ac52
|
[] |
no_license
|
Kuuhaku11/wishlist
|
ec28416c628d1df2c1e4a4f2ec8f767e255c1d3f
|
c97346c30c364da30d224edccf87a548b396a24c
|
refs/heads/master
| 2023-08-14T23:18:30.906645
| 2021-09-13T19:04:40
| 2021-09-13T19:04:40
| 404,098,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
# Generated by Django 3.2.7 on 2021-09-10 12:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Wishlist',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120)),
('is_hidden', models.BooleanField(default=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('product', models.ManyToManyField(to='main.Product')),
],
),
]
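# A hedged usage sketch (not part of this migration): querying a user's
# visible wishlists through the fields created above; prefetch_related
# avoids one extra query per wishlist for the M2M products.
#
#   Wishlist.objects.filter(owner=user, is_hidden=False) \
#                   .prefetch_related('product')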
|
[
"kuuhaku112121@gmail.com"
] |
kuuhaku112121@gmail.com
|
d090e5080697eb9ddc699d37c4656032fc8ef74a
|
31f85926c1bbafdb0621a43b320f48be2a1090ff
|
/matrix-cuda/extract_gpu.py
|
315584be16bc7aedece6334b2b66eb32c6cc9b13
|
[] |
no_license
|
subratpp/jetson_yolo
|
f1c3e32812c0c9c65883a4d58b817f5c0bdcc833
|
7e4c0edb55a70353a86e733914819077903b3f00
|
refs/heads/main
| 2023-03-03T10:28:01.331540
| 2021-02-12T17:17:22
| 2021-02-12T17:17:22
| 337,465,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
import re
def gpu_workload(filename):
max_use = 0
with open(filename) as fp:
lines = fp.readlines()
for line in lines:
gpu = re.search(r'GR3D_FREQ (.*?)%', line).group(1)
if int(gpu) > max_use:
max_use = float(gpu)
return max_use
print(gpu_workload("matmul1000.txt"))
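# A hedged sanity check with a fabricated tegrastats-style line; real
# Jetson output varies by model, but GR3D_FREQ is the token the regex
# keys on.
sample = "RAM 2851/3964MB CPU [31%@1224] GR3D_FREQ 76%@921"
print(re.search(r'GR3D_FREQ (.*?)%', sample).group(1))  # prints 76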
|
[
"subratprasad.mail@gmail.com"
] |
subratprasad.mail@gmail.com
|
9346b461b869d42f8809bb42ec48f7438a393149
|
de8e4b8b43cbf1374dd65a028c3e85951a21a11f
|
/fast-exps/lib/models/new_prop_prototype.py
|
02024f940aa1dda7cd534e8ffcd8a261a8f533e6
|
[] |
no_license
|
tcwltcwl/URT
|
626a94d7ad94c712a25602ef30cefb61ff959229
|
edc551f286ac3b0726370db70db7d6b3b0359f36
|
refs/heads/master
| 2023-04-14T04:30:35.526937
| 2021-04-21T06:48:49
| 2021-04-21T06:48:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,673
|
py
|
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import random, math
# TODO: integrate the two scoring functions below into the classes that follow
def get_dotproduct_score(proto, cache, model):
proto_emb = model['linear_q'](proto)
s_cache_emb = model['linear_k'](cache)
raw_score = F.cosine_similarity(proto_emb.unsqueeze(1), s_cache_emb.unsqueeze(0), dim=-1)
return raw_score
def get_mlp_score(proto, cache, model):
n_proto, fea_dim = proto.shape
n_cache, fea_dim = cache.shape
raw_score = model['w']( model['nonlinear'](model['w1'](proto).view(n_proto, 1, fea_dim) + model['w2'](cache).view(1, n_cache, fea_dim) ) )
return raw_score.squeeze(-1)
# this model does not need query, only key and value
class MultiHeadURT_value(nn.Module):
def __init__(self, fea_dim, hid_dim, temp=1, n_head=1):
super(MultiHeadURT_value, self).__init__()
self.w1 = nn.Linear(fea_dim, hid_dim)
self.w2 = nn.Linear(hid_dim, n_head)
self.temp = temp
def forward(self, cat_proto):
# cat_proto n_class*8*512
n_class, n_extractors, fea_dim = cat_proto.shape
raw_score = self.w2(self.w1(cat_proto)) # n_class*8*n_head
score = F.softmax(self.temp * raw_score, dim=1)
return score
class URTPropagation(nn.Module):
def __init__(self, key_dim, query_dim, hid_dim, temp=1, att="cosine"):
super(URTPropagation, self).__init__()
self.linear_q = nn.Linear(query_dim, hid_dim, bias=True)
self.linear_k = nn.Linear(key_dim, hid_dim, bias=True)
#self.linear_v_w = nn.Parameter(torch.rand(8, key_dim, key_dim))
self.linear_v_w = nn.Parameter( torch.eye(key_dim).unsqueeze(0).repeat(8,1,1))
self.temp = temp
self.att = att
        # initialize the projection weights with small random values
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
def forward_transform(self, samples):
bs, n_extractors, fea_dim = samples.shape
'''
if self.training:
w_trans = torch.nn.functional.gumbel_softmax(self.linear_v_w, tau=10, hard=True)
else:
# y_soft = torch.softmax(self.linear_v_w, -1)
# index = y_soft.max(-1, keepdim=True)[1]
index = self.linear_v_w.max(-1, keepdim=True)[1]
y_hard = torch.zeros_like(y_soft, memory_format=torch.legacy_contiguous_format).scatter_(-1, index, 1.0)
w_trans = y_hard
# w_trans = y_hard - y_soft.detach() + y_soft
'''
w_trans = self.linear_v_w
# compute regularization
regularization = w_trans @ torch.transpose(w_trans, 1, 2)
samples = samples.view(bs, n_extractors, fea_dim, 1)
w_trans = w_trans.view(1, 8, fea_dim, fea_dim)
return torch.matmul(w_trans, samples).view(bs, n_extractors, fea_dim), (regularization**2).sum()
def forward(self, cat_proto):
# cat_proto n_class*8*512
# return: n_class*8
n_class, n_extractors, fea_dim = cat_proto.shape
q = cat_proto.view(n_class, -1) # n_class * 8_512
k = cat_proto # n_class * 8 * 512
q_emb = self.linear_q(q) # n_class * hid_dim
k_emb = self.linear_k(k) # n_class * 8 * hid_dim | 8 * hid_dim
if self.att == "cosine":
raw_score = F.cosine_similarity(q_emb.view(n_class, 1, -1), k_emb.view(n_class, n_extractors, -1), dim=-1)
elif self.att == "dotproduct":
raw_score = torch.sum( q_emb.view(n_class, 1, -1) * k_emb.view(n_class, n_extractors, -1), dim=-1 ) / (math.sqrt(fea_dim))
else:
raise ValueError('invalid att type : {:}'.format(self.att))
score = F.softmax(self.temp * raw_score, dim=1)
return score
class MultiHeadURT(nn.Module):
def __init__(self, key_dim, query_dim, hid_dim, temp=1, att="cosine", n_head=1):
super(MultiHeadURT, self).__init__()
layers = []
for _ in range(n_head):
layer = URTPropagation(key_dim, query_dim, hid_dim, temp, att)
layers.append(layer)
self.layers = nn.ModuleList(layers)
def forward(self, cat_proto):
score_lst = []
for i, layer in enumerate(self.layers):
score = layer(cat_proto)
score_lst.append(score)
# n_class * n_extractor * n_head
return torch.stack(score_lst, dim=-1)
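# A hedged shape check (not from the repo): with 5 class prototypes from
# 8 extractors of 512-d features and 2 heads, the module returns one
# softmax weight per extractor per head.
#
#   urt = MultiHeadURT(key_dim=512, query_dim=8 * 512, hid_dim=1024, n_head=2)
#   cat_proto = torch.rand(5, 8, 512)   # n_class x n_extractors x fea_dim
#   score = urt(cat_proto)              # -> shape (5, 8, 2)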
def get_lambda_urt_sample(context_features, context_labels, target_features, num_labels, model, normalize=True):
if normalize:
context_features = F.normalize(context_features, dim=-1)
target_features = F.normalize(target_features, dim=-1)
score_context, urt_context = model(context_features)
score_target, urt_target = model(target_features)
proto_list = []
for label in range(num_labels):
proto = urt_context[context_labels == label].mean(dim=0)
proto_list.append(proto)
urt_proto = torch.stack(proto_list)
# n_samples*8*512
return score_context, urt_proto, score_target, urt_target
def get_lambda_urt_avg(context_features, context_labels, num_labels, model, normalize=True):
if normalize:
context_features = F.normalize(context_features, dim=-1)
proto_list = []
for label in range(num_labels):
proto = context_features[context_labels == label].mean(dim=0)
proto_list.append(proto)
proto = torch.stack(proto_list)
# n_class*8*512
score_proto = model(proto)
# n_extractors * n_head
return torch.mean(score_proto, dim=0)
def apply_urt_avg_selection(context_features, selection_params, normalize, value="sum", transform=None):
selection_params = torch.transpose(selection_params, 0, 1) # n_head * 8
n_samples, n_extractors, fea_dim = context_features.shape
urt_fea_lst = []
if normalize:
context_features = F.normalize(context_features, dim=-1)
regularization_losses = []
for i, params in enumerate(selection_params):
# class-wise lambda
if transform:
trans_features, reg_loss = transform.module.layers[i].forward_transform(context_features)
regularization_losses.append(reg_loss)
else:
trans_features = context_features
if value == "sum":
urt_features = torch.sum(params.view(1,n_extractors,1) * trans_features, dim=1) # n_sample * 512
elif value == "cat":
urt_features = params.view(1,n_extractors,1) * trans_features # n_sample * 8 * 512
urt_fea_lst.append(urt_features)
if len(regularization_losses) == 0:
return torch.stack( urt_fea_lst, dim=1 ).view(n_samples, -1) # n_sample * (n_head * 512) or n_sample * (8 * 512)
else:
return torch.stack( urt_fea_lst, dim=1 ).view(n_samples, -1), sum(regularization_losses)
def apply_urt_selection(context_features, context_labels, selection_params, normalize):
# class-wise lambda
if normalize:
context_features = F.normalize(context_features, dim=-1)
lambda_lst = []
for lab in context_labels:
lambda_lst.append(selection_params[lab])
lambda_tensor = torch.stack(lambda_lst, dim=0)
n_sample, n_extractors = lambda_tensor.shape
urt_features = torch.sum(lambda_tensor.view(n_sample, n_extractors, 1) * context_features, dim=1)
return urt_features
class PropagationLayer(nn.Module):
def __init__(self, input_dim=512, hid_dim=128, temp=1, transform=False):
super(PropagationLayer, self).__init__()
self.linear_q = nn.Linear(input_dim, hid_dim, bias=False)
self.linear_k = nn.Linear(input_dim, hid_dim, bias=False)
self.temp = temp
if transform:
self.transform = nn.Linear(input_dim, input_dim)
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
def forward(self, proto, s_cache, data2nclss, use_topk):
        # nn.Module keeps submodules in _modules, so self.__dict__ never
        # contains 'transform'; hasattr() checks the right place
        if hasattr(self, 'transform'):
proto = self.transform(proto)
s_cache = self.transform(s_cache)
proto_emb = self.linear_q(proto)
s_cache_emb = self.linear_k(s_cache)
raw_score = F.cosine_similarity(proto_emb.unsqueeze(1), s_cache_emb.unsqueeze(0), dim=-1)
score = F.softmax(self.temp * raw_score, dim=1)
prop_proto = torch.matmul( score, s_cache ) # n_class * n_cache @ n_cache * n_dim
if random.random() > 0.99:
print("top_1_idx: {} in {} cache".format(torch.topk(raw_score, 1)[1], len(s_cache)))
print("score: {}".format(score))
print("mean:{}, var:{}, min:{}, max:{}".format(torch.mean(score, dim=1).data, torch.var(score, dim=1).data, torch.min(score, dim=1)[0].data, torch.max(score, dim=1)[0].data))
return raw_score, prop_proto
class MultiHeadPropagationLayer(nn.Module):
def __init__(self, input_dim, hid_dim, temp, transform, n_head):
super(MultiHeadPropagationLayer, self).__init__()
layers = []
for _ in range(n_head):
layer = PropagationLayer(input_dim, hid_dim, temp, transform)
layers.append(layer)
self.layers = nn.ModuleList(layers)
def forward(self, proto, s_cache, data2nclss, use_topk):
raw_score_lst, prop_proto_lst = [], []
for i, layer in enumerate(self.layers):
raw_score, prop_proto = layer(proto, s_cache, data2nclss, use_topk)
if torch.isnan(raw_score).any() or torch.isnan(prop_proto).any(): import pdb; pdb.set_trace()
raw_score_lst.append(raw_score)
prop_proto_lst.append(prop_proto)
return torch.stack(raw_score_lst, dim=0).mean(0), torch.stack(prop_proto_lst, dim=0).mean(0)
def get_prototypes(features, labels, num_labels, model, cache):
proto_list = []
for label in range(num_labels):
proto = features[labels == label].mean(dim=0)
proto_list.append(proto)
proto = torch.stack(proto_list)
num_devices = torch.cuda.device_count()
num_slots, feature_dim = cache.shape
cache_for_parallel = cache.view(1, num_slots, feature_dim).expand(num_devices, num_slots, feature_dim)
raw_score, prop_proto = model(proto, cache_for_parallel)
return raw_score, proto, prop_proto
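# A hedged end-to-end sketch (not from the repo) tying the pieces above
# together under assumed shapes: 40 support samples, 8 extractors, 512-d
# features, 5 classes, 2 URT heads. The __main__ guard keeps it inert on
# import.
if __name__ == '__main__':
    ctx = torch.rand(40, 8, 512)
    lbl = torch.randint(0, 5, (40,))
    urt = MultiHeadURT(key_dim=512, query_dim=8 * 512, hid_dim=1024, n_head=2)
    lam = get_lambda_urt_avg(ctx, lbl, num_labels=5, model=urt, normalize=True)  # (8, 2)
    fused = apply_urt_avg_selection(ctx, lam, normalize=True, value="sum")       # (40, 1024)
    print(lam.shape, fused.shape)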
|
[
"lu.liu-10@student.uts.edu.au"
] |
lu.liu-10@student.uts.edu.au
|
720b1ea6f006745bee5abcb72de9509ce028d5e6
|
a73cbe345107af388253ab3437992f09b1a033ed
|
/Aula 8/Aula8-Tuplas.py
|
04c116c1b911114ca5cfc5c9a746ea0c7bd79cc6
|
[
"MIT"
] |
permissive
|
ohanamirella/TrabalhosPython
|
09c586f897cf4ae33a1e49c8340883b17df832d2
|
453a97848654b0391d0d717bf102f6c466f79b3d
|
refs/heads/master
| 2020-09-05T22:54:46.245904
| 2020-02-03T14:51:39
| 2020-02-03T14:51:39
| 220,237,145
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
# Lesson 8 - 18-11-2019
# Tuples
numeros = [1,4,6]
usuario = {'nome':'user', 'passwd':123456 }
pessoa = ('maykon','granemann',0, 45.5, numeros)
# print(numeros)
# print(usuario)
# print(pessoa)
lista = [1]
numeros[1] = 5
usuario['passwd'] = 456123
lista_pessoas = []
lista_pessoas.append(pessoa)
#pessoa[4][1] = 6
print(pessoa[4][1])
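# A hedged classroom-style extension (not in the original lesson): the
# tuple itself is immutable, but the list stored inside it can still be
# mutated in place.
try:
    pessoa[0] = 'other'
except TypeError as erro:
    print('tuples are immutable:', erro)
pessoa[4][1] = 6     # legal: mutates the nested list, not the tuple
print(pessoa[4])     # [1, 6, 6]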
|
[
"900153@proway.treina"
] |
900153@proway.treina
|