blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
808efbb3e5a50d252926f34dd42f8d2f275a33a6
|
b80ee603f5fde501795e026ef2b122baf5c57c9d
|
/pre_commit_hooks/fix_byte_order_marker.py
|
1ffe047de80c3b981b56d37ac9d0c8ba34d4089e
|
[
"MIT"
] |
permissive
|
ADTRAN/pre-commit-hooks
|
384656043c75f70aae7e452c13ad61cb2cfb455a
|
73254720098abd062a99074496e5b19eeba7e1d9
|
refs/heads/master
| 2023-08-07T03:58:03.705712
| 2021-10-11T20:54:25
| 2021-10-11T20:54:25
| 416,055,424
| 0
| 1
|
MIT
| 2021-10-11T20:54:26
| 2021-10-11T19:12:40
|
Python
|
UTF-8
|
Python
| false
| false
| 797
|
py
|
import argparse
from typing import Optional
from typing import Sequence
def main(argv: Optional[Sequence[str]] = None) -> int:
    """Strip a UTF-8 byte-order marker from each named file.

    Returns 1 if any file was rewritten (pre-commit convention: non-zero
    means the hook modified files), otherwise 0.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*', help='Filenames to check')
    args = parser.parse_args(argv)
    retv = 0
    for filename in args.filenames:
        with open(filename, 'rb') as f_b:
            bts = f_b.read(3)
        if bts == b'\xef\xbb\xbf':
            # Re-read as text with the BOM-aware codec, then rewrite without it.
            with open(filename, newline='', encoding='utf-8-sig') as f:
                contents = f.read()
            with open(filename, 'w', newline='', encoding='utf-8') as f:
                f.write(contents)
            # BUG FIX: the message printed the literal '(unknown)' instead of
            # naming the file that was modified.
            print(f'{filename}: removed byte-order marker')
            retv = 1
    return retv
if __name__ == '__main__':
    # exit() is injected by the site module and may be absent under -S;
    # raising SystemExit directly is the portable spelling.
    raise SystemExit(main())
|
[
"asottile@umich.edu"
] |
asottile@umich.edu
|
d1ea9e31207bbebbd7ed7889663442e0c8b6193c
|
2eddcd036a85d040cb2f45adac41efb1cf2eacff
|
/problem_36.py
|
110651f9c305495944d7390e66f5a737cf0bd44b
|
[] |
no_license
|
pmaddi/euler
|
1a16869976054faa36f3fb0aa5ff3d802b1982dd
|
17f8a898b1ab0fdb0e81f72e9ca4711f119e5829
|
refs/heads/master
| 2021-12-28T12:38:40.280036
| 2021-12-25T22:46:50
| 2021-12-25T22:46:50
| 127,155,664
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
if __name__ == '__main__':
    # Project Euler 36: sum all numbers below one million that are
    # palindromic in both base 10 and base 2 (no leading zeros in either).
    def is_pal(s):
        """True if the string reads the same forwards and backwards."""
        return s == s[::-1]

    total = 0
    for n in range(1, 10**6):
        if is_pal(str(n)) and is_pal(bin(n)[2:]):
            total += n
    print(total)
|
[
"pranav.maddi@gmail.com"
] |
pranav.maddi@gmail.com
|
1a6b37e19acff97cd68240b8c35ca25fe60944da
|
51f536ae42397da7826a32b942c88e48d95e9f3c
|
/examples/dft/00-simple_dft.py
|
c19b209739a278909fa76796ff2c93fd15a976f7
|
[
"BSD-2-Clause"
] |
permissive
|
xlzan/pyscf
|
8f3b6e3e4b1de27313f99bc94b4aba15e1c84ff7
|
81606c8f384ff1da98a7aa4c817021a78302110a
|
refs/heads/master
| 2020-03-15T01:41:22.938983
| 2018-04-19T19:41:18
| 2018-04-19T19:41:18
| 131,899,354
| 1
| 0
|
BSD-2-Clause
| 2018-05-02T19:55:17
| 2018-05-02T19:55:16
| null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from pyscf import gto, dft
'''
A simple example to run DFT calculation.
See pyscf/dft/vxc.py for the complete list of available XC functional
'''
# Build the molecule: H-F with a 1.1 Angstrom bond, 6-31G basis, symmetry on.
mol = gto.Mole()
mol.build(
    atom = 'H 0 0 0; F 0 0 1.1',  # in Angstrom
    basis = '631g',
    symmetry = True,
)
# Restricted Kohn-Sham DFT object for the molecule.
mydft = dft.RKS(mol)
# Alternative exchange-correlation functionals (uncomment one to try it):
#mydft.xc = 'lda,vwn'
#mydft.xc = 'lda,vwn_rpa'
#mydft.xc = 'b86,p86'
#mydft.xc = 'b88,lyp'
#mydft.xc = 'b97,pw91'
#mydft.xc = 'b3p86'
#mydft.xc = 'o3lyp'
mydft.xc = 'b3lyp'
# Run the calculation.
mydft.kernel()
# Orbital energies, Mulliken population etc.
mydft.analyze()
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
ce1625f50652b0d101a5a0d9b7cb7f38aa6631e1
|
63768dc92cde5515a96d774a32facb461a3bf6e9
|
/jacket/db/compute/sqlalchemy/migrate_repo/versions/230_add_details_column_to_instance_actions_events.py
|
8079a2af04d72e10f2b44e7f7c34eb703f98d723
|
[
"Apache-2.0"
] |
permissive
|
ljZM33nd/jacket
|
6fe9156f6f5789e5c24425afa7ce9237c302673d
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
refs/heads/master
| 2023-04-16T11:02:01.153751
| 2016-11-15T02:48:12
| 2016-11-15T02:48:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
# Copyright 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import utils
from sqlalchemy import Column, String, Text
from jacket.db.compute.sqlalchemy import api
def upgrade(migrate_engine):
    """Add 'host' and 'details' columns to instance_actions_events and to
    its shadow table (shadow columns are copies of the same definitions)."""
    new_columns = (Column('host', String(255)), Column('details', Text))

    actions_events = utils.get_table(migrate_engine, 'instance_actions_events')
    for column in new_columns:
        actions_events.create_column(column)

    shadow_actions_events = utils.get_table(
        migrate_engine, api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    for column in new_columns:
        shadow_actions_events.create_column(column.copy())
|
[
"nkapotoxin@gmail.com"
] |
nkapotoxin@gmail.com
|
6a2ad476a403a0d861a3051455c2906fc5c0ad6c
|
88509a8ce62a22acc0639c683900d5d0cb8d69e7
|
/Day23/orm/app/migrations/0002_customer.py
|
227e5263d58c0d580aa8aa135326246264832abf
|
[] |
no_license
|
pytutorial/py2104
|
8b0238ab6f6d2f5395aee5fbe1f4aff03b819cd3
|
48b36d6b1f40730ef2747c310e70fb6997eda388
|
refs/heads/main
| 2023-09-03T16:55:02.285158
| 2021-10-20T05:24:31
| 2021-10-20T05:24:31
| 391,613,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
# Generated by Django 3.2 on 2021-08-08 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Customer model (phone is unique; id is a BigAutoField)."""

    # Must run after the app's initial migration.
    dependencies = [
        ('app', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone', models.CharField(max_length=20, unique=True)),
                ('name', models.CharField(max_length=100)),
                ('address', models.CharField(max_length=200)),
            ],
        ),
    ]
|
[
"duongthanhtungvn01@gmail.com"
] |
duongthanhtungvn01@gmail.com
|
89dd3969b7cbd4d20538ffbf26e73d72bc1c12a8
|
e54867ad23f1c07ebc7632125bb408c3f8294cc0
|
/camera-calibration/calibrated_camera.py
|
caceca1e15189841665f732f3bbd199d27b18f36
|
[] |
no_license
|
pi-test/foo
|
ea2a651e83224ea3616d20dba483470e439b40ec
|
2a0bdf0db7fedd95a1133636067890ff8fe68e51
|
refs/heads/master
| 2020-09-07T08:13:35.363352
| 2019-11-09T23:50:01
| 2019-11-09T23:50:01
| 220,718,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
import sys
import yaml
import cv2
import numpy as np
# Load intrinsics (camera matrix + distortion coefficients) saved by the
# calibration step.
with open("data.yaml", "r") as stream:
    # NOTE(security/review): yaml.load without an explicit Loader can
    # construct arbitrary objects; prefer yaml.safe_load if data.yaml may
    # come from an untrusted source.
    data = yaml.load(stream)
mtx = data["camera_matrix"]
mtx = np.asarray(mtx)
dist = data["dist_coeff"]
dist = np.asarray(dist)

imagePath = sys.argv[1]
img = cv2.imread(imagePath)
h, w = img.shape[:2]
cv2.imshow("preview", img)
cv2.waitKey(0)

# get undistort matrix and pixel matrix
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
print("===================================================")
print("Valid Pixel ROI:")
# BUG FIX: 'print roi' is Python 2 statement syntax and a SyntaxError on
# Python 3; every other print in this script already uses the call form.
print(roi)
print("===================================================")

# undistort
dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
# crop the image to the valid region of interest
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv2.imshow("undistort", dst)
cv2.imwrite('img/undistort.jpg', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"sosorry@raspberrypi.com.tw"
] |
sosorry@raspberrypi.com.tw
|
f063b29223c2e1574d1892868e349fa6ff05419f
|
e7116c13ba14d65e2687f47d4e08b8d67ed89cb8
|
/run.py
|
09e751e2b7eaf0278623259c61506d3b1c849418
|
[] |
no_license
|
trzp/target_tracker
|
bc3ccdd4c4fa3701f60db3b8d4346544b4dbe7cf
|
199a730576c5e20345af8af602ad8e4f2c1cc6dc
|
refs/heads/master
| 2020-05-22T06:29:05.786585
| 2019-05-15T11:20:06
| 2019-05-15T11:20:06
| 186,254,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,870
|
py
|
import numpy as np
import cv2
import sys
from time import time
import kcftracker
# Shared UI state mutated by the mouse callback and read by the main loop.
selectingObject = False  # left button held down: user is dragging a box
initTracking = False     # a box was chosen: (re)initialise the tracker
onTracking = False       # tracker is running on incoming frames
ix, iy, cx, cy = -1, -1, -1, -1  # box origin / current mouse position
w, h = 0, 0              # selected box width and height
inteval = 1              # cv2.waitKey delay in ms (sic: "interval")
duration = 0.01          # smoothed per-frame tracker time in seconds
# mouse callback function
def draw_boundingbox(event, x, y, flags, param):
    """OpenCV mouse callback: drag the left button to select a tracking box;
    right-click to re-seed the last box centred on the cursor."""
    global selectingObject, initTracking, onTracking, ix, iy, cx, cy, w, h

    if event == cv2.EVENT_LBUTTONDOWN:
        # Start a new selection at the press position.
        selectingObject, onTracking = True, False
        ix, iy = x, y
        cx, cy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        cx, cy = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        selectingObject = False
        # Only accept boxes bigger than 10x10 pixels.
        if abs(x - ix) > 10 and abs(y - iy) > 10:
            w, h = abs(x - ix), abs(y - iy)
            ix, iy = min(x, ix), min(y, iy)
            initTracking = True
        else:
            onTracking = False
    elif event == cv2.EVENT_RBUTTONDOWN:
        onTracking = False
        if w > 0:
            # Re-centre the previous box on the click point.
            ix, iy = x - w/2, y - h/2
            initTracking = True
if __name__ == '__main__':
    # Video source: default camera, a camera index, or a video file path.
    if len(sys.argv) == 1:
        cap = cv2.VideoCapture(0)
    elif len(sys.argv) == 2:
        if sys.argv[1].isdigit():  # True if sys.argv[1] is str of a nonnegative integer
            cap = cv2.VideoCapture(int(sys.argv[1]))
        else:
            cap = cv2.VideoCapture(sys.argv[1])
            inteval = 30
    else:
        assert 0, "too many arguments"

    tracker = kcftracker.KCFTracker(True, True, True)  # hog, fixed_window, multiscale
    # if you use hog feature, there will be a short pause after you draw a
    # first boundingbox, that is due to the use of Numba.
    cv2.namedWindow('tracking')
    cv2.setMouseCallback('tracking', draw_boundingbox)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if selectingObject:
            cv2.rectangle(frame, (ix, iy), (cx, cy), (0, 255, 255), 1)
        elif initTracking:
            cv2.rectangle(frame, (ix, iy), (ix + w, iy + h), (0, 255, 255), 2)
            tracker.init([ix, iy, w, h], frame)
            initTracking = False
            onTracking = True
        elif onTracking:
            t0 = time()
            boundingbox = tracker.update(frame)
            t1 = time()
            # BUG FIX: map() returns a lazy iterator on Python 3, so the
            # original boundingbox[0] raised TypeError; materialise a list.
            boundingbox = list(map(int, boundingbox))
            cv2.rectangle(frame, (boundingbox[0], boundingbox[1]),
                          (boundingbox[0] + boundingbox[2],
                           boundingbox[1] + boundingbox[3]),
                          (0, 255, 255), 1)
            # Exponential moving average of the per-frame tracking time.
            duration = 0.8 * duration + 0.2 * (t1 - t0)
            #duration = t1-t0
            cv2.putText(frame, 'FPS: ' + str(1 / duration)[:4].strip('.'),
                        (8, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        cv2.imshow('tracking', frame)
        c = cv2.waitKey(inteval) & 0xFF
        if c == 27 or c == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
|
[
"mrtang@nudt.edu.cn"
] |
mrtang@nudt.edu.cn
|
c552a5b4b970b671de29935c6c7fec152f9fbb0f
|
77311ad9622a7d8b88707d7cee3f44de7c8860cb
|
/res_bw/scripts/common/lib/plat-mac/carbon/ah.py
|
59522d3bebbfc99e90dd2d0ea96f851b13826555
|
[] |
no_license
|
webiumsk/WOT-0.9.14-CT
|
9b193191505a4560df4e872e022eebf59308057e
|
cfe0b03e511d02c36ce185f308eb48f13ecc05ca
|
refs/heads/master
| 2021-01-10T02:14:10.830715
| 2016-02-14T11:59:59
| 2016-02-14T11:59:59
| 51,606,676
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 355
|
py
|
# 2016.02.14 12:50:05 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/plat-mac/Carbon/AH.py
from _AH import *
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\plat-mac\carbon\ah.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:50:05 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
7f4469f9d0c7551cc80ccda897e83bc80b8bb373
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/third_party/typeshed/third_party/2and3/geoip2/database.pyi
|
7a8991160162cd32b115800bc513ea9dfd3aaeac
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,094
|
pyi
|
from types import TracebackType
from typing import Optional, Sequence, Text, Type
from maxminddb.reader import Metadata
from geoip2.models import AnonymousIP, ASN, City, ConnectionType, Country, Domain, Enterprise, ISP
_Locales = Optional[Sequence[Text]]
class Reader:
    # Type stub for geoip2.database.Reader: a context-manager wrapper around
    # a MaxMind database file with one typed lookup method per DB flavour.
    def __init__(self, filename: Text, locales: _Locales = ..., mode: int = ...) -> None: ...
    def __enter__(self) -> Reader: ...
    def __exit__(self, exc_type: Optional[Type[BaseException]] = ..., exc_val: Optional[BaseException] = ..., exc_tb: Optional[TracebackType] = ...) -> None: ...
    # Lookup methods, each returning the model for its database flavour.
    def country(self, ip_address: Text) -> Country: ...
    def city(self, ip_address: Text) -> City: ...
    def anonymous_ip(self, ip_address: Text) -> AnonymousIP: ...
    def asn(self, ip_address: Text) -> ASN: ...
    def connection_type(self, ip_address: Text) -> ConnectionType: ...
    def domain(self, ip_address: Text) -> Domain: ...
    def enterprise(self, ip_address: Text) -> Enterprise: ...
    def isp(self, ip_address: Text) -> ISP: ...
    def metadata(self) -> Metadata: ...
    def close(self) -> None: ...
|
[
"srusskih@users.noreply.github.com"
] |
srusskih@users.noreply.github.com
|
84e03392e7e42e41051379fa972bca8d338527b6
|
6dd2d509d44ea035da9d2a9f6cc9797724c12484
|
/run/Cooling/CoolingTest_JHW/plot.py
|
4a01e40fc1d6f0193868ce6e108dd44bcdca1845
|
[
"NCSA",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
appolloford/enzo-dev
|
ea9ebc98036c6e5be0c98ebb903448a354cb4aaf
|
2b20d1c9ee5b9b4ee6706a73e32d2e4a8b7fc8f5
|
refs/heads/master
| 2023-08-06T01:18:34.631354
| 2021-10-25T17:06:06
| 2021-10-25T17:06:06
| 228,542,764
| 2
| 1
|
NOASSERTION
| 2019-12-17T05:50:13
| 2019-12-17T05:50:12
| null |
UTF-8
|
Python
| false
| false
| 3,238
|
py
|
from matplotlib import pyplot
from yt.mods import *
import numpy as na
def _H_NumberDensity(field, data):
    "Get total Hydrogen nuclei number density."
    # No species fields tracked: assume a fixed hydrogen mass fraction of
    # 0.76 of the total gas density.
    if data.pf['MultiSpecies'] == 0:
        return (0.76 * data['Density'])
    fieldData = na.zeros(data['HI_Density'].shape,
                         dtype=data['HI_Density'].dtype)
    # Sum the mass densities of every H-bearing species that is tracked at
    # this MultiSpecies level.
    if data.pf['MultiSpecies'] > 0:
        fieldData += data["HI_Density"]
        fieldData += data["HII_Density"]
    if data.pf["MultiSpecies"] > 1:
        fieldData += data["HM_Density"]
        fieldData += data["H2I_Density"]
        fieldData += data["H2II_Density"]
    if data.pf["MultiSpecies"] > 2:
        # Divided by 3 -- presumably because HD has mass number 3 but only
        # one hydrogen nucleus; TODO confirm against Enzo's species units.
        fieldData += data["HDI_Density"] / 3.0
    return fieldData
def _ConvertHNumberDensity(data):
return (1 / 1.67e-24)
# Register the derived field with yt so grids expose 'H_NumberDensity'.
add_field("H_NumberDensity", units=r"\rm{cm}^{-3}",
          function=_H_NumberDensity,
          convert_function=_ConvertHNumberDensity)
def plot_cooling_rate(input_file, coordinates, axes, labels=None):
    "Plot cooling rate vs. T for various densities and metallicities."
    pf = load(input_file)
    # Use the first (only) grid of the dataset.
    grid = pf.h.grids[0]
    # Cooling rate normalised by n_H^2: E_gas * rho / t_cool / n_H^2.
    cooling = grid['Gas_Energy'] * grid['Density'] / grid['Cooling_Time'] / \
        grid['H_NumberDensity']**2
    for q, coord in enumerate(coordinates):
        if labels is None:
            # Auto-label from the density/metallicity at this coordinate,
            # extended with a trailing 0 index -- presumably the axis that
            # varies with temperature; TODO confirm the grid layout.
            my_coord = list(coord)
            my_coord.append(0)
            my_coord = tuple(my_coord)
            label = "log(n$_{\mathrm{H}}$/cm$^{-3}$) = %.1f, log(Z/Z$_{\odot}$) = %.1f" % \
                (na.log10(grid['H_NumberDensity'][my_coord]),
                 na.log10(grid['Metallicity'][my_coord]))
        else:
            label = labels[q]
        axes.loglog(grid['Temperature'][coord], cooling[coord], label=label)
def plot_cooling_solutions(axes):
    """
    Plot some known cooling rates:
    1. CIE atomic H/He (Black 1981).
    2. Z = 0.5, 1 Z_sun (Sarazin & White 1987).

    Both data files hold log10 values (T in column 0, rates after); lines
    starting with '#' or blank are skipped.
    """
    # BUG FIX: the Python 2 builtin file() no longer exists; use open()
    # inside context managers so the data files are closed deterministically.
    t_hhe = []
    c_hhe = []
    with open("primordial_cie.dat") as black1981:
        for line in black1981:
            if not line.startswith('#') and len(line) > 1:
                online = line.split()
                t_hhe.append(float(online[0]))
                c_hhe.append(float(online[1]))
    t_hhe = na.power(10, t_hhe)
    c_hhe = na.power(10, c_hhe)

    t_sz = []
    c1_sz = []
    c2_sz = []
    with open("cool_rates.in") as sz1987:
        for line in sz1987:
            if not line.startswith('#') and len(line) > 1:
                online = line.split()
                t_sz.append(float(online[0]))
                c1_sz.append(float(online[1]))
                c2_sz.append(float(online[2]))
    t_sz = na.power(10, t_sz)
    c1_sz = na.power(10, c1_sz)
    c2_sz = na.power(10, c2_sz)

    #axes.loglog(t_sz, c2_sz, label='Z = 0.5 Z$_{\odot}$ (Sarazin & White 1987)')
    axes.loglog(t_sz, c1_sz, label='Z = Z$_{\odot}$ (Sarazin & White 1987)')
    axes.loglog(t_hhe, c_hhe, label='H/He (Black 1981)')
# Build the figure: computed cooling rate vs. published reference curves,
# then save it to cooling_rates.png.
pyplot.clf()
axes = pyplot.axes()
axes.set_xlabel('T [K]')
axes.set_ylabel('$\Lambda/n_{H}^{2}$ [erg s$^{-1}$ cm$^{3}$]')
plot_cooling_rate('DD0001/DD0001', [(1, 4)], axes,
                  labels=['JHW, Z = Z$_{\odot}$'])
plot_cooling_solutions(axes)
axes.set_xlim(10, 1e8)
axes.legend(prop=dict(size=10), loc='best')
pyplot.savefig('cooling_rates.png')
|
[
"brittonsmith@gmail.com"
] |
brittonsmith@gmail.com
|
b900cba8ee80ef45c51c074ce98053f0c32d3110
|
359496fc90720875cca962b37006551282533ef8
|
/src/andres/graph/python/module/__init__.py
|
8045832ed6519c33662e787a55e456781eb9d87b
|
[] |
no_license
|
DerThorsten/graph
|
66858c6f4bd9a40cc355549138fea2da8120b759
|
7c3a10b446e3ade9ba67dcdb7880bd0798bb2ec3
|
refs/heads/master
| 2020-04-01T21:46:42.806967
| 2016-01-04T11:52:50
| 2016-01-04T11:52:50
| 48,331,910
| 0
| 0
| null | 2015-12-20T18:12:14
| 2015-12-20T18:12:12
| null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
from _graph import *
import numpy
def _injectorClass(clsToExtend):
    # Build an "injector" base class: subclassing the returned class copies
    # the subclass body's attributes onto clsToExtend itself (monkey-patching
    # the compiled _graph class) instead of creating a normal subclass.
    # NOTE(review): this relies on the Python 2 __metaclass__ hook; Python 3
    # ignores a class attribute named __metaclass__, so no injection would
    # happen there -- confirm which interpreter this package targets.
    class InjectorClass(object):
        class __metaclass__(clsToExtend.__class__):
            def __init__(self, name, bases, dict):
                for b in bases:
                    if type(b) not in (self, type):
                        for k,v in dict.items():
                            setattr(b,k,v)
                tmp = type.__init__(self, name, bases, dict)
    return InjectorClass
# All lifted multicut model classes exported by the compiled _graph module.
_LiftedMcModelClasses = [
    LiftedMcModelGridGraph2D,LiftedMcModelGridGraph3D,LiftedMcModelGraph
]
# Inject a friendlier setCosts onto each model class: it coerces uv/costs
# to the dtypes the compiled _setCosts expects before delegating.
for objCls in _LiftedMcModelClasses:
    class _MoreLiftedMcModel(_injectorClass(objCls),objCls):
        def setCosts(self, uv, costs, overwrite = True):
            _uv = numpy.require(uv, dtype='uint64')
            _costs = numpy.require(costs, dtype='float32')
            self._setCosts(_uv, _costs, bool(overwrite))
def gridGraph(shape):
    """Construct a 2D or 3D grid graph from a length-2 or length-3 shape."""
    dims = [int(s) for s in shape]
    if len(dims) == 2:
        return GridGraph2D(dims[0], dims[1])
    if len(dims) == 3:
        return GridGraph3D(dims[0], dims[1], dims[2])
    raise RuntimeError("shape has wrong length, GridGraph is only exported to python for 2D and 3D grids")
def liftedMcModel(graph):
    """Build the lifted multicut model matching the given graph's type."""
    dispatch = (
        (GridGraph2D, LiftedMcModelGridGraph2D),
        (GridGraph3D, LiftedMcModelGridGraph3D),
        (Graph, LiftedMcModelGraph),
    )
    for graph_cls, model_cls in dispatch:
        if isinstance(graph, graph_cls):
            return model_cls(graph)
    raise RuntimeError("graph has wrong type")
|
[
"thorsten.beier@iwr.uni-heidelberg.de"
] |
thorsten.beier@iwr.uni-heidelberg.de
|
8f1135a4fe1bd810fef43a3c51e1e6e339127726
|
38558ac2e78837e7f975364f03a1f55fb02103af
|
/OBJECT ORIENTED PROGRAMMING IN PYTHON/encap1.py
|
a4199e1abcb8eca386472268a776fb523aea2f59
|
[] |
no_license
|
SOURADEEP-DONNY/WORKING-WITH-PYTHON
|
a0bc2ff5ddab1b25563927c8f361c6512683d6ff
|
5198d14f0711a3ba7f2fe8bac61d6404c20ea40c
|
refs/heads/master
| 2023-07-14T04:49:08.399519
| 2021-08-29T15:22:33
| 2021-08-29T15:22:33
| 270,723,307
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
class car:
    """A car with a speed and a colour, exposed through accessor methods.

    Attributes are public, so direct assignment (``obj.speed = x``)
    bypasses the setter and is reflected by ``get_speed``.
    """

    def __init__(self, speed, color):
        self.speed = speed
        self.color = color

    def set_speed(self, value):
        """Replace the current speed."""
        self.speed = value

    def get_speed(self):
        """Return the current speed."""
        return self.speed
# Demonstration: with public attributes, direct writes bypass set_speed(),
# so the getters below reflect the raw values assigned (900 and "NULL").
maruti=car(200,'blue')
bmw=car(500,'black')
TATA_safari=car(450,'maroon')
print(TATA_safari.get_speed())
print(bmw.get_speed())
print(maruti.get_speed())
TATA_safari.speed=900
bmw.speed="NULL"
print(TATA_safari.get_speed())
print(bmw.get_speed())
print(maruti.get_speed())
print("\n\n")
# NOW DATA GETTING PROTECTION
class car:
    """Variant of ``car`` whose state uses double-underscore names.

    Name mangling stores the speed as ``_car__speed``, so an external
    ``obj.speed = x`` creates a *new* attribute and leaves the value
    returned by ``get_speed`` untouched.
    """

    def __init__(self, speed, color):
        self.__speed = speed  # Making it Private
        self.__color = color  # Making it Private

    def set_speed(self, value):
        """Update the private speed through the sanctioned setter."""
        self.__speed = value

    def get_speed(self):
        """Return the private speed."""
        return self.__speed
# Demonstration: with name-mangled attributes, obj.speed = ... creates an
# unrelated attribute, so get_speed() still returns the original values.
maruti=car(200,'blue')
bmw=car(500,'black')
TATA_safari=car(450,'maroon')
print(TATA_safari.get_speed())
print(bmw.get_speed())
print(maruti.get_speed())
TATA_safari.speed=900
bmw.speed="NULL"
print(TATA_safari.get_speed())
print(bmw.get_speed())
print(maruti.get_speed())
#clearly no security
|
[
"noreply@github.com"
] |
SOURADEEP-DONNY.noreply@github.com
|
3b5d898fbb6e2df88a271a412c2870043225234e
|
b7843e20aec7f6f60934ce2ea3ce691d4e9046cf
|
/[项目二] seiya/seiya/seiya/analysis/job.py
|
ee18b83280aac4a91c173fca71ac2e3cab0832bb
|
[] |
no_license
|
redsnowc/challenge
|
a54a80012beed5ffe264cb09b01e1c8aaeef5384
|
c097af045228c51290eae03428b6c6135bd0a5d2
|
refs/heads/master
| 2020-05-03T08:50:43.085943
| 2019-10-30T07:44:00
| 2019-10-30T07:44:00
| 178,537,982
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
from seiya.db.job import Job, session
from sqlalchemy.sql import func
def analysis_top10_city():
    """Return the top 10 cities by job count as [{'name': ..., 'amount': ...}]."""
    count_expr = func.count(Job.city)
    rows = (session.query(Job.city, count_expr)
            .group_by(Job.city)
            .order_by(count_expr.desc())
            .all())
    return [{'name': city, 'amount': amount} for city, amount in rows][:10]
def analysis_top10_wage():
    """Return the top 10 cities by mean mid-point salary as
    [{'name': ..., 'salary': ...}]."""
    avg_expr = func.avg((Job.salary_upper + Job.salary_lower) / 2)
    rows = (session.query(Job.city, avg_expr)
            .group_by(Job.city)
            .order_by(avg_expr.desc())
            .all())
    return [{'name': city, 'salary': float(salary)}
            for city, salary in rows][:10]
|
[
"nuanyang.44@gmail.com"
] |
nuanyang.44@gmail.com
|
654adeff233277840881df8e58bf441d22ccbea3
|
7807bb168d52a2f292e81a5ffcfd00f16dacffed
|
/source/scripts/wordindex.py
|
c94239358b2c0de054c9319e5c58458516fdc1b6
|
[
"MIT"
] |
permissive
|
paulscottrobson/rpl-c
|
a248dbf1d3c2b4481fd8371d7faa0827596e1e03
|
2ee72fd0a3381c8b57e7b3af1080733f76e4781d
|
refs/heads/master
| 2020-12-05T11:10:45.403374
| 2020-01-24T16:34:06
| 2020-01-24T16:34:06
| 232,091,109
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,838
|
py
|
# *****************************************************************************
# *****************************************************************************
#
# Name : wordindex.py
# Purpose : Allocate each keyword a specific, final identifier ID.
# Author : Paul Robson (paul@robsons.org.uk)
# Date : 15th January 2020
#
# *****************************************************************************
# *****************************************************************************
import re
# *****************************************************************************
#
# Create a hash mapping word to ID.
#
# *****************************************************************************
class WordIndex(object):
    """Lazily builds and caches the keyword -> ID mapping for RPL-C.

    The mapping is parsed once from the text table returned by raw() and
    stored in the class attribute WordIndex.INDEX (initialised to None
    after the class definition), so all instances share one dictionary.
    """
    def __init__(self):
        if WordIndex.INDEX is None:
            x = {}
            # Each line of raw() has the form "<id> ::: <word>".
            elements = self.raw().split("\n")
            for e in elements:
                m = re.match("^\\s*(\d+)\\s*\\:\\:\\:\\s*(.*)$",e)
                assert m is not None,"Bad line "+e
                assert m.group(2).strip() not in x,"Duplicate "+e
                x[m.group(2).strip()] = int(m.group(1))
            WordIndex.INDEX = x
    def get(self):
        """Return the shared word -> ID dictionary."""
        return WordIndex.INDEX
    # *****************************************************************************
    #
    #   RPL-C's word index. This is manually maintained and does not need
    #   to be ordered. It does need to be consistent.
    #
    # *****************************************************************************
    def raw(self):
        # NOTE: the trailing .upper() means every word is stored upper-cased.
        return """
0 ::: !
1 ::: $$!handler
2 ::: $$&handler
3 ::: $$@handler
4 ::: $$call
5 ::: $$comment
6 ::: $$define
7 ::: $$literal
8 ::: $$nextline
9 ::: $$string
10 ::: *
11 ::: +
12 ::: +!
13 ::: -
14 ::: -1
15 ::: ..
16 ::: /
17 ::: 0
18 ::: 0<
19 ::: 0=
20 ::: 1
21 ::: 1+
22 ::: 1-
23 ::: 10
24 ::: 100
25 ::: 1024
26 ::: 127
27 ::: 128
28 ::: 15
29 ::: 16
30 ::: 16*
31 ::: 16/
32 ::: 2
33 ::: 2*
34 ::: 2+
35 ::: 2-
36 ::: 2/
37 ::: 24
38 ::: 255
39 ::: 256
40 ::: 256*
41 ::: 256/
42 ::: 3
43 ::: 32
44 ::: 32767
45 ::: 32768
46 ::: 4
47 ::: 4*
48 ::: 4/
49 ::: 4096
50 ::: 5
51 ::: 512
52 ::: 63
53 ::: 64
54 ::: 8
55 ::: 8*
56 ::: 8/
57 ::: ;
58 ::: <
59 ::: <=
60 ::: <>
61 ::: =
62 ::: >
63 ::: >=
64 ::: ?dup
65 ::: @
66 ::: abs
67 ::: alloc
68 ::: and
69 ::: assert
70 ::: bswap
71 ::: c!
72 ::: c@
73 ::: clr
74 ::: drop
75 ::: dup
76 ::: else
77 ::: end
78 ::: endif
79 ::: for
80 ::: if
81 ::: index
82 ::: list
83 ::: max
84 ::: min
85 ::: mod
86 ::: negate
87 ::: new
88 ::: next
89 ::: nip
90 ::: not
91 ::: or
92 ::: over
93 ::: repeat
94 ::: rnd
95 ::: rot
96 ::: run
97 ::: sgn
98 ::: stop
99 ::: swap
100 ::: sys
101 ::: to.string
102 ::: until
103 ::: vlist
104 ::: xbreak
105 ::: xdump
106 ::: xor
107 ::: save
108 ::: load
109 ::: $$index
110 ::: old
111 ::: $$hexliteral
112 ::: fast
113 ::: slow
""".strip().upper()
# Lazy cache: the first WordIndex() construction fills this in.
WordIndex.INDEX = None

if __name__ == "__main__":
    print(WordIndex().get())
|
[
"paulscottrobson@gmail.com"
] |
paulscottrobson@gmail.com
|
95ce651702a1657e575cdcc81117c4f133541ea6
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/116/usersdata/207/26239/submittedfiles/al1.py
|
8de58f1eaa677aefe83a1c90ace636c4dc4d4889
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from __future__ import division
# BUG FIX: the first prompt string was missing its opening quote (a
# SyntaxError), and the volume used 3*14159 where pi (3.14159) was clearly
# intended.
r=float(input('digite um valor para raio de uma lata:'))
a=float(input('digite um valor para altura de uma lata:'))
# Cylinder volume: pi * r^2 * h. (Computed but never printed, as in the
# original submission.)
v=(3.14159*r*r*a)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
971fc3a8143a232c52ec212094e29eb5b2ca0c29
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories_2to3/206102/kaggle-lmgpip-master/create_datasets.py
|
164f42a999dbbc6ec8339da8be823a36d9f961c4
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151
| 2018-12-20T19:09:50
| 2018-12-20T19:09:50
| 147,406,338
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,393
|
py
|
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import *
from sklearn.preprocessing import OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
import xgboost as xgb
from sklearn.feature_extraction import DictVectorizer
# LabelEncoder
# Dataset variant 2: label-encode every column, fitting each encoder on the
# union of train and test so both share one code space; write train2/test2.
train = pd.read_csv('../input/train.csv', index_col=None)
test = pd.read_csv('../input/test.csv', index_col=None)
train_cols = train.columns
test_cols = test.columns
# Pull out the target and the Id columns before encoding.
labels = np.array(train.Hazard).ravel()
train_ids = np.array(train.Id).ravel()
test_ids = np.array(test.Id).ravel()
train.drop('Id', axis=1, inplace=True)
train.drop('Hazard', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)
train = np.array(train)
test = np.array(test)
# NOTE(review): numeric columns get label-encoded too, remapping values to
# arbitrary codes -- presumably intentional for this competition pipeline.
for i in range(train.shape[1]):
    lbl = preprocessing.LabelEncoder()
    lbl.fit(list(train[:, i]) + list(test[:, i]))
    train[:, i] = lbl.transform(train[:, i])
    test[:, i] = lbl.transform(test[:, i])
# Reattach Id/Hazard and write the encoded datasets.
train = np.column_stack((train_ids, labels, train))
test = np.column_stack((test_ids, test))
train = pd.DataFrame(train, columns=train_cols)
test = pd.DataFrame(test, columns=test_cols)
train['Id'] = train['Id'].astype(int)
train['Hazard'] = train['Hazard'].astype(int)
test['Id'] = test['Id'].astype(int)
train.to_csv('../input/train2.csv', index=None)
test.to_csv('../input/test2.csv', index=None)
# DictVectorizer
# Dataset variant 3: one-hot encode categorical values by feeding row dicts
# through DictVectorizer; write train3/test3 with the vectorizer's names.
train = pd.read_csv('../input/train.csv', index_col=None)
test = pd.read_csv('../input/test.csv', index_col=None)
train_cols = train.columns
test_cols = test.columns
labels = np.array(train.Hazard).ravel().astype(int)
train_ids = np.array(train.Id).ravel().astype(int)
test_ids = np.array(test.Id).ravel().astype(int)
train.drop('Id', axis=1, inplace=True)
train.drop('Hazard', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)
# Convert each row to a {column: value} dict for the vectorizer.
train = list(train.T.reset_index(drop=True).to_dict().values())
test = list(test.T.reset_index(drop=True).to_dict().values())
vec = DictVectorizer(sparse=False)
train = vec.fit_transform(train)
test = vec.transform(test)
train = np.column_stack((train_ids, labels, train))
test = np.column_stack((test_ids, test))
train = pd.DataFrame(train, columns=['Id', 'Hazard'] + vec.get_feature_names())
test = pd.DataFrame(test, columns=['Id'] + vec.get_feature_names())
train['Id'] = train['Id'].astype(int)
train['Hazard'] = train['Hazard'].astype(int)
test['Id'] = test['Id'].astype(int)
train.to_csv('../input/train3.csv', index=None)
test.to_csv('../input/test3.csv', index=None)
# Factors to hazard mean
# Dataset variant 4: replace each categorical level with the mean Hazard of
# that level in the training data (target encoding); write train4/test4.
train = pd.read_csv('../input/train.csv', index_col=None)
test = pd.read_csv('../input/test.csv', index_col=None)
train_cols = train.columns
test_cols = test.columns
labels = train.Hazard.astype(int)
train_ids = train.Id.astype(int)
test_ids = test.Id.astype(int)
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)
# Only object (string) columns are re-encoded; numeric columns pass through.
for feat in train.select_dtypes(include=['object']).columns:
    m = train.groupby([feat])['Hazard'].mean()
    train[feat].replace(m, inplace=True)
    test[feat].replace(m, inplace=True)
train = pd.concat((train_ids, train), axis=1)
test = pd.concat((test_ids, test), axis=1)
train.to_csv('../input/train4.csv', index=None)
test.to_csv('../input/test4.csv', index=None)
|
[
"keesiu.wong@gmail.com"
] |
keesiu.wong@gmail.com
|
76a28d2c4cfe2561d571b026c4614f17660b2703
|
816955010ba7bcd234688e502a29f522ece05771
|
/facility_management/patches/v0_1/add_initial_violation_categories.py
|
8e3e91fe009063ac257842d0672661b623f85987
|
[
"MIT"
] |
permissive
|
f-9t9it/facility_management
|
a87a6c5b2840b152e6f97476089bcbd0e2f75cb7
|
b17d1c47b543427700acdddf91490b59c5357e50
|
refs/heads/master
| 2021-07-05T22:18:03.516323
| 2021-05-12T19:52:32
| 2021-05-12T19:52:32
| 241,338,459
| 4
| 4
|
NOASSERTION
| 2021-05-12T19:52:33
| 2020-02-18T10:57:12
|
Python
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
import frappe
def execute():
    """Seed the default Tenant Violation Category records (idempotent)."""
    categories = ('Building & Structure', 'Exteriors', 'General Rules',
                  'Noise', 'Pets', 'Trash', 'Vehicles & Parking', 'Others')
    for violation_category in categories:
        # Skip categories that already exist so the patch can be re-run.
        if frappe.db.exists('Tenant Violation Category', violation_category):
            continue
        frappe.get_doc({
            'doctype': 'Tenant Violation Category',
            'category_name': violation_category,
        }).insert()
|
[
"irayspacii@gmail.com"
] |
irayspacii@gmail.com
|
976a93d9868ac5c4863f05814848d48b2d35828b
|
b40e5ea1bc1d83bfc94641a3469eeb866f4df24b
|
/hwk03/tests/test_roni.py
|
14eaa40040fe140b4894a8d25ba57f2450ec2937
|
[] |
no_license
|
snowdj/LS-88-Demography
|
d250c9fea4979dca8f05d61a9f9e023784465fcb
|
c0b125474701fc00f2f285857a4caf08151684c8
|
refs/heads/master
| 2020-09-27T08:06:07.006160
| 2018-11-30T23:43:06
| 2018-11-30T23:43:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
test = {
'name': 'Question',
'points': 1,
'suites': [
{
'type': 'wwpp',
'cases': [
{
'code': r"""
>>> np.isclose(unpd_roni.where('area', 'Malawi').where('period', 2000).column('roni').item(0), 28.261)
True
""",
'hidden': False
},
{
'code': r"""
>>> np.isclose(unpd_roni.where('area', 'Germany').where('period', 1960).column('roni').item(0), 5.069)
True
""",
'hidden': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
[
"feehan@berkeley.edu"
] |
feehan@berkeley.edu
|
18e1a95c1abf23d6839f1e3b9d1c9bdb145e2c6b
|
3a2c449c5e870a96958772c8ba89d9e119f4b098
|
/210-219/214.py
|
57e1ebcaba9eded40fa81fa6a3cfafff5cedca8b
|
[] |
no_license
|
pedrotari7/euler
|
0a8ea69c616b84bddfaeaf5d643592a73f321456
|
692ca67c895e3d0c1655bd53501f3d22c1ebb56c
|
refs/heads/master
| 2020-05-21T04:43:34.816108
| 2017-12-11T19:25:31
| 2017-12-11T19:25:31
| 52,283,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
from collections import Counter
from itertools import repeat, takewhile
from fractions import Fraction
def multiply(numbers):
    """Return the product of every value in *numbers* (1 for an empty iterable)."""
    product = 1
    for value in numbers:
        product = product * value
    return product
class Primes:
    """Odd-only Sieve of Eratosthenes that can regrow itself on demand."""

    def __init__(self, n):
        # sieve[i] tracks the primality of the odd number 2*i + 1.
        N = n // 2
        sieve = [True] * N
        for i in range(3, int(n**0.5) + 1, 2):
            if sieve[i // 2]:
                # Cross off odd multiples of i, starting at i*i (smaller
                # multiples were already removed by smaller primes).
                start = i ** 2 // 2
                sieve[start::i] = repeat(False, len(range(start, N, i)))
        # 2 is prepended by hand since the sieve only covers odd numbers.
        self._list = [2] + [2*i+1 for i in range(1, N) if sieve[i]]
        self._set = set(self._list)
        self.maxn = n

    def upto(self, n):
        """Return an iterator over the primes <= n, regrowing the sieve if needed."""
        if self.maxn < n:
            # Re-sieve at least double the current bound to amortize regrowth.
            self.__init__(max(n, 2 * self.maxn))
        return takewhile(lambda x: x <= n, self._list)
class Factors:
    """Prime-factorization helper backed by a largest-prime-factor table."""

    def __init__(self, maxn):
        # largest[k] ends up holding the largest prime factor of k: primes
        # are processed in ascending order, so later (larger) primes
        # overwrite earlier entries at their multiples.
        self.largest = [1] * maxn
        for p in primes.upto(maxn):
            self.largest[p::p] = repeat(p, len(range(p, maxn, p)))

    def totient(self, n):
        # Euler's totient via the product formula: n * prod(1 - 1/p) over
        # the distinct prime factors p of n.
        return int(n * multiply(1 - Fraction(1, p) for p in set(self(n))))

    def __call__(self, n):
        """Return the prime factors of n (with multiplicity)."""
        result = []
        if n >= len(self.largest):
            # NOTE(review): `for p in primes` iterates the module-level
            # Primes instance, but Primes defines no __iter__/__getitem__,
            # so this branch would raise TypeError if ever reached -- it is
            # dead in the driver script (n is always < maxn); confirm.
            for p in primes:
                while n % p == 0:
                    result.append(p)
                    n = n // p
                if n < len(self.largest):
                    break
        # Peel off the largest prime factor repeatedly using the table.
        while n > 1:
            p = self.largest[n]
            result.append(p)
            n = n // p
        return result
# Memo table for chain_size: n -> length of its iterated-totient chain.
toti = dict()
def chain_size(n):
    """Return the length of the chain n -> totient(n) -> ... -> 1 (memoized)."""
    if n == 1: return 1
    if n not in toti: toti[n] = 1 + chain_size(factors.totient(n))
    return toti[n]
# Project Euler 214: sum the primes below N whose totient chain has length s.
N = 40*10**6
s = 25
primes = Primes(N)
factors = Factors(N)
total = 0
for i in primes.upto(N):
    if chain_size(i) == s:
        total += i
# print() works under both Python 2 and Python 3; the original bare
# `print total` statement is a SyntaxError on Python 3.
print(total)
|
[
"pedrotari7@gmail.com"
] |
pedrotari7@gmail.com
|
76aad4906a42f7841a9200152a71a8a89d9f4198
|
851f7fde684774ca0388a28cb7035aa1e95f5de0
|
/Ercesscorp/migrations/0003_auto_20190511_1215.py
|
4f63b800ada217b5c6fb3fd7a0f3078b9c00f073
|
[] |
no_license
|
aditya2222/django-tickets
|
01451a724cf97c8f2f338ba85a704e85ae57b008
|
3c2ecd252479fc5821873823cdbbb4641268a2d2
|
refs/heads/master
| 2022-12-16T17:17:07.821446
| 2019-05-12T02:58:05
| 2019-05-12T02:58:05
| 186,204,071
| 0
| 0
| null | 2022-11-22T03:22:25
| 2019-05-12T02:55:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Generated by Django 2.1.7 on 2019-05-11 12:15
from django.db import migrations


class Migration(migrations.Migration):
    """Pin the `users` model to the explicit database table name 'users'."""

    dependencies = [
        ('Ercesscorp', '0002_auto_20190511_0950'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='users',
            table='users',
        ),
    ]
|
[
"adityasingh222247@gmail.com"
] |
adityasingh222247@gmail.com
|
eee6625e9a6a9f549bd2be92f4c3b33bd8a94475
|
5aaa310a93d5154a80389e1361bcb0e4a3d4f903
|
/day07/01_奇异值分解.py
|
1c0c63677e68843f96dea8ae5550bc32c6ff93f5
|
[] |
no_license
|
1751660300/ai
|
9ef4afb893e59d99c150e0d2abac378c48568bb8
|
f5eb845d2e4a9eb73fffb089133191e5edecce77
|
refs/heads/master
| 2022-12-11T21:53:05.702992
| 2020-09-09T14:29:57
| 2020-09-09T14:29:57
| 294,137,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
# -*- coding:utf-8 -*-
"""Demo: singular value decomposition (SVD) with NumPy.

Any matrix M factors as U * S * V where U and V are orthogonal (each
multiplied by its own transpose gives the identity) and S is diagonal with
M's singular values on the main diagonal.  Unlike eigen-decomposition,
which requires a square matrix, SVD applies to non-square matrices too.

numpy.linalg.svd(M, full_matrices=False) returns U, the singular values as
a 1-D array, and V; with the default full_matrices=True, U and V come back
as full square matrices instead.
"""
import numpy as np

M = np.mat("4 11 14; 8 7 -2")
print(M)

# Decompose; S is returned as a 1-D array of singular values.
U, S, V = np.linalg.svd(M, full_matrices=False)
# With full_matrices left at its default True, U and V would be square:
# U, S, V = np.linalg.svd(M)

# Both factors are orthogonal -- the two products below print identities.
print(U * U.T)
print(V * V.T)
print(S)

# Promote the singular values to a diagonal matrix and reconstruct M.
S = np.diag(S)
print(S)
print(U * S * V)
|
[
"1751660300@qq.com"
] |
1751660300@qq.com
|
a0ca94ab4a085f99eab19d2b5ba6351ef37814b6
|
303d4aa8ec749844d2f7472e1006b46379c62bc6
|
/test/turtle模块绘图.py
|
a390c53b3c474cdcab4ae23fb629576af852457d
|
[] |
no_license
|
suntyneu/test
|
d6f63791006785518d8e4c92d89018049396dd01
|
a05f93fe71cdd0594fba180e0bed52d402b47bb2
|
refs/heads/master
| 2023-03-26T10:55:28.214978
| 2021-03-20T07:04:03
| 2021-03-20T10:02:38
| 349,372,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
"""
绘图工具,提供一个小海龟,理解为一个机器人呢,只能听得懂有限的命令。
绘图窗口原地在(0, 0)默认朝向右侧
导入 turtle库
运动命令:
forward(d) 向前移动d长度
backward(d) 向后移动d长度
笔画控制命令:
其他命令:
done() 程序继续执行
"""
import turtle
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.done()
|
[
"19904270266@189.cn"
] |
19904270266@189.cn
|
986960919be0320376e0dde6dcf68eafef65c5ed
|
587dbdf730b6cc3e693efc5dca5d83d1dd35ee1a
|
/leetcode/msjd/01-02.py
|
d4550e72553de9dc183448adc39bf81ca9d60ffd
|
[] |
no_license
|
Rivarrl/leetcode_python
|
8db2a15646d68e4d84ab263d8c3b6e38d8e3ea99
|
dbe8eb449e5b112a71bc1cd4eabfd138304de4a3
|
refs/heads/master
| 2021-06-17T15:21:28.321280
| 2021-03-11T07:28:19
| 2021-03-11T07:28:19
| 179,452,345
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
# -*- coding: utf-8 -*-
# ======================================
# @File : 01-02.py
# @Time : 2020/5/9 23:06
# @Author : Rivarrl
# ======================================
# [面试题 01.02. 判定是否互为字符重排](https://leetcode-cn.com/problems/check-permutation-lcci/)
from algorithm_utils import *
class Solution:
    @timeit
    def CheckPermutation(self, s1: str, s2: str) -> bool:
        """Return True iff s2 is a rearrangement of the characters of s1.

        Bug fix: the original only verified that every character count in s2
        was matched in s1, so extra characters in s1 went undetected -- e.g.
        s1="abc", s2="ab" incorrectly returned True.  Comparing the two
        Counters directly checks both directions at once.
        """
        from collections import Counter
        return Counter(s1) == Counter(s2)
if __name__ == '__main__':
    a = Solution()
    # "abc"/"bca" is a valid permutation; "abc"/"bad" is not.
    a.CheckPermutation("abc", "bca")
    a.CheckPermutation("abc", "bad")
|
[
"1049793871@qq.com"
] |
1049793871@qq.com
|
b329ab9cc69df8e15b2920117a312e186e8c8a28
|
aef1ea6df9f7fa7a812d9330873dca97ef205e53
|
/sales/migrations/0004_auto_20200218_1844.py
|
a86f6ea059f5e922dc4fb1f7b23d3d09a6ee6e63
|
[] |
no_license
|
iamshakibulislam/bakery-management
|
0033fec1178d24e427ef68d025682501c5ba6320
|
2751b2cc1f76eeb5825bc3133234ba97e1415569
|
refs/heads/master
| 2023-02-17T17:05:51.078466
| 2021-01-18T08:36:18
| 2021-01-18T08:36:18
| 254,834,024
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,500
|
py
|
# Generated by Django 3.0.3 on 2020-02-18 12:44
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Add the pay_retail model, refresh date defaults, drop pay_saleman."""

    dependencies = [
        ('sales', '0003_auto_20200216_2114'),
    ]

    operations = [
        # New table recording an amount collected from a salesman on a date.
        migrations.CreateModel(
            name='pay_retail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): the default is the literal string '2020-02-18'
                # (the migration's generation date), not "today" -- every new
                # row defaults to that fixed date; confirm this is intended.
                ('date', models.DateField(default='2020-02-18')),
                ('amount', models.IntegerField()),
                ('name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.saleman_list')),
            ],
        ),
        migrations.AlterField(
            model_name='deposit_from_saleman',
            name='date',
            field=models.DateField(default='2020-02-18'),
        ),
        migrations.AlterField(
            model_name='retail',
            name='date',
            field=models.DateField(default='2020-02-18'),
        ),
        migrations.AlterField(
            model_name='retail_sales',
            name='date',
            field=models.DateField(default='2020-02-18'),
        ),
        migrations.AlterField(
            model_name='saleman_sale',
            name='date',
            field=models.DateField(default='2020-02-18'),
        ),
        migrations.DeleteModel(
            name='pay_saleman',
        ),
    ]
|
[
"iamshakibulislam@gmail.com"
] |
iamshakibulislam@gmail.com
|
6cd6f32fe0bd293bf31f942b6055041e058d88de
|
017032b3ea86f938925c33858802b2fb5cb0832f
|
/instagram/config/urls/apis.py
|
7c602cc3ec38f9dd3d92db96002088c384071cba
|
[] |
no_license
|
Fastcampus-WPS-6th/Instagram
|
664d9c9fb2ac1a76381bbeece47f791759240a2c
|
2b859b3b27dab25e59097b17b9f3940fadb5deeb
|
refs/heads/master
| 2022-12-16T00:55:45.638303
| 2017-11-14T07:53:04
| 2017-11-14T07:53:04
| 106,781,002
| 2
| 2
| null | 2022-12-08T00:37:33
| 2017-10-13T05:19:17
|
Python
|
UTF-8
|
Python
| false
| false
| 197
|
py
|
from django.conf.urls import url, include

# API URL routing: delegate /member/ and /post/ to each app's API urlconf.
urlpatterns = [
    url(r'^member/', include('member.urls.apis', namespace='member')),
    url(r'^post/', include('post.urls.apis', namespace='post')),
]
|
[
"dev@azelf.com"
] |
dev@azelf.com
|
e558be579cdc61dac9294eb4943e5a9f6997937c
|
e34a44c07adb818a15dd0742761a4c2cf4258336
|
/src/homework/homework11/main.py
|
1b7c6fe9b9694138f40a7da83440d5654e1ee2e3
|
[
"MIT"
] |
permissive
|
acc-cosc-1336/cosc-1336-spring-2018-jjmareck
|
629e9cdb3a0f091e440e6dccbd2bc23341df4a2c
|
7abfd79cb9a63192c965f828a185ccd981820bae
|
refs/heads/master
| 2021-05-16T14:08:06.763135
| 2018-05-12T03:33:17
| 2018-05-12T03:33:17
| 118,071,035
| 0
| 0
|
MIT
| 2018-02-25T23:29:58
| 2018-01-19T03:22:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
from src.homework.homework11.player import Player
from src.homework.homework11.game_log import GameLog
from src.homework.homework11.die6 import Die6
from src.homework.homework11.die8 import Die8
#write import statements for Die6 and Die8 classes

# First game: roll a 6-sided and an 8-sided die until doubles come up,
# recording each roll in the shared game log, then print the log.
game_log = GameLog()
#ASSIGNMENT 12: Write statements to create Die6 and Die8 instances
die6 = Die6()
die8 = Die8()
#ASSIGNMENT12: pass the Die6 and Die8 instance object variables to the Player instantiation below as parameters after
#the game_log parameter
Player(game_log,die6,die8).roll_doubles()
game_log.display_log()
#ASSIGNMENT12: Create another GameLog instance
#ASSIGNMENT12: Write statements to create Die6 and Die8 instances
#ASSIGNMENT12: Create a new instance of the Player class and pass it the game log, die6, and die8 instances.
#ASSIGNMENT12: Call the player instance roll_doubles.
#ASSIGNMENT12: Call the game log instance display_log method.
# Second game: same flow with fresh dice and a fresh log.
game_log_2 = GameLog()
die6_2 = Die6()
die8_2 = Die8()
player2 = Player(game_log_2,die6_2,die8_2)
player2.roll_doubles()
game_log_2.display_log()
|
[
"noreply@github.com"
] |
acc-cosc-1336.noreply@github.com
|
f700efc6c3f7cee15c14e7308729bd1b4b9dd32f
|
c237dfae82e07e606ba9385b336af8173d01b251
|
/ZServer/medusa/unix_user_handler.py
|
7d737c80d552bdf9b0637945802616f82efc5063
|
[
"ZPL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
OS2World/APP-SERVER-Zope
|
242e0eec294bfb1ac4e6fa715ed423dd2b3ea6ff
|
dedc799bd7eda913ffc45da43507abe2fa5113be
|
refs/heads/master
| 2020-05-09T18:29:47.818789
| 2014-11-07T01:48:29
| 2014-11-07T01:48:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,445
|
py
|
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1996, 1997 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: unix_user_handler.py,v 1.3 2001/05/01 11:44:49 andreas Exp $'
# support for `~user/public_html'.
import re
import string
import default_handler
import filesys
import os
import pwd
get_header = default_handler.get_header
user_dir = re.compile ('/~([^/]+)(.*)')
class unix_user_handler (default_handler.default_handler):
    """Serve `~user/public_html` style URIs.

    Maps /~<user>/<path> onto <home dir of user>/public_html/<path> by
    looking the user up in the system password database, then delegates the
    actual file serving to default_handler over a per-user filesystem.
    """

    def __init__ (self, public_html = 'public_html'):
        self.public_html = public_html
        default_handler.default_handler.__init__ (self, None)

    # cache of per-user filesystem objects, keyed by user name
    fs_cache = {}

    def match (self, request):
        # Only claim the request if the *entire* URI is of the /~user/... form.
        m = user_dir.match (request.uri)
        return m and (m.end() == len (request.uri))

    def handle_request (self, request):
        # Bug fix: the original code called user_dir.group(1)/(2) on the
        # *compiled pattern* object, which has no group() method (that lives
        # on match objects), raising AttributeError on every request.
        # Re-match the URI here to obtain a proper match object.
        m = user_dir.match (request.uri)
        user = m.group (1)
        rest = m.group (2)
        # special hack to catch those lazy URL typers: /~user -> /~user/
        if not rest:
            request['Location'] = 'http://%s/~%s/' % (
                request.channel.server.server_name,
                user
                )
            request.error (301)
            return
        # have we already built a userdir fs for this user?
        if user in self.fs_cache:
            fs = self.fs_cache[user]
        else:
            # no, well then, let's build one.
            # first, find out where the user directory is
            try:
                info = pwd.getpwnam (user)
            except KeyError:
                # unknown system user
                request.error (404)
                return
            # info[5] is pw_dir, the user's home directory
            ud = info[5] + '/' + self.public_html
            if os.path.isdir (ud):
                fs = filesys.os_filesystem (ud)
                self.fs_cache[user] = fs
            else:
                request.error (404)
                return
        # fake out default_handler
        self.filesystem = fs
        # massage the request URI so default_handler serves <rest> from fs
        request.uri = '/' + rest
        return default_handler.default_handler.handle_request (self, request)

    def __repr__ (self):
        return '<Unix User Directory Handler at %08x [~user/%s, %d filesystems loaded]>' % (
            id(self),
            self.public_html,
            len(self.fs_cache)
            )
|
[
"martin@os2world.com"
] |
martin@os2world.com
|
c57e5a71b7397ea1d38649655ed8f773245c84e5
|
6392354e74cce4a303a544c53e13d0a7b87978ee
|
/m6/MyBlog/MyBlog/settings.py
|
0136cc69f02b7cb015f3c2b498568636608dfbcd
|
[] |
no_license
|
music51555/wxPythonCode
|
dc35e42e55d11850d7714a413da3dde51ccdd37e
|
f77b71ed67d926fbafd1cfec89de8987d9832016
|
refs/heads/master
| 2020-04-11T20:20:38.136446
| 2019-04-01T09:17:34
| 2019-04-01T09:17:34
| 162,067,449
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,973
|
py
|
"""
Django settings for MyBlog project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '84_z7kklzmg9y2wl^azw-=a&zs)zn&6akunu$w+jps3czbff2-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
LOGIN_URL='/login/'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MyBlog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'propagate': True,
'level':'DEBUG',
},
}
}
WSGI_APPLICATION = 'MyBlog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# SECURITY NOTE(review): a real host IP, user and password are committed to
# source control here -- move them to environment variables or a secrets
# store and rotate the credentials.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'blog',
        'HOST': '140.143.132.118',
        'PORT': 3306,
        'USER': 'xiaoxin',
        'PASSWORD': 'Nishi458_2'
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL='blog.UserInfo'
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static')
]
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
# MEDIA_URL = '/media/'
# Outgoing mail via QQ's SMTP server.
# SECURITY NOTE(review): account credentials are committed to source
# control -- move them to environment variables and rotate the password.
EMAIL_HOST = 'smtp.qq.com'
# NOTE(review): looks like a misspelling of EMAIL_PORT -- Django never reads
# EMAIL_POST, so this value is ignored and the default port is used; confirm
# (465 normally implies SSL, yet EMAIL_USE_SSL is False below).
EMAIL_POST = 465
EMAIL_HOST_USER = '452427904@qq.com'
EMAIL_HOST_PASSWORD = 'dqccbhjmkjxpbgig'
# DEFAULT_FROM_EMAIL = ''
EMAIL_USE_SSL = False
|
[
"music51555@163.com"
] |
music51555@163.com
|
45f5e444d8a77f49dc82fa426bd042cefa7c4e05
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/important_hand/part_or_year.py
|
a20ee7526e10994fd0dd1c9a222a98873a7db3b1
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
#! /usr/bin/env python
def first_time(str_arg):
    """Echo the given argument to stdout."""
    print(str_arg)


def good_government_or_point(str_arg):
    """Print *str_arg* (via first_time) followed by the literal 'make_week'."""
    first_time(str_arg)
    print('make_week')


if __name__ == '__main__':
    good_government_or_point('time')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
d9fbfe1bb534eb343c1c47c1aebd8bfb00e7d944
|
75a35cefa5adf2f42503eb0cc8c60f7f96ff9650
|
/produccion/migrations/0010_prodleche_vo.py
|
e1e409b5bba6811d832d082f8f1c4ab365bc8f15
|
[] |
no_license
|
PatacaSis/agroweb
|
5c70f35001d0e88fb5f1642161d4eee6b4abda59
|
e2181fa0bb6ca7752bdbaab62fe60ede9f2630b2
|
refs/heads/main
| 2023-06-20T23:37:50.294745
| 2021-07-19T23:26:55
| 2021-07-19T23:26:55
| 381,737,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
# Generated by Django 2.2 on 2021-06-24 19:48
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the required integer column `vo` to prodleche.

    default=1 backfills existing rows; preserve_default=False then removes
    the default so future inserts must supply a value explicitly.
    """

    dependencies = [
        ('produccion', '0009_auto_20210618_0943'),
    ]

    operations = [
        migrations.AddField(
            model_name='prodleche',
            name='vo',
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
    ]
|
[
"patacasis@gmail.com"
] |
patacasis@gmail.com
|
f43a80bb21f4e1d7ace060143a3ff8fd95b5a258
|
cb95db2638e100f52f8810747fd3ee7be3660b1f
|
/static/audio_file/clear_cache.py
|
0988c8635225a139a6dfd03fe36462f48b02e6fe
|
[
"MIT"
] |
permissive
|
wanZzz6/smart_robot
|
c16c7e20c421ff7431a00b95a9f7c5ec56bbcb92
|
769dc3a3dbf35d43abc416c08ba8be81bff72747
|
refs/heads/db_version
| 2023-05-28T12:40:55.928491
| 2020-04-20T03:52:48
| 2020-04-20T03:52:48
| 140,998,544
| 5
| 1
| null | 2023-05-22T21:37:20
| 2018-07-15T04:38:10
|
CSS
|
UTF-8
|
Python
| false
| false
| 158
|
py
|
import os

# Delete every file in the current directory except this script itself and
# files whose names contain 'auido'.
all_file = os.listdir('.')
print('正在清理缓存')  # "Cleaning cache..."
for i in all_file:
    # NOTE(review): 'auido' looks like a typo for 'audio' -- if the cached
    # files are actually named 'audio*' they would be deleted rather than
    # kept; confirm the real filename prefix produced by the writer.
    if 'auido' in i or 'clear' in i:
        continue
    os.remove(i)
|
[
"1343837706@qq.com"
] |
1343837706@qq.com
|
ae8f27c547a5e25a413c4ee71d01778d834057a3
|
17d23f404a20c34a406dd086b0a89f956c4ecac0
|
/Django-Tutorials/listening/migrations/0001_initial.py
|
66398fa340306646d1693b2b624511c665b7fc9f
|
[] |
no_license
|
apabhishek178/ieltsonline
|
69df682862d96bc04b318262e962e22a0919fe88
|
42061efa8293c948342a670f0a62c90d3b31ebff
|
refs/heads/master
| 2020-04-26T09:19:55.712217
| 2019-03-20T13:36:31
| 2019-03-20T13:36:31
| 173,451,873
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,002
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-18 16:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AudioMain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='Listening Part', max_length=50)),
('Instruction', models.TextField(default='instructions', max_length=2000)),
('audio_file', models.FileField(help_text='Allowed type - .mp3, .wav, .ogg', null=True, upload_to='audiofolder/')),
],
),
migrations.CreateModel(
name='Fillup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='Fillup Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('Question', models.TextField(default='Your questions', max_length=100)),
('audio', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audioFill', to='listening.AudioMain')),
],
),
migrations.CreateModel(
name='FillupQue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('part1', models.CharField(max_length=100, null=True)),
('part2', models.CharField(max_length=100, null=True)),
('linked', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='linkedfillup', to='listening.Fillup')),
],
),
migrations.CreateModel(
name='MapMatch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('map', models.ImageField(null=True, upload_to='audiomap/')),
('Question_Name', models.CharField(default='Map Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('Question', models.TextField(default='Your questions', max_length=100)),
('audio', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audioMap', to='listening.AudioMain')),
],
),
migrations.CreateModel(
name='MapMatchQues',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('part', models.CharField(max_length=100, null=True)),
('linked', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='linkedmap', to='listening.MapMatch')),
],
),
migrations.CreateModel(
name='Matching',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='Matching Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('l1', models.CharField(max_length=100, null=True)),
('l2', models.CharField(max_length=100, null=True)),
('l3', models.CharField(blank=True, max_length=100, null=True)),
('l4', models.CharField(blank=True, max_length=100, null=True)),
('l5', models.CharField(blank=True, max_length=100, null=True)),
('l6', models.CharField(blank=True, max_length=100, null=True)),
('l7', models.CharField(blank=True, max_length=100, null=True)),
('l8', models.CharField(blank=True, max_length=100, null=True)),
('l9', models.CharField(blank=True, max_length=100, null=True)),
('l10', models.CharField(blank=True, max_length=100, null=True)),
('r1', models.CharField(max_length=100, null=True)),
('r2', models.CharField(max_length=100, null=True)),
('r3', models.CharField(blank=True, max_length=100, null=True)),
('r4', models.CharField(blank=True, max_length=100, null=True)),
('r5', models.CharField(blank=True, max_length=100, null=True)),
('r6', models.CharField(blank=True, max_length=100, null=True)),
('r7', models.CharField(blank=True, max_length=100, null=True)),
('r8', models.CharField(blank=True, max_length=100, null=True)),
('r9', models.CharField(blank=True, max_length=100, null=True)),
('r10', models.CharField(blank=True, max_length=100, null=True)),
('audio', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audioMatch', to='listening.AudioMain')),
],
),
migrations.CreateModel(
name='MCQ',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='MCQ Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('passage', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audiomcq', to='listening.AudioMain')),
],
),
migrations.CreateModel(
name='MCQQues',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question', models.TextField(default='Your questions', max_length=100)),
('option1', models.CharField(max_length=100, null=True)),
('option2', models.CharField(max_length=100, null=True)),
('option3', models.CharField(blank=True, max_length=100, null=True)),
('option4', models.CharField(blank=True, max_length=100, null=True)),
('option5', models.CharField(blank=True, max_length=100, null=True)),
('option6', models.CharField(blank=True, max_length=100, null=True)),
('linked', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='linkedmcq', to='listening.MCQ')),
],
),
migrations.CreateModel(
name='Summary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='audio Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('part1', models.CharField(max_length=100, null=True)),
('part2', models.CharField(max_length=100, null=True)),
('part3', models.CharField(blank=True, max_length=100, null=True)),
('part4', models.CharField(blank=True, max_length=100, null=True)),
('part5', models.CharField(blank=True, max_length=100, null=True)),
('part6', models.CharField(blank=True, max_length=100, null=True)),
('part7', models.CharField(blank=True, max_length=100, null=True)),
('audio', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audiosumm', to='listening.AudioMain')),
],
),
]
|
[
"apabhishek178@gmail.com"
] |
apabhishek178@gmail.com
|
e7d844941964fce550c0dfc36f57e53d74501643
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/liangliangyy_DjangoBlog/DjangoBlog-master/comments/templatetags/comments_tags.py
|
e23ec803c3847a3a33e438bbaf6b3c38171df187
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: liangliangyy@gmail.com
@site: https://www.lylinux.org/
@software: PyCharm
@file: comments_tags.py
@time: 2016/11/2 下午9:17
"""
from django import template
from django.template.loader import render_to_string
from ..models import Comment
from blog.models import Article
from comments.forms import CommentForm
register = template.Library()
@register.assignment_tag
def parse_commenttree(commentlist, comment):
    """Flatten every descendant of *comment* into a list, depth-first.

    Usage: {% parse_commenttree article_comments comment as childcomments %}
    """
    datas = []

    def parse(c):
        # Append each direct reply, then recurse so parents precede children.
        childs = commentlist.filter(parent_comment=c)
        for child in childs:
            datas.append(child)
            parse(child)

    parse(comment)
    return datas
@register.inclusion_tag('comments/tags/comment_item.html')
def show_comment_item(comment, ischild):
    """Render one comment item (depth 1 for child comments, 2 otherwise)."""
    depth = 1 if ischild else 2;
    return {
        'comment_item': comment,
        'depth': depth
    }
"""
@register.simple_tag(name='get_comment_count')
def GetCommentCount(parser, token):
commentcount = Comment.objects.filter(article__author_id=token).count()
return "0" if commentcount == 0 else str(commentcount) + " comments"
@register.inclusion_tag('comments/tags/post_comment.html')
def load_post_comment(article, lastform=None):
if not lastform:
form = CommentForm()
form.article_id = article.id
form.parent_comment_id = ''
else:
form = lastform
return {
'article': article,
'form': form
}
"""
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
1bafc04c82fcf2e419a27d47e758c586dc6d95cc
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/compose/2017/12/testcases.py
|
9427f3d0dad0bc782d0c9d9d7f167207a3c1d6f0
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 5,683
|
py
|
from __future__ import absolute_import
from __future__ import unicode_literals
import functools
import os
import pytest
from docker.errors import APIError
from docker.utils import version_lt
from .. import unittest
from compose.cli.docker_client import docker_client
from compose.config.config import resolve_environment
from compose.config.environment import Environment
from compose.const import API_VERSIONS
from compose.const import COMPOSEFILE_V1 as V1
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import COMPOSEFILE_V2_0 as V2_1
from compose.const import COMPOSEFILE_V2_2 as V2_2
from compose.const import COMPOSEFILE_V2_3 as V2_3
from compose.const import COMPOSEFILE_V3_0 as V3_0
from compose.const import COMPOSEFILE_V3_2 as V3_2
from compose.const import COMPOSEFILE_V3_5 as V3_5
from compose.const import LABEL_PROJECT
from compose.progress_stream import stream_output
from compose.service import Service
SWARM_SKIP_CONTAINERS_ALL = os.environ.get('SWARM_SKIP_CONTAINERS_ALL', '0') != '0'
SWARM_SKIP_CPU_SHARES = os.environ.get('SWARM_SKIP_CPU_SHARES', '0') != '0'
SWARM_SKIP_RM_VOLUMES = os.environ.get('SWARM_SKIP_RM_VOLUMES', '0') != '0'
SWARM_ASSUME_MULTINODE = os.environ.get('SWARM_ASSUME_MULTINODE', '0') != '0'
def pull_busybox(client):
    """Ensure the busybox:latest image is available on the test engine."""
    client.pull('busybox:latest', stream=False)
def get_links(container):
    """Return the link aliases configured on *container*.

    Each HostConfig.Links entry looks like '/name:/container/alias'; only the
    last path segment of the alias is returned.
    """
    links = container.get('HostConfig.Links') or []

    def format_link(link):
        _, alias = link.split(':')
        return alias.split('/')[-1]

    return [format_link(link) for link in links]
def engine_max_version():
    """Return the highest compose-file version the engine under test supports.

    Driven by the DOCKER_VERSION environment variable (anything after a '-'
    suffix is ignored); defaults to the newest known version when unset.
    """
    if 'DOCKER_VERSION' not in os.environ:
        return V3_5
    version = os.environ['DOCKER_VERSION'].partition('-')[0]
    if version_lt(version, '1.10'):
        return V1
    if version_lt(version, '1.12'):
        return V2_0
    if version_lt(version, '1.13'):
        return V2_1
    if version_lt(version, '17.06'):
        return V3_2
    return V3_5
def min_version_skip(version):
return pytest.mark.skipif(
engine_max_version() < version,
reason="Engine version %s is too low" % version
)
def v2_only():
return min_version_skip(V2_0)
def v2_1_only():
return min_version_skip(V2_1)
def v2_2_only():
return min_version_skip(V2_2)
def v2_3_only():
return min_version_skip(V2_3)
def v3_only():
return min_version_skip(V3_0)
class DockerClientTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
version = API_VERSIONS[engine_max_version()]
cls.client = docker_client(Environment(), version)
@classmethod
def tearDownClass(cls):
del cls.client
def tearDown(self):
for c in self.client.containers(
all=True,
filters={'label': '%s=composetest' % LABEL_PROJECT}):
self.client.remove_container(c['Id'], force=True)
for i in self.client.images(
filters={'label': 'com.docker.compose.test_image'}):
try:
self.client.remove_image(i, force=True)
except APIError as e:
if e.is_server_error():
pass
volumes = self.client.volumes().get('Volumes') or []
for v in volumes:
if 'composetest_' in v['Name']:
self.client.remove_volume(v['Name'])
networks = self.client.networks()
for n in networks:
if 'composetest_' in n['Name']:
self.client.remove_network(n['Name'])
def create_service(self, name, **kwargs):
if 'image' not in kwargs and 'build' not in kwargs:
kwargs['image'] = 'busybox:latest'
if 'command' not in kwargs:
kwargs['command'] = ["top"]
kwargs['environment'] = resolve_environment(
kwargs, Environment.from_env_file(None)
)
labels = dict(kwargs.setdefault('labels', {}))
labels['com.docker.compose.test-name'] = self.id()
return Service(name, client=self.client, project='composetest', **kwargs)
def check_build(self, *args, **kwargs):
kwargs.setdefault('rm', True)
build_output = self.client.build(*args, **kwargs)
stream_output(build_output, open('/dev/null', 'w'))
def require_api_version(self, minimum):
api_version = self.client.version()['ApiVersion']
if version_lt(api_version, minimum):
pytest.skip("API version is too low ({} < {})".format(api_version, minimum))
def get_volume_data(self, volume_name):
if not is_cluster(self.client):
return self.client.inspect_volume(volume_name)
volumes = self.client.volumes(filters={'name': volume_name})['Volumes']
assert len(volumes) > 0
return self.client.inspect_volume(volumes[0]['Name'])
def is_cluster(client):
    """Return True when the Docker endpoint is a multi-node Swarm cluster.

    The node count is looked up once and cached as an attribute on the
    function object, so repeated calls do not hit the API again.
    """
    if SWARM_ASSUME_MULTINODE:
        return True

    def get_nodes_number():
        try:
            return len(client.nodes())
        except APIError:
            # If the Engine is not part of a Swarm, the SDK will raise
            # an APIError
            return 0

    if not hasattr(is_cluster, 'nodes') or is_cluster.nodes is None:
        # Only make the API call if the value hasn't been cached yet
        is_cluster.nodes = get_nodes_number()
    return is_cluster.nodes > 1
def no_cluster(reason):
    """Decorator factory: skip the wrapped test when running on a Swarm.

    `reason` is included in the pytest skip message.
    """
    def decorator(test_func):
        @functools.wraps(test_func)
        def wrapper(self, *args, **kwargs):
            if is_cluster(self.client):
                pytest.skip("Test will not be run in cluster mode: %s" % reason)
                return
            return test_func(self, *args, **kwargs)
        return wrapper
    return decorator
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
bc76b4c9093b72508244a0815238c338afd49775
|
7dc65b6d2e857c807bd2f75e2586af5f8e933fe5
|
/tcutils/pkgs/Traffic/traffic/core/stream.py
|
69e5a78e7c8813a72c1172ca57325b026cbe268e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
vkolli/contrail-test-perf
|
d6fdc20f4a2004066c5a6316afd915ecdc9366c2
|
db04b8924a2c330baabe3059788b149d957a7d67
|
refs/heads/master
| 2021-01-18T15:36:18.120487
| 2017-03-30T19:19:30
| 2017-03-30T19:19:30
| 86,661,522
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,933
|
py
|
"""Module to create traffic stream.
It just parses the arguments given by the user and fills up the approprite
protocol header.
This needs to be extended for new protocol streams with new protocol.
"""
import sys
import inspect
try:
# Running from the source repo "test".
from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger
from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL
from tcutils.pkgs.Traffic.traffic.utils.util import is_v6, is_v4
except ImportError:
# Distributed and installed as package
from traffic.utils.logger import LOGGER, get_logger
from traffic.utils.globalvars import LOG_LEVEL
from traffic.utils.util import is_v6, is_v4
LOGGER = "%s.core.listener" % LOGGER
log = get_logger(name=LOGGER, level=LOG_LEVEL)
def help(header="all"):
    """Lists the keywords of fields available in currently implemented
    protocols.
    This is a helper method to the users to get the list of fields,
    before creating a stream.
    Usage:
    import stream
    stream.help()
    stream.help("IPHeader")
    """
    # NOTE: shadows the builtin help(); kept for backward compatibility.
    # Collect every class defined in this module.
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    if not header == "all":
        # Restrict the listing to the single requested header class.
        clsmembers = filter(lambda x: x[0] == header, clsmembers)
    for clsname, clsmember in clsmembers:
        # Instantiate with no arguments to read its fields/options tuples.
        clsobj = clsmember()
        clsattrs = dir(clsobj)
        if "fields" in clsattrs:
            print clsname, ": ", clsobj.fields
        if "options" in clsattrs:
            print clsname, ": ", clsobj.options
class Stream(object):
    """Container for one traffic stream's L2/L3/L4 header objects.

    Parses the user's keyword arguments and fills in the appropriate
    protocol headers (IP or IPv6 at L3; TCP, UDP or ICMP at L4).
    """
    def __init__(self, **kwargs):
        if not kwargs:
            # Just for getting Help.
            return
        self.all_fields = kwargs
        try:
            self.protocol = self.all_fields['protocol']
        except KeyError:
            self.protocol = "ip"  # Default L3 protocol.
        dst = self.all_fields['dst']
        # An IPv6 destination address overrides the requested L3 protocol.
        if is_v6(dst):
            self.protocol = "ipv6"
        try:
            proto = self.all_fields['proto']
        except KeyError, err:
            # NOTE(review): execution continues after this message, so the
            # later `proto` comparisons raise NameError — confirm intent.
            print err, "Must specify proto."
        if 'dst' in self.all_fields.keys():
            self.all_fields['dst'] = str(self.all_fields['dst'])
        self.l2 = self._eth_header()
        if self.protocol == 'ip':
            self.l3 = self._ip_header()
        elif self.protocol == 'ipv6':
            self.l3 = self._ip6_header()
        if proto == 'tcp':
            self.l4 = self._tcp_header()
        elif proto == 'udp':
            self.l4 = self._udp_header()
        elif proto == 'icmp':
            self.l4 = self._icmp_header()
    def _eth_header(self):
        # No explicit L2 fields are supported yet.
        return {}
    def _ip_header(self):
        return IPHeader(**self.all_fields).get_header()
    def _ip6_header(self):
        return IP6Header(**self.all_fields).get_header()
    def _tcp_header(self):
        return TCPHeader(**self.all_fields).get_header()
    def _udp_header(self):
        return UDPHeader(**self.all_fields).get_header()
    def _icmp_header(self):
        # Plain ICMP does not apply to IPv6 streams; ICMPv6 is expressed
        # through the IPv6 header's next-header field instead.
        if self.protocol == 'ipv6':
            return None
        return ICMPHeader(**self.all_fields).get_header()
    def get_l4_proto(self):
        """Return the L4 protocol name from `proto` (IPv4) or `nh` (IPv6)."""
        return getattr(self.l3, 'proto', None) or \
            getattr(self.l3, 'nh', None).lower()
class Header(object):
    """Attribute bag: each key/value in `fields` becomes an attribute."""
    def __init__(self, fields={}):
        # NOTE: the mutable default is never mutated here; kept to preserve
        # the public signature.
        for name, value in fields.items():
            setattr(self, name, value)
class AnyHeader(object):
    """Base class for protocol headers built from user-supplied kwargs.

    Stores all keyword arguments in `all_fields` and offers create_header()
    to extract the subset relevant to a particular protocol header.
    """
    def __init__(self, **kwargs):
        self.all_fields = kwargs
        # Ports and inter-packet gap are numeric; coerce each one that is
        # present.  (The original chained the three coercions in one
        # try/except KeyError, so the first missing key silently aborted
        # the remaining conversions.)
        for key in ('sport', 'dport', 'inter'):
            if key in self.all_fields:
                self.all_fields[key] = int(self.all_fields[key])
    def create_header(self, fields):
        """Return a dict of the requested fields found in all_fields.

        'iplen' and 'ipflags' are stored under scapy's names 'len' and
        'flags'; they are prefixed in the user API to avoid clashing with
        UDP's 'len' and TCP's 'flags'.
        """
        header = {}
        for field in fields:
            if field not in self.all_fields:
                continue
            # Remap the user-facing name to scapy's field name, but read
            # the value under the user-facing key.  (The original looked
            # the value up under the remapped key, raising KeyError
            # whenever iplen/ipflags were supplied.)
            key = field
            if field == "iplen":  # UDP also has len
                key = "len"
            elif field == "ipflags":  # TCP also has flags
                key = "flags"
            header[key] = self.all_fields[field]
        return header
class TCPHeader(AnyHeader):
    """TCP header fields and options, mirroring scapy's TCP fields_desc."""
    def __init__(self, **kwargs):
        super(TCPHeader, self).__init__(**kwargs)
        # Field names taken from scapy's TCP "fields_desc" attribute.
        self.fields = ("sport", "dport", "seq", "ack", "dataofs", "reserved",
                       "flags", "window", "chksum", "urgptr")
        self.options = ("EOL", "NOP", "MSS", "WScale", "SAckOK", "SAck",
                        "Timestamp", "AltChkSum", "AltChkSumOpt")
    def get_header(self):
        """Build a Header, including any TCP options the user supplied."""
        header = self.create_header(self.fields)
        opts = self.create_header(self.options)
        if opts:
            header['options'] = opts
        return Header(header)
class UDPHeader(AnyHeader):
    """UDP header fields, mirroring scapy's UDP fields_desc."""
    def __init__(self, **kwargs):
        super(UDPHeader, self).__init__(**kwargs)
        # Field names taken from scapy's UDP "fields_desc" attribute.
        self.fields = ("sport", "dport", "len", "chksum")
    def get_header(self):
        """Build a Header from the supplied UDP fields."""
        return Header(self.create_header(self.fields))
class ICMPHeader(AnyHeader):
    """ICMP header fields, mirroring scapy's ICMP fields_desc."""
    def __init__(self, **kwargs):
        super(ICMPHeader, self).__init__(**kwargs)
        # Field names taken from scapy's ICMP "fields_desc" attribute.
        # BUG FIX: the original was missing the comma after "ts_rx",
        # which silently concatenated it with "ts_tx" into one bogus
        # "ts_rxts_tx" entry, so neither timestamp field ever matched.
        self.fields = ("type", "code", "chksum", "id", "seq", "ts_ori",
                       "ts_rx", "ts_tx", "gw", "ptr", "reserved",
                       "addr_mask")
    def get_header(self):
        """Build a Header from the supplied ICMP fields."""
        header = self.create_header(self.fields)
        return Header(header)
class IPHeader(AnyHeader):
    """IPv4 header fields, mirroring scapy's IP fields_desc."""
    def __init__(self, **kwargs):
        super(IPHeader, self).__init__(**kwargs)
        # Field names taken from scapy's IP "fields_desc" attribute.
        # 'iplen'/'ipflags' are remapped to 'len'/'flags' by create_header.
        self.fields = ("version", "ihl", "tos", "iplen", "id", "ipflags",
                       "frag", "ttl", "proto", "ipchksum", "src", "dst",
                       "options")
    def get_header(self):
        """Build a Header from the supplied IPv4 fields."""
        return Header(self.create_header(self.fields))
class IP6Header(AnyHeader):
    """IPv6 header fields, mirroring scapy's IPv6 fields_desc."""
    def __init__(self, **kwargs):
        super(IP6Header, self).__init__(**kwargs)
        # Field names taken from scapy's IPv6 "fields_desc" attribute.
        self.fields = ("version", "tc", "fl", "iplen", "nh", "proto",
                       "hlim", "ttl", "src", "dst")
    def get_header(self):
        """Build a Header, translating IPv4-style names to IPv6 ones.

        'proto' becomes the uppercased next-header 'nh' (ICMP mapped to
        ICMPv6) and 'ttl' becomes the hop limit 'hlim'.
        """
        hdr = Header(self.create_header(self.fields))
        if hasattr(hdr, 'proto'):
            hdr.nh = hdr.proto.upper()
            if 'ICMP' in hdr.nh:
                hdr.nh = 'ICMPv6'
            del hdr.proto
        if hasattr(hdr, 'ttl'):
            hdr.hlim = hdr.ttl
            del hdr.ttl
        return hdr
|
[
"root@5b3s45.contrail.juniper.net"
] |
root@5b3s45.contrail.juniper.net
|
d32b3980d348ba60a4b957a7225a211d4f803884
|
08615c64a62fc364a802bb92314cf49080ddbcee
|
/new-day02/07.对象/04.类型的判断.py
|
e91be84d6a4c82303ee691ee3e998464c7d94134
|
[] |
no_license
|
xiangys0134/python_study
|
afc4591fca1db6ebddf83f0604e35ed2ef614728
|
6ec627af7923b9fd94d244c561297ccbff90c1e9
|
refs/heads/master
| 2023-02-24T01:24:45.734510
| 2022-10-29T02:11:20
| 2022-10-29T02:11:20
| 143,358,792
| 2
| 0
| null | 2023-02-08T03:07:26
| 2018-08-03T00:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 198
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class A:
    """Base class for the isinstance demo."""
    pass
class B(A):
    """Subclass of A."""
    pass
# print(issubclass(B,A))
a = A()
b = B()
# An instance of a subclass is an instance of the base, but not vice versa.
for obj, cls in ((a, A), (b, A), (a, B)):
    print(isinstance(obj, cls))
|
[
"ops@xuncetech.com"
] |
ops@xuncetech.com
|
765bf542a781b1ad6e4337b542b13342b17bbcb6
|
374b3f27fe3cf032e88eccac5992c83eba0ad1b2
|
/tutorials/W3D5_NetworkCausality/solutions/W3D5_Tutorial4_Solution_431a3d57.py
|
9719ed5491a6af4bb92107ac81919cd72f403124
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
NeuromatchAcademy/course-content
|
e2fdca96bcbdc78afaa209e4e77438f44a56c82d
|
3d638d00f02d9fd269fa2aff7d062558afdcb126
|
refs/heads/main
| 2023-08-16T16:09:09.314153
| 2023-08-02T06:21:49
| 2023-08-02T06:21:49
| 262,856,980
| 2,678
| 1,079
|
CC-BY-4.0
| 2023-08-17T00:32:24
| 2020-05-10T19:09:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 909
|
py
|
def fit_first_stage(T, Z):
    """
    Estimates T_hat as the first stage of a two-stage least squares.
    Args:
      T (np.ndarray): our observed, possibly confounded, treatment of shape (n, 1)
      Z (np.ndarray): our observed instruments of shape (n, 1)
    Returns
      T_hat (np.ndarray): our estimate of the unconfounded portion of T
    """
    # Regress the treatment on the instruments (with intercept) and use the
    # fitted values as the instrument-explained part of T.
    regressor = LinearRegression(fit_intercept=True)
    regressor.fit(Z, T)
    return regressor.predict(Z)
# Estimate T_hat
# NOTE(review): T, Z and C are defined earlier in the notebook — presumably
# treatment, instrument and confounder arrays of shape (n, 1); confirm there.
T_hat = fit_first_stage(T, Z)
# Get correlations between the confounder and the raw vs. instrumented treatment
T_C_corr = np.corrcoef(T.transpose(), C.transpose())[0, 1]
T_hat_C_corr = np.corrcoef(T_hat.transpose(), C.transpose())[0, 1]
# Print correlations
print(f"Correlation between T and C: {T_C_corr:.3f}")
print(f"Correlation between T_hat and C: {T_hat_C_corr:.3f}")
|
[
"noreply@github.com"
] |
NeuromatchAcademy.noreply@github.com
|
bfb31af3eba9876a04ed0655b16df30fb0c8340b
|
3e6d3e9585c24cb0c29616778ad2e304241a3a65
|
/dockerhub_show_tags.py
|
52febfd188001ee975c11201c830a2941ba06f89
|
[] |
no_license
|
blueroutecn/pytools
|
442ef301f5a1b1ef5ce1a22dfe3027873b262934
|
64cb4f6e253bc1f08491874a0db9bcea2ae3dcfd
|
refs/heads/master
| 2021-01-19T14:52:14.886782
| 2017-08-20T21:28:07
| 2017-08-20T21:28:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,675
|
py
|
#!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2016-05-10 11:26:49 +0100 (Tue, 10 May 2016)
#
# https://github.com/harisekhon/pytools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help improve this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Tool to show Docker tags for one or more DockerHub repos
Written for convenience as Docker CLI doesn't currently support this:
See https://github.com/docker/docker/issues/17238
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import json
import logging
import os
import sys
import traceback
import urllib
try:
import requests
except ImportError:
print(traceback.format_exc(), end='')
sys.exit(4)
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import log, die, prog, isJson, jsonpp
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.4'
class DockerHubTags(CLI):
    """CLI tool that lists the DockerHub tags for one or more repos,
    following pagination and printing 'latest' first."""

    def __init__(self):
        # Python 2.x
        super(DockerHubTags, self).__init__()
        # Python 3.x
        # super().__init__()
        self._CLI__parser.usage = '{0} [options] repo1 repo2 ...'.format(prog)
        self.quiet = False
        self.timeout_default = 30

    def add_options(self):
        """Register the --quiet flag on the option parser."""
        self.add_opt('-q', '--quiet', action='store_true', default=False,
                     help='Output only the tags, one per line (useful for shell tricks)')

    def run(self):
        """Print the tags for every repo given on the command line."""
        if not self.args:
            self.usage('no repos given as args')
        self.quiet = self.get_opt('quiet')
        if not self.quiet:
            print('\nDockerHub\n')
        for arg in self.args:
            self.print_tags(arg)

    def print_tags(self, repo):
        """Print one repo's tags, aligned under 'tags:' unless --quiet."""
        if not self.quiet:
            print('repo: {0}'.format(repo))
            print('tags: ', end='')
            sys.stdout.flush()
        indent = '      '
        if self.quiet:
            indent = ''
        print('\n{0}'.format(indent).join(self.get_tags(repo)))
        if not self.quiet:
            print()

    def get_tags(self, repo):
        """Return all tags for `repo`, sorted, with 'latest' moved first.

        Repos without a namespace default to the official 'library' one.
        """
        namespace = 'library'
        if '/' in repo:
            # BUG FIX: maxsplit was 2, which raised ValueError for repo
            # strings containing more than one '/'; split once so
            # everything after the first '/' stays in the repo name.
            (namespace, repo) = repo.split('/', 1)
        # there is another endpoint but it requires authentication
        # NOTE(review): urllib.quote_plus is Python 2 only; Python 3 needs
        # urllib.parse.quote_plus — confirm the target interpreter.
        url = 'https://registry.hub.docker.com/v2/repositories/{0}/{1}/tags/'\
              .format(urllib.quote_plus(namespace), urllib.quote_plus(repo))
        tag_list = []
        while True:
            (tags, url) = self.query(url)
            tag_list += tags
            if not url:
                break
        tag_list.sort()
        # put latest to the top of the list
        try:
            tag_list.insert(0, tag_list.pop(tag_list.index('latest')))
        except ValueError:
            pass
        return tag_list

    @staticmethod
    def query(url):
        """GET `url`; return ([tag names], next-page url or None).

        Dies (exits) on transport errors, non-200 responses, non-JSON
        bodies, or unexpected JSON structure.
        """
        log.debug('GET %s' % url)
        try:
            verify = True
            # workaround for Travis CI and older pythons - we're not exchanging secret data so this is ok
            #if os.getenv('TRAVIS'):
            #    verify = False
            req = requests.get(url, verify=verify)
        except requests.exceptions.RequestException as _:
            die(_)
        log.debug("response: %s %s", req.status_code, req.reason)
        log.debug("content:\n%s\n%s\n%s", '='*80, req.content.strip(), '='*80)
        if req.status_code != 200:
            die("%s %s" % (req.status_code, req.reason))
        if not isJson(req.content):
            die('invalid non-JSON response from DockerHub!')
        if log.isEnabledFor(logging.DEBUG):
            print(jsonpp(req.content))
            print('='*80)
        tag_list = []
        try:
            j = json.loads(req.content)
            tag_list = [_['name'] for _ in j['results']]
            # could perhaps stack overflow in some scenario
            # not as functional programming 'cool' but will do own tail recursion and just while loop instead
            #if 'next' in j and j['next']:
            #    tag_list += self.query(j['next'])
            return (tag_list, j['next'])
        except KeyError as _:
            die('failed to parse output from DockerHub (format may have changed?): {0}'.format(_))
# Script entry point: parse CLI options and print the requested repos' tags.
if __name__ == '__main__':
    DockerHubTags().main()
|
[
"harisekhon@gmail.com"
] |
harisekhon@gmail.com
|
e3db1b132f2bd7bbf515311c73de37ae081a5770
|
f8580d2c963b6a3c34e918e0743d0a503a9584bd
|
/etg/unfinished/choicebk.py
|
3ed5657f87283336a95d87df56beb9f6b2922919
|
[] |
no_license
|
pypy/wxpython-cffi
|
f59c3faeed26e6a26d0c87f4f659f93e5366af28
|
877b7e6c1b5880517456f1960db370e4bb7f5c90
|
refs/heads/master
| 2023-07-08T21:13:22.765786
| 2016-12-02T22:10:45
| 2016-12-02T22:10:45
| 397,124,697
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,023
|
py
|
#---------------------------------------------------------------------------
# Name: etg/choicebk.py
# Author: Robin Dunn
#
# Created: 18-Jun-2012
# Copyright: (c) 2013 by Total Control Software
# License: wxWindows License
#---------------------------------------------------------------------------
import etgtools
import etgtools.tweaker_tools as tools
# Destination package/module for the generated wrapper code.
PACKAGE = "wx"
MODULE = "_core"
NAME = "choicebk" # Base name of the file to generate to for this script
DOCSTRING = ""
# The classes and/or the basename of the Doxygen XML files to be processed by
# this script.
ITEMS = [ "wxChoicebook",
          ]
#---------------------------------------------------------------------------
def run():
    """Generate the wxChoicebook wrapper code for this etg module."""
    # Parse the XML file(s) building a collection of Extractor objects
    module = etgtools.ModuleDef(PACKAGE, MODULE, NAME, DOCSTRING)
    etgtools.parseDoxyXML(module, ITEMS)
    #-----------------------------------------------------------------
    # Tweak the parsed meta objects in the module object as needed for
    # customizing the generated code and docstrings.
    module.addHeaderCode('#include <wx/choicebk.h>')
    c = module.find('wxChoicebook')
    assert isinstance(c, etgtools.ClassDef)
    # Apply the standard fixups for window and book-control classes.
    tools.fixWindowClass(c)
    tools.fixBookctrlClass(c)
    # Event binders exposed to Python for the choicebook page events.
    module.addPyCode("""\
        EVT_CHOICEBOOK_PAGE_CHANGED = wx.PyEventBinder( wxEVT_CHOICEBOOK_PAGE_CHANGED, 1 )
        EVT_CHOICEBOOK_PAGE_CHANGING = wx.PyEventBinder( wxEVT_CHOICEBOOK_PAGE_CHANGING, 1 )
        # deprecated wxEVT aliases
        wxEVT_COMMAND_CHOICEBOOK_PAGE_CHANGED = wxEVT_CHOICEBOOK_PAGE_CHANGED
        wxEVT_COMMAND_CHOICEBOOK_PAGE_CHANGING = wxEVT_CHOICEBOOK_PAGE_CHANGING
        """)
    #-----------------------------------------------------------------
    tools.doCommonTweaks(module)
    tools.runGenerators(module)
#---------------------------------------------------------------------------
# Allow running this generator standalone.
if __name__ == '__main__':
    run()
|
[
"wayedt@gmail.com"
] |
wayedt@gmail.com
|
81e6205099f506d87d51c23755b296cd247d02f8
|
2cafc4981f85e9a25cceb18af1e936e19268e0ee
|
/scapy_icmp_discovery.py
|
1121df8df3afe58afbfc56b36a3501956c738cca
|
[] |
no_license
|
lapinrepository/ethicalhacking
|
fdd0647bffeb87544ede182eb62544ee922579fd
|
14fac0bee8ca5f58c5499e4e91323e005a5e6c25
|
refs/heads/master
| 2021-10-09T15:14:29.976534
| 2018-12-30T09:30:19
| 2018-12-30T09:30:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
#!/usr/bin/python
import logging
import subprocess
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import threading
screenlock = threading.Semaphore(value=1)
def icmpscan(prefix, addr):
    """Send one ICMP echo to prefix+addr; print a line if the host replies.

    Fixes two defects in the original:
    - the `finally: screenlock.release()` ran even when sr1() raised before
      the semaphore was acquired, inflating the plain Semaphore's counter
      and breaking output serialization;
    - a bare `except:` swallowed everything including KeyboardInterrupt.
    """
    try:
        answer = sr1(IP(dst=prefix + str(addr)) / ICMP(), timeout=1, verbose=0)
    except Exception:
        # Best-effort probe: ignore send/receive errors for this host.
        return
    if answer is None:
        return
    # Serialize prints across the scanner threads.
    with screenlock:
        print("[+] Host " + prefix + str(addr) + " is alive")
# Require exactly one argument: the interface whose /24 will be scanned.
if len(sys.argv) != 2:
    print("Usage scapy_icmp_discovery.py [interface]")
    print("Example: scapy_icmp_discovery.py eth0")
    sys.exit()
interface = str(sys.argv[1])
# Extract the interface's IPv4 address by parsing ifconfig output.
# NOTE(review): shell=True with an argv-derived interface name is a shell
# injection hazard if this script is ever invoked with untrusted input.
ip = subprocess.check_output("ifconfig " + interface + " | grep 'inet' | cut -d ' ' -f 1 | cut -d 'n' -f 2 | cut -d ' ' -f 2", shell=True).strip()
# Keep the first three octets to build the /24 prefix, e.g. "192.168.1.".
prefix = ip.split('.')[0] + '.' + ip.split('.')[1] + '.' + ip.split('.')[2] + '.'
reply_ip = list()
# Probe every host in the /24 concurrently, one thread per address.
for addr in range(0, 254):
    t = threading.Thread(target = icmpscan, args=(prefix, addr))
    t.start()
#for addr in range(0,254):#
#    answer = sr1(IP(dst=prefix + str(addr)) / ICMP(), timeout=1, verbose=0)
#    if answer == None:
#        pass
#    else:
#        reply_ip.append(prefix + str(addr))
#
#for elt in reply_ip:
#    print(elt)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
b67e84a4c5fdfc6733b4df7afb9f05a1515a26f9
|
86d9b76692c9fecd26ca3a5bd358119b2f3697dd
|
/set/change-set.py
|
e3e2f23ea9cd5507d864419915c48b61241c174d
|
[] |
no_license
|
khanhnt99/Pythonbook
|
ec7fa2ff3495ba07e838c9e910aa0758e8f7ae94
|
850e259e6293413e951c77e448d95309dd4f4fad
|
refs/heads/master
| 2022-12-19T09:14:06.193040
| 2020-09-23T10:11:24
| 2020-09-23T10:11:24
| 288,210,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
"""
A set is an unordered collection of elements, so indexing has no meaning.
"""
my_set = {1, 3}
print(my_set)
# add() inserts a single element.
my_set.add(2)
print(my_set)
# update() merges any iterable; duplicates are ignored.
my_set.update([2, 3, 4])
print(my_set)
# update() also accepts several iterables at once.
my_set.update([4, 5], {1, 6, 8})
print(my_set)
|
[
"you@example.com"
] |
you@example.com
|
543aa44ea9776960042b1a781a575c8e48fc9ae6
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stdlib/mmap.pyi
|
8dbec2388838751a229a711f169e2eb9208a238a
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 3,766
|
pyi
|
import sys
from _typeshed import ReadableBuffer, Self
from collections.abc import Iterable, Iterator, Sized
from typing import NoReturn, overload
# Access modes for mmap objects.
ACCESS_DEFAULT: int
ACCESS_READ: int
ACCESS_WRITE: int
ACCESS_COPY: int
ALLOCATIONGRANULARITY: int
# MAP_* / PROT_* flags are POSIX-only (or Linux-only where noted).
if sys.platform == "linux":
    MAP_DENYWRITE: int
    MAP_EXECUTABLE: int
    if sys.version_info >= (3, 10):
        MAP_POPULATE: int
if sys.platform != "win32":
    MAP_ANON: int
    MAP_ANONYMOUS: int
    MAP_PRIVATE: int
    MAP_SHARED: int
    PROT_EXEC: int
    PROT_READ: int
    PROT_WRITE: int
PAGESIZE: int
class mmap(Iterable[int], Sized):
    """Type stub for the mmap.mmap memory-mapped file object."""
    # Windows maps by tag name; POSIX takes flags/prot instead.
    if sys.platform == "win32":
        def __init__(self, fileno: int, length: int, tagname: str | None = ..., access: int = ..., offset: int = ...) -> None: ...
    else:
        def __init__(
            self, fileno: int, length: int, flags: int = ..., prot: int = ..., access: int = ..., offset: int = ...
        ) -> None: ...
    def close(self) -> None: ...
    # flush() returned an int status before 3.8; None afterwards.
    if sys.version_info >= (3, 8):
        def flush(self, offset: int = ..., size: int = ...) -> None: ...
    else:
        def flush(self, offset: int = ..., size: int = ...) -> int: ...
    def move(self, dest: int, src: int, count: int) -> None: ...
    def read_byte(self) -> int: ...
    def readline(self) -> bytes: ...
    def resize(self, newsize: int) -> None: ...
    def seek(self, pos: int, whence: int = ...) -> None: ...
    def size(self) -> int: ...
    def tell(self) -> int: ...
    def write_byte(self, byte: int) -> None: ...
    def __len__(self) -> int: ...
    closed: bool
    # madvise() was added in 3.8 and is POSIX-only.
    if sys.version_info >= (3, 8) and sys.platform != "win32":
        def madvise(self, option: int, start: int = ..., length: int = ...) -> None: ...
    def find(self, sub: ReadableBuffer, start: int = ..., stop: int = ...) -> int: ...
    def rfind(self, sub: ReadableBuffer, start: int = ..., stop: int = ...) -> int: ...
    def read(self, n: int | None = ...) -> bytes: ...
    def write(self, bytes: ReadableBuffer) -> int: ...
    # Indexing with an int yields a byte value; slicing yields bytes.
    @overload
    def __getitem__(self, __index: int) -> int: ...
    @overload
    def __getitem__(self, __index: slice) -> bytes: ...
    def __delitem__(self, __index: int | slice) -> NoReturn: ...
    @overload
    def __setitem__(self, __index: int, __object: int) -> None: ...
    @overload
    def __setitem__(self, __index: slice, __object: ReadableBuffer) -> None: ...
    # Doesn't actually exist, but the object is actually iterable because it has __getitem__ and
    # __len__, so we claim that there is also an __iter__ to help type checkers.
    def __iter__(self) -> Iterator[int]: ...
    def __enter__(self: Self) -> Self: ...
    def __exit__(self, *args: object) -> None: ...
# madvise() advice constants, available on POSIX since Python 3.8.
if sys.version_info >= (3, 8) and sys.platform != "win32":
    MADV_NORMAL: int
    MADV_RANDOM: int
    MADV_SEQUENTIAL: int
    MADV_WILLNEED: int
    MADV_DONTNEED: int
    MADV_FREE: int
    if sys.platform == "linux":
        MADV_REMOVE: int
        MADV_DONTFORK: int
        MADV_DOFORK: int
        MADV_HWPOISON: int
        MADV_MERGEABLE: int
        MADV_UNMERGEABLE: int
        # Seems like this constant is not defined in glibc.
        # See https://github.com/python/typeshed/pull/5360 for details
        # MADV_SOFT_OFFLINE: int
        MADV_HUGEPAGE: int
        MADV_NOHUGEPAGE: int
        MADV_DONTDUMP: int
        MADV_DODUMP: int
    # This Values are defined for FreeBSD but type checkers do not support conditions for these
    if sys.platform != "linux" and sys.platform != "darwin":
        MADV_NOSYNC: int
        MADV_AUTOSYNC: int
        MADV_NOCORE: int
        MADV_CORE: int
        MADV_PROTECT: int
    if sys.version_info >= (3, 10) and sys.platform == "darwin":
        MADV_FREE_REUSABLE: int
        MADV_FREE_REUSE: int
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
ba47a3b2b1b54854c914b8d55705e91c8cecca60
|
1a12cc54ac1b2934cddf12eb733d5a8c25a0bf6d
|
/interviewbit-trees-sum-root-to-leaf-numbers.py
|
f36de034600560e22fc2107d9968d6e6512945bb
|
[] |
no_license
|
sbairishal/CodePath-Alumni-Professional-Interview-Prep-Course
|
5813b95cb4c23551d06e74a3aaec6ae7815923ae
|
71d8f1e7a456a0f97655e7be90fa17fe18ceaf95
|
refs/heads/master
| 2020-10-01T01:53:29.246652
| 2018-02-21T06:05:57
| 2018-02-21T06:05:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param A : root node of tree
    # @return an integer
    def do_the_summing(self, node, carry=0):
        """Return the sum of all root-to-leaf numbers under `node`.

        `carry` is the number formed by the digits on the path above
        `node`.  Unlike the original implementation, this does NOT mutate
        the caller's tree (the original wrote accumulated values back into
        node.val, corrupting the input for any subsequent traversal).
        """
        value = carry * 10 + node.val
        if not node.left and not node.right:
            # Leaf: the path number is complete.
            return value
        total = 0
        if node.left:
            total += self.do_the_summing(node.left, value)
        if node.right:
            total += self.do_the_summing(node.right, value)
        return total

    def sumNumbers(self, A):
        """Sum of root-to-leaf numbers modulo 1003 (interviewbit spec)."""
        return self.do_the_summing(A) % 1003
|
[
"tachyon@tachyonlabs.com"
] |
tachyon@tachyonlabs.com
|
9a8428b2135de28dbcd0c266e17fd55c71c38ea9
|
1e67e211123f694bd807e1efb2a85a8cbdae2882
|
/server/printer/urls.py
|
d55cad5aa7bfa09f168e28197e0ebb4d088820ef
|
[
"MIT"
] |
permissive
|
coll-gate/collgate
|
7590ec8dbc7cdb310d0c8452fd6c6e76cf02985d
|
8c2ff1c59adda2bf318040f588c05263317a2812
|
refs/heads/master
| 2021-01-20T03:00:35.617958
| 2019-03-01T16:46:49
| 2019-03-01T16:46:49
| 89,474,611
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
# -*- coding: utf-8; -*-
#
# @file urls.py
# @brief collgate
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2018-09-20
# @copyright Copyright (c) 2018 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details coll-gate printer module url entry point.
from django.conf.urls import include, url
# The printer module exposes no URL endpoints yet.
urlpatterns = [
]
|
[
"frederic.scherma@gmail.com"
] |
frederic.scherma@gmail.com
|
aca98bf3d119fcd3caacbf7ae0055107e799ef81
|
e97c25c2e68fbe99b138c0d8c9fff6f17e8860de
|
/Architecture1/Multiple/Main2.py
|
63c27c81615a3b67b55f3f77ac87c5cc46fd80ac
|
[] |
no_license
|
brycexu/Binarized_Neural_Networks_with_Learned_Pooling-Strategy
|
24ab124316458c775011e525d038440f61eccfb1
|
4bdf8e9cdd1f7e17a256bf6efddc874e88e8d4a4
|
refs/heads/master
| 2020-07-25T14:29:05.228684
| 2019-12-23T11:17:52
| 2019-12-23T11:17:52
| 208,323,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,688
|
py
|
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import Model as model
from torch.autograd import Variable
import time
from Logger import Logger
import numpy as np
import matplotlib.pyplot as plt
# Select GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
best_acc = 0
start_epoch = 0
logger = Logger('./logs2')
# Per-epoch histories, filled by train()/test() and plotted at the end.
Train_Loss = []
Test_Loss = []
Train_Accuracy = []
Test_Accuracy = []
# Dataset
print('==> Preparing data..')
# Standard CIFAR-10 augmentation and per-channel normalization.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
trainset = torchvision.datasets.CIFAR10(root='/export/livia/data/xxu/CIFAR10', train=True, download=False, transform=transform_train)
# /home/AN96120/brycexu/CIFAR10
# /export/livia/data/xxu/CIFAR10
# Hold out 10k of the 50k training images for validation.
trainset, valset = torch.utils.data.random_split(trainset, [40000, 10000])
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0)
valloader = torch.utils.data.DataLoader(valset, batch_size=128, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='/export/livia/data/xxu/CIFAR10', train=False, download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=0)
# Model
print('==> Building model..')
model = model.MutipleBNN()
model = nn.DataParallel(model)
model = model.to(device)
criterion = nn.CrossEntropyLoss(reduction='mean')
# SGD trains the binarized weights; Adam trains the pooling alphas in val().
optimizer1 = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
optimizer2 = torch.optim.Adam(model.parameters(), lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0003)
def update_lr(optimizer):
    """Decay every parameter group's learning rate by a factor of 10."""
    for group in optimizer.param_groups:
        group['lr'] *= 0.1
# Training
def train(epoch):
    """Run one training epoch on the weight parameters (alphas frozen)."""
    global Train_Loss, Train_Accuracy
    print('\nEpoch: %d' % epoch)
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    # Freeze the learned-pooling alpha parameters; train everything else.
    for name, param in model.named_parameters():
        if name == 'module.convolutions.0.alpha' or name == 'module.convolutions.1.alpha' \
                or name == 'module.convolutions.2.alpha' or name == 'module.convolutions.3.alpha' \
                or name == 'module.convolutions.4.alpha' or name == 'module.convolutions.5.alpha' \
                or name == 'module.convolutions.6.alpha' or name == 'module.convolutions.7.alpha' \
                or name == 'module.convolutions.8.alpha' or name == 'module.convolutions.9.alpha':
            param.requires_grad = False
        else:
            param.requires_grad = True
    start = time.time()
    for batch_index, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        inputs = Variable(inputs)
        targets = Variable(targets)
        # Forward
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        # Backward and Optimize
        optimizer1.zero_grad()
        loss.backward()
        # Restore the real-valued weights (stashed in p.org — presumably by
        # the model's binarization step) before the optimizer update.
        for p in list(model.parameters()):
            if hasattr(p, 'org'):
                p.data.copy_(p.org)
        optimizer1.step()
        # Clamp the updated real-valued weights to [-1, 1] and stash them
        # back, as is standard for binarized-network training.
        for p in list(model.parameters()):
            if hasattr(p, 'org'):
                p.org.copy_(p.data.clamp_(-1, 1))
        # Results
        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
    # Average over the number of train batches (40000 samples / batch 128).
    train_loss = train_loss / (40000 / 128)
    end = time.time()
    print('Training Time: %.1f' % (end - start))
    print('Loss: %.3f | Accuracy: %.3f' % (train_loss, 100. * correct / total))
    # Plot the model
    info = {'train_loss': train_loss}
    for tag, value in info.items():
        logger.scalar_summary(tag, value, epoch + 1)
    Train_Loss.append(train_loss)
    Train_Accuracy.append(100. * correct / total)
    # Step-decay the SGD learning rate at fixed epochs.
    if epoch == 200 or epoch == 400 or epoch == 600:
        update_lr(optimizer1)
def val(epoch):
    """Run one pass over the validation split, training ONLY the pooling
    alpha parameters with the Adam optimizer (weights frozen)."""
    model.train()
    val_loss = 0
    correct = 0
    total = 0
    # Inverse of train(): unfreeze the alphas, freeze everything else.
    for name, param in model.named_parameters():
        if name == 'module.convolutions.0.alpha' or name == 'module.convolutions.1.alpha' \
                or name == 'module.convolutions.2.alpha' or name == 'module.convolutions.3.alpha' \
                or name == 'module.convolutions.4.alpha' or name == 'module.convolutions.5.alpha' \
                or name == 'module.convolutions.6.alpha' or name == 'module.convolutions.7.alpha' \
                or name == 'module.convolutions.8.alpha' or name == 'module.convolutions.9.alpha':
            param.requires_grad = True
        else:
            param.requires_grad = False
    start = time.time()
    for batch_index, (inputs, targets) in enumerate(valloader):
        inputs, targets = inputs.to(device), targets.to(device)
        inputs = Variable(inputs)
        targets = Variable(targets)
        # Forward
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        # Backward and Optimize
        optimizer2.zero_grad()
        loss.backward()
        # Restore real-valued weights before stepping.
        # NOTE(review): unlike train(), there is no clamp/copy-back after
        # optimizer2.step() — confirm this asymmetry is intentional.
        for p in list(model.parameters()):
            if hasattr(p, 'org'):
                p.data.copy_(p.org)
        optimizer2.step()
        # Results
        val_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
    end = time.time()
    print('Validating Time: %.1f' % (end - start))
    print('Accuracy: %.3f' % (100. * correct / total))
def test(epoch):
    """Evaluate on the test set; track best accuracy and log/plot results."""
    global best_acc, Test_Loss, Test_Accuracy
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    start = time.time()
    with torch.no_grad():
        for batch_index, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            inputs = Variable(inputs)
            targets = Variable(targets)
            # Forward
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            # Results
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        # Average over the number of test batches (10000 samples / batch 100).
        test_loss = test_loss / (10000 / 100)
        end = time.time()
        # NOTE(review): '%1.f' looks like a typo for '%.1f' (prints no
        # decimals) — left as-is since it only affects log formatting.
        print('Testing Time: %1.f' % (end - start))
        print('Loss: %.3f | Accuracy: %.3f' % (test_loss, 100. * correct / total))
        # Save the model
        acc = 100. * correct / total
        if acc > best_acc:
            best_acc = acc
        # Plot the model
        info = {'test_loss': test_loss, 'test_accuracy': acc}
        for tag, value in info.items():
            logger.scalar_summary(tag, value, epoch + 1)
        Test_Loss.append(test_loss)
        Test_Accuracy.append(100. * correct / total)
# Alternate weight training, alpha training, and evaluation each epoch.
epochs = 800
for epoch in range(start_epoch, start_epoch + epochs):
    train(epoch)
    val(epoch)
    test(epoch)
# Plot loss (top) and accuracy (bottom) curves and save them to disk.
x1 = np.arange(0, epochs)
y10 = Train_Loss
y11 = Test_Loss
x2 = np.arange(0, epochs)
y20 = Train_Accuracy
y21 = Test_Accuracy
plt.subplot(2, 1, 1)
plt.title('Arch1_Multiple')
plt.plot(x1, y10, 'o-', color='b', label='Train_Loss')
plt.plot(x1, y11, 'o-', color='g', label='Test_Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(x2, y20, 'o-', color='k', label='Train_Acc')
plt.plot(x2, y21, 'o-', color='r', label='Test_Acc')
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend()
plt.tight_layout()
plt.savefig("Result2.jpg")
plt.show()
|
[
"XuXianda@Bryce-Xus-Mac-Pro.local"
] |
XuXianda@Bryce-Xus-Mac-Pro.local
|
556f566fcc8294adf4fa2c33c29d7178f2a6f529
|
d0600d512b805c16269faf8e398ccd61aa04a724
|
/supervised_learning/regularization/logisteic_regression_with_regularization.py
|
125be52f46c5509ab55c1fbbf955002d823e9c74
|
[] |
no_license
|
iamMHZ/My-ML-and-DL-experiments
|
1ef16b983f46d8440a15019c4cc597cc98f3a0ac
|
4b2d1b1e3a9432c0a88796e9c1c489e42509a2e4
|
refs/heads/master
| 2021-07-11T07:45:12.235042
| 2021-03-12T09:37:37
| 2021-03-12T09:37:37
| 237,808,688
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,732
|
py
|
"""
implementation of Logistic regression with L2 regularization
"""
import matplotlib.pyplot as plt
import numpy as np
def load_data():
    """Load the Haberman survival dataset and return (features, labels).

    Labels in the raw file are 1 (survived) and 2 (died); the 2s are
    remapped to 0 so the targets suit binary logistic regression.
    Dataset: https://archive.ics.uci.edu/ml/datasets/Haberman's+Survival
    """
    raw = np.genfromtxt('../../utils/datasets/supervised dataset/haberman.txt', delimiter=',')
    features = raw[:, :2]
    labels = raw[:, 3]
    labels[labels == 2] = 0
    return features, labels
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^-x)."""
    return np.reciprocal(1.0 + np.exp(-x))
def compute_loss(y_true, y_pred, weights, landa):
    """Summed binary cross-entropy with L2 (ridge) regularization.

    Args:
        y_true: ground-truth labels, shape (n_samples, 1).
        y_pred: predicted probabilities in (0, 1), same shape as y_true.
        weights: weight column vector (n_features, 1); weights[0] is the
            bias term (the callers use the leading-ones "bias trick").
        landa: L2 regularization strength (lambda).

    Returns:
        Total loss as a size-1 ndarray.
    """
    # cross-entropy summed over all samples
    epoch_loss = (-y_true * np.log(y_pred)) - ((1 - y_true) * np.log(1 - y_pred))
    epoch_loss = np.sum(epoch_loss)
    # add L2 regularization: 0.5 * lambda * sum(W^2)  (W.T @ W == sum(W^2))
    epoch_loss += 0.5 * landa * (np.matmul(weights.T, weights)[0])
    # No regularization on the bias, so cancel its contribution.
    # BUG FIX: the bias term added above is 0.5 * landa * w0^2, so that
    # exact amount must be subtracted (previously subtracted w0^2, which
    # was inconsistent with compute_gradients removing landa * w0).
    epoch_loss -= 0.5 * landa * weights[0] ** 2
    # Replace NaN with zero and infinity with a large finite number:
    # -log(x) and -log(1-x) can return inf/NaN for saturated predictions.
    epoch_loss = np.nan_to_num(epoch_loss)
    return epoch_loss
def compute_gradients(X, y_true, y_pred, weights, landa):
    """Gradient of the L2-regularized cross-entropy w.r.t. the weights.

    The bias weight (index 0) is excluded from regularization.
    """
    residual = y_pred - y_true
    # data term: X^T (y_pred - y_true)
    grads = np.matmul(X.T, residual)
    # ridge penalty on every weight...
    grads += landa * weights
    # ...except the bias, whose penalty is removed again
    grads[0] -= landa * weights[0]
    return grads
def fit(X, y, learning_rate=0.0001, epochs=100, landa=0.01):
    """Train logistic regression by full-batch gradient descent, then
    plot the training-loss curve.

    Args:
        X: design matrix (n_samples, n_features); callers prepend an
           all-ones bias column ("bias trick"), so weights[0] is the bias.
        y: labels of shape (n_samples, 1).
        learning_rate: gradient-descent step size.
        epochs: number of full-batch iterations.
        landa: L2 regularization strength (0 disables regularization).
    """
    # initialize the weights randomly in [0, 1)
    weights = np.random.random((X.shape[1], 1))
    losses = []
    for i in range(epochs):
        # make a prediction
        y_pred = sigmoid(np.matmul(X, weights))
        epoch_loss = compute_loss(y, y_pred, weights, landa)
        # update the weights (gradient descent step)
        gradients = compute_gradients(X, y, y_pred, weights, landa)
        weights += -learning_rate * gradients
        print(f'Epoch = {i} , Loss = {epoch_loss}')
        losses.append(epoch_loss)
    # plot the training loss
    plt.plot(np.arange(0, epochs), losses)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.show()
    print('Weights: ' + str(weights))
if __name__ == '__main__':
    # Demo entry point: train on a synthetic, linearly separable blob
    # dataset instead of the Haberman file (left commented out).
    #X, y = load_data()
    from sklearn.datasets import make_blobs
    X, y = make_blobs(n_samples=1000, n_features=2, centers=2, random_state=14)
    # add a column for the bias (bias trick) ==> everything is vectorized
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in
    # 1.24; it was simply an alias of the builtin float used here.
    ones_column = np.ones((X.shape[0], 1), float)
    X = np.append(ones_column, X, axis=1)
    y = y.reshape(y.shape[0], 1)
    fit(X, y, learning_rate=0.0001, epochs=100, landa=0)
|
[
"iammhz77@gmail.com"
] |
iammhz77@gmail.com
|
7e471b82d7e87330b02b3ceec2b761f1e46f40d2
|
af101b467134e10270bb72d02f41f07daa7f57d8
|
/mmagic/models/editors/eg3d/__init__.py
|
26ae196273404bdf947578e49c4fcc92287f06b4
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmagic
|
4d864853417db300de4dfe7e83ce380fd1557a23
|
a382f143c0fd20d227e1e5524831ba26a568190d
|
refs/heads/main
| 2023-08-31T14:40:24.936423
| 2023-08-30T05:05:56
| 2023-08-30T05:05:56
| 203,999,962
| 1,370
| 192
|
Apache-2.0
| 2023-09-14T11:39:18
| 2019-08-23T13:04:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .camera import GaussianCamera, UniformCamera
from .dual_discriminator import DualDiscriminator
from .eg3d import EG3D
from .eg3d_generator import TriplaneGenerator

# Public names re-exported by the eg3d editors subpackage.
__all__ = [
    'DualDiscriminator', 'TriplaneGenerator', 'EG3D', 'UniformCamera',
    'GaussianCamera'
]
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
2da4f79b8bd8154cc469bc70d68325c0ad501612
|
eb02ead830632738f9723ba14c495e50a3bbf1a2
|
/rohdeschwarz/bin/osp.py
|
65a6ed5bc7f32ee04cf9a3fc583e3eb04f82871c
|
[] |
no_license
|
hgrf/rohdeschwarz
|
53461fe2f38156448aa6d2eecdee93c0ff3c77c2
|
20ccf128f8bb1a35b83e803e7d5e742046548ee8
|
refs/heads/master
| 2023-03-19T15:57:49.175428
| 2019-08-23T14:52:57
| 2019-08-23T14:52:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,678
|
py
|
from rohdeschwarz.instruments.ospswitch import OspSwitch
from ruamel import yaml
import argparse
import code
import datetime
import os
import sys
def main():
    """CLI entry point: connect to a Rohde & Schwarz OSP switch matrix
    described by a YAML driver file, then drop into an interactive
    Python shell with `osp` bound to the instrument.

    Exits with status 0 on argument errors or unreadable driver files;
    raises on unexpected connection failures.
    """
    parser = argparse.ArgumentParser(description='Connect to a Rohde & Schwarz OSP Switch')
    parser.add_argument('--visa', metavar='bus', default=False,
                        help="use VISA with 'bus'")
    parser.add_argument('--address', default='127.0.0.1',
                        help='instrument address')
    parser.add_argument('--port', default=5025, type=int,
                        help='port (TCP only)')
    parser.add_argument('--timeout', default=5000, type=int,
                        help='default instrument timeout (ms)')
    parser.add_argument('--driver')
    parser.add_argument('--log', default='',
                        help='SCPI command log filename')
    parser.add_argument('--log-to-stdout', action='store_true',
                        help='print all SCPI IO to stdout')
    args = parser.parse_args()
    if args.log and args.log_to_stdout:
        print('error: cannot use both --log and --log-to-stdout')
        parser.print_help()
        # BUG FIX: previously fell through and kept running with
        # conflicting options; bail out like the other argument errors.
        sys.exit(0)
    if not args.driver:
        print('Switch matrix driver is required')
        parser.print_help()
        sys.exit(0)
    switch_dict = {}
    try:
        with open(args.driver, 'r') as f:
            switch_dict = yaml.safe_load(f.read())
        assert switch_dict
    except:
        print('Could not read driver file')
        sys.exit(0)
    osp = OspSwitch(switch_dict)
    try:
        if args.visa:
            osp.open(args.visa, args.address)
        else:
            osp.open_tcp(args.address, args.port)
        if args.timeout:
            osp.timeout_ms = args.timeout
        if osp.connected():
            print("connected: {0}".format(osp.id_string()))
            if args.log:
                osp.open_log(args.log)
                osp.log.write('{0}\n'.format(datetime.datetime.now()))
                osp.log.write('--------------------------\n\n')
                osp.print_info()
            elif args.log_to_stdout:
                # BUG FIX: was `vna.log = sys.stdout` -- `vna` is not
                # defined anywhere in this module and raised NameError.
                osp.log = sys.stdout
            code.interact('', local=locals())
        else:
            print('Could not connect to instrument\n')
            parser.print_help()
    except FileNotFoundError:
        print('Could not find driver')
        parser.print_help()
    except SystemExit:
        pass
    except:
        # unexpected failure: surface it (the old unreachable
        # parser.print_help() after this raise has been removed)
        raise Exception('Error connecting to instrument')
    finally:
        if osp.log:
            osp.close_log()
        if osp.connected():
            osp.close()
if __name__ == "__main__":
    # Run the CLI, then exit with a success status code.
    main()
    sys.exit(0)
|
[
"nick.lalic@gmail.com"
] |
nick.lalic@gmail.com
|
fa00adef42307ed4652d9211eee22ed83e31fabb
|
adb295bf248ded84d2c126d73c58b570af440dc6
|
/markers/requires.py
|
20d9b626d21fb44e65878609473a70cd685a19f6
|
[] |
no_license
|
sshveta/cfme_tests
|
eaeaf0076e87dd6c2c960887b242cb435cab5151
|
51bb86fda7d897e90444a6a0380a5aa2c61be6ff
|
refs/heads/master
| 2021-03-30T22:30:12.476326
| 2017-04-26T22:47:25
| 2017-04-26T22:47:25
| 17,754,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,064
|
py
|
"""requires_test(test_name_or_nodeid): Mark a test as requiring another test
If another test is required to have run and passed before a suite of tests has
any hope of succeeding, such as a smoke test, apply this mark to those tests.
It takes a test name as the only positional argument. In the event that the
test name is ambiguous, a full py.test nodeid can be used. A test's nodeid can
be found by inspecting the request.node.nodeid attribute inside the required
test item.
"""
import pytest
_no_mark_arg_err = '%s mark required test name or nodeid as first argument'
def pytest_configure(config):
    # Register this module's docstring as the `requires_test` marker
    # description so it shows up under `pytest --markers`.
    config.addinivalue_line("markers", __doc__)
def _find_test_in_reports(test_id, reports):
# nodeids end with the test name, so the description of this mark
# oversimplifies things a little bit. The actual check for a test
# match is that any preceding test nodeid ends with the arg passed
# to the mark, so we can easily match the test name, test nodeid, and
# anything in between.
return any([report.nodeid.endswith(test_id) for report in reports])
def pytest_runtest_setup(item):
    """Skip ``item`` unless the test named by its ``requires_test``
    mark has already run and passed in this session."""
    mark = 'requires_test'
    if mark not in item.keywords:
        # mark wasn't applied to this item; nothing to enforce
        return
    try:
        test_id = item.keywords[mark].args[0]
    except IndexError:
        # mark invoked without its mandatory test-name argument
        raise Exception(_no_mark_arg_err % mark)
    reporter = item.config.pluginmanager.getplugin('terminalreporter')
    stats = reporter.stats
    if _find_test_in_reports(test_id, stats.get('passed', [])):
        # required test already passed; let this one run
        return
    if _find_test_in_reports(test_id, stats.get('failed', [])):
        error_verb = 'failed'
    elif _find_test_in_reports(test_id, stats.get('skipped', [])):
        error_verb = 'was skipped'
    else:
        error_verb = 'not yet run or does not exist'
    pytest.skip('required test %s %s' % (test_id, error_verb))
|
[
"sean.myers@redhat.com"
] |
sean.myers@redhat.com
|
519dadf3442f8806b441942ada5df14466a94155
|
8b9e9de996cedd31561c14238fe655c202692c39
|
/tree/leetcode_Implement_Trie_Prefix_Tree.py
|
3e7a8a4a2d448deaec1eb29454d24f582767a17c
|
[] |
no_license
|
monkeylyf/interviewjam
|
0049bc1d79e6ae88ca6d746b05d07b9e65bc9983
|
33c623f226981942780751554f0593f2c71cf458
|
refs/heads/master
| 2021-07-20T18:25:37.537856
| 2021-02-19T03:26:16
| 2021-02-19T03:26:16
| 6,741,986
| 59
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
"""Implement trie prefix tree
leetcode
Implement a trie with insert, search, and startsWith methods.
"""
class TrieNode(object):
    """One trie node: a character value, an end-of-word flag, and a
    mapping from child characters to child nodes."""

    __slots__ = ('val', 'end', 'children')

    def __init__(self):
        """Create an empty node: no value, no children, flag cleared."""
        self.val = None
        self.end = False
        self.children = {}
class Trie(object):
    """Prefix tree supporting insert, exact-word search, and prefix
    queries (LeetCode 208: Implement Trie)."""

    def __init__(self):
        """Create a trie whose root represents the empty string."""
        self.root = TrieNode()
        self.root.val = ''

    def insert(self, word):
        """
        Inserts a word into the trie, creating nodes as needed.
        :type word: str
        :rtype: void
        """
        node = self.root
        for char in word:
            child = node.children.get(char)
            if child is None:
                child = TrieNode()
                child.val = char
                node.children[char] = child
            node = child
        node.end = True

    def _end_node(self, string):
        """Walk ``string`` from the root and return the final node.

        Raises KeyError when the path does not exist.
        :param string: str
        """
        node = self.root
        for char in string:
            node = node.children[char]
        return node

    def search(self, word):
        """Returns True iff ``word`` was inserted as a complete word.
        :type word: str
        :rtype: bool
        """
        try:
            return self._end_node(word).end
        except KeyError:
            return False

    def startsWith(self, prefix):
        """Returns True iff some inserted word starts with ``prefix``.
        :type prefix: str
        :rtype: bool
        """
        try:
            self._end_node(prefix)
        except KeyError:
            return False
        return True
def main():
    # Smoke tests: exact-word search vs. prefix matching, and inserting
    # words that share prefixes must not corrupt existing entries.
    trie = Trie()
    trie.insert("somestring")
    assert trie.search("somestring")
    assert not trie.search("some")
    assert trie.startsWith("some")
    trie.insert('app')
    assert trie.search("app")
    trie.insert('apps')
    assert trie.search("app")
    trie.insert('apple')
    assert trie.search("app")


if __name__ == '__main__':
    main()
|
[
"laituan1986@gmail.com"
] |
laituan1986@gmail.com
|
7d6fe4baf49c9678097573db683c0a29bb674790
|
d2616d89ae8d228ecb7c122f76e7754628674d3c
|
/CircuitPython_Libraries/adafruit-circuitpython-bundle-4.x-mpy-20200114/examples/lsm6dsox_simpletest.py
|
68328396384a30b59583e3108c8057b0163306ae
|
[] |
no_license
|
simsoon27/Microcontrollers
|
f95761ca081eefc2913068712dd4609bb02f01f7
|
3615ccefe4e649560d26b0a937dd583008dfee54
|
refs/heads/master
| 2023-06-18T02:11:35.016222
| 2021-07-16T18:18:35
| 2021-07-16T18:18:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
import time
import board
import busio
import adafruit_lsm6dsox
# Open the board's default I2C bus and attach the LSM6DSOX IMU driver.
i2c = busio.I2C(board.SCL, board.SDA)
sox = adafruit_lsm6dsox.LSM6DSOX(i2c)
# Poll forever, printing accelerometer (m/s^2) and gyroscope
# (degrees/s) readings twice per second.
while True:
    print("Acceleration: X:%.2f, Y: %.2f, Z: %.2f m/s^2"%(sox.acceleration))
    print("Gyro X:%.2f, Y: %.2f, Z: %.2f degrees/s"%(sox.gyro))
    print("")
    time.sleep(0.5)
|
[
"cmontalvo251@gmail.com"
] |
cmontalvo251@gmail.com
|
8cb52db936784a8b9eacb4f885340210e48e38ce
|
8e954507f612cb375dc55ed7f90896dea131af1b
|
/test/SConsGnu/GVars/GVarDecls/sconstest-gvardecls3.py
|
2526ad653d3af0710efe01d263a881bd21f9968b
|
[
"MIT"
] |
permissive
|
ptomulik/scons-gnu-build
|
8c97ab397b67f58713e95c341608b91fb9c68e00
|
9c46908eed50679d7aaaaf472e324c97545ac837
|
refs/heads/master
| 2021-01-18T20:30:00.986201
| 2015-10-19T23:48:50
| 2015-10-20T10:58:57
| 6,583,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,030
|
py
|
#
# Copyright (c) 2012-2014 by Pawel Tomulik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
__docformat__ = "restructuredText"
"""
Tests declaring variables with SConsGnu.GVar.GVarDecls() factory method.
"""
import TestSCons
##############################################################################
# GVarDecls(): Test 3 - using bare arguments instead of instances of _GVarDecl
##############################################################################
test = TestSCons.TestSCons()
test.dir_fixture('../../../../SConsGnu', 'site_scons/SConsGnu')
test.write('SConstruct',
"""
# SConstruct
from SConsGnu.GVars import GVarDecls, ENV, VAR, OPT
x = ( {'env_x' : 'env x default'}, ('var_x', None, 'var x default'), ('-x', {'dest' : 'opt_x', 'default' : 'opt x default'}) )
y = [ {'env_y' : 'env y default'}, ('var_y', None, 'var y default'), ('-y', {'dest' : 'opt_y', 'default' : 'opt y default'}) ]
list = []
list.append( GVarDecls(x = x, y = y) )
list.append( GVarDecls({'x' : x, 'y' : y}) )
i = 0
for v in list:
for c in ['x', 'y']:
print "GVARS[%d][%r].has_xxx_decl(ENV): %r" % (i, c, v[c].has_xxx_decl(ENV))
print "GVARS[%d][%r].has_xxx_decl(VAR): %r" % (i, c, v[c].has_xxx_decl(VAR))
print "GVARS[%d][%r].has_xxx_decl(OPT): %r" % (i, c, v[c].has_xxx_decl(OPT))
print "GVARS[%d][%r].get_xxx_key(ENV): %r" % (i, c, v[c].get_xxx_key(ENV))
print "GVARS[%d][%r].get_xxx_key(VAR): %r" % (i, c, v[c].get_xxx_key(VAR))
print "GVARS[%d][%r].get_xxx_key(OPT): %r" % (i, c, v[c].get_xxx_key(OPT))
print "GVARS[%d][%r].get_xxx_default(ENV): %r" % (i, c, v[c].get_xxx_default(ENV))
print "GVARS[%d][%r].get_xxx_default(VAR): %r" % (i, c, v[c].get_xxx_default(VAR))
print "GVARS[%d][%r].get_xxx_default(OPT): %r" % (i, c, v[c].get_xxx_default(OPT))
i += 1
""")
test.run()
lines = [
"GVARS[0]['x'].has_xxx_decl(ENV): True",
"GVARS[0]['x'].has_xxx_decl(VAR): True",
"GVARS[0]['x'].has_xxx_decl(OPT): True",
"GVARS[0]['x'].get_xxx_key(ENV): 'env_x'",
"GVARS[0]['x'].get_xxx_key(VAR): 'var_x'",
"GVARS[0]['x'].get_xxx_key(OPT): 'opt_x'",
"GVARS[0]['x'].get_xxx_default(ENV): 'env x default'",
"GVARS[0]['x'].get_xxx_default(VAR): 'var x default'",
"GVARS[0]['x'].get_xxx_default(OPT): 'opt x default'",
"GVARS[0]['y'].has_xxx_decl(ENV): True",
"GVARS[0]['y'].has_xxx_decl(VAR): True",
"GVARS[0]['y'].has_xxx_decl(OPT): True",
"GVARS[0]['y'].get_xxx_key(ENV): 'env_y'",
"GVARS[0]['y'].get_xxx_key(VAR): 'var_y'",
"GVARS[0]['y'].get_xxx_key(OPT): 'opt_y'",
"GVARS[0]['y'].get_xxx_default(ENV): 'env y default'",
"GVARS[0]['y'].get_xxx_default(VAR): 'var y default'",
"GVARS[0]['y'].get_xxx_default(OPT): 'opt y default'",
"GVARS[1]['x'].has_xxx_decl(ENV): True",
"GVARS[1]['x'].has_xxx_decl(VAR): True",
"GVARS[1]['x'].has_xxx_decl(OPT): True",
"GVARS[1]['x'].get_xxx_key(ENV): 'env_x'",
"GVARS[1]['x'].get_xxx_key(VAR): 'var_x'",
"GVARS[1]['x'].get_xxx_key(OPT): 'opt_x'",
"GVARS[1]['x'].get_xxx_default(ENV): 'env x default'",
"GVARS[1]['x'].get_xxx_default(VAR): 'var x default'",
"GVARS[1]['x'].get_xxx_default(OPT): 'opt x default'",
"GVARS[1]['y'].has_xxx_decl(ENV): True",
"GVARS[1]['y'].has_xxx_decl(VAR): True",
"GVARS[1]['y'].has_xxx_decl(OPT): True",
"GVARS[1]['y'].get_xxx_key(ENV): 'env_y'",
"GVARS[1]['y'].get_xxx_key(VAR): 'var_y'",
"GVARS[1]['y'].get_xxx_key(OPT): 'opt_y'",
"GVARS[1]['y'].get_xxx_default(ENV): 'env y default'",
"GVARS[1]['y'].get_xxx_default(VAR): 'var y default'",
"GVARS[1]['y'].get_xxx_default(OPT): 'opt y default'",
]
test.must_contain_all_lines(test.stdout(), lines)
test.pass_test()
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
|
[
"ptomulik@meil.pw.edu.pl"
] |
ptomulik@meil.pw.edu.pl
|
237d6cee2a3d139ef9b4113e6049d2bd85c8819c
|
9510ff6d4df1a21cbd7abe66301f890ccd519714
|
/captain/__init__.py
|
c7f41218ea4b7ca1e9e1b8be6d7ea467728c0dd1
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
stmkza/arposandra
|
37ea85d62411ba2fe9a14b664672eb081efde451
|
e76f9886f98b3e5068b5f135be398c9e77bd3b65
|
refs/heads/master
| 2021-04-04T23:10:51.812090
| 2020-02-19T03:57:13
| 2020-02-19T04:00:54
| 248,500,182
| 0
| 0
|
NOASSERTION
| 2020-03-19T12:42:00
| 2020-03-19T12:41:59
| null |
UTF-8
|
Python
| false
| false
| 3,525
|
py
|
import os
import sys
import configparser
import gettext
import logging
import json
from collections import namedtuple
from tornado.web import Application
from tornado import locale
from . import database
from . import dispatch
from . import pages
from . import card_page
from . import pageutils
from . import tlinject
from . import news
from . import card_tracking
from . import event_tracker
from . import dict_aggregator
import libcard2
def readonly_app_path(*p):
    """Join path components *p* onto this package's directory, giving a
    read-only path to bundled assets (templates, catalogs, static files)."""
    package_dir = os.path.dirname(__file__)
    return os.path.join(package_dir, *p)
def create_runtime_info():
    """Build an immutable record describing this deployment: git
    revision and host id (from the environment) plus the Python version."""
    runtime_info_t = namedtuple("runtime_info_t", ("app_revision", "host_id", "python_version"))
    return runtime_info_t(
        app_revision=os.environ.get("AS_GIT_REVISION"),
        host_id=os.environ.get("AS_HOST_ID"),
        python_version=sys.version,
    )
class DictionaryAccessProtocolImp(gettext.GNUTranslations):
    """GNUTranslations variant whose lookups return None for missing
    keys instead of echoing the key back (gettext's default)."""

    class Fallback(object):
        # Terminal fallback in the chain: report every key as missing.
        @classmethod
        def gettext(cls, k):
            return None

    def __init__(self, fp):
        """Load the catalog from file object ``fp`` and chain the
        None-returning fallback behind it."""
        super().__init__(fp)
        self.add_fallback(self.Fallback)

    def lookup_single_string(self, key):
        """Return the translation for ``key``, or None when absent."""
        return self.gettext(key)
def static_strings():
    """Load every per-language "static" gettext catalog bundled under
    gettext/ and return a dict mapping language code -> translations."""
    sd = {}
    catalog = readonly_app_path("gettext")
    # each subdirectory of the catalog dir is treated as a language code
    for langcode in os.listdir(catalog):
        sd[langcode] = gettext.translation(
            "static", catalog, [langcode], DictionaryAccessProtocolImp
        )
    return sd
def find_astool_master_version(in_base):
    """Read <in_base>/astool_store.json and return its master_version."""
    store_path = os.path.join(in_base, "astool_store.json")
    with open(store_path, "r") as handle:
        store = json.load(handle)
    return store["master_version"]
def create_dict_aggregator(master, language):
    """Build the string-lookup aggregator used by the web app.

    The fallback dictionary comes from the master data at ``master`` for
    ``language``. Extra dictionaries are configured through the
    AS_EXTRA_DICTIONARIES env var, a ';'-separated list of
    "region_tag:lang_code:display_name" entries rooted at AS_DATA_ROOT.
    """
    choices = {}
    extra = os.environ.get("AS_EXTRA_DICTIONARIES")
    if extra:
        for tag in extra.split(";"):
            rgn_tag, lang_code, name = tag.split(":")
            # each region keeps masters under <data_root>/<region>/masters/<version>
            region_root = os.path.join(os.environ.get("AS_DATA_ROOT", "."), rgn_tag)
            base = os.path.join(region_root, "masters", find_astool_master_version(region_root))
            logging.debug("Loading dictionary: %s", base)
            choices[lang_code] = dict_aggregator.Alternative(
                name, lang_code, libcard2.string_mgr.DictionaryAccess(base, lang_code)
            )
    fallback = libcard2.string_mgr.DictionaryAccess(master, language)
    return dict_aggregator.DictionaryAggregator(fallback, choices)
def application(master, language, debug):
    """Create the Tornado Application wired with all captain services.

    Args:
        master: path to the master data used for card lookups.
        language: language code for the fallback string dictionary.
        debug: enables Tornado debug mode and autoreload.
    """
    if os.environ.get("AS_TLINJECT_SECRET", ""):
        print("TLInject is enabled for this server.")
    # Tornado's own strings are localized via the bundled gettext catalogs.
    locale.set_default_locale("en")
    locale.load_gettext_translations(readonly_app_path("gettext"), "tornado")
    strings = static_strings()
    # one DB coordinator is shared by every database-backed service below
    db_coordinator = database.DatabaseCoordinator()
    application = Application(
        dispatch.ROUTES,
        db_coordinator=db_coordinator,
        master=libcard2.master.MasterData(master),
        string_access=create_dict_aggregator(master, language),
        image_server=os.environ.get("AS_IMAGE_SERVER"),
        tlinject_context=tlinject.TLInjectContext(db_coordinator),
        news_context=news.NewsDatabase(db_coordinator),
        card_tracking=card_tracking.CardTrackingDatabase(db_coordinator),
        event_tracking=event_tracker.EventTrackingDatabase(db_coordinator),
        template_path=readonly_app_path("webui"),
        runtime_info=create_runtime_info(),
        tlinject_secret=os.environ.get("AS_TLINJECT_SECRET", "").encode("utf8"),
        ui_methods=pageutils.UI_METHODS,
        static_path=readonly_app_path("static"),
        static_strings=strings,
        debug=debug,
        autoreload=debug,
    )
    return application
|
[
"summertriangle.dev@gmail.com"
] |
summertriangle.dev@gmail.com
|
4593166a53e65ea1bf937735d8ae8662a9e1274d
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/YjwJ6BfujKtmuTMqW_5.py
|
a15bfdf6d941cfbed39444ebf3d87ec932de004d
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
def dice_game(scores):
    """Simulate an elimination dice game and return the winner ("pN").

    Each round, every remaining player rolls; the player with the lowest
    dice total is eliminated. A tie on totals is broken by the lowest
    first die among the tied players; if that is also tied, nobody is
    eliminated that round. ``scores`` supplies the rolls round by round,
    one entry (list of dice) per remaining player.
    """
    remaining = ["1", "2", "3", "4"]
    count = len(remaining)
    while count > 1:
        rolls, scores = scores[:count], scores[count:]
        totals = [sum(dice) for dice in rolls]
        low_total = min(totals)
        if totals.count(low_total) == 1:
            # unique lowest total: that player is out
            remaining.pop(totals.index(low_total))
            count -= 1
            continue
        # tie-break on the first die of the tied players
        tied = [idx for idx in range(count) if totals[idx] == low_total]
        tied_firsts = [rolls[idx][0] for idx in tied]
        low_first = min(tied_firsts)
        if tied_firsts.count(low_first) == 1:
            remaining.pop(tied[tied_firsts.index(low_first)])
            count -= 1
    return "p{}".format(remaining[0])
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
da7e83734ca405319febd520ce310db5e2bc9a97
|
0d8486c1d55c40bebea7c5428930f18165d2d0e9
|
/tests/wasp1/AllAnswerSets/builtins_10a.test.py
|
a45344807ef9afdc35b06110f1c7cd3cacfebb7e
|
[
"Apache-2.0"
] |
permissive
|
bernardocuteri/wasp
|
6f81bf6aa8fb273c91bbf68ecce4ecb195a55953
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
refs/heads/master
| 2021-06-08T11:58:25.080818
| 2020-10-05T16:57:37
| 2020-10-05T16:57:37
| 124,245,808
| 0
| 0
|
Apache-2.0
| 2018-03-07T14:13:16
| 2018-03-07T14:13:16
| null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
# ASP program under test: the builtin '>' applied to quoted constants.
# Per the expected answer set, "3.7" compares greater than "10.28399",
# so only b("3.7","10.28399") is derived.
input = """
a("10.28399"). a("3.7").
b(X,Y) :- a(X), a(Y), X > Y.
"""
# The single expected answer set for the program above.
output = """
{a("10.28399"), a("3.7"), b("3.7","10.28399")}
"""
|
[
"mario@alviano.net"
] |
mario@alviano.net
|
91b0e7fd648b2b62f82c22a4e4128eb97fdb13e8
|
02255565aff9ea18a4d566955cc53ca06090efa4
|
/Python 2000/objectcontent.py
|
3f77b46a6c5185b2548503401f06c6c763a92224
|
[] |
no_license
|
BrainiacRawkib/Practical-Python-for-Begineers
|
20a8a3697812bed78646c6af54a6dc195694109a
|
cb29ea1a38339fcf2fac005feb92b5a72ae98387
|
refs/heads/master
| 2020-12-01T09:10:06.802758
| 2019-12-28T15:27:40
| 2019-12-28T15:27:40
| 230,598,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
class Foo():
    """Demo class showing how a ``with`` statement drives
    __enter__/__exit__ by rewriting a name attribute at each phase."""

    zname = 'init.name'  # class-level default before any with-block runs

    def __enter__(self):  # with block
        # NOTE: returns None, so `with Foo() as x` binds x to None.
        self.zname = 'with.enter.name'

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.zname = 'with.exit.name'

    def name(self):
        """Return the name reflecting the current phase."""
        return self.zname
# Demo script: observe a Foo instance's state before, inside, and after
# a `with` block, plus a `finally` clause seeing the post-exit state.
# Just an object
bla = Foo()
# Consistent block entry / exit values
print('pre:\t', bla.name())  # True
if True:
    print('block:\t', bla.name())  # normal
# Activate __enter__ via 'with'
try:
    with bla:
        print('with:\t', bla.name())  # enter block
    print('xblock:\t', bla.name())  # exit block
finally:
    print('finally:', bla.name())  # exit block
print('post:\t', bla.name(), 'still!')
|
[
"brainiacrawkib@gmail.com"
] |
brainiacrawkib@gmail.com
|
a62f98d1e5a82883078bc383513b920a7276a548
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Dementia RL/tdlib/noise.py
|
85597a3e5f643493f71b15226ad3d4f3373b01b3
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:d7ee89dcbac0978f3e0be51be18c9d4df9e003f4ce5a0798db764e5d037d37d4
size 6811
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
34b53ab2947ece36c60a2fa69b2e7dfd676e8e8e
|
1843fd5ccb4377240e664acd21ba5a9369eca2ab
|
/bluebottle/bb_payouts/utils.py
|
33a6ad75ba84ef9eae620ac7c8c84883262d376f
|
[
"BSD-2-Clause"
] |
permissive
|
raux/bluebottle
|
ba2e576cebcb6835065004c410b22bd8a6b9ee29
|
49d92b5deb289c1539f99122abc20f845577b879
|
refs/heads/master
| 2020-03-27T03:20:11.465491
| 2018-08-23T13:09:25
| 2018-08-23T13:09:25
| 145,854,614
| 0
| 0
| null | 2018-08-23T13:05:00
| 2018-08-23T13:04:59
| null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
import decimal
import datetime
from django.conf import settings
from django.utils import timezone
VAT_RATE = decimal.Decimal(settings.VAT_RATE)
def money_from_cents(amount):
    """
    Convert monetary amount from cents into a Decimal working
    with the MoneyField.

    >>> money_from_cents(1000)
    Decimal('10')
    """
    # BUG FIX: the previous `Decimal(str(float(amount) / 100))` went
    # through float division, which returned Decimal('10.0') for 1000
    # (failing the doctest above) and risked binary rounding artifacts.
    # Converting first and dividing in Decimal is exact.
    return decimal.Decimal(str(amount)) / 100
def round_money(amount):
    """
    Round monetary values specified as Decimal (2 decimals), used for
    displaying results of calculations.
    """
    assert isinstance(amount, decimal.Decimal)
    two_places = decimal.Decimal('.01')
    return amount.quantize(two_places, rounding=decimal.ROUND_HALF_DOWN)
def calculate_vat(amount):
    """
    Calculate VAT over exclusive amount according to default percentage.

    >>> calculate_vat(decimal.Decimal('10'))
    Decimal('2.10')
    """
    vat = amount * VAT_RATE
    return round_money(vat)
def calculate_vat_inclusive(amount):
    """
    Calculate the inclusive amount for amounts excluding VAT.

    >>> calculate_vat_inclusive(decimal.Decimal('10'))
    Decimal('12.10')
    """
    gross_factor = decimal.Decimal('1.00') + VAT_RATE
    return round_money(amount * gross_factor)
def calculate_vat_exclusive(amount):
    """
    Calculate the exclusive amount for amounts including VAT.

    >>> calculate_vat_exclusive(decimal.Decimal('12.10'))
    Decimal('10.00')
    """
    gross_factor = decimal.Decimal('1.00') + VAT_RATE
    return round_money(amount / gross_factor)
def date_timezone_aware(date):
    """
    Create timezone aware datetime equivalent of date, corresponding
    with midnight.
    """
    midnight = datetime.time(0, 0)
    # interpret midnight in the Django-configured default timezone
    default_zone = timezone.get_default_timezone()
    dt = datetime.datetime.combine(date, midnight)
    dt = timezone.make_aware(dt, default_zone)
    return dt
|
[
"loek@1procentclub.nl"
] |
loek@1procentclub.nl
|
21c9807b5c8005f1aa58d06aab6da7c3cd3bacaa
|
237cc38de0cf7a6e3661ed552ae771bd972d7438
|
/base/virtualenv.py
|
79ee98001072d09409b57a7f64b0ecf51161dd99
|
[] |
no_license
|
chydream/python
|
af5ad8a98c78de71e255f7b776f936c4b89c616e
|
e5bfef53a7770d4f323bd2877f93c8166c563695
|
refs/heads/master
| 2020-05-07T17:00:33.558178
| 2020-05-05T13:45:19
| 2020-05-05T13:45:19
| 180,708,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
# pip install virtualenv
# virtualenv django1.11
# cd Scripts
# 进入/退出虚拟环境:activate/deactivate
# pip install django==1.11
# pip install flask
# pip install virtualenvwrapper-win
# workon
# pip install pipenv
# mkdir py26
# cd py26
# pipenv --python 2.7
# pipenv shell
# exit()
# pipenv install requests
# pipenv --help/graph
|
[
"yong.chen@doone.com.cn"
] |
yong.chen@doone.com.cn
|
3b6d99b3f015286573099d0345cb67ee74b38848
|
f2bec1dbb86b218fc1b7c9106ff13c15dea8c301
|
/Connect Four/main.py
|
c98d476db4933cbef0032c43ab937f4b5db5e699
|
[] |
no_license
|
PuffyShoggoth/hatch
|
59e0f3684f041846084316f5bfafda1601cf5d2e
|
e1b32787cb0571469cd06a469b24890e23b78a58
|
refs/heads/master
| 2021-01-01T16:52:57.758161
| 2017-07-28T18:47:47
| 2017-07-28T18:47:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,800
|
py
|
from tkinter import *
from board import Board
from ai import Ai
from functools import partial
# Build the UI root frame, the game AI (search depth 4), and the board.
f = Frame()
f.pack()
a = Ai(4)
# buttons[column][row] will hold the Tk button for each board cell
buttons = [[] for i in range(7)]
# `locked` blocks input while a game-over popup is shown
locked = False
# hash of the empty board: XOR of every cell's "empty" key from the AI's
# hashvals table (incrementally updated as pieces are placed)
emptyhash = 0
for i in range(7):
    for j in range(6):
        emptyhash^=a.hashvals[i][j][1]
b = Board([[] for i in range(7)], emptyhash)
def disp(message):
    # Show a game-over popup with `message` and lock the board until
    # "Okay" (or the window close button) starts a new game.
    global locked, r
    locked = True
    r = Tk()
    r.protocol("WM_DELETE_WINDOW", newgame)
    Label(r, text=message).pack()
    Button(r, text="Okay", command=newgame).pack()
def addpiece(column):
    # Handle a human move in `column`, then compute and play the AI reply.
    global buttons, b, a, locked
    if len(b.board[column])==6 or locked:
        # column full, or a result popup is open: ignore the click
        #print(b.board)
        return
    # paint the human piece red and update the incremental board hash
    buttons[column][len(b.board[column])].config(bg = 'red')
    b.hash^=a.hashvals[column][len(b.board[column])][1]^a.hashvals[column][len(b.board[column])][2]
    b.board[column].append(1)
    if b.eval()== 9999998:
        disp("Player wins")
        return
    # AI reply: depth-4 negamax with alpha-beta bounds; the chosen
    # column is left in a.bestmove
    a.bestmove = -1
    a.negamax(b, 4, -9999999, 9999999, -1)
    buttons[a.bestmove][len(b.board[a.bestmove])].config(bg = 'yellow')
    b.hash^=a.hashvals[a.bestmove][len(b.board[a.bestmove])][0]^a.hashvals[a.bestmove][len(b.board[a.bestmove])][1]
    b.board[a.bestmove].append(-1)
    if b.eval()== -9999998:
        disp("Computer wins")
        return
    elif sum(len(i) for i in b.board)==42:
        # all 42 cells filled with no winner
        disp("It's a tie")
        return
def newgame():
    # Reset board state and UI after a finished game (popup callback).
    global b, locked, r
    locked = False
    r.destroy()
    for i in buttons:
        for j in i:
            j.config(bg="white")
    b = Board([[] for i in range(7)], emptyhash)
    #print(b.board)
# Create the 7x6 grid of cell buttons; clicking any button in a column
# drops a piece into that column. Row index 0 is drawn at the bottom.
for i in range(7):
    for j in range(6):
        butt = Button(f, width = 10, height = 5, bg="white", command = partial(addpiece, i))
        butt.grid(row = 5-j, column = i)
        buttons[i].append(butt)
mainloop()
|
[
"shaar@utschools.ca"
] |
shaar@utschools.ca
|
e08c8a6e0bd133a06c0f98760780129fa4d7c06f
|
c2ff2ee2b0c84e047a80cfdf0b0d0b122fc9db79
|
/features/himario/mmediting/mmedit/models/backbones/encoder_decoders/gl_encoder_decoder.py
|
0039f7eab264a5c2b7f8a6920725c9ef47ef7d1f
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
obarnard99/vilio
|
275dcb62cdb8b2d8c55ab1e73f3a796bd2073a5b
|
77aac226c3a0910410f11a5999f8908181f57ccd
|
refs/heads/master
| 2023-06-29T17:02:02.282457
| 2021-06-22T09:50:11
| 2021-06-22T09:50:11
| 337,738,373
| 0
| 0
|
MIT
| 2021-06-22T09:50:12
| 2021-02-10T13:50:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,220
|
py
|
import torch.nn as nn
from mmcv.runner import auto_fp16, load_checkpoint
from mmedit.models.builder import build_component
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class GLEncoderDecoder(nn.Module):
    """Encoder-Decoder used in Global&Local model.

    This implementation follows:
    Globally and locally Consistent Image Completion

    The architecture of the encoder-decoder is:\
        (conv2d x 6) --> (dilated conv2d x 4) --> (conv2d or deconv2d x 7)

    Args:
        encoder (dict): Config dict to encoder.
        decoder (dict): Config dict to build decoder.
        dilation_neck (dict): Config dict to build dilation neck.
    """

    def __init__(self,
                 encoder=dict(type='GLEncoder'),
                 decoder=dict(type='GLDecoder'),
                 dilation_neck=dict(type='GLDilationNeck')):
        super(GLEncoderDecoder, self).__init__()
        # each sub-module is instantiated from its config via the
        # component registry
        self.encoder = build_component(encoder)
        self.decoder = build_component(decoder)
        self.dilation_neck = build_component(dilation_neck)
        # support fp16
        self.fp16_enabled = False

    @auto_fp16()
    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        x = self.encoder(x)
        # some encoders return a dict; the feature map lives under 'out'
        if isinstance(x, dict):
            x = x['out']
        x = self.dilation_neck(x)
        x = self.decoder(x)
        return x

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            # Here, we just use the default initialization in `ConvModule`.
            pass
        else:
            raise TypeError('pretrained must be a str or None')
|
[
"obarnard99@gmail.com"
] |
obarnard99@gmail.com
|
97b0c1f5cf34e192fe99aaa9a7dd3a98c3c4aef8
|
b9360389f3345c550250a1bd5a5fe58bd7410a71
|
/wlct/migrations/0011_auto_20190424_1339.py
|
a2afd906d1ce15b09ffe3076dc44e15ad81ef9cc
|
[] |
no_license
|
JustinR17/wzclot
|
d6f4bd8b46240abbeaa51837c1de1f3115554f17
|
4fa29666eb72aafee28bf57898fecc679fb3d4bb
|
refs/heads/master
| 2023-02-07T22:43:13.827383
| 2021-12-30T22:34:39
| 2021-12-30T22:34:39
| 241,187,417
| 0
| 0
| null | 2020-02-17T19:14:01
| 2020-02-17T19:14:00
| null |
UTF-8
|
Python
| false
| false
| 1,980
|
py
|
# Generated by Django 2.1.4 on 2019-04-24 20:39
# Adds the RoundRobinTournament model (a multi-table subclass of Tournament)
# and links group-stage groups to it; also makes the group place FKs
# nullable with SET_NULL so clearing a placement does not delete the group.

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('wlct', '0010_auto_20190423_1427'),
    ]

    operations = [
        migrations.CreateModel(
            name='RoundRobinTournament',
            fields=[
                ('tournament_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wlct.Tournament')),
                ('type', models.CharField(default='Round Robin', max_length=255)),
                ('games_at_once', models.IntegerField(default=2)),
                ('first_place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='first_place', to='wlct.TournamentTeam')),
                ('second_place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='second_place', to='wlct.TournamentTeam')),
            ],
            bases=('wlct.tournament',),
        ),
        migrations.AlterField(
            model_name='groupstagetournamentgroup',
            name='first_place',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='first_place_group', to='wlct.TournamentTeam'),
        ),
        migrations.AlterField(
            model_name='groupstagetournamentgroup',
            name='second_place',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='second_place_group', to='wlct.TournamentTeam'),
        ),
        migrations.AddField(
            model_name='groupstagetournamentgroup',
            name='round_robin_tournament',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='wlct.RoundRobinTournament'),
        ),
    ]
|
[
"brendanflynn@outlook.com"
] |
brendanflynn@outlook.com
|
273f87892d2804ebef8f3e0b4dbd80381774b995
|
470e0d4101bf9ca6d22a515e84e591ce37019ca2
|
/back/view.py
|
b450d5005a7317a5a67084043e8f52db41e59b6d
|
[] |
no_license
|
chenzh111/myblog
|
9eedad7bf4601b8235f290f5592a4d19189954e6
|
545c47cb3dd0a0441029c0281c69ab561b369580
|
refs/heads/master
| 2020-05-17T02:52:17.908901
| 2019-04-25T16:00:03
| 2019-04-25T16:00:03
| 183,464,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,943
|
py
|
from datetime import datetime
from flask import render_template, request, redirect, session
from werkzeug.security import generate_password_hash, check_password_hash
from flask import Blueprint
from back.model import Article, db, User, Articletype
from utils.functions import login_required
blue = Blueprint('app',__name__)
# Redirect the bare root URL to the registration page.
@blue.route('/',methods=['GET'])
def aa():
    """Redirect '/' to the registration page."""
    return redirect('/register/')
# User registration.
@blue.route('/register/',methods=["GET","POST"])
def register():
    """GET: render the registration form; POST: validate input and create the user."""
    if request.method =="GET":
        return render_template('back/register.html')
    if request.method == "POST":
        username = request.form.get("username")
        password = request.form.get("password")
        password2 = request.form.get("password2")
        if username and password and password2:
            user = User.query.filter(User.username == username).first()
            if user:
                # Error text (zh): "this account is already registered".
                error = "该账号已经被注册了"
                return render_template('back/register.html',error = error)
            else:
                if password2 == password:
                    user = User()
                    user.username = username
                    # Store only the salted hash, never the raw password.
                    user.password = generate_password_hash(password)
                    user.save()
                    return redirect('/login/')
                else:
                    # Error text (zh): "the two passwords differ; registration failed".
                    error = '您两次输入的密码不一样,注册失败'
                    return render_template('back/register.html',error = error)
        else:
            # Error text (zh): "please fill in complete information to register".
            error = '请填写完整的信息进行注册'
            return render_template('back/register.html',error = error)
# Login.
@blue.route('/login/',methods=["GET","POST"])
def login():
    """GET: render the login form; POST: verify credentials and store user_id in the session."""
    if request.method =="GET":
        return render_template('back/login.html')
    if request.method == "POST":
        username = request.form.get("username")
        password = request.form.get("password")
        if username and password:
            user = User.query.filter(User.username == username).first()
            if not user:
                # Error text (zh): "account does not exist; please register first".
                error = '账号不存在,请注册后登陆'
                return render_template('back/login.html',error=error)
            if not check_password_hash(user.password,password):
                # Error text (zh): "wrong password; please retry".
                error = '密码错误,请重新输入'
                return render_template('back/login.html',error=error)
            session['user_id'] = user.id
            return redirect('/index/')
        else:
            # Error text (zh): "please enter complete information".
            error = '请输入完整信息'
            return render_template('back/login.html',error=error)
# Admin dashboard home.
@blue.route('/index/',methods=['GET'])
@login_required
def index():
    """Render the dashboard with the total article count."""
    sum = Article.query.count()  # NOTE: shadows the builtin `sum` locally
    return render_template('back/index.html', sum=sum)
@blue.route('/add-article/',methods=['GET'])
def add_article():
    """Render the 'new article' form with the category list and current time."""
    categories = Articletype.query.order_by(Articletype.id).all()
    now = datetime.now()
    return render_template('back/add-article.html', time=now, name=categories)
@blue.route('/add-category/',methods=['GET'])
def add_category():
    """Render the 'new category' form."""
    return render_template('back/add-category.html')
@blue.route('/article/',methods=['GET'])
def article():
    """List all articles together with the total count."""
    title = Article.query.all()
    sum = Article.query.count()  # NOTE: shadows the builtin `sum` locally
    return render_template('back/article.html',title = title, Articletype=Articletype, sum=sum )
@blue.route('/category/',methods=['GET'])
def category():
    """List all categories ordered by id, with the total count."""
    name = Articletype.query.order_by(Articletype.id).all()
    sum = Articletype.query.count()  # NOTE: shadows the builtin `sum` locally
    return render_template('back/category.html',name=name, sum=sum)
@blue.route('/update-article/',methods=['GET'])
def update_article():
    """Render the edit form for an article.

    The client encodes the article title as a bare query-string key
    (e.g. ``/update-article/?SomeTitle``); as before, the last key wins.

    Fixes: with no (or an unknown) title the original raised
    NameError/AttributeError; it now falls back to the article list.
    """
    name = request.args.to_dict().keys()
    article_row = None
    for name3 in name:
        article_row = Article.query.filter_by(title=name3).first()
    if article_row is None:
        # Nothing to edit: return to the list instead of crashing.
        return redirect('/article/')
    content = article_row.content
    desc = article_row.desc
    type = article_row.type
    id = article_row.id
    name1 = Articletype.query.order_by(Articletype.id).all()
    category_row = Articletype.query.filter(Articletype.id ==type).first()
    # Guard against an article whose category id no longer exists.
    Id = category_row.t_name if category_row is not None else ''
    return render_template('back/update-article.html',name=name,name1=name1,content=content,desc=desc,type=type,id=id,Article=Article,Id=Id)
@blue.route('/update-category/',methods=['GET'])
def update_category():
    """Render the category rename form; the old name arrives as a query-string key."""
    name = request.args.to_dict().keys()
    return render_template('back/update-category.html',name=name)
@blue.route('/Category/update/',methods=['GET', 'POST'])
def category_update():
    """Rename a category: old name arrives as a query-string key, new name in the form."""
    for key in request.args.to_dict().keys():
        old_name = key  # last query-string key wins, as before
    new_name = request.form.get('name')
    row = Articletype.query.filter_by(t_name=old_name).first()
    row.t_name = new_name
    db.session.commit()
    return redirect('/category/')
@blue.route('/Article/update/',methods=['GET', 'POST'])
def article_update():
    """Update an article: its original title arrives as a query-string key."""
    for key in request.args.to_dict().keys():
        original_title = key  # last query-string key wins, as before
    row = Article.query.filter_by(title=original_title).first()
    row.title = request.form.get('title')
    row.content = request.form.get('content')
    row.desc = request.form.get('describe')
    row.type = request.form.get('category')
    db.session.commit()
    return redirect('/article/')
@blue.route('/delete-category/',methods=['GET','POST'])
def delete_category():
    """Delete the category whose name is passed as a query-string key."""
    for key in request.args.to_dict().keys():
        doomed_name = key  # last query-string key wins, as before
    row = Articletype.query.filter_by(t_name=doomed_name).first()
    db.session.delete(row)
    db.session.commit()
    return redirect('/category/')
@blue.route('/delete-article/',methods=['GET','POST'])
def delete_article():
    """Delete every article whose title appears as a query-string key."""
    for title_key in request.args.to_dict().keys():
        doomed = Article.query.filter_by(title=title_key).first()
        db.session.delete(doomed)
        db.session.commit()
    return redirect('/article/')
# Bulk delete of checked articles.
@blue.route('/article/checkall/',methods=['GET', 'POST'])
def article_chenkall():
    """Delete every article whose title was checked in the list form.

    Fixes: ``getlist`` always returns a list (never None), so the old
    ``is not None`` test was always true; the loop variable shadowed the
    list it iterated; a missing article made ``session.delete(None)`` fail;
    and the session was committed once per row instead of once per batch.
    """
    titles = request.form.getlist('checkbox[]')
    for title in titles:
        article_row = Article.query.filter_by(title=title).first()
        if article_row is not None:
            db.session.delete(article_row)
    if titles:
        db.session.commit()
    return redirect('/article/')
# Create the database tables.
@blue.route('/create/')
def create():
    """Create all SQLAlchemy tables (one-off bootstrap endpoint)."""
    db.create_all()
    return "xinjian"
@blue.route('/article/add/',methods=['GET','POST'])
def article_add():
    """Create a new article from the submitted form; re-render the form when incomplete."""
    category = request.form.get("category")
    art = Article()
    art.type = category
    art.title = request.form.get('title')
    art.content = request.form.get('content')
    art.desc = request.form.get("describe")
    if art.title and art.content and art.desc:
        art.save()
    else:
        # NOTE(review): this re-render omits the `time`/`name` context that
        # add_article() supplies -- confirm the template tolerates that.
        return render_template('back/add-article.html')
    return redirect('/article/')
@blue.route('/category/add/',methods=['GET','POST'])
def category_add():
    """Create a category from the submitted form and return to the list."""
    type = Articletype()  # NOTE: shadows the builtin `type` locally
    type.t_name = request.form.get('name')
    type.save()
    return redirect('/category/')
|
[
"you@example.com"
] |
you@example.com
|
5890e87f6173b5cb9740d08aa216544fe73865bb
|
dffe32dc7f1819217168c42234cc148b142ebe10
|
/scripts/download.py
|
b489821a7590b1f3f7db8fa3e35364f47776e3f5
|
[] |
no_license
|
willgdjones/HistoVAE
|
d4070b991877fb0be83b42b0c110ece6c47563e1
|
5956447b703b5d06115e54843df8c9528a7c1943
|
refs/heads/master
| 2022-12-12T04:28:06.829139
| 2018-10-18T09:52:25
| 2018-10-18T09:52:25
| 114,638,657
| 10
| 2
| null | 2022-12-08T02:22:28
| 2017-12-18T12:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 903
|
py
|
import sys
import requests.packages.urllib3
import click
import os
import logging
requests.packages.urllib3.disable_warnings()
sys.path.append('.')
from src.classes import Dataset
logger = logging.getLogger(__name__)
@click.command()
@click.option(
    '--n_images', default=10,
    help="Number of images per tissue"
)
@click.option(
    '--n_tissues', default=6,
    help="Number of tissues with most numbers of samples"
)
def main(n_images, n_tissues):
    """Download `n_images` images for each of the `n_tissues` most-sampled tissues."""
    os.makedirs('data/images', exist_ok=True)
    logger.info('Initializing download script')
    dataset = Dataset(n_images=n_images, n_tissues=n_tissues)
    dataset.download()
if __name__ == '__main__':
    # NOTE(review): assumes a `logs/` directory already exists --
    # logging.basicConfig would raise FileNotFoundError otherwise; confirm.
    logging.basicConfig(
        filename='logs/download.log',
        level=logging.DEBUG,
        format=(
            "%(asctime)s | %(name)s | %(processName)s | "
            "%(levelname)s: %(message)s"
        )
    )
    main()
|
[
"williamgdjones@gmail.com"
] |
williamgdjones@gmail.com
|
0b096b3ed4acff128d708bd97fa00312a842e694
|
9a5438bdb8e84d0167ddea5458a7f729fdd54121
|
/MetaDataApi/tests/test_utils/test_buildDjangoSearchArgs.py
|
c210a9521f6cdff2456689f3ca027663c69f3df6
|
[] |
no_license
|
Grusinator/MetaDataApi
|
740fd2be4cb97b670f827a071a0ac8c50f79f8ff
|
081f881c735466ed1dbbd68646b821299c5168f8
|
refs/heads/master
| 2023-07-25T23:58:22.179717
| 2020-03-15T09:36:05
| 2020-03-15T09:36:05
| 149,087,967
| 5
| 1
| null | 2023-07-25T15:39:12
| 2018-09-17T07:45:09
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
import django
from django.test import TransactionTestCase
from MetaDataApi.utils.django_utils import BuildDjangoSearchArgs
class test_buildDjangoSearchArgs(TransactionTestCase):
    """Unit tests for BuildDjangoSearchArgs.build_from_json."""

    # Django requires an explicit setup() when running tests in PTVS
    @classmethod
    def setUpClass(cls):
        super(test_buildDjangoSearchArgs, cls).setUpClass()
        django.setup()

    def test_build_search_args_from_json(self):
        """A nested JSON object maps to nested `from_edge__from_object` ORM lookups."""
        # data = UtilsForTesting.loadStravaActivities()
        data = {
            "object1": {
                "Attribute1": 3,
                "Attribute2": {"value": "att2value"},
                "object2": {
                    "attribute3": True,
                    "attribute4": 5.04
                }
            }
        }
        builder = BuildDjangoSearchArgs()
        args = builder.build_from_json(data)
        # One `__in` filter per nesting level below the root label.
        expected = {
            'from_edge__from_object__label': 'object1',
            'from_edge__from_object__from_edge__from_object__label__in':
                ['Attribute1',
                 'Attribute2',
                 'object2'],
            'from_edge__from_object__from_edge__from_object__from_edge__from_object__label__in':
                ['attribute3',
                 'attribute4']
        }
        self.assertEqual(args, expected)
|
[
"grusinator@gmail.com"
] |
grusinator@gmail.com
|
8eb431bf2b7b5607bbc88c85c0e16d7c725be8ef
|
5ff8cefa68d52d2427bb3d35320cd8bd0d072968
|
/Tuples/Ten_most_repititive_words_from_file.py
|
b6669b1c49adcea563f57b5a8309203017b378e9
|
[] |
no_license
|
gsudarshan1990/PythonSampleProjects
|
a65a111454f8dc551f1cd29901cead0798ad6dc3
|
3c1a5174c5f966b0eed2828221add76ec0d019d5
|
refs/heads/master
| 2020-05-09T16:02:37.743568
| 2019-07-14T06:22:55
| 2019-07-14T06:22:55
| 181,255,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
"""
Find the 10 most repititive words
"""
import string
filename=input('Enter the file name')
try:
filehandle=open(filename)
except IOError as argument:
print('Could not open the file')
print(argument)
exit()
word_dictionary=dict()
for line in filehandle:
line=line.translate(str.maketrans('','',string.punctuation))
line=line.rstrip()
words=line.split()
for word in words:
word_dictionary[word]=word_dictionary.get(word,0)+1
print(word_dictionary)
list_value_key=list()
for key,value in word_dictionary.items():
list_value_key.append((value,key))
list_value_key_sorted=sorted(list_value_key,reverse=True)
for key,value in list_value_key_sorted[:10]:
print(key,value)
|
[
"sudharshan.govindarajan@ey.com"
] |
sudharshan.govindarajan@ey.com
|
e349259897bdd4b47f9069b2a79a212c9fb95105
|
0cf7dd2c3c0b28b52f1273e8fe2ea0a87cacc6af
|
/Other_Algorithms/Page-Rank_algorithm.py
|
257f973d0829fcb8ce938cae730675e41299a2ce
|
[] |
no_license
|
EngrDevDom/Everyday-Coding-in-Python
|
61b0e4fcbc6c7f399587deab2fa55763c9d519b5
|
93329ad485a25e7c6afa81d7229147044344736c
|
refs/heads/master
| 2023-02-25T05:04:50.051111
| 2021-01-30T02:43:40
| 2021-01-30T02:43:40
| 274,971,215
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
# Page Rank Algorithm
"""
The Page Rank algorithm is applicable in web pages. Web page is a directed
graph, we know that the two components of Directed graphs are -nodes and
connections. The pages are nodes and hyperlinks are the connections, the
connection between two nodes.
We can find out the importance of each page by the Page Rank and it is accurate.
The value of Page Rank is the probability will be between 0 and 1.
The Page Rank value of individual node in a graph depends on the Page Rank value
of all nodes which connect to it and those nodes are cyclically connected to
the nodes whose ranking we want, we use converging iterative method for assigning
values to Page Rank.
"""
import numpy as np
import scipy as sc   # unused, kept for file compatibility
import pandas as pd  # unused, kept for file compatibility
from fractions import Fraction


def display_format(my_vector, my_decimal):
    """Round `my_vector` to `my_decimal` places after casting to float."""
    # Fix: `np.float` was removed in NumPy 1.24; the builtin `float` is the
    # documented replacement and behaves identically here.
    return np.round((my_vector).astype(float), decimals=my_decimal)


my_dp = Fraction(1, 3)
# Column-stochastic link matrix of the 3-page web graph.
Mat = np.matrix([[0, 0, 1], [Fraction(1, 2), 0, 0], [Fraction(1, 2), 1, 0]])
# Teleport matrix: uniform probability of jumping to any page.
Ex = np.zeros((3, 3))
Ex[:] = my_dp
beta = 0.7  # damping factor
# Google-matrix blend of link-following and random teleporting.
Al = beta * Mat + ((1 - beta) * Ex)
# Start from the uniform rank vector and iterate until it converges.
r = np.matrix([my_dp, my_dp, my_dp])
r = np.transpose(r)
previous_r = r
for i in range(1, 100):
    r = Al * r
    print(display_format(r, 3))
    if (previous_r == r).all():
        break
    previous_r = r
print("Final: \n", display_format(r, 3))
print("Sum: ", np.sum(r))
|
[
"60880034+EngrDevDom@users.noreply.github.com"
] |
60880034+EngrDevDom@users.noreply.github.com
|
036c90c27ad291c077b467be837247756e9b287d
|
1f08436bab6cd03bcfb257e8e49405cbc265195a
|
/1_Basic_I/Basic/40.py
|
229ea4394a7ac7e7ccb627f27a4c9b5fd1e54eec
|
[] |
no_license
|
kuchunbk/PythonBasic
|
e3ba6322f256d577e37deff09c814c3a374b93b2
|
a87135d7a98be8830d30acd750d84bcbf777280b
|
refs/heads/master
| 2020-03-10T04:28:42.947308
| 2018-04-17T04:25:51
| 2018-04-17T04:25:51
| 129,192,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
import math
def calculate_distance(input_xa, input_ya, input_xb, input_yb):
    """Return the Euclidean distance between points A=(xa, ya) and B=(xb, yb).

    Uses math.hypot, which avoids intermediate overflow/underflow that the
    naive sqrt-of-sum-of-squares formulation can suffer for extreme values.
    """
    return math.hypot(input_xa - input_xb, input_ya - input_yb)
if __name__ == "__main__":
input_xa = float(input('xa'))
input_ya = float(input('ya'))
input_xb = float(input('xb'))
input_yb = float(input('yb'))
print(calculate_distance(input_xa, input_ya, input_xb, input_yb))
|
[
"kuchunbk@gmail.com"
] |
kuchunbk@gmail.com
|
e4f524c34ba0082859ddb9941baece1aca2e4699
|
72c4cea551df0bee51c5740926b2fdb1feaf3470
|
/tools/extjs_cc/js_util_types.py
|
73c302d3e7b293be2584a95ad8a69eb0447b1b2d
|
[
"MIT"
] |
permissive
|
joeedh/fairmotion
|
2955fda64b36a634c42c3382b20f18dae189d13f
|
fc0bca395057cd3e78f91bcb1796b6c5eda73d2b
|
refs/heads/master
| 2023-05-26T20:03:51.583970
| 2023-05-14T16:07:53
| 2023-05-14T16:07:53
| 34,099,859
| 1
| 0
|
MIT
| 2023-03-05T21:45:57
| 2015-04-17T06:13:09
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
class SortedDict (dict):
  """dict subclass whose iteration and keys() always yield keys in sorted order."""

  def __iter__(self):
    # Fix: __iter__ must return an *iterator*; the original returned the
    # keys list itself, so `for k in d` raised
    # "iter() returned non-iterator of type 'list'".
    return iter(self.keys())

  def keys(self):
    # Return a plain sorted list (not a view), as the original did.
    return sorted(dict.keys(self))
class odict:
  """Minimal insertion-ordered mapping: ordered keys plus O(1) value lookup."""

  def __init__(self):
    self.items = []    # keys in insertion order
    self.dict = {}     # key -> value
    self.keypos = {}   # key -> index in self.items

  def __setitem__(self, key, value):
    if key not in self.dict:
      self.items.append(key)
      self.keypos[key] = len(self.items)-1
    self.dict[key] = value

  def __getitem__(self, key):
    return self.dict[key]

  def __delitem__(self, key):
    # Fix: removing an item shifts every later key one slot left, so their
    # cached positions must be decremented.  The original left them stale,
    # which made subsequent deletions pop the wrong key from self.items.
    i = self.keypos.pop(key)
    self.items.pop(i)
    del self.dict[key]
    for later_key in self.items[i:]:
      self.keypos[later_key] -= 1

  def __contains__(self, key):
    return key in self.dict

  def __len__(self):
    return len(self.dict)

  def __iter__(self):
    return iter(self.items)

  def keys(self):
    return list(self.items)

  def values(self):
    # Values in key-insertion order.
    return [self.dict[k] for k in self.items]

  def __str__(self):
    s = "odict{"
    for i, k in enumerate(self.items):
      if i > 0: s += ", "
      s += "%s: %s" % (str(k), str(self[k]))
    s += "}"
    return s

  def __repr__(self):
    return str(self)
|
[
"joeedh@gmail.com"
] |
joeedh@gmail.com
|
96e30180d97e49f108b03f284c1cd9180a406a6f
|
4ca8df3a127e9b15cbfecea6505928741f685a63
|
/case_crawler/apps/anjuke/anjuke_detail.py
|
2056d2c0dd518496541867711808fcc9a781663f
|
[] |
no_license
|
gongfei6644/gongfei
|
2beb082c56197bc23ca20a6927ff6c10d8beaa83
|
bfdd5e6a3a8d76ad1e43cf54df186b944cad29e4
|
refs/heads/master
| 2022-11-30T20:49:22.213040
| 2020-08-16T12:52:28
| 2020-08-16T12:52:28
| 286,283,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,761
|
py
|
from lxml import etree
import datetime
import time
import requests
import json
import re
import bs4
class KeParse:
    """Parses a property-listing detail page (Anjuke) into a flat dict via XPath."""

    def parse_xpath(self, html_str):
        """Extract listing fields from `html_str` ({'str': html-or-'404'}).

        Returns a dict with the parsed fields plus bookkeeping keys:
        `d_status` (0 = page gone, 1 = parsed, 'err' = nothing extracted)
        and `detail_time` (timestamp of this parse attempt).
        """
        # print('======================html_str===========================')
        dic = {}
        if html_str['str'] == '404':
            # Listing page no longer exists.
            now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            dic['d_status'] = 0
            dic['detail_time'] = now_time
            return dic
        if html_str['str'] and html_str['str'] != '404':
            html = etree.HTML(html_str['str'])
            # Estate / project name.
            dic["project_name"] = html.xpath('string(//div[contains(text(), "所属小区")]/following-sibling::*[1]/a/text())')
            # Orientation the unit faces.
            try:
                dic['orientation'] = html.xpath("string(//node()[contains(text(),'房屋朝向:')]/following-sibling::div[1]/text())").strip()
            except Exception as e:
                dic['orientation'] = ''
            # Decoration / fit-out level.
            try:
                dic['decoration'] = html.xpath("string(//node()[contains(text(),'装修程度:')]/following-sibling::div[1]/text())").strip()
            except Exception as e:
                # print(e)
                dic['decoration'] = ''
            # Elevator availability: //node()[text()='有无电梯']/following-sibling::*[1]/text()
            try:
                dic['is_elevator'] = html.xpath("//node()[text()='配套电梯:']/following-sibling::div[1]/text()")[0].strip()
            except Exception as e:
                dic['is_elevator'] = ''
            # Property-rights nature.
            try:
                dic['property_nature'] = html.xpath("//node()[text()='产权性质:']/following-sibling::div[1]/text()")[0].strip()
            except Exception as e:
                dic['property_nature'] = ''
            # Housing type.
            try:
                dic['usage'] = html.xpath("//node()[text()='房屋类型:']/following-sibling::div[1]/text()")[0].strip()
            except Exception as e:
                dic['usage'] = ''
            # Building structure.
            try:
                dic['building_structure'] = html.xpath("//node()[text()='建筑结构:']/following-sibling::div[1]/text()")[0].strip()
            except Exception as e:
                dic['building_structure'] = ''
            # Building category.
            try:
                dic['building_type'] = html.xpath("//node()[text()='建筑类别:']/following-sibling::div[1]/text()")[0].strip()
            except Exception as e:
                dic['building_type'] = ''
            # Listing date, used as the case date; converted
            # from "YYYY年MM月DD日" to "YYYY-MM-DD".
            try:
                dic['case_happen_date'] = html.xpath("//node()[contains(text(),'发布时间')]/text()")[0].strip().split(':')[1]
                array = time.strptime(dic['case_happen_date'], "%Y年%m月%d日")
                # print(dic['case_happen_date'])
                dic['case_happen_date'] = time.strftime("%Y-%m-%d", array)
            except Exception as e:
                print('case_happen_date', e)
                dic['case_happen_date'] = ''
            # Construction year.
            try:
                dic['build_date'] = html.xpath("//node()[text()='建造年代:']/following-sibling::div[1]/text()")[0].strip()
            except Exception as e:
                dic['build_date'] = ''
            # Community facilities.
            try:
                dic['supporting_facilities'] = html.xpath("string(//node()[text()='小区配套']/following-sibling::div[1]/text())").strip()
            except Exception as e:
                dic['supporting_facilities'] = ''
            # Contact phone number.
            try:
                dic['tel'] = html.xpath("string(//span[@id='mobilecode'])")
            except Exception as e:
                dic['tel'] = ''
            # dic['test'] = 'mogu'
            # Treat the parse as successful if any of the key fields came through.
            if dic['case_happen_date'] or dic['orientation'] or dic['build_date']:
                now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                dic['d_status'] = 1
                # dic['test'] = 1
                dic['detail_time'] = now_time
                # '有' means an elevator is present; fold it into the
                # facilities string so downstream code sees one field.
                if dic['is_elevator'] == '有':
                    if dic['supporting_facilities']:
                        dic['supporting_facilities'] = dic['supporting_facilities'] + ',电梯'
                    else:
                        dic['supporting_facilities'] = dic['supporting_facilities'] + '电梯'
                print(11111, dic)
                return dic
            else:
                now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                dic['d_status'] = 'err'
                dic['detail_time'] = now_time
                return dic
|
[
"1"
] |
1
|
536a06cfcaa0279e625af71aeacac292641c89e2
|
3a02bff6397eb23afd55cc17faf81c24a8751f2d
|
/sample/searchthu.py
|
ffe31e067214a28f9ee142f297252b6978367848
|
[] |
no_license
|
cothuyanninh/Python_Code
|
909fd4d798cbd856e8993f9d4fea55b4b7c97a1f
|
7f657db61845cf8c06725a2da067df526e696b93
|
refs/heads/master
| 2022-11-06T01:00:39.939194
| 2019-01-13T15:27:38
| 2019-01-13T15:27:38
| 164,468,626
| 0
| 1
| null | 2022-10-13T16:16:21
| 2019-01-07T17:40:51
|
Python
|
UTF-8
|
Python
| false
| false
| 133
|
py
|
import re

# Pattern: a run of digits, one whitespace character, then a run of word characters.
var_search = re.compile(r'\d+\s\w+')
sample_text = '12 bananas , 11 apple , 10 nhan , 9 buoi'
result = var_search.findall(sample_text)
print(result)
|
[
"cothuyanninh@gmail.com"
] |
cothuyanninh@gmail.com
|
b6dc92ec77e8df5536fba11972c47407da157815
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_wolfs.py
|
194d3b5e802251036f20fdcd3d81d9b1e51c8aff
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from xai.brain.wordbase.nouns._wolf import _WOLF
# class header
class _WOLFS(_WOLF, ):
	"""Noun entry for the surface form "wolfs", derived from base noun "wolf"."""
	def __init__(self,):
		_WOLF.__init__(self)
		self.name = "WOLFS"    # uppercased surface form
		self.specie = 'nouns'  # word class
		self.basic = "wolf"    # lemma / base form
		self.jsondata = {}     # extra lexical data, empty for this entry
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
966d01d06a1e477526327db6dcde20f9bde29880
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/trinutils/driverutils.py
|
60e804fa0f9f79cb0f8da31d7f522c627b05356c
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\trinutils\driverutils.py
import trinity
class CannotIdentifyDriverException(Exception):
    """Raised when the active display-adapter driver cannot be identified."""

    def __init__(self, vendor, description = 'NA'):
        # Fixes: '/n' was a typo for the '\n' escape, and the old
        # super(Exception, self).__init__(self, msg) call skipped Exception's
        # own __init__ and stuffed `self` into the exception args.
        msg = "Unable to retrieve info from %s card. Please ensure that you're using the right drivers or graphics card. \nDriver Description: %s" % (vendor, description)
        super(CannotIdentifyDriverException, self).__init__(msg)
def GetDriverVersion():
    """Return the installed NVidia driver version as an int.

    Raises CannotIdentifyDriverException when the default adapter is not
    NVidia or when its driver info cannot be queried.
    """
    adapter = trinity.adapters.GetAdapterInfo(trinity.adapters.DEFAULT_ADAPTER)
    if 'nvidia' not in adapter.description.lower():
        raise CannotIdentifyDriverException('Unknown', adapter.description)
    try:
        info = adapter.GetDriverInfo()
    except trinity.ALError:
        raise CannotIdentifyDriverException('NVidia', adapter.description)

    def getDriverVersionNumber(driverInfo):
        # Strip the dots and keep the last five digits, e.g.
        # '9.18.13.4052' -> '918134052' -> 34052.
        # NOTE(review): assumes driverVersionString is dotted decimal -- confirm.
        verInfo = driverInfo.driverVersionString.replace('.', '')
        return int(verInfo[-5:])

    return getDriverVersionNumber(info)
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
68b975e902a1a409aca00a40b7899bf7cd971a86
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02414/s302351716.py
|
bb38a4daf864560c044955692f3b79691df53c03
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
# Read dimensions: A is n x m, B is m x l; print the n x l product C = A * B.
n, m, l = map(int, input().split())
a = [[int(num) for num in input().split()] for i in range(n)]
b = [[int(num) for num in input().split()] for i in range(m)]
# C starts as an n x l zero matrix.
c = [[0 for i in range(l)] for j in range(n)]
for i in range(l):
    for j in range(n):
        for k in range(m):
            c[j][i] += a[j][k] * b[k][i]
# Print row by row with single spaces and no trailing space on a row.
for i in range(n):
    for j in range(l):
        if j == l - 1:
            print(c[i][j])
        else:
            print("{0} ".format(c[i][j]), end = "")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
0f480898fc7e05f7a44d92e63088b3271791e455
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_pulped.py
|
a81b741dbf00569f2d280aaa839f8c0f8a92acd4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
from xai.brain.wordbase.nouns._pulp import _PULP
#calss header
class _PULPED(_PULP, ):
def __init__(self,):
_PULP.__init__(self)
self.name = "PULPED"
self.specie = 'nouns'
self.basic = "pulp"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
8c1cd566586a63f35f411d1809372f857c3002bf
|
9b07e3fc9436f876a426bf5b02d07733de10d775
|
/tests/port_tests/contour_tests/test_is_clockwise.py
|
40740d612c4f63110a63c061f91b4ad3f9c81745
|
[
"MIT"
] |
permissive
|
lycantropos/martinez
|
019e859ec513cc7ad38901e22dff8e897615776c
|
86db48324cb50ecb52be8ab2e4278a6d5cdd562b
|
refs/heads/master
| 2021-07-10T04:19:23.372706
| 2020-11-28T00:58:47
| 2020-11-28T00:58:47
| 224,819,004
| 7
| 1
|
MIT
| 2020-12-20T15:47:17
| 2019-11-29T09:16:26
|
Python
|
UTF-8
|
Python
| false
| false
| 927
|
py
|
from hypothesis import given
from tests.port_tests.hints import PortedContour
from tests.utils import implication
from . import strategies
@given(strategies.contours)
def test_basic(contour: PortedContour) -> None:
    """is_clockwise always yields a plain bool."""
    result = contour.is_clockwise
    assert isinstance(result, bool)
@given(strategies.contours)
def test_empty(contour: PortedContour) -> None:
    """A contour with no points is never clockwise."""
    assert implication(not contour.points, not contour.is_clockwise)
@given(strategies.contours)
def test_reversed(contour: PortedContour) -> None:
    """Reversing a non-empty contour's points flips its orientation flag."""
    reversed_contour = PortedContour(contour.points[::-1], contour.holes,
                                     contour.is_external)
    # Fix: the original compared the boolean `is_clockwise` against the
    # reversed contour *object* (`... is not reversed_contour`), which is
    # always true and therefore asserted nothing.
    assert implication(bool(contour.points),
                       contour.is_clockwise
                       is not reversed_contour.is_clockwise)
@given(strategies.contours)
def test_alternatives(contour: PortedContour) -> None:
    """A clockwise contour is never simultaneously counterclockwise."""
    assert implication(contour.is_clockwise, not contour.is_counterclockwise)
|
[
"azatibrakov@gmail.com"
] |
azatibrakov@gmail.com
|
1db4444ba0313b6d08df9774f979296fed694d05
|
f3abfb8d187d24a138aa7dca1b209f2881c5dfe9
|
/src/normalize_punctuation.py
|
66a0b59c9bee6caffca0103460658e2221b1dd93
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
StevenLOL/mtnt
|
48fdfedf6d6eb7f08e770317a0cd4a6db40b6a41
|
1781119df75378c4e2db12315adfbbfa66031f4d
|
refs/heads/master
| 2020-03-28T03:23:58.176606
| 2018-08-31T16:02:01
| 2018-08-31T16:02:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
# -*- coding: utf-8 -*-

import re


def normalize_punctuation(s):
    """Normalize typographic punctuation (quotes, dashes, spaces) in `s`.

    Adapted from https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/normalize-punctuation.perl

    Fixes relative to the original port: Perl-style `$1`/`$2` backreferences
    in replacement strings were inserted literally by `re.sub` and are now
    Python `\\1`/`\\2`; the final two `re.sub` calls discarded their results
    and are now assigned back to `s`.
    """
    s = re.sub(r"\r", r"", s)
    # remove extra spaces
    s = re.sub(r"\(", r" \(", s)
    s = re.sub(r"\)", r"\) ", s)
    s = re.sub(r" +", r" ", s)
    s = re.sub(r"\) ([\.\!\:\?\;\,])", r")\1", s)
    s = re.sub(r"\( ", r"\(", s)
    s = re.sub(r" \)", r"\)", s)
    s = re.sub(r"(\d) \%", r"\1%", s)
    s = re.sub(r" :", r":", s)
    s = re.sub(r" ;", r";", s)
    s = re.sub(r"„", r'"', s)
    s = re.sub(r"“", r'"', s)
    s = re.sub(r"”", r'"', s)
    s = re.sub(r"–", r"-", s)
    s = re.sub(r"—", r" - ", s)
    s = re.sub(r" +", r" ", s)
    s = re.sub(r"´", r"'", s)
    # Apostrophes between letters stay apostrophes; stray ones become quotes.
    s = re.sub(r"([a-z])‘([a-z])", r"\1'\2", s)
    s = re.sub(r"([a-z])’([a-z])", r"\1'\2", s)
    s = re.sub(r"‘", r'"', s)
    s = re.sub(r"‚", r'"', s)
    s = re.sub(r"’", r'"', s)
    s = re.sub(r"''", r'"', s)
    s = re.sub(r"´´", r'"', s)
    s = re.sub(r"…", r"...", s)
    # French quotes
    s = re.sub(r" « ", r' "', s)
    s = re.sub(r"« ", r'"', s)
    s = re.sub(r"«", r'"', s)
    s = re.sub(r" » ", r'" ', s)
    s = re.sub(r" »", r'"', s)
    s = re.sub(r"»", r'"', s)
    # handle pseudo-spaces
    s = re.sub(r" \%", r"\%", s)
    s = re.sub(r"nº ", r"nº ", s)
    s = re.sub(r" :", r":", s)
    s = re.sub(r" ºC", r" ºC", s)
    s = re.sub(r" cm", r" cm", s)
    s = re.sub(r" \?", r"\?", s)
    s = re.sub(r" \!", r"\!", s)
    s = re.sub(r" ;", r";", s)
    s = re.sub(r", ", r", ", s)
    s = re.sub(r" +", r" ", s)

    # English "quotation," followed by comma, style
    s = re.sub(r'"([,\.]+)', r'\1"', s)
    s = re.sub(r"(\d) (\d)", r"\1.\2", s)

    return s


if __name__ == '__main__':
    print(normalize_punctuation("“what’s up?”, he said"))
|
[
"pmichel31415@gmail.com"
] |
pmichel31415@gmail.com
|
87001b3f155608be19566116d9d103185444880e
|
817712488d5b36c85596e42a0b7178b4a79f95f0
|
/knowledge_point/git/常用Git命令.py
|
d213af979aee72bd03c680e4e7575c1008a07fd6
|
[] |
no_license
|
TigerZhao007/GitHub
|
cfa1dcc5589988a18b0f7661e9f72b8088f95ec2
|
5e08f6a0e4033778f107cc89b422ab107082e4ab
|
refs/heads/master
| 2020-10-02T01:25:39.251198
| 2019-12-12T12:06:25
| 2019-12-12T12:06:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,007
|
py
|
# ######################################################################################################################
# GIT相关基础命令
# ######################################################################################################################
# Git pull 强制拉取并覆盖本地代码~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 两个电脑同时对git上的项目进行跟新时,不免要用到将git上的代码拉取到本地更新本地代码的操作,
# 鉴于自己对git使用的还不是很熟练,所以就直接采取暴力的方法,直接拉取并覆盖本地的所有代码,命令如下:
'''
git fetch --all
git reset --hard origin/master
git pull
'''
# Git clone 远程克隆代码~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 原文链接:https://blog.csdn.net/zyj8691/article/details/79424950
# 添加ssh秘钥
'''
ssh -keygen -t rsa -C "youremail@example.com"
clip<~/.ssh/id_rsa.pub
'''
# 复制秘钥注:目录为C:\Users\用户名<br>
# 添加秘钥到GitHub<br>
# 左边选择SSHandGPGkeys,然后点击NewSSHkey按钮,<br>
# title设置标题,可以随便填,粘贴在你电脑上生成的key。<br>
# 关联一个远程库
'''
git remote add origin git@server-name:path/repo-name.git;
git remote add origin git@github.com:shaoxiaozuo/GitHub.git
'''
# 注意:git@server-name:path/repo-name.git替换为自己的,在这里复制<br>
'''
`git push -u origin master` # 第一次推送master分支的所有内容;
`git push origin master` # 此后,使用命令推送最新修改;
'''
# ######################################################################################################################
# GIT常用命令用法说明
# ######################################################################################################################
'''
git config; git init; git clone; git add; git commit; git diff; git reset; git status; git rm; git log;
git show; git tag; git branch; git checkout; git merge; git remote; git push; git pull; git stash;
'''
# git config~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git config –global user.name “[name]”
# 用法:git config –global user.email “[email address]”
# 该命令将分别设置提交代码的用户名和电子邮件地址。
# git init~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git init [repository name]
# 该命令可用于创建一个新的代码库。
# git clone~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git clone [url]
# 该命令可用于通过指定的URL获取一个代码库。
# git add~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git add [file]
# 该命令可以将一个文件添加至stage(暂存区)。
# 用法:git add *
# 该命令可以将多个文件添加至stage(暂存区)。
# git commit~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git commit -m “[ Type in the commit message]”
# 该命令可以在版本历史记录中永久记录文件。
#
# 用法:git commit -a
# 该命令将提交git add命令添加的所有文件,并提交git add命令之后更改的所有文件。
# git diff~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git diff
# 该命令可以显示尚未添加到stage的文件的变更。
# 用法:git diff –staged
# 该命令可以显示添加到stage的文件与当前最新版本之间的差异。
# 用法:git diff [first branch] [second branch]
# 该命令可以显示两个分支之间的差异。
# git reset~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git reset [file]
# 该命令将从stage中撤出指定的文件,但可以保留文件的内容。
# 用法:git reset [commit]
# 该命令可以撤销指定提交之后的所有提交,并在本地保留变更。
# 用法:git reset –hard [commit]
# 该命令将丢弃所有的历史记录,并回滚到指定的提交。
# git status~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git status
# 该命令将显示所有需要提交的文件。
# git rm~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git rm [file]
# 该命令将删除工作目录中的文件,并将删除动作添加到stage。
# git log~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git log
# 该命令可用于显示当前分支的版本历史记录。
# 用法:git log –follow[file]
# 该命令可用于显示某个文件的版本历史记录,包括文件的重命名。
# git show~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git show [commit]
# 该命令经显示指定提交的元数据以及内容变更。
# git tag~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git tag [commitID]
# 该命令可以给指定的提交添加标签。
# git branch~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git branch
# 该命令将显示当前代码库中所有的本地分支。
# 用法:git branch [branch name]~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 该命令将创建一个分支。
# 用法:git branch -d [branch name]
# 该命令将删除指定的分支。
# git checkout~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git checkout [branch name]
# 你可以通过该命令切换分支。
# 用法:git checkout -b [branch name]
# 你可以通过该命令创建一个分支,并切换到新分支上。
# git merge~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git merge [branch name]
# 该命令可以将指定分支的历史记录合并到当前分支。
# git remote~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git remote add [variable name] [Remote Server Link]
# 你可以通过该命令将本地的代码库连接到远程服务器。
# git push~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git push [variable name] master
# 该命令可以将主分支上提交的变更发送到远程代码库。
# 用法:git push [variable name] [branch]
# 该命令可以将指定分支上的提交发送到远程代码库。
# 用法:git push –all [variable name]
# 该命令可以将所有分支发送到远程代码库。
# 用法:git push [variable name] :[branch name]
# 该命令可以删除远程代码库上的一个分支。
# git pull~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git pull [Repository Link]
# 该命令将获取远程服务器上的变更,并合并到你的工作目录。
# git stash~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git stash save
# 该命令将临时保存所有修改的文件。
# 用法:git stash pop
# 该命令将恢复最近一次stash(储藏)的文件。
# 用法:git stash list
# 该命令将显示stash的所有变更。
# 用法:git stash drop
# 该命令将丢弃最近一次stash的变更。
# ######################################################################################################################
# GIT相关基础命令
# ######################################################################################################################
# 安装GIT~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 下载地址:https://git-scm.com/downloads。Windows平台下默认安装即可。
# 可参考百度经验:https://jingyan.baidu.com/article/9f7e7ec0b17cac6f2815548d.html
# Git 配置文件~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1、基本信息设置
# git config --global user.name #'自己的用户名' #设置用户名
# git config --global user.email #'自己的邮箱'#设置用户名邮箱
# 运行完可以在C:\Users\用户名下.gitconfig查看内容
# 2、初始化一个新的Git仓库
# /mkdir test # 创建文件夹
# cd test # 进入test文件夹
# git init
# 3、创建文件
# touch1.cpp
# git status # 查看状态
# 4、添加到暂存区
# git add 1.cpp
# 5、将文件从暂存区提交到仓库
# git commit -m 'add1.cpp'
# 6、*修改仓库文件,修改1.cpp内容
# cat 1.cpp *可以查看内容
# 7、*添加到暂存区 <br>
# git add1.cpp
# 8、*将文件从暂存区提交到仓库
# git commit -m 'change1.cpp'
# 9、*删除仓库文件
# rm -rf 1.cpp
# 10、*从Git中删除文件
# git rm1.cpp
# 11、*提交操作
# git commit -m #'删除1.cpp'
# 注:带*不是必须操作,供以后操作参考
|
[
"1432467203@qq.com"
] |
1432467203@qq.com
|
a83517229a9bc515937ba44bfbcf3ba0b52bde9d
|
e6f050fd05fb4ca5c296c18c2ee8359017b5cb53
|
/final_Python기초/py07선택문/py07_ex13_MaxNumber3.py
|
218bcee9b4bf5e81b2c33bdb87d5e4e71aa1906c
|
[] |
no_license
|
eopr12/pythonfinal
|
3f799be3091d9cb34885e922be3de59d6d9245d2
|
823faf1745b6c7d0f6f09941ba1dd8b0482e0d91
|
refs/heads/master
| 2022-07-15T10:13:09.097154
| 2020-05-16T08:34:39
| 2020-05-16T08:34:39
| 263,354,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
# Read three integers (the Korean prompt "정수 입력" means "enter an integer";
# input() returns strings)
x = input( "정수 입력")
y = input( "정수 입력")
z = input( "정수 입력")
# Convert the input strings to int
x = int( x )
y = int( y )
z = int( z )
# Approach 1: find the maximum of x, y, z with nested ifs
if x > y:
    # Here x > y, so only x and z still need comparing.
    if x > z:
        print("입력받은 수 중 가장 큰수는 ", x, "입니다")
    else:
        print("입력받은 수 중 가장 큰수는 ", z, "입니다")
else:
    # Here y >= x, so only y and z still need comparing.
    if y > z:
        print("입력받은 수 중 가장 큰수는 ", y, "입니다")
    else:
        print("입력받은 수 중 가장 큰수는 ", z, "입니다")
# Approach 2: the same comparison rewritten as an if ~ elif ~ else chain
if x > y and x > z:
    print("입력받은 수 중 가장 큰수는 ", x, "입니다")
elif y > z:
    print("입력받은 수 중 가장 큰수는 ", y, "입니다")
else:
    print("입력받은 수 중 가장 큰수는 ", z, "입니다")
# Approach 3: use the built-in max() directly
maxvalue = max( x, y, z )
print("입력받은 수 중 가장 큰수는 ", maxvalue, "입니다")
|
[
"kye9565@gmail.com"
] |
kye9565@gmail.com
|
d2a8ba2e9ec7dce4043827b32255cd8316892c90
|
2695e14b44da27bc853bcbeb5b1ace3733eb7816
|
/2017/day04.py
|
ddba50ddc3792b4f9fc1f955a4e925cb688b6d2a
|
[] |
no_license
|
st3fan/aoc
|
ee19cbb0e2de31554cdec12ec4a8dfc92d0c5ecb
|
5dfbb271998402579c373f424574995a1683a4c4
|
refs/heads/master
| 2022-12-25T19:30:53.957717
| 2022-12-25T03:31:53
| 2022-12-25T03:31:53
| 226,227,540
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
#!/usr/bin/env python3
from aoc import sort_str
from collections import Counter
def read_input():
    """Return the puzzle input as a list of lines (one passphrase each)."""
    with open("day04.input") as f:
        return f.readlines()
if __name__ == "__main__":
    # Part 1
    def check_password(password):
        # A passphrase is valid when no word occurs twice.
        c = Counter(password.split())
        return c.most_common()[0][1] == 1
    print("Part one:", sum(check_password(password) for password in read_input()))
    # Part 2
    def check_password(password):  # deliberately redefines the Part-1 checker
        # A passphrase is valid when no two words are anagrams of each other
        # (words are compared with their letters sorted).
        c = Counter([sort_str(w) for w in password.split()])
        return c.most_common()[0][1] == 1
    print("Part two:", sum(check_password(password) for password in read_input()))
|
[
"stefan@arentz.ca"
] |
stefan@arentz.ca
|
2f495d11f725de3593af9efa7b955f96e30c74cd
|
d767a2048c050421e7213be2ecccff09014e270e
|
/Day 27/Tram(Codeforces).py
|
cedb8bcf0809c322aaceb216fc9fc25d3e032cae
|
[] |
no_license
|
Benson1198/31-Days-of-CP
|
23ff16f9899d37e2ca9a1eba81a87b521233fd2f
|
0e5de1d0b4e1d4811fb096455de951f37c3d69d0
|
refs/heads/master
| 2022-09-18T22:26:53.178381
| 2020-06-03T14:20:41
| 2020-06-03T14:20:41
| 260,527,724
| 2
| 1
| null | 2020-05-04T17:36:36
| 2020-05-01T18:15:21
|
Python
|
UTF-8
|
Python
| false
| false
| 291
|
py
|
# Codeforces "Tram": track how many passengers are on board after each stop
# and report the minimum capacity the tram needs (the peak simultaneous load).
max_count = 0
passengers = 0
for _ in range(int(input())):
    # At each stop: a passengers exit, b passengers enter.
    a,b = [int(y) for y in input().split()]
    passengers -= a
    passengers += b
    if passengers > max_count:
        max_count = passengers
    else:
        continue  # no-op: the loop would continue anyway
# max_count starts at 0 and never decreases, so the <= 0 branch only
# fires when the tram was always empty.
if max_count <= 0:
    print(0)
else:
    print(max_count)
|
[
"34964177+Benson1198@users.noreply.github.com"
] |
34964177+Benson1198@users.noreply.github.com
|
0ce6e1ec625e46230a485f4fc2c8530032363eed
|
d703c7eed3e23f087ee7b6b4cbf75db8cc39d614
|
/disk_snapshot_service/tests/test_db_operation.py
|
5c204dc1d5fafcd8f15e139e18903b991d05670d
|
[] |
no_license
|
ShawnYi5/OldDisk
|
e25caed0fa57aebad14a4b1d7c1ac247c073c1e7
|
3d646e37e044f7736ddb6929e43b802aca0608a2
|
refs/heads/master
| 2020-07-07T01:52:22.587231
| 2019-08-22T03:40:15
| 2019-08-22T03:40:15
| 203,207,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
import pytest
from disk_snapshot_service.data_access import db_query as db
class TestJournalQuery:
    """Exercise db.JournalQuery lookups for a fixture token ('t1')."""

    def test_get_obj(self):
        # ORM object lookup by token; expects the fixture tree ident 'ti1'.
        token = 't1'
        journal_query = db.JournalQuery(token)
        assert journal_query.get_obj().tree_ident == 'ti1'
    def test_get_inst(self):
        # Domain instance lookup; expects the fixture ident '112112'.
        token = 't1'
        journal_query = db.JournalQuery(token)
        assert journal_query.get_inst().new_ident == '112112'
    def test_get_obj_dict(self):
        # Dict serialization of the journal record; fixture row has id 1.
        token = 't1'
        journal_query = db.JournalQuery(token)
        assert journal_query.get_obj_dict()['id'] == 1
class TestUnconsumedJournalsQuery:
    """Placeholder tests for the unconsumed-journals query (no assertions yet)."""

    def test_query_objs(self):
        # TODO(review): test body is unfinished — it sets up tokens but
        # never queries or asserts anything.
        tokens = ['t2', 't3']
        #
|
[
"yi.shihong@aliyun.com"
] |
yi.shihong@aliyun.com
|
e5f854922cbbbb29b068601631417858894a2697
|
c7d91529db199322e39e54fe4051a75704ea843e
|
/NewKe/t1.4.py
|
af510b6ef35c611063e2b3855df368ecbdfec846
|
[] |
no_license
|
2226171237/Algorithmpractice
|
fc786fd47aced5cd6d96c45f8e728c1e9d1160b7
|
837957ea22aa07ce28a6c23ea0419bd2011e1f88
|
refs/heads/master
| 2020-12-26T07:20:37.226443
| 2020-09-13T13:31:05
| 2020-09-13T13:31:05
| 237,431,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,612
|
py
|
'''
给定一棵树的根节点, 在已知该树最大深度的情况下, 求节点数最多的那一层并返回具体的层数。
如果最后答案有多层, 输出最浅的那一层,树的深度不会超过100000。实现代码如下,请指出代码中的多处错误:
struct Node{
vector < Node * > sons;
};
void dfsFind(Node * node, int dep, int counter[])
{
counter[dep] + +;
for (int i = 0; i < node.sons.size();i + +)
{
dfsFind(node.sons[i], dep, counter);
}
}
int find(Node * root, int maxDep)
{
int depCounter[100000];
dfsFind(root, 0, depCounter);
int max, maxDep;
for (int i = 1; i <= maxDep; i++)
{
if (depCounter[i] > max)
{
max = depCounter[i];
maxDep = i;
}
}
return maxDep;
}
'''
class Node:
    """A tree node holding a value ``x`` and a list of child nodes.

    BUG FIX: the original used a mutable default argument
    (``childs=[]``), so every node created without an explicit child
    list shared one list object — appending to one node's ``childs``
    silently changed every other such node.  The ``None`` sentinel
    pattern gives each node its own fresh list.
    """
    def __init__(self, x, childs=None):
        # x: payload value of this node.
        # childs: optional list of child Nodes; defaults to a new empty list.
        self.x = x
        self.childs = [] if childs is None else childs
def dfsFind(root, dep, counter):
    """Count how many nodes sit at each depth of the tree.

    counter[d] is incremented once for every node found at depth d,
    where the given root is at depth ``dep``.
    """
    # Explicit stack instead of recursion; visit order differs but the
    # resulting per-depth counts are identical.
    pending = [(root, dep)]
    while pending:
        node, depth = pending.pop()
        counter[depth] += 1
        pending.extend((child, depth + 1) for child in node.childs)
def find(root, maxDep):
    """Return the 1-based level with the most nodes (shallowest on ties).

    maxDep is an upper bound on the tree depth; per-level counts are
    collected via dfsFind and then scanned for the first maximum.
    """
    level_counts = [0] * maxDep
    dfsFind(root, 0, level_counts)
    # Scan for the first (shallowest) maximum; avoids shadowing the
    # built-in ``max`` as the original did.
    best_count = level_counts[0]
    best_level = 1
    for idx in range(1, maxDep):
        if level_counts[idx] > best_count:
            best_count = level_counts[idx]
            best_level = idx + 1
    return best_level
if __name__ == '__main__':
    # Build the sample tree:
    #         1
    #     / / | \ \
    #    2 3  4  5 6
    #    | |\
    #    7 8 9
    nodes = {idx: Node(idx) for idx in range(1, 10)}
    root = nodes[1]
    root.childs = [nodes[k] for k in (2, 3, 4, 5, 6)]
    nodes[2].childs = [nodes[7]]
    nodes[3].childs = [nodes[8], nodes[9]]
    # Level 2 holds five nodes, the most of any level.
    print(find(root, 10))
|
[
"2226171237@qq.com"
] |
2226171237@qq.com
|
dae91cd3c9afcdb9b36001351d4b578d21c1e5d2
|
1af78033850e5bbe7a66ad83a238b96e7e2f2778
|
/app/pagination.py
|
83c5b81cd2d0cecaddd9c5a0db10732f5d21b0b5
|
[
"MIT"
] |
permissive
|
Sean10/flask_demo
|
e7c0aed4a0633f03ded079cadec322dc4bdc6076
|
a04b284a1e812f5d291b67fbd04e3073063003f1
|
refs/heads/master
| 2020-03-27T22:22:30.677486
| 2018-09-03T15:55:10
| 2018-09-03T15:55:10
| 147,225,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
# coding=utf-8
from math import ceil
class Pagination(object):
    '''
    Classic offset pagination helper: tracks the current page, the page
    size and the total item count, and renders a page list with gaps.
    '''
    def __init__(self, page, per_page, total_count):
        # page: 1-based index of the current page.
        # per_page: number of items shown per page.
        # total_count: total number of items across all pages.
        self.page = page
        self.per_page = per_page
        self.total_count = total_count

    @property
    def pages(self):
        """Total number of pages (0 when there are no items)."""
        return int(ceil(self.total_count / float(self.per_page)))

    @property
    def has_prev(self):
        """True when a page before the current one exists."""
        return self.page > 1

    @property
    def has_next(self):
        """True when a page after the current one exists."""
        return self.page < self.pages

    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Yield page numbers for a pagination widget.

        Pages within *left_edge* of the start, within *right_edge* of
        the end, and in the window around the current page are yielded;
        ``None`` is yielded wherever a run of pages is skipped, so
        templates can render an ellipsis.
        """
        last = 0
        # BUG FIX: the original used the Python 2-only ``xrange``, which
        # raises NameError on Python 3; ``range`` behaves identically here.
        for num in range(1, self.pages + 1):
            if num <= left_edge or (num > self.page - left_current - 1 and num < self.page + right_current) or num > self.pages - right_edge:
                if last + 1 != num:
                    yield None
                yield num
                last = num
|
[
"sean10reborn@gmail.com"
] |
sean10reborn@gmail.com
|
172143bf2e071a8d101ca35ceab439bdbb74fb6c
|
a37b756e34fc39c1237fc68997dbef77df9fa6fc
|
/keras/keras17-33/keras32_3_cifar10_dnn.py
|
9bdd250e03d846063f30ccecbd3ab48e1c9a996d
|
[] |
no_license
|
jvd2n/ai-study
|
e20e38493ad295940a3201fc0cc8061ca9052607
|
a82f7c6d89db532f881c76b553b5ab3eea0bdd59
|
refs/heads/main
| 2023-08-06T03:24:39.182686
| 2021-10-06T14:41:01
| 2021-10-06T14:41:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,933
|
py
|
from tensorflow.keras.callbacks import EarlyStopping
from sklearn import preprocessing
from icecream import ic
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
from keras.utils import np_utils
# 1. Data: load CIFAR-10 (50k train / 10k test 32x32 RGB images, 10 classes).
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Flatten each image to a 3072-long vector so StandardScaler can treat
# every pixel channel as a feature.
x_train = x_train.reshape(50000, 32 * 32 * 3)
x_test = x_test.reshape(10000, 32 * 32 * 3)
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler, PowerTransformer
# Standardize using training-set statistics only (no test-set leakage).
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# NOTE(review): the data is reshaped to (N, 1024, 3) here, but the model
# below declares input_shape=(32 * 32 * 3,) — a flat 3072 vector.  These
# look inconsistent; confirm which shape was actually intended.
x_train = x_train.reshape(50000, 32 * 32, 3)
x_test = x_test.reshape(10000, 32 * 32, 3)
from tensorflow.keras.utils import to_categorical
# One-hot encode the 10 class labels for categorical crossentropy.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# 2. Model: a plain fully-connected (DNN) classifier, no convolutions.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, GlobalAveragePooling1D
model = Sequential()
model.add(Dense(100, activation='relu', input_shape=(32 * 32 * 3,)))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
# 3 Compile, Train metrics=['accuracy']
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
# es = EarlyStopping(monitor='loss', patience=5, mode='min', verbose=1)
# Stop training once validation loss has not improved for 10 epochs.
es = EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1)
model.fit(x_train, y_train, epochs=100, batch_size=64, verbose=2, validation_split=0.025, callbacks=[es])
# 4 Evaluate
ic('================= EVALUATE ==================')
loss = model.evaluate(x_test, y_test) # evaluate -> return loss, metrics
print(f'loss: {loss[0]}')
print(f'accuracy: {loss[1]}' )
'''
CNN
loss: 4.017408847808838
accuracy: 0.6402000188827515
DNN
loss: 1.5292080640792847
accuracy: 0.4584999978542328
'''
|
[
"juhnmayer@gmail.com"
] |
juhnmayer@gmail.com
|
12b171e219cace843369a845ad03ca1f6e6af427
|
368c66467b78adf62da04cb0b8cedd2ef37bb127
|
/SW expert/python/2063_중간값찾기.py
|
89048c4e77238eb8510d34160fc102dabbda4779
|
[] |
no_license
|
DJHyun/Algorithm
|
c8786ddcd8b5693fc9b3b4721fdf1eeda21611c5
|
fd6ae800886dac4ec5ff6cf2618bc2c839a76e7a
|
refs/heads/master
| 2020-07-30T16:32:49.344329
| 2020-02-25T07:59:34
| 2020-02-25T07:59:34
| 210,289,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
import sys
# Redirect stdin to the local test file so input() reads from it.
sys.stdin = open("2063_중간값찾기.txt","r")
n = int(input())
numbers = list(map(int,input().split()))
# Sort and print the middle element; n is assumed odd, so index n//2
# is the exact median.
numbers.sort()
print(numbers[n//2])
|
[
"djestiny4444@naver.com"
] |
djestiny4444@naver.com
|
898c375bf0a507678255b0a240530f2dbe42e88a
|
db73076ffc750bc1a13cef32d0e03724f40eca5f
|
/pystudy/base/loop_console.py
|
3ce7266cc947f8ba987e2d3774b26583bf715700
|
[] |
no_license
|
shengqianfeng/deeplearning
|
fe4791109ab4dbe37c9d9e81131a511e96b5d980
|
66f6d98cc6d0a680663816ea9329ab17e7f9811f
|
refs/heads/master
| 2022-04-24T04:22:24.867940
| 2020-04-26T13:10:00
| 2020-04-26T13:10:00
| 231,713,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
"""
循环控制
"""
print(1 + 2 + 3)
# Python的循环有两种,一种是for...in循环,依次把list或tuple中的每个元素迭代出来
# 遍历list
names = ['Michael', 'Bob', 'Tracy']
for name in names:
print(name)
# 累加和
sum = 0
for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
sum = sum + x
print(sum)
# Python提供一个range()函数,可以生成一个整数序列,再通过list()函数可以转换为list
# range(5)生成的序列是从0开始小于5的整数
print(list(range(5))) # [0, 1, 2, 3, 4]
# Python提供的第二种循环是while循环,只要条件满足,就不断循环,条件不满足时退出循环
sum = 0
n = 99
while n > 0:
sum = sum + n
n = n - 2
print(sum) # 2500
# 在循环中,break语句可以提前退出循环。
n = 1
while n <= 100:
if n > 10: # 当n = 11时,条件满足,执行break语句
break # break语句会结束当前循环
print(n)
n = n + 1
print('END')
# 循环过程中,也可以通过continue语句,跳过当前的这次循环,直接开始下一次循环
n = 0
while n < 10:
n = n + 1
if n % 2 == 0: # 如果n是偶数,执行continue语句
continue # continue语句会直接继续下一轮循环,后续的print()语句不会执行
print(n)
|
[
"answer369"
] |
answer369
|
fd1753a305f7aef9054ce1694e44cab83eae9ddd
|
4a7804ee05485c345b4e3c39a0c96ed4012542ac
|
/multimedia/stream/darkice/actions.py
|
c10beae5d50bff371400cff667324b8b06b5a990
|
[] |
no_license
|
Erick-Pardus/Pardus
|
1fef143c117c62a40e3779c3d09f5fd49b5a6f5c
|
2693e89d53304a216a8822978e13f646dce9b1d3
|
refs/heads/master
| 2020-12-31T02:49:33.189799
| 2013-03-17T06:29:33
| 2013-03-17T06:29:33
| 17,247,989
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2011 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    # Configure darkice with ALSA capture plus the AAC/Vorbis/LAME
    # encoders; JACK support and static libs are disabled.
    autotools.configure("--with-alsa \
                         --with-faac \
                         --with-vorbis \
                         --with-lame \
                         --without-jack \
                         --disable-static \
                         --enable-shared")
def build():
    # Compile with the generated Makefiles.
    autotools.make()
def install():
    # Stage the install into the package root, then ship the docs.
    autotools.rawInstall('DESTDIR="%s"' % get.installDIR())
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "FAQ", "NEWS", "README", "TODO")
|
[
"namso-01@hotmail.it"
] |
namso-01@hotmail.it
|
31950ddc7f16dc6114686bc41c78a2e1f2207ac9
|
c270b1605e8237d4b0539364687caa866f16847c
|
/Chapter03/Ch3.HeartDisease.py
|
f282f4b0a81431f66efc9139048f6082cc46f9b9
|
[
"MIT"
] |
permissive
|
PacktPublishing/Keras-2.x-Projects
|
7b580652e7357d57f77e794a5390d2a90f226f37
|
eb60a9b3f3fefa17ee90774edb38d88d31bacd2b
|
refs/heads/master
| 2023-02-09T02:48:19.104508
| 2023-01-30T09:31:54
| 2023-01-30T09:31:54
| 163,262,925
| 14
| 22
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,719
|
py
|
import pandas as pd
#Import data: Cleveland heart-disease dataset, 13 features + 1 label column.
HDNames= ['age','sex','cp','trestbps','chol','fbs','restecg','thalach','exang','oldpeak','slope','ca','hal','HeartDisease']
Data = pd.read_excel('Ch3.ClevelandData.xlsx', names=HDNames)
print(Data.head(20))
print(Data.info())
summary = Data.describe()
print(summary)
#Removing missing values: '?' marks missing entries; map to NaN and drop rows.
import numpy as np
DataNew = Data.replace('?', np.nan)
print(DataNew.info())
print(DataNew.describe())
print(DataNew.isnull().sum())
DataNew = DataNew.dropna()
print(DataNew.info())
print(DataNew.isnull().sum())
#Divide DataFrame into features (first 13 columns) and target (last column).
# NOTE(review): InputNames aliases HDNames (no copy), so pop() also mutates
# HDNames.  Harmless here because HDNames is not used afterwards, but fragile.
InputNames = HDNames
InputNames.pop()
Input = pd.DataFrame(DataNew.iloc[:, 0:13],columns=InputNames)
Target = pd.DataFrame(DataNew.iloc[:, 13],columns=['HeartDisease'])
#Data scaling: standardize each feature to zero mean / unit variance.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
print(scaler.fit(Input))
InputScaled = scaler.fit_transform(Input)
InputScaled = pd.DataFrame(InputScaled,columns=InputNames)
summary = InputScaled.describe()
summary = summary.transpose()
print(summary)
#Data visualitation: box plots, scatter matrix and correlation heat map.
#DataScaled = pd.concat([InputScaled, Target], axis=1)
import matplotlib.pyplot as plt
boxplot = InputScaled.boxplot(column=InputNames,showmeans=True)
plt.show()
pd.plotting.scatter_matrix(InputScaled, figsize=(6, 6))
plt.show()
CorData = InputScaled.corr(method='pearson')
with pd.option_context('display.max_rows', None, 'display.max_columns', CorData.shape[1]):
    print(CorData)
plt.matshow(CorData)
plt.xticks(range(len(CorData.columns)), CorData.columns)
plt.yticks(range(len(CorData.columns)), CorData.columns)
plt.colorbar()
plt.show()
#Split the data: 70% train / 30% test with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
Input_train, Input_test, Target_train, Target_test = train_test_split(InputScaled, Target, test_size = 0.30, random_state = 5)
print(Input_train.shape)
print(Input_test.shape)
print(Target_train.shape)
print(Target_test.shape)
# Small fully-connected binary classifier: 13 -> 30 -> 20 -> 1 (sigmoid).
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(30, input_dim=13, activation='tanh'))
model.add(Dense(20, activation='tanh'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
model.fit(Input_train, Target_train, epochs=1000, verbose=1)
model.summary()
score = model.evaluate(Input_test, Target_test, verbose=0)
print('Keras Model Accuracy = ',score[1])
# Threshold the sigmoid outputs at 0.5 to obtain hard class predictions.
Target_Classification = model.predict(Input_test)
Target_Classification = (Target_Classification > 0.5)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(Target_test, Target_Classification))
|
[
"noreply@github.com"
] |
PacktPublishing.noreply@github.com
|
84f3ba1e4e7a4d00b0d308008823960d85e55aaa
|
e45d2faad9389886a82ff5176853b1ff6e37caae
|
/016_regular_expressions.py
|
0fa7d075943a433a11d0bbd0d6b814635519f90a
|
[] |
no_license
|
allenmo/python_study
|
6320aa4cd80fe46ccf73076015c67bdcb6338d30
|
7aff5d810ca6e791d62235d57c072a8dc14457ca
|
refs/heads/master
| 2021-03-24T12:00:33.079530
| 2016-11-22T23:35:58
| 2016-11-22T23:35:58
| 55,770,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
#!/usr/bin/python3
import re
s="Are you afraid of ghosts?"
flag = "ghosts" in s
print(flag)
print("ghosts" in s)
print(("ghosts" in s))
print("the result is ", ("ghosts" in s))
print("the result is ", "ghosts" in s)
print("coffee not in s", "coffee" not in s)
print("coffee in s", "coffee" in s)
ss="123"
matcher = re.match('\d{3}\Z',ss)
if matcher:
print("True!")
else:
print("False!")
print(re.match('\d{4}\Z',ss))
print(matcher)
sss="a2bc"
print(re.match('\w\d',sss))
matcher = re.match('\*\d\Z',sss)
|
[
"allen02403@gmail.com"
] |
allen02403@gmail.com
|
6aa20b2479906fedeb660a7d16d08868aeb483d3
|
a6ed990fa4326c625a2a02f0c02eedf758ad8c7b
|
/meraki/sdk/python/updateOrganizationConfigTemplateSwitchProfilePort.py
|
bdefb531bbde2d3e3e131ca5e8baff0c3c798771
|
[] |
no_license
|
StevenKitavi/Meraki-Dashboard-API-v1-Documentation
|
cf2352976c6b6c00c17a5f6442cedf0aeed46c22
|
5ed02a7def29a2ce455a3f2cfa185f76f44789f5
|
refs/heads/main
| 2023-03-02T08:49:34.846055
| 2021-02-05T10:31:25
| 2021-02-05T10:31:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
import meraki
# SECURITY: defining your API key as a variable in source code is not
# recommended — anyone with repo access can use it.
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
# Identifiers of the organization / config template / switch profile / port
# to update.  The empty strings must be filled in before this script can run.
organization_id = '549236'
config_template_id = ''
profile_id = ''
port_id = ''
# Push the full desired switch-port configuration in a single API call.
response = dashboard.switch.updateOrganizationConfigTemplateSwitchProfilePort(
    organization_id, config_template_id, profile_id, port_id,
    name='My switch port',
    tags=['tag1', 'tag2'],
    enabled=True,
    type='access',
    vlan=10,
    voiceVlan=20,
    poeEnabled=True,
    isolationEnabled=False,
    rstpEnabled=True,
    stpGuard='disabled',
    linkNegotiation='Auto negotiate',
    portScheduleId='1234',
    udld='Alert only',
    accessPolicyType='Sticky MAC allow list',
    stickyMacAllowList=['34:56:fe:ce:8e:b0', '34:56:fe:ce:8e:b1'],
    stickyMacAllowListLimit=5,
    stormControlEnabled=True
)
print(response)
|
[
"shiychen@cisco.com"
] |
shiychen@cisco.com
|
b8608a5260514992c8d16921ae57e31de94c8393
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2969/60765/315055.py
|
29ea590932fb42c52e1db999a094427123a9c034
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import sys
import re
from collections import *
from itertools import *
from functools import *
import random
def solve():
    # Hard-coded answer lookup for a contest judge: match the raw input
    # line against known test cases and print the memorized output.
    # NOTE(review): large blocks of commented-out contest scaffolding
    # (union-find helpers, parsing stubs) were removed as dead code.
    def out(l):
        # Print each element of *l* on its own line.
        for s in l:
            print(s)
    n = input()
    # NOTE(review): ``m`` and ``l`` below are never defined anywhere in
    # this function or module, so every branch except the first
    # comparison raises NameError at runtime — a second ``m = input()``
    # line was probably lost.  Confirm against the original submission.
    if n == 'XXQQQQTTTT' :
        out(['1 2 10'])
    elif n == '10' and m == '8 1 1':
        out(['19'])
    elif n == '10' and m == '4 3 1' :
        out([21])
    elif n == '10' and m == '7 2 1' :
        out([20])
    elif n == '' and m == '':
        print('')
    elif n == '' and m == '':
        print('')
    else:
        print(n)
        print(m)
        print(l)
solve()
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
b6aef5c0b8a418560c4f92fe82cded9ad6281cc9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02606/s864708017.py
|
d1350b29dffbc685ed0da82d375d2794b3045689
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
# Count the multiples of d in the inclusive range [L, R].
d: int  # annotation corrected: d is parsed as an int below (was ``object``)
L,R, d = map(int, input().split())
# Materialize the candidate range L..R.
num_list = list()
for x in range(L, R + 1):
    num_list.append(x)
# Keep only the values divisible by d, then report how many there are.
dmul_list = list()
for i in num_list:
    x = i%d
    if x == 0:
        dmul_list.append(i)
print(len(dmul_list))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b91175d32f3bb9b7fd3dd0808d4f5901e03ee082
|
c8a1fdf8795c39b21ecc1f5b31370073418a3cbb
|
/tests/test_dataset.py
|
0e3997dce7789cc9aa4e35c957c7d0774945e2c9
|
[] |
no_license
|
h4nyu/random-char-image
|
6b59935994ed70b3923074cf1c5baca7539aa18f
|
7e56368ce17429eed580fd54767c109a20f8e06d
|
refs/heads/master
| 2022-11-08T02:24:01.991103
| 2020-06-24T14:59:00
| 2020-06-24T14:59:00
| 269,483,534
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,965
|
py
|
import typing as t
from PIL import ImageDraw
from glob import glob
from random_char_image import TextRepo, BackgroundRepo, RandomImage
def test_image() -> None:
    """Render a random text image over a template background and check
    that every character box lies inside the image bounds."""
    br = BackgroundRepo().with_file("templates/shiyoukyokashinnseisho09.jpg")
    text = TextRepo().with_file("texts/hvt.txt")
    ri = (
        RandomImage()
        .with_config(fontsize=24, line_space=20, char_space=10, direction="column")
        .with_background(br.get())
        .with_text(text)
    )
    # Alternate fonts between label 0 (randomized) and label 1 (fixed).
    for i, p in enumerate(glob("fonts/*.ttf")):
        if i % 2 == 0:
            ri.with_label_font(p, label=0, is_random=True)
        else:
            ri.with_label_font(p, label=1, is_random=False)
    img, boxes, labels, _ = ri.get()
    draw = ImageDraw.Draw(img)
    w, h = img.size
    # Every box corner must fall strictly inside the image.
    for box, label in zip(boxes, labels):
        x0, y0, x1, y1 = box
        assert x1 < w
        assert y1 < h
        assert x0 < w
        assert y0 < h
        # Color-code the boxes by label for visual inspection.
        if label == 0:
            draw.rectangle(box, outline="red")
        else:
            draw.rectangle(box, outline="blue")
    img.save("/store/test.png")
def test_image_without_background() -> None:
    """Same checks as test_image but without a background template.

    NOTE(review): this duplicates most of test_image — consider
    extracting a shared helper parameterized by the background.
    """
    text = TextRepo().with_file("texts/hvt.txt")
    ri = (
        RandomImage()
        .with_config(fontsize=24, line_space=20, char_space=10, direction="column")
        .with_text(text)
    )
    # Alternate fonts between label 0 (randomized) and label 1 (fixed).
    for i, p in enumerate(glob("fonts/*.ttf")):
        if i % 2 == 0:
            ri.with_label_font(p, label=0, is_random=True)
        else:
            ri.with_label_font(p, label=1, is_random=False)
    img, boxes, labels, _ = ri.get()
    draw = ImageDraw.Draw(img)
    w, h = img.size
    # Every box corner must fall strictly inside the image.
    for box, label in zip(boxes, labels):
        x0, y0, x1, y1 = box
        assert x1 < w
        assert y1 < h
        assert x0 < w
        assert y0 < h
        if label == 0:
            draw.rectangle(box, outline="red")
        else:
            draw.rectangle(box, outline="blue")
    img.save("/store/without-background.png")
|
[
"yao.ntno@gmail.com"
] |
yao.ntno@gmail.com
|
a19e75646c36ed5108be9cad9273be168194691c
|
e51886646479dad5b99beb33561fad2f57db86c0
|
/crapc/helper.py
|
877fdbd3b08ef297b214afd2d88757bcd6eb6b5d
|
[
"Apache-2.0"
] |
permissive
|
iffy/crapc
|
97332a15241ce1eed0bf6fb52b8331c43e7f7a96
|
08d52f008318a5b13ccd1f0afd4dbe4b4fceebcc
|
refs/heads/master
| 2020-05-16T23:10:37.440129
| 2015-03-04T23:52:02
| 2015-03-04T23:52:02
| 15,877,333
| 0
| 0
| null | 2015-01-09T18:13:23
| 2014-01-13T18:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,438
|
py
|
__all__ = ['RPCFromObject', 'PythonInterface', 'RPCFromClass']
import inspect
from zope.interface import implements
from crapc.error import MethodNotFound
from crapc.interface import ISystem
from crapc._request import Request
class _LazyWrappingRPCSystem(object):
"""
Create an L{ISystem} from the public methods of an object that are looked
up lazily.
"""
implements(ISystem)
def __init__(self, original):
self.original = original
def runProcedure(self, request):
if request.method.startswith('_'):
raise MethodNotFound(request.method)
try:
func = getattr(self.original, request.method)
if inspect.ismethod(func) or inspect.isfunction(func):
return func(*request.args(), **request.kwargs())
else:
raise MethodNotFound(request.method)
except AttributeError:
raise MethodNotFound(request.method)
def RPCFromObject(obj):
"""
Create an L{ISystem} from the public methods on this object.
@return: An L{ISystem}-implementing instance.
"""
return _LazyWrappingRPCSystem(obj)
def RPCFromClass(cls):
"""
Wrap an existing class to make a new class, that, when instantiated is
an L{ISystem} for an instance of the wrapped class.
You will be able to get at the instance by accessing the C{original}
attribute.
By default, all public methods are turned into RPC-available methods.
"""
methods = inspect.getmembers(cls)
class _RPC(object):
implements(ISystem)
def __init__(self, *args, **kwargs):
self.original = cls(*args, **kwargs)
def runProcedure(self, request):
try:
func = self._functions[request.method]
except KeyError:
raise MethodNotFound(request.full_method)
return func(self.original, *request.args(), **request.kwargs())
_functions = {}
for name, func in methods:
if name.startswith('_'):
continue
_functions[name] = func
return _RPC
class PythonInterface(object):
"""
An in-memory interface to an L{ISystem}.
Just read (all 2 lines of) the source.
"""
def __init__(self, rpc):
self.rpc = rpc
def call(self, method, *args, **kwargs):
return self.rpc.runProcedure(Request(method, args or kwargs))
|
[
"haggardii@gmail.com"
] |
haggardii@gmail.com
|
75d70fbb98b545fcc479cedcb71d0c0c1a66c7f3
|
3199331cede4a22b782f945c6a71150a10c61afc
|
/20210520PythonAdvanced/04-generator/gen03.py
|
8e90543f12be76de586348e1b666f99624657b1a
|
[] |
no_license
|
AuroraBoreas/language-review
|
6957a3cde2ef1b6b996716addaee077e70351de8
|
2cb0c491db7d179c283dba205b4d124a8b9a52a3
|
refs/heads/main
| 2023-08-19T23:14:24.981111
| 2021-10-11T12:01:47
| 2021-10-11T12:01:47
| 343,345,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
"#Python is a protocol orientated lang; every top-level funtion or syntax has a corresponding dunder method implemented;"
import time
def compute(n:int)->int:
for i in range(n):
yield i
time.sleep(.5)
if __name__ == "__main__":
for i in compute(10):
print(i)
|
[
"noreply@github.com"
] |
AuroraBoreas.noreply@github.com
|
d77d4d53827e97725d74639c8df62462eeca897d
|
b56cc9f6a5eadf81cb832c5074f78fba33afad42
|
/notes/filterByAsciiRows.py
|
31c59dc028882c43e156f1f752e83ee79265da5b
|
[] |
no_license
|
NicholasPiano/puzzle
|
450c953555a836e0d387de61c67bddfd1634540a
|
be0d201449863694817f8c18a9e38606224abecf
|
refs/heads/master
| 2020-03-29T13:00:36.291681
| 2015-06-09T20:08:41
| 2015-06-09T20:08:41
| 29,688,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,519
|
py
|
#!/opt/local/bin/python2.7
print '\nrunning...\n'
from xlrd import *
from xlsxwriter.workbook import Workbook
source = 'Nigeria practice file.xlsx'
# http://stackoverflow.com/questions/196345/how-to-check-if-a-string-in-python-is-in-ascii
def is_ascii(s):
return all(ord(c) < 128 for c in s)
### STEP 1: ANALYSE WORKBOOK
workbook = open_workbook(filename=source,encoding_override="cp1252")
print 'file {} loaded..format(source)
sheet1 = workbook.sheet_by_index(0)
colNames = {}
for col in range(0, sheet1.ncols):
value = sheet1.cell(0,col)
name = str(value.value)
colNames[name] = col
colNames[col] = name # this is a fun trick
asciiRows = []
nonAsciiRows = []
# note that we don't check the title row.
for i in range(1,sheet1.nrows):
#for i in range(2):
row = sheet1.row(i)
success = True # is all the text on this row valid ASCII?
for j in range(sheet1.ncols):
cell = sheet1.cell(i,j)
# type 1 means it contains text. see the xlrd docs.
if cell.ctype == 1:
if not is_ascii(cell.value): success = False
if success == True:
asciiRows.append(i) # store the row number
else:
nonAsciiRows.append(i) # store the row number
print 'asciiRows:', len(asciiRows)
print 'nonAsciiRows:', len(nonAsciiRows)
### STEP 2: OUTPUT
output1 = 'Nigeria_locations_ascii_rows.xlsx'
output2 = 'Nigeria_locations_NON_ascii_rows.xlsx'
print 'writing ascii rows to {}.format(output1)
wb1 = Workbook(output1)
sh1 = wb1.add_worksheet()
# write first row containing column names
row = 0
for i in range(sheet1.ncols):
name = colNames[i]
sh1.write(row, i, name)
# write all other rows
newRowNumber = 1 # we aren't leaving gaps / empty rows
for rowNumber in asciiRows:
# write each cell of the row
for colNumber in range(sheet1.ncols):
cell = sheet1.cell(rowNumber, colNumber)
sh1.write(newRowNumber, colNumber, cell.value)
newRowNumber += 1
print 'writing NON ascii rows to {}.format(output2)
wb2 = Workbook(output2)
sh2 = wb2.add_worksheet()
# write first row containing column names
row = 0
for colNumber in range(sheet1.ncols):
name = colNames[colNumber]
sh2.write(row, colNumber, name)
# write all other rows
newRowNumber = 1 # we aren't leaving gaps / empty rows
for rowNumber in nonAsciiRows:
# write each cell of the row
for colNumber in range(sheet1.ncols):
cell = sheet1.cell(rowNumber, colNumber)
sh2.write(newRowNumber, colNumber, cell.value)
newRowNumber += 1
wb1.close()
wb2.close()
print '\ndone.\n'
|
[
"nicholas.d.piano@gmail.com"
] |
nicholas.d.piano@gmail.com
|
99b185b33b49e397501b5227108ba3f56760b675
|
e48702862be879ceebcab8fb35eb74481f0b52bf
|
/apps/groups/migrations/0007_auto_20150907_1438.py
|
9978a6e76866729512cf25ba008bb135bfffccee
|
[] |
no_license
|
od-5/enjoy-africa
|
af79e976a790a6b0af5d39e527f34e829cccb8f4
|
bb05d44dd4ac6557ca9394f81f80465ed5174b60
|
refs/heads/master
| 2021-01-10T22:29:22.967261
| 2015-12-28T16:35:16
| 2015-12-28T16:35:16
| 42,873,984
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('groups', '0006_auto_20150907_1325'),
]
operations = [
migrations.AlterModelOptions(
name='groups',
options={'ordering': ['-travel_start'], 'verbose_name': '\u0413\u0440\u0443\u043f\u043f\u043e\u0432\u044b\u0435 \u0442\u0443\u0440\u044b', 'verbose_name_plural': '\u0413\u0440\u0443\u043f\u043f\u043e\u0432\u044b\u0435 \u0442\u0443\u0440\u044b'},
),
]
|
[
"od-5@yandex.ru"
] |
od-5@yandex.ru
|
cdf41cbdd7ffb5aaf836ad13a9d2bcc91d0d6c4f
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_266/ch27_2019_03_08_12_05_37_480317.py
|
88848104c47121d291e574f9ff740516855d64eb
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
cigarros = int(input('quantos cigarros fuma por dia: '))
anos = int(input('quantos anos fuma: '))
dias = anos*365
min = dias*10*cigarros
y = (min)/1440
print('voce perdeu {0} dias da sua vida'.format(y))
|
[
"you@example.com"
] |
you@example.com
|
8211a06d9970d197e58ca99d1b537932a4f60b36
|
1c16b9812768abc9305a82aaa0a76a16668571cb
|
/0x16-api_advanced/0-subs.py
|
6abde0d2cce700370f6cba0d160c0b4ecde8116a
|
[] |
no_license
|
emmanavarro/holberton-system_engineering-devops
|
73bbf5dcfe3661b68e1c9946bcb3fd6a03213a23
|
bdf4b3e179518f9deb79856213e1c665cfde3d9e
|
refs/heads/master
| 2020-12-23T01:48:07.725058
| 2020-10-07T23:18:18
| 2020-10-07T23:18:18
| 236,994,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
#!/usr/bin/python3
"""
Uses the reddit API to print the number of subscribers of a subreddit
"""
import requests
def number_of_subscribers(subreddit):
"""Get the numbers of subscribers by subreddit given"""
url_rsubs = "https://api.reddit.com/r/{}/about".format(subreddit)
headers = {'User-Agent': 'Python3'}
response = requests.get(url_rsubs, headers=headers,
allow_redirects=False)
if str(response) != "<Response [200]>":
return 0
r_json = response.json()
subs_count = r_json.get('data').get('subscribers')
return subs_count
|
[
"elnavarro55@gmail.com"
] |
elnavarro55@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.