blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b27e6f24cd077a7f67416dc90786e02d2bbef4a
|
78337fc3cd0d3be2307f42c4c7ae51bedb1d468b
|
/opengever/base/subscribers.py
|
9c4dbb486292e6ee55509845467899ce58b5da03
|
[] |
no_license
|
sensecs1/opengever.core
|
af19bafce4491f3c648682783fc1c918dc1f5944
|
0666e86fade7835c98a118fa6319071f859fcdb5
|
refs/heads/master
| 2021-01-18T07:44:56.965935
| 2015-03-13T15:26:30
| 2015-03-13T15:26:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
from five import grok
from OFS.interfaces import IObjectClonedEvent
from opengever.base import _
from plone.dexterity.interfaces import IDexterityContent
from zope.component.hooks import getSite
@grok.subscribe(IDexterityContent, IObjectClonedEvent)
def create_initial_version(obj, event):
    """Create an initial version for a freshly copied object.

    Only acts when the copy has a history object but no saved version yet;
    objects without a version history are left untouched.
    """
    repository = getSite().portal_repository
    history = repository.getHistory(obj)
    if history is None or len(history) > 0:
        return
    comment = _(u'label_initial_version_copied',
                default="Initial version (document copied)")
    # Save the first version, applying the repository's autoapply policy.
    repository._recursiveSave(obj, {}, repository._prepareSysMetadata(comment),
                              autoapply=repository.autoapply)
|
[
"lukas.graf@4teamwork.ch"
] |
lukas.graf@4teamwork.ch
|
99f3a49b00e82b89201fee4a187fdd21f2dc6a13
|
20b6cdf48fab05027fa65cceb7812c0f4cd784f8
|
/epidemics/utils/nested.py
|
7e9cb281cbbb21fb7f806c49d04994c46f443701
|
[] |
no_license
|
cselab/covid19
|
f260db9633ae77cd6857616b9a19168d647166fd
|
a867fed2a3cf7681f38fa9701b85a90400b1ad62
|
refs/heads/master
| 2023-02-07T14:55:30.703399
| 2020-10-05T09:12:18
| 2020-10-05T09:12:18
| 251,288,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,925
|
py
|
import numpy as np
from multiprocessing import Pool
import pickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class WorkerPool(object):
    """Thin wrapper around multiprocessing.Pool with a fixed worker count."""

    def __init__(self, cores):
        # cores: number of worker processes to spawn.
        self.pool = Pool(processes=cores)
        self.size = cores

    def map(self, function, tasks):
        """Apply *function* to every element of *tasks* in parallel (blocking)."""
        return self.pool.map(function, tasks)
def priorTransformFromJs(p, js):
    """Map unit-hypercube samples *p* onto the prior ranges defined in *js*.

    js['Distributions'][i] must provide 'Minimum' and 'Maximum' for each
    dimension; returns a numpy array of the rescaled values.
    """
    transformed = np.zeros(len(p))
    for i, unit_value in enumerate(p):
        bounds = js['Distributions'][i]
        lo, hi = bounds['Minimum'], bounds['Maximum']
        transformed[i] = lo + unit_value * (hi - lo)
    return transformed
def resample_equal_with_idx(samples, weights, rstate=None):
    """Systematically resample *samples* in proportion to *weights*.

    Args:
        samples: array of samples; the first axis indexes the draws.
        weights: importance weights, expected to sum to 1 (renormalized
            with a warning otherwise).
        rstate: optional numpy random state; defaults to np.random.

    Returns:
        Tuple of (resampled samples, integer index array used to resample).
    """
    import warnings  # BUG FIX: 'warnings' was used below but never imported

    if rstate is None:
        rstate = np.random
    if abs(np.sum(weights) - 1.) > 1e-9:  # same tol as in np.random.choice.
        # Guarantee that the weights will sum to 1.
        warnings.warn("Weights do not sum to 1 and have been renormalized.")
        weights = np.array(weights) / np.sum(weights)

    # Make N subdivisions and choose positions with a consistent random offset.
    nsamples = len(weights)
    positions = (rstate.random() + np.arange(nsamples)) / nsamples

    # Resample the data.
    # BUG FIX: use builtin int; the np.int alias was removed in NumPy 1.24.
    idx = np.zeros(nsamples, dtype=int)
    cumulative_sum = np.cumsum(weights)
    i, j = 0, 0
    while i < nsamples:
        if positions[i] < cumulative_sum[j]:
            idx[i] = j
            i += 1
        else:
            j += 1
    return samples[idx], idx
def getPosteriorFromResult(result):
    """Draw equally weighted posterior samples from a nested-sampling result.

    Returns (samples, idx) as produced by resample_equal_with_idx.
    """
    # Normalize the log-weights by the final log-evidence before resampling.
    normalized_logw = result.logwt - result.logz[-1]
    return resample_equal_with_idx(result.samples, np.exp(normalized_logw))
# Plot histogram of sampes in diagonal
def plot_histogram(ax, theta):
    """Plot per-parameter marginal histograms on the diagonal of an axes grid.

    ax is a 2d array of matplotlib axes (or a single axis when theta has one
    column); theta is a (num_samples, dim) array of posterior samples.
    """
    dim = theta.shape[1]
    num_bins = 30
    for i in range(dim):
        # Pick the diagonal panel (or the lone axis in the 1-d case).
        if (dim == 1):
            ax_loc = ax
        else:
            ax_loc = ax[i, i]
        hist, bins, _ = ax_loc.hist(
            theta[:, i], num_bins, density=True, color='lightgreen', ec='black')
        if i == 0:
            # Rescale hist to scale of theta -> get correct axis titles
            widths = np.diff(bins)
            if (dim > 1):
                # Redraw the first histogram as bars scaled to the x-range so
                # its y-axis matches the shared parameter scale of the grid.
                hist = hist / np.max(hist) * (
                    ax_loc.get_xlim()[1] - ax_loc.get_xlim()[0])
                bottom = ax_loc.get_xlim()[0]
                ax_loc.cla()
                ax_loc.bar(
                    bins[:-1],
                    hist,
                    widths,
                    color='lightgreen',
                    ec='black',
                    bottom=bottom)
                ax_loc.set_ylim(ax_loc.get_xlim())
                ax_loc.set_xticklabels([])
            else:
                ax_loc.cla()
                ax_loc.bar(bins[:-1], hist, widths, color='lightgreen', ec='black')
        elif i == theta.shape[1] - 1:
            # Bottom-right panel keeps its x labels, drops y labels.
            ax_loc.set_yticklabels([])
        else:
            # Interior diagonal panels show no tick labels at all.
            ax_loc.set_xticklabels([])
            ax_loc.set_yticklabels([])
        ax_loc.tick_params(axis='both', which='both', length=0)
#Plot scatter plot in upper triangle of figure
def plot_upper_triangle(ax, theta, lik):
    """Scatter plots of all parameter pairs in the upper triangle of the grid.

    Args:
        ax: 2d array of matplotlib axes.
        theta: (num_samples, dim) array of posterior samples.
        lik: optional sequence of likelihood values used to color the points;
            when falsy, plain uncolored markers are drawn.
    """
    dim = theta.shape[1]
    if dim == 1:
        return  # no off-diagonal panels for a single parameter
    for i in range(dim):
        for j in range(i + 1, dim):
            if lik:
                ax[i, j].scatter(
                    theta[:, j], theta[:, i], marker='o', s=3, alpha=0.5, c=lik)
            else:
                # BUG FIX: Axes.plot() has no 's' kwarg (that belongs to
                # scatter), so the original call raised TypeError. Draw
                # markers only via markersize/linestyle instead.
                ax[i, j].plot(theta[:, j], theta[:, i], linestyle='',
                              marker='.', markersize=1, alpha=0.5)
            ax[i, j].set_xticklabels([])
            ax[i, j].set_yticklabels([])
            # Positional flag avoids the deprecated (and later removed)
            # 'b=' keyword of Axes.grid.
            ax[i, j].grid(True, which='both')
#Plot 2d histogram in lower triangle of figure
def plot_lower_triangle(ax, theta):
    """Draw interpolated 2d histograms of parameter pairs below the diagonal.

    ax is a 2d array of matplotlib axes; theta is a (num_samples, dim) array.
    """
    dim = theta.shape[1]
    if (dim == 1):
        return
    for i in range(dim):
        for j in range(i):
            # returns bin values, bin edges and bin edges
            H, xe, ye = np.histogram2d(theta[:, j], theta[:, i], 10, density=True)
            # plot and interpolate data
            # Transpose H so histogram2d's (x, y) indexing matches imshow's
            # (row, col) convention; extent aligns with the diagonal panels.
            ax[i, j].imshow(
                H.T,
                aspect="auto",
                interpolation='spline16',
                origin='lower',
                extent=np.hstack((ax[j, j].get_xlim(), ax[i, i].get_xlim())),
                cmap=plt.get_cmap('jet'))
            if i < theta.shape[1] - 1:
                ax[i, j].set_xticklabels([])
            if j > 0:
                ax[i, j].set_yticklabels([])
def plotNetsedResult(result, savepath=None):
    """Corner plot of the posterior from a nested-sampling result.

    Diagonal: per-parameter histograms; upper triangle: likelihood-colored
    scatter plots; lower triangle: 2d histograms. Shows the figure
    interactively unless *savepath* is given, in which case it is saved.

    (Name kept as-is — 'Netsed' is a historical typo callers rely on.)
    """
    samples, idx = getPosteriorFromResult(result)
    numdim = len(samples[0])
    numentries = len(samples)
    llk = (result.logl[idx]).tolist()
    fig, ax = plt.subplots(numdim, numdim, figsize=(8, 8))
    samplesTmp = np.reshape(samples, (numentries, numdim))
    plt.suptitle(
        '{0} Plotter - \nNumber of Samples {1}'.format(
            str("Nested Sampling"), str(numentries)).strip(),
        fontweight='bold',
        fontsize=12)
    plot_histogram(ax, samplesTmp)
    plot_upper_triangle(ax, samplesTmp, llk)
    plot_lower_triangle(ax, samplesTmp)
    # Removed leftover debug print("TEST"); use identity check for None.
    if savepath is None:
        plt.show()
    else:
        plt.savefig(savepath)
|
[
"wadaniel@ethz.ch"
] |
wadaniel@ethz.ch
|
ee6666217155863ba3f1a1250154627c6bc7f1d6
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_088/ch119_2020_09_19_21_49_34_845287.py
|
a275184e3182156e1d1a024a8eccecd18f2097b8
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
import math
def calcula_euler(x, n):
    """Approximate e**x with the first n terms of its Taylor series.

    e**x ~= sum_{k=0}^{n-1} x**k / k!  (the k=0 and k=1 terms, 1 + x, are
    always included; further terms are added while the term index < n).

    Args:
        x: the exponent (int or float).
        n: number of series terms to use.

    Returns:
        The partial-sum approximation (float).
    """
    # BUG FIX: the original looped on 'soma', a name that was never assigned
    # (NameError), while accumulating into 'inicio' and returning the
    # undefined 'soma'. The loop must run the term counter up to n and
    # return the accumulator.
    contador = 2       # index of the next term to add
    soma = 1 + x       # terms k = 0 and k = 1
    while contador < n:
        soma += x ** contador / math.factorial(contador)
        contador += 1
    return soma
|
[
"you@example.com"
] |
you@example.com
|
073c1c64a092fcea66e7e1774e63ffd5b6ae4e58
|
388d81ea4354877326a772bcaa54d276cee81283
|
/Data Structures/1-D/Help Jarvis!.py
|
bc928fe401aa39cd1cc37d252fc1636df73cb52a
|
[] |
no_license
|
anshumanairy/Hacker-Earth
|
61a67fb5e6ce3ef4068ad904480386bf7095f1f7
|
9c3ed575eb7007b055cb149ff644a91f287d4de7
|
refs/heads/master
| 2022-11-28T21:48:38.450999
| 2020-08-08T20:45:49
| 2020-08-08T20:45:49
| 285,909,494
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
def func():
    """Read T test cases from stdin; for each input string, print YES if its
    characters, sorted and read as ints, form a run of consecutive values
    (e.g. '4532' -> 2345), else print NO at the first gap."""
    T = int(input())
    for i in range(T):
        # Split the input string into single characters and sort them;
        # for digit strings this sorts them in numeric order.
        list1 = list(input())
        list1.sort()
        check = 1
        for j in range(len(list1) - 1):
            # Adjacent sorted values must differ by exactly 1.
            if (int(list1[j + 1]) != int(list1[j]) + 1):
                check = 0
                print('NO')
                break
        if (check == 1):
            print('YES')
# NOTE: runs immediately on import and blocks on stdin.
func()
|
[
"anshuman.airy04@gmail.com"
] |
anshuman.airy04@gmail.com
|
9cdde1ad8c49b5c7e5c0f70037b8e64f9ba08427
|
228de37ad02ee9af51a208ad3287224af1f2c472
|
/app/travel/models/city_information.py
|
6d19042d174b2b9a6fca2575bc78d656d1a06ecf
|
[] |
no_license
|
kahee/MySmallTrip
|
cb0b0a9afdee009f3b4055af92af0bc5ec50f0cd
|
75e1bf32993f137e70360f6aa3b22904d61bd24c
|
refs/heads/master
| 2022-12-11T18:57:12.494011
| 2018-09-02T09:12:59
| 2018-09-02T09:12:59
| 130,799,032
| 1
| 0
| null | 2022-12-08T01:01:50
| 2018-04-24T05:08:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,034
|
py
|
import magic
from .product_base import ProductBase
from django.core.files import File
from django.db import models
from io import BytesIO
from PIL import Image
class CityInformation(ProductBase):
    """Django model for a destination city, extending the shared ProductBase.

    Stores the city image plus a 375x199 thumbnail that is (re)generated on
    every save.
    """
    name = models.CharField('도시명', max_length=20)
    continent = models.CharField('대륙', max_length=20)
    nationality = models.CharField('나라', max_length=20)
    city_image = models.ImageField('도시이미지', upload_to='city')
    # Derived image; filled in by _save_thumbnail_process, never set directly.
    city_image_thumbnail = models.ImageField(upload_to='city-thumbnail')

    def save(self, *args, **kwargs):
        # Regenerate (or remove) the thumbnail before persisting the row.
        self._save_thumbnail_process()
        super().save(*args, **kwargs)

    def _save_thumbnail_process(self):
        """
        Thumbnail-generation logic for the city_image field, run during save().
        Deletes the thumbnail when no source image is set.
        :return: None
        """
        if self.city_image:
            # Extract the image file's name and extension
            full_name = self.city_image.name.rsplit('/')[-1]
            full_name_split = full_name.rsplit('.', maxsplit=1)

            # Sniff the real MIME type from the file bytes (the filename
            # extension is not trusted).
            temp_file = BytesIO()
            temp_file.write(self.city_image.read())
            temp_file.seek(0)
            mime_info = magic.from_buffer(temp_file.read(), mime=True)
            temp_file.seek(0)

            name = full_name_split[0]
            # e.g. 'image/jpeg' -> 'jpeg'; used as both suffix and PIL format.
            ext = mime_info.split('/')[-1]

            # Load the image file with Pillow
            im = Image.open(self.city_image)
            # Resize the data down to thumbnail dimensions (in place)
            im.thumbnail((375, 199))
            # Create an in-memory temporary file to hold the thumbnail data
            temp_file = BytesIO()
            # Write the Pillow image's contents into the temporary buffer
            im.save(temp_file, ext)
            # Wrap the buffer in a Django File and store it on the thumbnail
            # field; save=False defers the DB write to the enclosing save().
            self.city_image_thumbnail.save(f'{name}_thumbnail.{ext}', File(temp_file), save=False)
        else:
            self.city_image_thumbnail.delete(save=False)
|
[
"hsj2334@gmail.com"
] |
hsj2334@gmail.com
|
ccd5157c5b4c9a6d812fe92d0c2ef85a5cdc3286
|
c78f01652444caa083ca75211bae6903d98363cb
|
/devel/.private/hector_mapping/lib/python2.7/dist-packages/hector_mapping/msg/_HectorDebugInfo.py
|
7a372d58becc1ac75f20e0ad7199a48ec033dc09
|
[
"Apache-2.0"
] |
permissive
|
arijitnoobstar/UAVProjectileCatcher
|
9179980f8095652811b69b70930f65b17fbb4901
|
3c1bed80df167192cb4b971b58c891187628142e
|
refs/heads/master
| 2023-05-01T11:03:09.595821
| 2021-05-16T15:10:03
| 2021-05-16T15:10:03
| 341,154,017
| 19
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,561
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from hector_mapping/HectorDebugInfo.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import hector_mapping.msg
class HectorDebugInfo(genpy.Message):
    """Autogenerated ROS message class for hector_mapping/HectorDebugInfo.

    Carries a variable-length array of HectorIterData entries. The
    (de)serialization methods are genpy-generated and stream the fields with
    the pre-compiled little-endian structs defined at module scope.
    """
    _md5sum = "4d33c0696c0c536f5c1447c260756674"
    _type = "hector_mapping/HectorDebugInfo"
    _has_header = False  # flag to mark the presence of a Header object
    _full_text = """HectorIterData[] iterData
================================================================================
MSG: hector_mapping/HectorIterData
float64[9] hessian
float64 conditionNum
float64 determinant
float64 conditionNum2d
float64 determinant2d
"""
    __slots__ = ['iterData']
    _slot_types = ['hector_mapping/HectorIterData[]']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.
        The available fields are:
           iterData
        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(HectorDebugInfo, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.iterData is None:
                self.iterData = []
        else:
            self.iterData = []

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            # uint32 element count precedes the array payload
            length = len(self.iterData)
            buff.write(_struct_I.pack(length))
            for val1 in self.iterData:
                # per entry: 9 float64 hessian values, then 4 scalar float64s
                buff.write(_get_struct_9d().pack(*val1.hessian))
                _x = val1
                buff.write(_get_struct_4d().pack(_x.conditionNum, _x.determinant, _x.conditionNum2d, _x.determinant2d))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            if self.iterData is None:
                self.iterData = None
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.iterData = []
            for i in range(0, length):
                val1 = hector_mapping.msg.HectorIterData()
                start = end
                end += 72  # 9 x float64
                val1.hessian = _get_struct_9d().unpack(str[start:end])
                _x = val1
                start = end
                end += 32  # 4 x float64
                (_x.conditionNum, _x.determinant, _x.conditionNum2d, _x.determinant2d,) = _get_struct_4d().unpack(str[start:end])
                self.iterData.append(val1)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)  # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            length = len(self.iterData)
            buff.write(_struct_I.pack(length))
            for val1 in self.iterData:
                # hessian is a numpy array here; dump its raw bytes directly
                buff.write(val1.hessian.tostring())
                _x = val1
                buff.write(_get_struct_4d().pack(_x.conditionNum, _x.determinant, _x.conditionNum2d, _x.determinant2d))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            if self.iterData is None:
                self.iterData = None
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.iterData = []
            for i in range(0, length):
                val1 = hector_mapping.msg.HectorIterData()
                start = end
                end += 72
                # zero-copy view into the buffer instead of struct unpacking
                val1.hessian = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
                _x = val1
                start = end
                end += 32
                (_x.conditionNum, _x.determinant, _x.conditionNum2d, _x.determinant2d,) = _get_struct_4d().unpack(str[start:end])
                self.iterData.append(val1)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)  # most likely buffer underfill
# Shared pre-compiled Struct for the uint32 array-length prefix (from genpy).
_struct_I = genpy.struct_I
def _get_struct_I():
    # Accessor kept for symmetry with the lazily-built structs below.
    global _struct_I
    return _struct_I
_struct_4d = None
def _get_struct_4d():
global _struct_4d
if _struct_4d is None:
_struct_4d = struct.Struct("<4d")
return _struct_4d
_struct_9d = None
def _get_struct_9d():
global _struct_9d
if _struct_9d is None:
_struct_9d = struct.Struct("<9d")
return _struct_9d
|
[
"arijit.dg@hotmail.com"
] |
arijit.dg@hotmail.com
|
e293bee7a84c70a651836bf80d330077ffc03c8c
|
65f8211fc33eb5f9ac1ff0d68902226ca9a58692
|
/graph_algorithms/prim_list.py
|
626165c26ecc2a08d4d6f6a6ddc24a40752d16f2
|
[] |
no_license
|
szarbartosz/asd-python
|
46869f5699a1ef661e2df02e523af0adcddbbbda
|
0130cc3dcbba6ad62e1516c98b5cbab85848d619
|
refs/heads/master
| 2022-12-13T19:02:53.699381
| 2020-09-11T13:29:31
| 2020-09-11T13:29:31
| 242,975,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,680
|
py
|
from queue import PriorityQueue
def prim_list(G, s):
    """Prim's minimum-spanning-tree algorithm on an adjacency list.

    G[u] is a list of (neighbour, weight) pairs; s is the start vertex.
    Returns a list of strings: a 'MST:' header, one line per tree edge,
    and the total weight.
    """
    n = len(G)
    in_tree = [False] * n
    parent = [None] * n
    best = [float('inf')] * n
    best[s] = 0
    heap = PriorityQueue()
    heap.put((best[s], s))

    while not heap.empty():
        _, u = heap.get()
        in_tree[u] = True
        for v, w in G[u]:
            # Relax only vertices not yet fixed into the tree; stale heap
            # entries for fixed vertices are simply ignored when popped.
            if not in_tree[v] and best[v] > w:
                best[v] = w
                parent[v] = u
                heap.put((w, v))

    lines = ['MST:']
    total = 0
    for v in range(n):
        if parent[v] is None:
            continue
        # Recover the weight of the tree edge (parent[v], v) from the list.
        for u, w in G[v]:
            if u == parent[v]:
                total += w
                lines.append('edge: ({}, {}) weight: {}'.format(parent[v], v, w))
    lines.append('sum of weights: {}'.format(total))
    return lines
# Demo: run Prim's algorithm on three sample graphs and print the MSTs.
G = [[(1, 4), (7, 8)],
     [(0, 4), (2, 8), (7, 11)],
     [(1, 8), (5, 4), (8, 2), (3, 7)],
     [(2, 7), (4, 9), (5, 14)],
     [(3, 9), (5, 10)],
     [(3, 14), (2, 4), (6, 2), (4, 10)],
     [(5, 2), (7, 1), (8, 6)],
     [(6, 1), (0, 8), (1, 11), (8, 7)],
     [(6, 6), (7, 7), (2, 2)]]
for line in prim_list(G, 1):
    print(line)

H = [[(1, 28), (5, 10)],
     [(0, 28), (2, 16), (6, 14)],
     [(1, 16), (3, 12)],
     [(2, 12), (6, 18), (4, 22)],
     [(3, 22), (6, 24), (5, 25)],
     [(0, 10), (4, 25)],
     [(1, 14), (3, 18), (4, 24)]]
print('\n')
for line in prim_list(H, 5):
    print(line)

I = [[(1, 3), (2, 1)],
     [(0, 3), (2, 1)],
     [(0, 1), (1, 1)]]
print('\n')
for line in prim_list(I, 0):
    print(line)
|
[
"szarbartosz@gmail.com"
] |
szarbartosz@gmail.com
|
cb6e622957ed4c3fb52b382cdca781a32cd17fb3
|
a6991b575847377f28b82cd725c67b324bc53d6c
|
/coderunner-quala/game.py
|
0d4956df8ae63487d32b0d091991cfcd6bb994cf
|
[] |
no_license
|
zakuro9715/atcoder
|
7732712405b284487da87dfb56782d855a6f6af6
|
6c50e66e2de1964bb23d200c2c8d35af84f17b69
|
refs/heads/master
| 2021-05-16T03:17:57.743717
| 2019-12-06T07:57:59
| 2019-12-06T07:57:59
| 32,463,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
import urllib.request
import settings
endpoint = 'https://game.coderunner.jp'
token = settings.token
def submit(text):
    """Submit an answer string to the game server; returns _request's triple."""
    url = '{0}/q?str={1}&token={2}'.format(endpoint, text, token)
    return _request(url)
def update_profile(text):
    """Update the player profile text on the game server."""
    url = '{0}/profile?text={1}&token={2}'.format(endpoint, text, token)
    return _request(url)
def _request(url):
    """GET *url*, append (url, body, error) to the log file, return the triple.

    The returned tuple is (url, response bytes or None, HTTPError or None).
    Non-HTTP errors (e.g. URLError) propagate, matching the original contract.
    """
    try:
        res = url, urllib.request.urlopen(url).read(), None
    except urllib.error.HTTPError as e:
        res = url, None, e
    finally:
        # 'with' guarantees the log handle is closed even if a write fails
        # (the original open/close pair leaked the handle on write errors).
        with open(settings.logs, 'a') as f:
            f.writelines('{0}\n'.format(x) for x in res)
            f.write('\n')
    return res
|
[
"zakuro@yuzakuro.me"
] |
zakuro@yuzakuro.me
|
0f8cc58656b66668df127c7d77176ab02c8389e5
|
88d555a009f9075e59177fac70036892f397b439
|
/basenji/archive/augmentation.py
|
50b223c90c872923f1db563e272389153f3313a9
|
[
"Apache-2.0"
] |
permissive
|
calico/basenji
|
f9f406971d355dda81821dcf274696a7d27e332d
|
615b9eec8a591783b16d959029ddad08edae853d
|
refs/heads/master
| 2023-09-04T11:14:15.620786
| 2023-07-27T00:05:13
| 2023-07-27T00:05:13
| 96,346,574
| 326
| 143
|
Apache-2.0
| 2023-08-16T00:36:32
| 2017-07-05T17:54:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,944
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import pdb
import tensorflow as tf
from basenji import ops
def stochastic_rc(seq_1hot):
    """Stochastically reverse complement a one hot encoded DNA sequence."""
    # Complement: swap channel order ACGT -> TGCA via gather on the last axis.
    rcseq_1hot = tf.gather(seq_1hot, [3, 2, 1, 0], axis=-1)
    # Reverse along the sequence (length) axis.
    rcseq_1hot = tf.reverse(rcseq_1hot, axis=[1])
    # Coin flip selects the reverse complement or the original sequence.
    reverse_bool = tf.random_uniform(shape=[]) > 0.5
    seq_1hot_aug = tf.cond(reverse_bool, lambda: rcseq_1hot, lambda: seq_1hot)
    # Returns the chosen sequence and the flag so predictions can be matched.
    return seq_1hot_aug, reverse_bool
def reverse_preds(rp_tuple):
    """Align predictions with a possibly reverse-complemented input.

    rp_tuple is (preds, reverse_bool) as produced alongside stochastic_rc:
    selects preds as-is when the flag is set, otherwise the axis-1 reversal.
    """
    preds, was_reversed = rp_tuple
    flipped = tf.reverse(preds, axis=[1])
    return tf.cond(was_reversed, lambda: preds, lambda: flipped)
################################################################################
def shift_sequence(seq, shift_amount, pad_value=0.25):
    """Shift a sequence left or right by shift_amount.

    Args:
      seq: a [batch_size, sequence_length, sequence_depth] sequence to shift
      shift_amount: the signed amount to shift (tf.int32 or int)
      pad_value: value to fill the padding (primitive or scalar tf.Tensor)

    Raises:
      ValueError: if seq is not rank 3.
    """
    if seq.shape.ndims != 3:
        raise ValueError('input sequence should be rank 3')
    input_shape = seq.shape

    # Padding block whose length matches the shifted-out region.
    pad = pad_value * tf.ones_like(seq[:, 0:tf.abs(shift_amount), :])

    def _shift_right(_seq):
        # shift_amount is positive here: drop the rightmost columns, pad left.
        sliced_seq = _seq[:, :-shift_amount:, :]
        return tf.concat([pad, sliced_seq], axis=1)

    def _shift_left(_seq):
        # shift_amount is negative here: drop the leftmost columns, pad right.
        sliced_seq = _seq[:, -shift_amount:, :]
        return tf.concat([sliced_seq, pad], axis=1)

    output = tf.cond(
        tf.greater(shift_amount, 0), lambda: _shift_right(seq),
        lambda: _shift_left(seq))

    # The slice/concat loses the static shape; restore it for graph building.
    output.set_shape(input_shape)
    return output
def augment_deterministic_set(data_ops, augment_rc=False, augment_shifts=[0]):
    """Build every deterministic augmentation of data_ops.

    Args:
      data_ops: dict with keys 'sequence,' 'label,' and 'na.'
      augment_rc: Boolean; also emit the reverse complement of each shift.
      augment_shifts: List of int shift offsets.
    Returns
      data_ops_list: one augmented data_ops dict per (rc, shift) pair.
    """
    augment_pairs = []
    for shift in augment_shifts:
        augment_pairs.append((False, shift))
        if augment_rc:
            augment_pairs.append((True, shift))
    return [augment_deterministic(data_ops, rc, shift)
            for rc, shift in augment_pairs]
def augment_deterministic(data_ops, augment_rc=False, augment_shift=0):
    """Apply a deterministic augmentation, specified by the parameters.

    Args:
      data_ops: dict with keys 'sequence,' 'label,' and 'na.'
      augment_rc: Boolean
      augment_shift: Int
    Returns
      data_ops: augmented data, with all existing keys transformed
                and 'reverse_preds' bool added.
    """
    data_ops_aug = {}
    # Copy every non-sequence entry through unchanged.
    for key in data_ops:
        if key not in ['sequence']:
            data_ops_aug[key] = data_ops[key]

    if augment_shift == 0:
        data_ops_aug['sequence'] = data_ops['sequence']
    else:
        shift_amount = tf.constant(augment_shift, shape=(), dtype=tf.int64)
        data_ops_aug['sequence'] = shift_sequence(data_ops['sequence'], shift_amount)

    if augment_rc:
        data_ops_aug = augment_deterministic_rc(data_ops_aug)
    else:
        # Mark that downstream predictions need no reversal.
        data_ops_aug['reverse_preds'] = tf.zeros((), dtype=tf.bool)

    return data_ops_aug
def augment_deterministic_rc(data_ops):
    """Apply a deterministic reverse complement augmentation.

    Args:
      data_ops: dict with keys 'sequence,' 'label,' and 'na.'
    Returns
      data_ops_aug: reverse-complemented data ops with 'reverse_preds' set.
    """
    rc_ops = ops.reverse_complement_transform(data_ops)
    rc_ops['reverse_preds'] = tf.ones((), dtype=tf.bool)
    return rc_ops
def augment_stochastic_rc(data_ops):
    """Apply a stochastic reverse complement augmentation.

    Args:
      data_ops: dict with keys 'sequence,' 'label,' and 'na.'
    Returns
      data_ops_aug: augmented data
    """
    # Coin flip selects the rc-transformed dict or an unmodified copy.
    reverse_preds = tf.random_uniform(shape=[]) > 0.5
    data_ops_aug = tf.cond(reverse_preds, lambda: ops.reverse_complement_transform(data_ops),
                           lambda: data_ops.copy())
    # Record the decision so predictions can be flipped back downstream.
    data_ops_aug['reverse_preds'] = reverse_preds
    return data_ops_aug
def augment_stochastic_shifts(seq, augment_shifts):
    """Apply a stochastic shift augmentation.

    Args:
      seq: input sequence of size [batch_size, length, depth]
      augment_shifts: list of int offsets to sample from
    Returns:
      shifted and padded sequence of size [batch_size, length, depth]
    """
    # Sample one of the candidate offsets uniformly at graph-execution time.
    shift_index = tf.random_uniform(shape=[], minval=0,
                                    maxval=len(augment_shifts), dtype=tf.int64)
    shift_value = tf.gather(tf.constant(augment_shifts), shift_index)

    # A zero shift is the identity; skip the shift op in that case.
    seq = tf.cond(tf.not_equal(shift_value, 0),
                  lambda: shift_sequence(seq, shift_value),
                  lambda: seq)

    return seq
def augment_stochastic(data_ops, augment_rc=False, augment_shifts=[]):
    """Apply stochastic augmentations,

    Args:
      data_ops: dict with keys 'sequence,' 'label,' and 'na.'
      augment_rc: Boolean for whether to apply reverse complement augmentation.
      augment_shifts: list of int offsets to sample shift augmentations.
    Returns:
      data_ops_aug: augmented data
    """
    if augment_shifts:
        # Shift first so the optional rc below acts on the shifted sequence.
        data_ops['sequence'] = augment_stochastic_shifts(data_ops['sequence'],
                                                         augment_shifts)

    if augment_rc:
        data_ops = augment_stochastic_rc(data_ops)
    else:
        data_ops['reverse_preds'] = tf.zeros((), dtype=tf.bool)

    return data_ops
|
[
"drk@calicolabs.com"
] |
drk@calicolabs.com
|
6863d4fb2fa2c83913a6cfce35a272bbbd68db57
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/dockerized-gists/f828b38421dfbee59daf/snippet.py
|
b7817810ac5fdc2fc629f33ee3356f6c29cd2707
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,937
|
py
|
"""
Physics simulation with PyODE followed by a (basic) rendering with Vapory
See the result here: http://i.imgur.com/TdhxwGz.gifv
Zulko 2014
This script is placed in the Public Domain (Licence Creative Commons 0)
"""
# =============== FIRST PART : SIMULATION WITH PyODE
import ode
# Cube edge lengths and density used for every simulated box.
lx, ly, lz, density = 1.0, 1.0, 1.0, 1.0

world = ode.World()
world.setGravity( (0,-9.81,0) )
world.setCFM(1E-6)  # constraint force mixing: keeps contacts slightly soft
space = ode.Space()
contactgroup = ode.JointGroup()  # holds the short-lived contact joints
geoms = []  # keeps references so collision geoms aren't garbage collected
def near_callback(args, geom1, geom2):
    """Callback function for the collide() method below.

    This function checks if the given geoms do collide and
    creates contact joints if they do.
    """
    contacts = ode.collide(geom1, geom2)
    world,contactgroup = args
    for c in contacts:
        # Low restitution, high friction contact parameters.
        c.setBounce(0.01)
        c.setMu(60000)
        j = ode.ContactJoint(world, contactgroup, c)
        j.attach(geom1.getBody(), geom2.getBody())
def new_cube(xyz):
    """ Creates a new PyODE cude at position (x,y,z) """
    body = ode.Body(world)
    M = ode.Mass()
    M.setBox(density, lx, ly, lz)
    body.setMass(M)
    # Metadata attributes consumed by the renderer, not by ODE itself.
    body.shape = "box"
    body.boxsize = (lx, ly, lz)
    body.setPosition(xyz)
    # Collision geometry tied to the rigid body.
    geom = ode.GeomBox(space, lengths=body.boxsize)
    geom.setBody(body)
    geoms.append(geom) # avoids that geom gets trashed
    return body
# The objects of the scene:
floor = ode.GeomPlane(space, (0,1,0), 0)
# Six unit cubes stacked with slight offsets so the tower topples.
cubes = [new_cube(xyz) for xyz in
         [(0.5,3,0.5),(0.5,4,0),(0,5,0),(-0.5,6,0),
          (-0.5,7,-0.5),(0,8,0.5)]]

# Start the simulation !
t = 0.0
dt = 0.005      # integration time step (seconds)
duration = 4.0  # total simulated time (seconds)
trajectories = []  # per-step snapshots of (position, rotation) per cube
while t<duration:
    trajectories.append([(c.getPosition(), c.getRotation())
                         for c in cubes])
    # Detect collisions and create contact joints, then advance the world.
    space.collide((world,contactgroup), near_callback)
    world.step(dt)
    contactgroup.empty()  # contact joints only live for one step
    t+=dt
# =============== SECOND PART : RENDERING WITH VAPORY
from moviepy.editor import VideoClip, ipython_display
from vapory import *
light = LightSource( [10,10,10], 'color', [3,3,3],
                     'parallel', 'point_at', [0, 0, 0])
ground = Plane([0,1,0],0, Texture('Rosewood'))

def vapory_box(xyz, R):
    """ Draws a box with at the given position and rotation"""
    # POV-Ray's 'matrix' takes the 3x3 rotation entries followed by the
    # translation, so concatenate R and the position tuple.
    return Box([-lx/2, -ly/2, -lz/2], [lx/2, ly/2, lz/2],
               Texture('T_Ruby_Glass'), Interior('ior',4),
               'matrix', R+xyz)
def make_frame(t):
    """ Returns the image of the scene rendered at time t """
    # Look up the simulation snapshot recorded for time t.
    boxes = [vapory_box(position, rotation)
             for (position, rotation) in trajectories[int(t/dt)]]
    scene = Scene( Camera("location", [0,3,-4], "look_at", [0,0,0]),
                   [light, ground, Background("White")] + boxes,
                   included=["colors.inc", "textures.inc", "glass.inc"])
    return scene.render(height=300, width=400, antialiasing=0.0001)

clip = VideoClip(make_frame, duration=duration)
clip.write_videofile("pyODE.avi", codec='png', fps=20) # lossless format
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
7fae4a50aac90c167dfa619bd30118ca39f52bff
|
d30ebe0bc57289f2f566cecab68b1c338f34c4ad
|
/bigml/tests/test_02_dev_prediction.py
|
1e85df76644faa9bbd13f104ae6f50345e37bc5d
|
[
"Apache-2.0"
] |
permissive
|
alanponce/python
|
cdfaee04a36dd19ab13cce7e9d8fed557e0d9cc8
|
9423b4c4968b81ee14cef1ab6cd62d23dfa8bd26
|
refs/heads/next
| 2020-12-28T23:45:26.384281
| 2016-04-29T01:38:58
| 2016-04-29T01:38:58
| 57,441,721
| 1
| 0
| null | 2016-04-30T12:52:05
| 2016-04-30T12:52:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,325
|
py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015-2016 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing prediction creation in DEV mode
"""
from world import world, setup_module, teardown_module
import create_source_steps as source_create
import read_source_steps as source_read
import create_dataset_steps as dataset_create
import create_model_steps as model_create
import create_prediction_steps as prediction_create
class TestDevPrediction(object):
    # NOTE: Python 2 code (print statements); behave-style step functions
    # from the sibling *_steps modules drive the BigML API.

    def setup(self):
        """
        Switches to DEV mode for this class methods only
        """
        print "\n-------------------\nTests in: %s\n" % __name__
        # Drop the project created by the regular-mode run, then recreate it
        # through the DEV-mode API handle so all resources land in DEV.
        world.api.delete_project(world.project_id)
        world.api = world.api_dev_mode
        world.project_id = world.api.create_project( \
            {"name": world.test_project_name})['resource']

    def teardown(self):
        """
        Debug information
        """
        print "\nEnd of tests in: %s\n-------------------\n" % __name__

    def test_scenario1(self):
        """
        Scenario: Successfully creating a prediction in DEV mode:
            Given I want to use api in DEV mode
            When I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <time_1> secs
            And the source has DEV True
            And I create a dataset
            And I wait until the dataset is ready less than <time_2> secs
            And I create a model
            And I wait until the model is ready less than <time_3> secs
            When I create a prediction for "<data_input>"
            Then the prediction for "<objective>" is "<prediction>"

            Examples:
            | data             | time_1  | time_2 | time_3 | data_input    | objective | prediction  |
            | ../data/iris.csv | 10      | 10     | 10     | {"petal width": 0.5} | 000004    | Iris-setosa |
        """
        print self.test_scenario1.__doc__
        # Each example row supplies: file, three timeouts, input, field, label.
        examples = [
            ['data/iris.csv', '10', '10', '10', '{"petal width": 0.5}', '000004', 'Iris-setosa']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            source_read.source_has_dev(self, True)
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_create.i_create_a_prediction(self, example[4])
            prediction_create.the_prediction_is(self, example[5], example[6])
|
[
"merce@bigml.com"
] |
merce@bigml.com
|
077c541ab06e9bf347eb23beeab6465e7d5db980
|
d83fde3c891f44014f5339572dc72ebf62c38663
|
/_bin/google-cloud-sdk/.install/.backup/lib/surface/organizations/set_iam_policy.py
|
929a22f897c4a754e6a303c96cd35346964f7d5e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
gyaresu/dotfiles
|
047cc3ca70f4b405ba272856c69ee491a79d2ebe
|
e5e533b3a081b42e9492b228f308f6833b670cfe
|
refs/heads/master
| 2022-11-24T01:12:49.435037
| 2022-11-01T16:58:13
| 2022-11-01T16:58:13
| 17,139,657
| 1
| 1
| null | 2020-07-25T14:11:43
| 2014-02-24T14:59:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,780
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to set IAM policy for a resource."""
from __future__ import absolute_import
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.command_lib.organizations import flags
from googlecloudsdk.command_lib.organizations import orgs_base
@base.ReleaseTracks(
base.ReleaseTrack.GA, base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class SetIamPolicy(orgs_base.OrganizationCommand):
"""Set IAM policy for an organization.
Given an organization ID and a file encoded in JSON or YAML that contains the
IAM policy, this command will set the IAM policy for that organization.
"""
detailed_help = {
'EXAMPLES': (
'\n'.join([
'The following command reads an IAM policy defined in a JSON',
'file `policy.json` and sets it for an organization with the ID',
'`123456789`:',
'',
' $ {command} 123456789 policy.json',
]))
}
@staticmethod
def Args(parser):
flags.IdArg('whose IAM policy you want to set.').AddToParser(parser)
parser.add_argument(
'policy_file', help='JSON or YAML file containing the IAM policy.')
def Run(self, args):
messages = self.OrganizationsMessages()
policy = iam_util.ParsePolicyFile(args.policy_file, messages.Policy)
update_mask = iam_util.ConstructUpdateMaskFromPolicy(args.policy_file)
# To preserve the existing set-iam-policy behavior of always overwriting
# bindings and etag, add bindings and etag to update_mask.
if 'bindings' not in update_mask:
update_mask += ',bindings'
if 'etag' not in update_mask:
update_mask += ',etag'
set_iam_policy_request = messages.SetIamPolicyRequest(
policy=policy,
updateMask=update_mask)
policy_request = (
messages.CloudresourcemanagerOrganizationsSetIamPolicyRequest(
organizationsId=args.id,
setIamPolicyRequest=set_iam_policy_request))
result = self.OrganizationsClient().SetIamPolicy(policy_request)
iam_util.LogSetIamPolicy(args.id, 'organization')
return result
|
[
"me@gareth.codes"
] |
me@gareth.codes
|
7bd188b9a79352c9745f901ed5b9ffc3d20ca7ee
|
a6fc9cec97fbbd6ecc08715c43e4dd53606e2110
|
/_apscheduler/example/config.py
|
30569ac472e19331db956c59d43ebbe6fa580d60
|
[] |
no_license
|
BennyJane/python-demo
|
2848acaaa81d5011d9dfc1585b3d6b685178f88e
|
75098ec4f9c8288d637ee1f9585f824fcb5267ee
|
refs/heads/master
| 2023-04-11T12:24:30.245205
| 2021-04-24T23:47:56
| 2021-04-24T23:47:56
| 264,815,200
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
# !/usr/bin/env python
# -*-coding:utf-8 -*-
# PROJECT : Python-Exercise
# Time :2020/12/8 23:02
# Warning :The Hard Way Is Easier
from apscheduler.jobstores.redis import RedisJobStore
# from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
job_stores = {
"redis": RedisJobStore(), # 设置一个名为redis的job存储,后端使用 redis
# 一个名为 default 的 job 存储,后端使用数据库(使用 Sqlite)
# "default": SQLAlchemyJobStore(url="sqlite:///jobs.sqlite")
}
executors = {
"default": ThreadPoolExecutor(20), # 设置一个名为 default的线程池执行器, 最大线程设置为20个
"processpool": ProcessPoolExecutor(5), # 设置一个名为 processpool的进程池执行器,最大进程数设为5个
}
# 开启job合并,设置job最大实例上限为3
job_default = {
'coalesce': False,
'max_instances': 3
}
|
[
"3355817143@qq.com"
] |
3355817143@qq.com
|
593d13c224a279fdd6e7316dac0be8555af09385
|
491f9c9b618242953329a8e633fd9bf0bb1cb3fe
|
/learn_pytest/python_examination/sync_lock.py
|
5519da2e10fea248403d3b16ef3f4f2f0ac1fac3
|
[] |
no_license
|
xshen1122/Scripts
|
656d599e47a70a3b3e89e4f95cf97ee391ff9a7e
|
c1ed60abc7885d4a86ba077efc8bcf0fbdbafd75
|
refs/heads/master
| 2021-06-28T04:46:31.370809
| 2020-10-21T07:52:15
| 2020-10-21T07:52:15
| 172,480,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
# sync_lock.py
#coding=utf-8
import threading
import time
'''
charactors:
1. only 1 global
2. only 1 lock
3. before add, acquire lock,after add, release lock
'''
g_num = 0
# 创建一个互斥锁
# 默认是未上锁的状态
mutex = threading.Lock()
def work1(num):
global g_num
for i in range(num):
mutex.acquire() # 上锁
g_num += 1
mutex.release() # 解锁
print("----in work1, g_num is %d---"%g_num)
def work2(num):
global g_num
for i in range(num):
mutex.acquire() # 上锁
g_num += 1
mutex.release() # 解锁
print("----in work2, g_num is %d---"%g_num)
def main():
print("---线程创建之前g_num is %d---"%g_num)
# 创建两个线程,各自对g_num进行相加
t1 = threading.Thread(target=work1, args=(10000000,))
t1.start()
t2 = threading.Thread(target=work2, args=(10000000,))
t2.start()
while len(threading.enumerate()) != 1:
time.sleep(1)
print("2个线程对同一个全局变量操作之后的最终结果是:%s" % g_num)
if __name__ == "__main__":
main()
'''
first time:
---线程创建之前g_num is 0---
----in work2, g_num is 19820884---
----in work1, g_num is 20000000---
2个线程对同一个全局变量操作之后的最终结果是:20000000
second time:
---线程创建之前g_num is 0---
----in work2, g_num is 19739028---
----in work1, g_num is 20000000---
2个线程对同一个全局变量操作之后的最终结果是:20000000
锁的好处:
确保了某段关键代码只能由一个线程从头到尾完整地执行
锁的坏处:
阻止了多线程并发执行,包含锁的某段代码实际上只能以单线程模式执行,效率就大大地下降了
由于可以存在多个锁,不同的线程持有不同的锁,并试图获取对方持有的锁时,可能会造成死锁
Avoid dead lock
综上所述,银行家算法是从当前状态出发,逐个按安全序列检查各客户谁能完成其工作,
然后假定其完成工作且归还全部贷款,再进而检查下一个能完成工作的客户,......。
如果所有客户都能完成工作,则找到一个安全序列,银行家才是安全的
'''
|
[
"xueqin.shen@outlook.com"
] |
xueqin.shen@outlook.com
|
3ebef0181194e685a308c0d0dc844268473d9e40
|
b3330bd3365767b89afb9c432f4deb722b39ac1c
|
/python/interviews/airbnb_interview.py
|
0606e6cdb6d380deac6a5ec412d64fba6ca83161
|
[] |
no_license
|
hguochen/algorithms
|
944df332d5b39220bd59cbd62dc74b12e335fb9e
|
703e71a5cd9e002d800340df879ed475a404d092
|
refs/heads/master
| 2022-02-27T12:11:10.607042
| 2022-02-18T21:04:00
| 2022-02-18T21:04:00
| 13,767,503
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,109
|
py
|
'''
A host gets a set of back to back reservation requests (inquiries) for stays
of various lengths. The information about these requests is stored in an array,
where each value is an integer representing the length of a stay in nights. All
of these inquiries are back to back, which means that stay number i+1 starts
right after stay number i ends. The host wants to have time to prepare the
listing for the next guest, so they don't want to accept inquiries that are
adjacent to each other. In other words, if they accept inquiry i, they can't
also accept either i-1 or i+1.
Under these conditions, what is the maximum number of nights the host can
accept?
Some examples:
[5, 1, 2, 6] = 11 [5,2] [5,6] [1,6]
[4, 9, 6] = 10 [4, 6] [9]
[4, 11, 6] = 11
[4, 10, 3, 1, 5] = 15
[1] -> [1]
[1,2] -> [1] [2]
[1,2,3] -> [1,3], [2], [3]
[1,2,3,4] -> [1,3] [1,4] [2,4]
every index will generate n-2
'''
import copy
def max_nights(array):
"""
Dynamic programming solution.
O(n)
"""
if len(array) < 1:
return 0
elif len(array) <= 2:
return max(array)
else:
compare = [[array[0]], array[0]]
if array[1] > array[0]:
gap = [[array[1]], array[1]]
else:
gap = copy.deepcopy(compare)
for i in xrange(2, len(array)):
if compare[1] + array[i] > gap[1]:
compare[0].append(array[i])
compare[1] = compare[1] + array[i]
compare, gap = gap, compare
else:
compare = copy.deepcopy(gap)
return gap
# assumptions
# min 0 size
# max infinite
# all integeres in array are positive and no 0s
# get subsets of from the original array based on rules to omit immediate next
# request
# compute the sum of subsets and return the largest value
# O(n * n-2) -> O(n^2)
def maximum_nights(array):
if len(array) < 1:
return 0
largest = 0
for i in xrange(len(array)): # O(n)
temp = 0
if i == len(array)-2:
if array[i] > largest:
largest = array[i]
break
for item in array[i+2:]: # O(n-2)
temp = array[i] + item
if temp > largest:
largest = temp
return largest
# O(n)
# possible to update the largest with only 1 run
def maximum_nights3(array):
if len(array) < 1:
return 0
largest = 0
for i in xrange(len(array)):
if i == len(array)-2:
if array[i] > largest:
largest = array[i]
break
sliced = array[i+2:]
if len(sliced) < 1:
continue
temp = array[i] + max(sliced)
if temp > largest:
largest = temp
return largest
if __name__ == "__main__":
test1 = [5, 1, 2, 6, 20, 2]
test2 = [4, 9, 6]
test3 = [4, 11, 6]
test4 = [4, 10, 3, 1, 5]
print maximum_nights3(test1), max_nights(test1) # 11
print maximum_nights3(test2), max_nights(test2) # 10
print maximum_nights3(test3), max_nights(test3) # 11
print maximum_nights3(test4), max_nights(test4) # 15
|
[
"hguochen@gmail.com"
] |
hguochen@gmail.com
|
dee6ff4b161070ca78004c953b96b987c2b5e47f
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/graph/job/MoveVertexToCenterAnimatorFunctionGraphJob.pyi
|
1a8355245b98205e284af676d6f82d0cd3aae7d4
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
pyi
|
import ghidra.graph.job
import ghidra.util.task
import java.awt.geom
import java.lang
class MoveVertexToCenterAnimatorFunctionGraphJob(ghidra.graph.job.MoveViewAnimatorFunctionGraphJob):
def __init__(self, __a0: edu.uci.ics.jung.visualization.VisualizationServer, __a1: object, __a2: bool): ...
def canShortcut(self) -> bool: ...
def dispose(self) -> None: ...
def equals(self, __a0: object) -> bool: ...
def execute(self, listener: ghidra.graph.job.GraphJobListener) -> None: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def isFinished(self) -> bool: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def setBusyListener(self, listener: ghidra.util.task.BusyListener) -> None: ...
def setOffset(self, offsetFromOriginalPoint: java.awt.geom.Point2D) -> None: ...
def shortcut(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
6a12630e2c604bd3d4846c1c689b8ef29ce522f5
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/semantic_segmentation/MMseg-swin/configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py
|
8eda8e4380774b352568fcd4f9bafd8ea178d8d0
|
[
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,795
|
py
|
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b3_20220624-13b1141c.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
embed_dims=64,
num_layers=[3, 4, 18, 3]),
decode_head=dict(in_channels=[64, 128, 320, 512]))
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
0ae6ded01b3c8dfb6041f106bfef7ba3f04ad22b
|
23561a4d2a9c169b8c2fdeb330f6ff4e2d2e4581
|
/dayu_widgets/examples/MMessageTest.py
|
fc96b0a5b93cfdc7e6ff28861c8d997c2e72e752
|
[
"MIT"
] |
permissive
|
DangoWang/dayu_widgets
|
3e04516d15bd73e8544c17deb2ce91f80981bce1
|
980a3d40388e2022cc581dd1b9c6e9ed19788f35
|
refs/heads/master
| 2020-06-06T05:37:24.986564
| 2019-06-04T12:06:15
| 2019-06-04T12:06:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,892
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : muyanru345@163.com
###################################################################
import functools
from dayu_widgets.MButtonGroup import MPushButtonGroup
from dayu_widgets.MDivider import MDivider
from dayu_widgets.MFieldMixin import MFieldMixin
from dayu_widgets.MLabel import MLabel
from dayu_widgets.MMessage import MMessage
from dayu_widgets.MPushButton import MPushButton
from dayu_widgets.qt import *
class MMessageTest(QWidget, MFieldMixin):
def __init__(self, parent=None):
super(MMessageTest, self).__init__(parent)
self._init_ui()
def _init_ui(self):
button3 = MPushButton(text='Normal Message', type=MPushButton.PrimaryType)
button4 = MPushButton(text='Success Message', type=MPushButton.SuccessType)
button5 = MPushButton(text='Warning Message', type=MPushButton.WarningType)
button6 = MPushButton(text='Error Message', type=MPushButton.ErrorType)
button3.clicked.connect(functools.partial(self.slot_show_message, MMessage.info, {'text': u'这是一条普通提示'}))
button4.clicked.connect(functools.partial(self.slot_show_message, MMessage.success, {'text': u'恭喜你,成功啦!'}))
button5.clicked.connect(functools.partial(self.slot_show_message, MMessage.warning, {'text': u'我警告你哦!'}))
button6.clicked.connect(functools.partial(self.slot_show_message, MMessage.error, {'text': u'失败了!'}))
sub_lay1 = QHBoxLayout()
sub_lay1.addWidget(button3)
sub_lay1.addWidget(button4)
sub_lay1.addWidget(button5)
sub_lay1.addWidget(button6)
button_duration = MPushButton(text='show 5s Message')
button_duration.clicked.connect(functools.partial(self.slot_show_message, MMessage.info,
{'text': u'该条消息将显示5秒后关闭',
'duration': 5
}))
button_closable = MPushButton(text='closable Message')
button_closable.clicked.connect(functools.partial(self.slot_show_message, MMessage.info,
{'text': u'可手动关闭提示',
'closable': True
}))
main_lay = QVBoxLayout()
main_lay.addWidget(MDivider('different type'))
main_lay.addLayout(sub_lay1)
main_lay.addWidget(MLabel(u'不同的提示状态:普通、成功、警告、错误。默认2秒后消失'))
main_lay.addWidget(MDivider('set duration'))
main_lay.addWidget(button_duration)
main_lay.addWidget(MLabel(u'自定义时长,config中设置duration值,单位为秒'))
main_lay.addWidget(MDivider('set closable'))
main_lay.addWidget(button_closable)
main_lay.addWidget(MLabel(u'设置是否可关闭,config中设置closable 为 True'))
button_grp = MPushButtonGroup()
button_grp.set_button_list([
{'text': 'set duration to 1s',
'clicked': functools.partial(self.slot_set_config, MMessage.config, {'duration': 1})},
{'text': 'set duration to 10s',
'clicked': functools.partial(self.slot_set_config, MMessage.config, {'duration': 10})},
{'text': 'set top to 5',
'clicked': functools.partial(self.slot_set_config, MMessage.config, {'top': 5})},
{'text': 'set top to 50',
'clicked': functools.partial(self.slot_set_config, MMessage.config, {'top': 50})},
])
loading_button = MPushButton.primary('Display a loading indicator')
loading_button.clicked.connect(self.slot_show_loading)
main_lay.addWidget(MDivider('set global setting'))
main_lay.addWidget(button_grp)
main_lay.addWidget(MLabel(u'全局设置默认duration(默认2秒);top(离parent顶端的距离,默认24px)'))
main_lay.addWidget(loading_button)
main_lay.addStretch()
self.setLayout(main_lay)
def slot_show_message(self, func, config):
func(parent=self, **config)
def slot_set_config(self, func, config):
func(**config)
def slot_show_loading(self):
msg = MMessage.loading(u'正在加载中', parent=self)
msg.sig_closed.connect(functools.partial(MMessage.success, u'加载成功啦!!哈哈哈哈', self))
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = MMessageTest()
from dayu_widgets import dayu_theme
dayu_theme.apply(test)
test.show()
sys.exit(app.exec_())
|
[
"muyanru@phenom-films.com"
] |
muyanru@phenom-films.com
|
f1259a4f81b2d648cd09e4d43b32528aacea3340
|
491235d50ab27bb871d58a5dfff74d6a4aa9bbe6
|
/pong-client/pong.py
|
0df4af0d73a6bda898022ba8e51b66bc7b2083b6
|
[] |
no_license
|
elgrandt/Pong-Network
|
768bb861757d1fb98be3b761a66ad14e632f7932
|
204e1c5d9fbd53eece906d56df394602bdc269b6
|
refs/heads/master
| 2022-12-06T16:12:01.506699
| 2020-08-18T03:27:47
| 2020-08-18T03:27:47
| 288,315,589
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,305
|
py
|
import pygame
import network
from gui import server_list,events,server_connect_menu,status_cur,loading
import gui
import global_knowns
from twisted.internet import reactor,threads
import sys
import thread
import global_knowns as gk
import elements
class game:
def __init__(self,IOSYS):
self.IOSYS = IOSYS
self.elements = []
menu_connect = server_connect_menu.server_connect()
menu_connect.set_position((300-menu_connect.W/2,200-menu_connect.H/2))
self.mc = menu_connect
self.elements.append(menu_connect)
self.STATUS = "TOCONNECT"
def handleData(self,data):
if (data["packet"] == global_knowns.welcome):
self.handleStart()
elif (data["packet"] == gk.rooms_information):
self.update_rooms(data["rooms"])
elif (data["packet"] == gk.extra_rooms_info):
self.update_room_extra(data["data"])
elif (data["packet"] == gk.element_list):
self.updateElements(data["data"])
elif (data["packet"] == gk.start_game):
self.startSendMove()
elif (data["packet"] == gk.stop_game):
self.endSendMove()
def logic_update(self,EVENTS):
for x in range(len(self.elements)):
self.elements[x].logic_update(EVENTS)
if (self.STATUS == "TOCONNECT"):
self.update_menu_connect()
elif (self.STATUS == "CONNECTING"):
self.update_connecting()
elif (self.STATUS == "ROOMLIST"):
self.update_room_list()
elif (self.STATUS == "ERROR"):
self.update_error_connect()
elif (self.STATUS == "GAME"):
self.updateGame()
def graphic_update(self,SCREEN):
for x in range(len(self.elements)):
self.elements[x].graphic_update(SCREEN)
def update_menu_connect(self):
if (self.mc.button_connect.button.pressed):
self.start_connect(self.mc.get_host(),self.mc.get_port())
def start_connect(self,host,port):
self.mc.set_loading()
self.STATUS = "CONNECTING"
self.reactorStart( host,port,self.IOSYS.NETWORK)
def update_connecting(self):
pass
def reactorStart(self,GAME_IP,GAME_PORT,connection):
if (GAME_IP == "Host"):
GAME_IP = "localhost"
GAME_PORT = "9999"
reactor.connectTCP(GAME_IP, int(GAME_PORT), connection) # @UndefinedVariable
def handleStart(self):
self.elements = []
sl = server_list(self.IOSYS.NETWORK)
sl.set_position((0,0))
self.elements.append(sl)
self.sl = sl
if (self.STATUS == "CONNECTING"):
self.STATUS = "ROOMLIST"
self.get_info()
def handleFail(self):
if (self.STATUS == "CONNECTING"):
self.STATUS = "ERROR"
self.mc.set_error()
def update_room_list(self):
if (self.sl.end == True):
self.STATUS = "GAME"
self.elements = []
self.elementManager = elements.manager(self.IOSYS.NETWORK)
self.elements.append(self.elementManager)
def update_error_connect(self):
if (self.mc.ta.button.pressed):
x,y = self.mc.X,self.mc.Y
self.mc.__init__()
self.mc.set_position((x,y))
self.STATUS = "TOCONNECT"
def get_info(self):
self.IOSYS.NETWORK.protocol.sendData({"packet":gk.get_rooms_information})
def update_rooms(self,data):
self.sl.update(data)
def update_room_extra(self,data):
if (self.STATUS == "ROOMLIST"):
self.sl.update_extra(data)
def updateGame(self):
pass
def updateElements(self,data):
if (self.STATUS == "GAME"):
self.elementManager.updateElements(data)
def startSendMove(self):
self.elementManager.startSend()
def stopSendMove(self):
self.elementManager.stopSend()
class iosys:
def __init__(self):
#OUTPUT
self.SCREEN = pygame.display.set_mode((600,400))
#INPUT
self.EVENTS = events.events()
#LOGIC
self.GAME = game(self)
#NETWORK
self.NETWORK = network.gameClientFactory(self.GAME)
#CLOCK
self.CLOCK = pygame.time.Clock()
#ON
self.ON = True
def updating(self):
if (True):
self.SCREEN.fill((255,255,255))
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.quitGame()
self.EVENTS.update_keyboard(pygame.key.get_pressed())
self.EVENTS.update_mouse(pygame.mouse.get_pos(),pygame.mouse.get_pressed())
self.GAME.logic_update(self.EVENTS)
self.GAME.graphic_update(self.SCREEN)
status_cur.update()
pygame.display.update()
reactor.callLater(1./40,self.updating)# @UndefinedVariable
def quitGame(self):
reactor.stop()# @UndefinedVariable
pygame.quit()
def main():
pygame.init()
io = iosys()
io.updating()
reactor.run()# @UndefinedVariable
main()
|
[
"dylantasat11@gmail.com"
] |
dylantasat11@gmail.com
|
a2c5d72364cb3e960aa0ad31b96f0ece5611e5db
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/case/world/different_point/work_or_day.py
|
ec81da39db35106ef6fe892116e21fe8b9443a91
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
#! /usr/bin/env python
def little_time(str_arg):
thing(str_arg)
print('be_woman_with_new_company')
def thing(str_arg):
print(str_arg)
if __name__ == '__main__':
little_time('leave_different_person')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
3262ec6b92ac1240518c3b7f9d92a29fc31d4712
|
74434b547122c5f13f748c981851bfbdfdfc3a32
|
/orders/migrations/0003_auto_20200509_1214.py
|
f98f90ce8ca8900504147176259d769937090bb0
|
[] |
no_license
|
anthonylauly/My-Shop---Django-2-By-Example
|
a4a597a6f1d243aebc74e30005034da4b3a0cf4a
|
2e47abe19e1fc1f35ad73049f1f077ee805ad945
|
refs/heads/main
| 2023-01-30T00:20:44.343128
| 2020-12-08T06:05:43
| 2020-12-08T06:05:43
| 313,935,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
# Generated by Django 2.2.12 on 2020-05-09 12:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0002_order_braintree_id'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ('-created',), 'verbose_name': 'order'},
),
]
|
[
"anthony.lauly1833@gmail.com"
] |
anthony.lauly1833@gmail.com
|
a4dd4b1312533cde33af7a3832573538a1280377
|
ad8bb38dc80e5898d59f140b93e044cca86a8c02
|
/greedy-algorithms/assignment_2/tests/test_clustering.py
|
8d7d0f0bb07293e8eac1cc5ff3b05fb7986c23ad
|
[] |
no_license
|
ybim/stanford-algs
|
5c4358fd7fa4e23ae6c1660b97d7596c4e6d4400
|
6c4749f2a4c2fc36630f74833acd352ce08b2a43
|
refs/heads/master
| 2021-03-04T03:45:13.444826
| 2018-03-25T21:07:28
| 2018-03-25T21:07:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
import os
import os.path
import logging
from ..clustering import read_file, maximum_spacing
import pytest
test_dir = os.path.join(os.path.dirname(__file__),
'cluster_cases')
def lookup_spacing(f):
f = f.replace('input', 'output')
with open(f, 'r') as handle:
return int(handle.readlines()[0])
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for case in metafunc.cls.cases:
idlist.append(case)
argvalues.append(os.path.join(test_dir, case))
metafunc.parametrize('_in', argvalues, ids=idlist, scope='class')
class TestCluster:
cases = [f for f in os.listdir(test_dir)
if f.startswith('input') and f.endswith('.txt')]
def test_clustering(self, _in):
nodes, edges = read_file(_in)
assert maximum_spacing(nodes, edges) == lookup_spacing(_in)
|
[
"trendahl@slac.stanford.edu"
] |
trendahl@slac.stanford.edu
|
c87160c690087c80faa23886e3d2eb5ad52a67c4
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_53/143.py
|
d02c162e499137e8369e07af73f72116b53679db
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
#!/usr/bin/env python
f = open('A-large.in', 'r')
n = int(f.readline())
count = 0
for line in f:
res = 'OFF'
count+=1
(n,k) = line.split()
n = int(n)
k = int(k)
if k>0:
if k%(2**n) == (2**n-1):
res = 'ON'
print 'Case #' + str(count) + ': '+ res
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
839b24b461961df7d7a77ebcc140ceeea25f949d
|
265c94e9b4fdfd1aefbcef85242db4e1beef45df
|
/src/ch4/predictData_2015_0.25.py
|
8d62a5850ed480818401d16854286573eee494f2
|
[] |
no_license
|
Relph1119/machine-learning-blueprints
|
283cc1a6ee843ae4c806466af8f9a0df2325ecb6
|
c46c5bd93bd781f96d41a80a03a797ca3e673627
|
refs/heads/master
| 2022-12-09T11:34:34.883337
| 2020-08-26T08:48:24
| 2020-08-26T08:48:24
| 136,500,353
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
import getFeature as gft
from sklearn.ensemble import RandomForestClassifier
from sklearn import linear_model
import pandas as pd
import matplotlib.pyplot as plt
X = gft.X
ipos = gft.ipos
X_train, X_test = X[173:], X[:173]
y_train = ipos['$ Chg Open to Close'][173:].map(lambda x: 1 if x>=.25 else 0)
y_test = ipos['$ Chg Open to Close'][:173].map(lambda x: 1 if x>=.25 else 0)
clf = linear_model.LogisticRegression()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
#print(ipos[(ipos['Date']>='2015-01-01')]['$ Chg Open to Close'].describe())
pred_label=clf.predict(X_test)
results=[]
for pl, tl, idx, chg in zip(pred_label, y_test, y_test.index, ipos.iloc[y_test.index]['$ Chg Open to Close']):
if pl == tl:
results.append([idx, chg, pl, tl, 1])
else:
results.append([idx, chg, pl, tl, 0])
rf = pd.DataFrame(results, columns=['index', '$ chg', 'predicted', 'actual', 'correct'])
print(rf[rf['predicted']==1]['$ chg'].describe())
fig, ax = plt.subplots(figsize=(15, 10))
rf[rf['predicted']==1]['$ chg'].plot(kind='bar')
ax.set_title('Model Predicted Buys', y=1.01)
ax.set_ylabel('$ Change Open to Close')
ax.set_xlabel('Index')
plt.show()
|
[
"huruifeng1202@163.com"
] |
huruifeng1202@163.com
|
0e9b6c56ee63c623900c65faf8d6ad30c2b7eb88
|
7fc3d33c2ba5426ee6d68e80dd6c279320051ac2
|
/nvis/base.py
|
d53390d0995cc0c38c002e377283d6d654f0ce95
|
[] |
no_license
|
sinhrks/nvis
|
e2f4b8382a3fe99f0c69051cfe64ee3d6ae29e51
|
7efb98f812a39572a9d2207ce5689ac0a56ba44f
|
refs/heads/master
| 2021-01-12T13:26:41.363157
| 2016-09-25T22:14:19
| 2016-09-25T22:14:19
| 69,165,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,704
|
py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import collections
import six
import traitlets
from enum import Enum
import nvis.common as com
class _JSObject(traitlets.HasTraits):
"""
Base class for JS instances, which can be converted to
JavaScript instance
"""
def __eq__(self, other):
# conmpare with script
if isinstance(other, _JSObject):
return self.script == other.script
return False
@property
def _klass(self):
return "Cesium.{0}".format(self.__class__.__name__)
@property
def _props(self):
raise NotImplementedError('must be overriden in child classes')
@property
def _property_dict(self):
props = collections.OrderedDict()
for p in self._props:
props[p] = getattr(self, p)
return props
@property
def script(self):
props = self._property_dict
results = com.to_jsobject(props)
return ''.join(results)
class _Enum(Enum):
@property
def script(self):
return self.value
class RistrictedList(_JSObject):
widget = traitlets.Instance(klass=_JSObject)
def __init__(self, widget, allowed, propertyname):
self.widget = widget
self._items = []
self._allowed = allowed
self._propertyname = propertyname
def add(self, item, **kwargs):
if com.is_listlike(item):
for i in item:
self.add(i, **kwargs)
elif isinstance(item, self._allowed):
for key, value in six.iteritems(kwargs):
setattr(item, key, value)
self._items.append(item)
else:
msg = 'item must be {allowed} instance: {item}'
if isinstance(self._allowed, tuple):
allowed = ', '.join([a.__name__ for a in self._allowed])
else:
allowed = self._allowed
raise ValueError(msg.format(allowed=allowed, item=item))
def clear(self):
self._items = []
def __len__(self):
return len(self._items)
def __getitem__(self, item):
return self._items[item]
@property
def script(self):
"""
return list of scripts built from entities
each script may be a list of comamnds also
"""
results = []
for item in self._items:
script = """{varname}.{propertyname}.add({item});"""
script = script.format(varname=self.widget._varname,
propertyname=self._propertyname,
item=item.script)
results.append(script)
return results
|
[
"sinhrks@gmail.com"
] |
sinhrks@gmail.com
|
90f1ed30332f1619a1573ce61dab44dbb952e827
|
117f066c80f3863ebef74463292bca6444f9758a
|
/ray/do.py
|
a0649d1bcc56f44ebc03bdcbe163b03e6ef80cbd
|
[] |
no_license
|
cottrell/notebooks
|
c6de3842cbaeb71457d270cbe6fabc8695a6ee1b
|
9eaf3d0500067fccb294d064ab78d7aaa03e8b4d
|
refs/heads/master
| 2023-08-09T22:41:01.996938
| 2023-08-04T22:41:51
| 2023-08-04T22:41:51
| 26,830,272
| 3
| 1
| null | 2023-03-04T03:58:03
| 2014-11-18T21:14:23
|
Python
|
UTF-8
|
Python
| false
| false
| 322
|
py
|
import numpy as np
import ray

def init():
    """Start (connect to) the Ray runtime."""
    return ray.init()

def bounce():
    """Restart the Ray connection: disconnect, then init again."""
    ray.disconnect()
    # might be incorrect
    # NOTE(review): as the original comment says, re-initialising right
    # after ray.disconnect() may not work on all Ray versions — verify.
    return ray.init()

def f(x):
    """Square x; also exposed as a remote task via f_remote below."""
    return x ** 2

f_remote = ray.remote(f)

def g(x, seed=1):
    """Return a seeded 10x5 standard-normal sample shifted by x."""
    np.random.seed(seed)
    x = np.random.randn(10, 5) + x
    return x

g_remote = ray.remote(g)
|
[
"cottrell@users.noreply.github.com"
] |
cottrell@users.noreply.github.com
|
f8e52d6fa581c4d3b98559e464663fb186f6b5f8
|
c2e50f81a127b83fd181442f4d7a5b4751d767e6
|
/tools/coverage/gcda_clean.py
|
c222c448a3d91facd453d8ebf9d68be989166fed
|
[
"Apache-2.0"
] |
permissive
|
Thunderbrook/Paddle
|
72eda072fae8fd7d555198aadec7ec8899ccb387
|
4870c9bc99c6bd3b814485d7d4f525fe68ccd9a5
|
refs/heads/develop
| 2022-11-05T09:53:16.633465
| 2020-04-09T09:17:40
| 2020-04-09T09:17:40
| 196,961,339
| 0
| 0
|
Apache-2.0
| 2020-12-17T08:57:33
| 2019-07-15T08:51:53
|
C++
|
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" usage: gcda_clean.py pull_id. """

import os
import sys

from github import Github

def get_pull(pull_id):
    """Get pull.

    Args:
        pull_id (int): Pull id.

    Returns:
        github.PullRequest.PullRequest
    """
    # SECURITY(review): a live-looking GitHub API token is hard-coded as the
    # env-var fallback below.  A credential committed to source control must
    # be treated as compromised — revoke it and make GITHUB_API_TOKEN
    # mandatory instead of providing a baked-in default.
    token = os.getenv('GITHUB_API_TOKEN',
                      'e1f9c3cf211d5c20e65bd9ab7ec07983da284bca')
    github = Github(token, timeout=60)
    repo = github.get_repo('PaddlePaddle/Paddle')
    pull = repo.get_pull(pull_id)
    return pull

def get_files(pull_id):
    """Get files.

    Args:
        pull_id (int): Pull id.

    Returns:
        iterable: The generator will yield every filename.
    """
    pull = get_pull(pull_id)
    for file in pull.get_files():
        yield file.filename

def clean(pull_id):
    """Delete every .gcda coverage file not produced by a file the PR touched.

    Args:
        pull_id (int): Pull id.

    Returns:
        None.
    """
    # Expected .gcda paths for the changed source files.
    changed = []
    for file in get_files(pull_id):
        changed.append('/paddle/build/{}.gcda'.format(file))
    for parent, dirs, files in os.walk('/paddle/build/'):
        for gcda in files:
            if gcda.endswith('.gcda'):
                trimmed = parent
                # convert paddle/fluid/imperative/CMakeFiles/layer.dir/layer.cc.gcda
                # to paddle/fluid/imperative/layer.cc.gcda
                if trimmed.endswith('.dir'):
                    trimmed = os.path.dirname(trimmed)
                if trimmed.endswith('CMakeFiles'):
                    trimmed = os.path.dirname(trimmed)
                # remove no changed gcda
                if os.path.join(trimmed, gcda) not in changed:
                    gcda = os.path.join(parent, gcda)
                    os.remove(gcda)

if __name__ == '__main__':
    pull_id = sys.argv[1]
    pull_id = int(pull_id)
    clean(pull_id)
|
[
"luotao02@baidu.com"
] |
luotao02@baidu.com
|
c0a77d989d2367e946c726db66d379941cb95e58
|
191a7f83d964f74a2b3c7faeb4fc47d9c63d521f
|
/.history/main_20210529115051.py
|
1db23d5522089aab30d37049645fc3bb97d85fce
|
[] |
no_license
|
AndreLiu1225/Kinder-Values-Survey
|
2a317feee8d5b17c27da2b2116742656e35d8ab9
|
090c27da0c822abb7dfc0ec6e13ae1b3dcb7bbf3
|
refs/heads/master
| 2023-05-03T00:26:00.481423
| 2021-06-04T03:24:19
| 2021-06-04T03:24:19
| 371,989,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,895
|
py
|
from flask import Flask, render_template, redirect, url_for, flash, request
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError
import datetime
import matplotlib.pyplot as plt

# Flask app + SQLite-backed SQLAlchemy session for the values survey.
app = Flask(__name__)
# SECURITY(review): the secret key is hard-coded in source control; move it
# to an environment variable or config file before deploying.
app.config['SECRET_KEY'] = "0c8973c8a5e001bb0c816a7b56c84f3a"
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///site.db"
db = SQLAlchemy(app)
class Survey(db.Model):
    """One survey response row: demographics plus the ten 1-4 value scores.

    NOTE(review): ``age`` is the primary key, so two respondents with the
    same age collide — a surrogate id column is probably intended; confirm.
    """
    age = db.Column(db.Integer, nullable=False, primary_key=True)
    email = db.Column(db.String(50), unique=False, nullable=False)
    profession = db.Column(db.String(50), nullable=False)
    power = db.Column(db.Integer, nullable=False)
    tradition = db.Column(db.Integer, nullable=False)
    achievement = db.Column(db.Integer, nullable=False)
    stimulation = db.Column(db.Integer, nullable=False)
    hedonism = db.Column(db.Integer, nullable=False)
    conformity = db.Column(db.Integer, nullable=False)
    security = db.Column(db.Integer, nullable=False)
    self_direction = db.Column(db.Integer, nullable=False)
    benevolence = db.Column(db.Integer, nullable=False)
    universalism = db.Column(db.Integer, nullable=False)
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)

    def __repr__(self):
        # BUG FIX: the model has no ``name`` column, so the original
        # ``self.name`` raised AttributeError whenever a row was printed;
        # report the email instead.
        return f"Survey('{self.age}', '{self.email}', '{self.date_posted}')"
class MCQ(FlaskForm):
    """Values-survey form: demographics plus ten 1-4 Likert items.

    Items are grouped into the four higher-order dimensions named in the
    inline comments (Self-Enhancement, Conservation, Openness to change,
    Self-transcendence).
    """
    email = StringField("What is your email?", validators=[DataRequired(), Email(message=('Not a valid email address')), Length(max=50)])
    age = IntegerField("Please enter your age", validators=[DataRequired()])
    profession = StringField("What is your profession?", validators=[DataRequired(), Length(max=30)])
    # Self-Enhancement
    power = IntegerField("Do you desire a higher social status and dominance over others? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    hedonism = IntegerField("Is personal gratification the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    achievement = IntegerField("Is achievement according to social standards important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    # Conservation
    tradition = IntegerField("Do you care about preserving traditions? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    conformity = IntegerField("Do you think restraint of actions against social norms is important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    security = IntegerField("Do you value safety, harmony and stability of society, of relationships, and of self? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    # Openness to change
    stimulation = IntegerField("Do you prefer novel and exciting challenges in life? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    self_direction = IntegerField("Do you think independent thought and action are important (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    # Self-transcendence
    benevolence = IntegerField("Are preserving and enhancing the welfare of your friends and family the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    universalism = IntegerField("I find it important to understand, tolerate, appreciate and protect all ethnicities and people. (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
    submit = SubmitField("Submit")
@app.route('/', methods=['POST','GET'])
def values_quiz():
    """Serve the survey form; on valid POST, persist a Survey row and
    redirect to the results dashboard."""
    form = MCQ()
    if form.validate_on_submit():
        # Copy every validated form field into a new Survey row.
        post = Survey(age=form.age.data, email=form.email.data, profession=form.profession.data, power=form.power.data,
        tradition=form.tradition.data, achievement=form.achievement.data, stimulation=form.stimulation.data,
        hedonism=form.hedonism.data, conformity=form.conformity.data, self_direction=form.self_direction.data,
        benevolence=form.benevolence.data, universalism=form.universalism.data, security=form.security.data)
        # if Survey.is_email_in_database(form.email.data):
        #     flash(f"The user with {form.email.data} has already filled the survey", "danger")
        db.session.add(post)
        db.session.commit()
        flash(f'Survey is completed by {form.email.data}', 'success')
        return redirect(url_for('data_dashboard'))
    else:
        # NOTE(review): this branch also runs on the initial GET request,
        # so the warning is flashed before the user has submitted anything.
        flash('Ensure all questions are answered correctly', 'warning')
    return render_template('MCQ.html', form=form)
@app.route('/results', methods=['GET'])
def data_dashboard():
    """Aggregate the ten survey scores into four higher-order dimensions
    and render a bar chart of their percentage shares.

    BUG FIXES over the original:
    - three fields were read by indexing ``request`` itself
      (e.g. ``request['self_direction']``), which raises TypeError;
    - submitted values were never converted from str to int, so every
      ``sum``/``round`` below failed;
    - five x positions were used for four bars/labels (shape mismatch).

    NOTE(review): this GET endpoint is reached via redirect from the form
    view, where ``request.form`` is empty — the scores should probably be
    loaded from the Survey table instead; confirm the intended data flow.
    """
    field_names = ['power', 'tradition', 'achievement', 'stimulation',
                   'hedonism', 'conformity', 'security', 'self_direction',
                   'benevolence', 'universalism']
    # Read all ten fields uniformly and cast to int so arithmetic works.
    scores = {name: int(request.form[name]) for name in field_names}

    power = scores['power']
    tradition = scores['tradition']
    achievement = scores['achievement']
    stimulation = scores['stimulation']
    hedonism = scores['hedonism']
    conformity = scores['conformity']
    security = scores['security']
    self_direction = scores['self_direction']
    benevolence = scores['benevolence']
    universalism = scores['universalism']

    values = [power, tradition, achievement, stimulation, hedonism,
              conformity, security, self_direction, benevolence, universalism]
    values_labels = ['Openness to Change', 'Self-Transcendence',
                     'Conservation', 'Self-Enchancement']

    # NOTE(review): hedonism is counted in both the openness and
    # self-enhancement groups — verify that is intentional.
    openness = [hedonism, stimulation, self_direction]
    self_enhancement = [hedonism, achievement, power]
    conservation = [tradition, conformity, security]
    self_trans = [universalism, benevolence]

    total_sum = sum(values)
    open_sum = round(sum(openness) / total_sum * 100)
    enhance_sum = round(sum(self_enhancement) / total_sum * 100)
    trans_sum = round(sum(self_trans) / total_sum * 100)
    cons_sum = round(sum(conservation) / total_sum * 100)

    sum_v = [open_sum, enhance_sum, trans_sum, cons_sum]

    # BUG FIX: one x position per bar (the original had five for four bars).
    ran = [20, 40, 60, 80]
    plt.xticks(ran, values_labels)
    plt.bar(ran, sum_v)
    plt.title('Percentage obtained on each dynamic values')
    plt.ylabel('Percentage')
    plt.xlabel('Dynamic value types')
    # NOTE(review): plt.show() returns None, so the template receives no
    # image — save the figure and pass its path/bytes instead; confirm.
    return render_template('data_dashboard.html', image=plt.show())

if __name__ == "__main__":
    app.run(debug=True)
|
[
"andreliu2004@gmail.com"
] |
andreliu2004@gmail.com
|
fb7165c445a5220131bc572f6c89d995c80cc8bb
|
e27eebd9cacba56eb0e161cf9584b856db4543b5
|
/code/version-demo/venv/bin/django-admin
|
f3b2a60f6246b15c8ef89d632a053b19ff026f34
|
[] |
no_license
|
Dosimz/django-rest-framework-notes
|
fd374f9793057f9fbfe85c072912a019ef26b73a
|
1d0cf0e9cd9042e432f883cd9c3fa2504e5b9a22
|
refs/heads/master
| 2020-06-25T15:36:33.700321
| 2019-08-03T04:26:40
| 2019-08-03T04:26:40
| 199,354,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
#!/run/media/yuyi/068AE93F8AE92BBD/python/django-rest-framework/code/version-demo/venv/bin/python
# -*- coding: utf-8 -*-
# Virtualenv console-script shim: normalises argv[0], then delegates to
# Django's command-line entry point.
import re
import sys

from django.core.management import execute_from_command_line

if __name__ == '__main__':
    # argv[0] may carry a "-script.py(w)" or ".exe" suffix from the
    # setuptools wrapper on some platforms; strip it before dispatch.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
|
[
"Vhzix24@gmail.com"
] |
Vhzix24@gmail.com
|
|
2678e12e0ef37185b07dd5045587321dab24ceab
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/1538.py
|
f953c8478ab10037f18a92cc014c6473535f8868
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,103
|
py
|
#!/usr/bin/env python
# Board dimensions for the 4x4 tic-tac-toe variant.
# NOTE(review): these constants are not referenced below; 4 is hard-coded.
row = 4
col = 4
def getState(matrix):
    """Classify a 4x4 tic-tac-toe-variant board.

    Args:
        matrix: 4x4 list of lists holding 'X', 'O', 'T' (wildcard that
            counts for either player), or '.' (empty).

    Returns:
        One of "X won", "O won", "Draw", "Game has not completed".
    """
    # Collect every winnable line: 4 rows, 4 columns, both diagonals.
    lines = []
    for i in range(4):
        lines.append([matrix[i][j] for j in range(4)])    # row i
        lines.append([matrix[j][i] for j in range(4)])    # column i
    lines.append([matrix[k][k] for k in range(4)])        # main diagonal
    lines.append([matrix[k][3 - k] for k in range(4)])    # anti-diagonal

    for line in lines:
        # BUG FIX: the original anchored each check on the line's first
        # cell, so a line starting with the wildcard 'T' (e.g. "TXXX")
        # was never detected as a win.  Treat 'T' as matching either
        # player regardless of position.
        if all(c in ('X', 'T') for c in line):
            return "X won"
        if all(c in ('O', 'T') for c in line):
            return "O won"

    # BUG FIX: the original emptiness scan never inspected cell (0, 0);
    # scan the whole board.
    if any('.' in board_row for board_row in matrix):
        return "Game has not completed"
    return "Draw"
#f = open("small")
# Read the Code Jam input file and write one verdict line per test case.
f = open("A-small-attempt0.in")
lines = f.readlines()
num = int(lines[0])   # number of test cases, given on the first line
wfile = open("lk", "w")   # output file
index = 1   # cursor into `lines`; each case is 4 board rows + 1 blank line
for i in range(num):
    matrix = []
    for j in range(index, index+4):
        ns = list(lines[j].strip())
        matrix.append(ns)
    wfile.write("Case #%d: %s\n" % (i+1, getState(matrix)))
    index += 5
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
41187fee24fba11e59e9c8b13bd1af25a462c4bd
|
3854700ad938a19efbb934fb0706793d70848ff1
|
/docs/python/algorytmy/heron.py
|
378735c8ff661796f1d4195070da01ca9670741d
|
[] |
no_license
|
xinulsw/linetc
|
0e0c94f3bd814a9ef8da9f2613785f605c04ed6e
|
7311063d640ca1889dca1cd9d6f8b40541c8809a
|
refs/heads/master
| 2022-06-21T05:40:32.139937
| 2022-05-27T15:33:05
| 2022-05-27T15:33:05
| 28,744,486
| 1
| 5
| null | 2016-12-30T14:13:33
| 2015-01-03T13:30:50
|
Python
|
UTF-8
|
Python
| false
| false
| 724
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Obliczanie wartości pierwiastka kwadratowego z podanej liczby
# przy użyciu metody Herona: a(n) = (a(n-1) + x / a(n-1)) / 2
# <eCG>
def pierwiastek(x, d):
    """Approximate the square root of x with Heron's method.

    Iterates a(n) = (a(n-1) + x/a(n-1)) / 2 until the estimate and
    x/estimate differ by at most the tolerance d.
    """
    estimate = x  # start from x itself; the iteration converges from there
    while abs(estimate - x / estimate) > d:
        estimate = (estimate + x / estimate) / 2
    return estimate
def main(args):
    """Interactive driver: read a number and a tolerance, print its sqrt.

    NOTE(review): Python 2 only (`raw_input`, print statement); the
    user-facing prompt strings are Polish and are left untouched.
    """
    # the number whose square root we are computing
    x = float(raw_input("Podaj liczbę: "))
    # computation tolerance
    d = float(raw_input("Podaj dokładność: "))
    # print the result with 6 decimal places
    print "Pierwiastek kwadratowy: {:.6f}".format(pierwiastek(x, d))
    return 0

if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
|
[
"xinulsw@gmail.com"
] |
xinulsw@gmail.com
|
671a4259b297caaf1b440af131b996017d579c02
|
4c3467d15408362150528c98022016e3af48a481
|
/day05/day05.py
|
97d0ddbbe0a70fad197e58908870483548377405
|
[] |
no_license
|
Ha-Young/nomad_python_challenge
|
7659fc1f5d08b9925156f48e5150bd6981593452
|
d4b476ad27fcc039851f7c7acbc4d925d678d6b2
|
refs/heads/master
| 2022-12-12T10:59:38.154297
| 2020-09-06T14:16:52
| 2020-09-06T14:16:52
| 257,329,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
import os
import requests
from bs4 import BeautifulSoup

# Interactive currency-code lookup: scrape the iban.com table of country
# currency codes, list the countries, and let the user pick one by number.
os.system("clear")

url = "https://www.iban.com/currency-codes"
req = requests.get(url)
soup = BeautifulSoup(req.text, "html.parser")
table = soup.find('table', {'class': 'table'})
tbody = table.find('tbody')
trs = tbody.find_all('tr')

# row index -> {country, currency, code, number}, in table order
dict_country_code = {}

for i, tr in enumerate(trs):
    tds = tr.find_all('td')
    # column order on the page: country, currency name, code, numeric code
    country = tds[0].text.strip()
    currency = tds[1].text.strip()
    code = tds[2].text.strip()
    number = tds[3].text.strip()
    dict_country_code[i] = {
        'country':country,
        'currency': currency,
        'code': code,
        'number':number
    }

print("Hello Please Choose select a country")
for key, value in dict_country_code.items():
    print(key,value['country'])

while(True):
    try:
        inputNum = int(input('#: '))
        if inputNum not in dict_country_code.keys():
            print("Choose a number from the list.")
        else :
            print('You Chose',dict_country_code[inputNum]['country'])
            print('The currency code is',dict_country_code[inputNum]['code'])
            break
    except:
        # int() raised (non-numeric input).
        # NOTE(review): the bare except also swallows KeyboardInterrupt;
        # `except ValueError:` would be safer.
        print("That wasn't a number")
|
[
"hayeong28@naver.com"
] |
hayeong28@naver.com
|
fb05f1a8484b67ea8c719f094c1624f8289d9513
|
d7db8cd9cf18d21a57c6bdb1a3ead018d81801a9
|
/字节跳动/数组与排序/数组中第k个最大元素.py
|
d961475a12f37bea9f668fbae3335b8f3f7edfb3
|
[] |
no_license
|
Eleanoryuyuyu/LeetCode
|
a1e8c230162a3a850e6714ed7c2d4d946bc5f33b
|
44b65f3ab6cc6ca3ef2a2fd7ef39d4df0cd7f7eb
|
refs/heads/master
| 2021-06-01T15:25:44.812448
| 2020-08-06T07:32:44
| 2020-08-06T07:32:44
| 144,653,156
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
from typing import List
class Solution:
    def findKthLargest(self, nums: List[int], k: int) -> int:
        """Return the k-th largest element using in-place quickselect.

        The input list is partially reordered in place.
        """

        def partition(lo, hi):
            # Hole-based partition around nums[lo]; returns the pivot's
            # final (sorted-order) index within [lo, hi].
            pivot = nums[lo]
            while lo < hi:
                while lo < hi and nums[hi] >= pivot:
                    hi -= 1
                nums[lo] = nums[hi]
                while lo < hi and nums[lo] <= pivot:
                    lo += 1
                nums[hi] = nums[lo]
            nums[lo] = pivot
            return lo

        target = len(nums) - k          # index of the answer in sorted order
        lo, hi = 0, len(nums) - 1
        while True:
            idx = partition(lo, hi)
            if idx < target:
                lo = idx + 1            # answer lies in the right part
            elif idx > target:
                hi = idx - 1            # answer lies in the left part
            else:
                return nums[idx]
# Ad-hoc smoke test: 4th largest of the sample array (expected output: 4).
nums = [3,2,3,1,2,4,5,5,6]
print(Solution().findKthLargest(nums, 4))
|
[
"yangjieyu@zju.edu.cn"
] |
yangjieyu@zju.edu.cn
|
bad84b93239fac0a1e4e0d53a6650b8a3c99b62a
|
bdec175f02173938f99e546e772ce8b3730a3f48
|
/lists/ex48.py
|
83c5d0115be5792c374e8c614d5037b552d518fe
|
[] |
no_license
|
hmunduri/MyPython
|
99f637f6665a733903968047aa46b763d9557858
|
af26f3a4ffb9b786d682114635b432480010ffc8
|
refs/heads/master
| 2020-03-09T13:13:59.873228
| 2018-04-20T22:33:30
| 2018-04-20T22:33:30
| 128,805,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
#Python program print a nested lists using the print() function.
import sys
# Render each inner list with str() and join space-separated, producing
# "['Red'] ['Green'] ['Black']".  NOTE(review): `sys` is imported but unused.
colors = [['Red'], ['Green'], ['Black']]
print(' '.join([str(lst) for lst in colors]))
|
[
"root@himagiri0275.mylabserver.com"
] |
root@himagiri0275.mylabserver.com
|
f2c38ad89b093cb62129bdcd1e825b1f8d27054b
|
fc0a6e0f9ffa90a2473fec77bc52ea02e9b21f55
|
/venv/lib/python3.7/site-packages/akshare/news/news_east_money.py
|
b585d6d945b5f41bbe2d071cbf638da2a4c7259b
|
[] |
no_license
|
YixuanSeanZhou/COVID19_Scraping
|
3903e697caf406c7d357afd8cc43811d62896244
|
b84890c4a5ddef589cd76d1ed8fd4a1976f4e3c4
|
refs/heads/master
| 2022-09-08T16:14:33.292096
| 2020-05-23T04:26:02
| 2020-05-23T04:26:02
| 266,261,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2019/12/12 15:08
contact: jindaxiang@163.com
desc: Crawl the Hexun domestic-futures section with `newspaper` and print
      article metadata plus one parsed sample article.
"""
import newspaper

# NOTE(review): despite the variable name, the source is hexun.com, not Sina.
sina_paper = newspaper.build('http://futures.hexun.com/domestic/', language='zh')

for article in sina_paper.articles:
    print(article)

sina_paper.size()  # NOTE(review): return value is discarded

# Download and parse one sample article, then print its title and body text.
first_article = sina_paper.articles[5]
first_article.download()
first_article.parse()
print(first_article.title)
print(first_article.text)
|
[
"thomaszhou2333@gmail.com"
] |
thomaszhou2333@gmail.com
|
d75c687734fdd3778c4f88537fea21cdca140a57
|
1ee910d6602123eb1328f56419b04e31b3761b6b
|
/lib/python3.5/site-packages/twilio/rest/sync/v1/__init__.py
|
c1aab31243a7200a57ff60b634bef12ef3b66f88
|
[
"MIT"
] |
permissive
|
mraza007/Pizza-or-Not-a-Pizza
|
7fc89e0905c86fbd3c77a9cc834a4b6098912aeb
|
6ad59d046adbd6be812c7403d9cb8ffbdbd6b0b8
|
refs/heads/master
| 2022-12-15T15:47:34.779838
| 2018-07-04T02:28:56
| 2018-07-04T02:28:56
| 127,992,302
| 30
| 4
|
MIT
| 2022-11-22T00:43:51
| 2018-04-04T01:56:26
|
Python
|
UTF-8
|
Python
| false
| false
| 938
|
py
|
# coding=utf-8
"""
This code was generated by
\ / _    _  _|   _  _
 | (_)\/(_)(_|\/| |(/_  v1.0.0
       /       /
"""

from twilio.base.version import Version
from twilio.rest.sync.v1.service import ServiceList


class V1(Version):

    def __init__(self, domain):
        """
        Initialize the V1 version of Sync

        :returns: V1 version of Sync
        :rtype: twilio.rest.sync.v1.V1.V1
        """
        super(V1, self).__init__(domain)
        self.version = 'v1'
        # Lazily created on first access of the `services` property.
        self._services = None

    @property
    def services(self):
        """
        :rtype: twilio.rest.sync.v1.service.ServiceList
        """
        # Build the list resource once and cache it for later accesses.
        if self._services is None:
            self._services = ServiceList(self)
        return self._services

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Sync.V1>'
|
[
"muhammadraza0047@gmail.com"
] |
muhammadraza0047@gmail.com
|
b02dbf261aa073b6ee44836616403995bc7d2945
|
590a3dab75527bb8742165e84d7108d056826351
|
/py_tutorials/py_tutorials/1_supervised/train_svm.py
|
0d87c8c10e65447b4213b9ff822672393d37ac32
|
[] |
no_license
|
onkarganjewar/ipam-tutorials
|
2cfa5055484531f2cc2fdd051ddf0ea771a08a37
|
a8451dd121d0a4029bc26c27ee28b45dfc6e51c0
|
refs/heads/master
| 2021-01-13T12:48:50.944781
| 2016-10-30T02:57:09
| 2016-10-30T02:57:09
| 72,319,739
| 0
| 0
| null | 2016-10-30T02:05:48
| 2016-10-30T02:05:47
| null |
UTF-8
|
Python
| false
| false
| 1,996
|
py
|
import logging
import sys
import time
import numpy as np
from numpy import arange, dot, maximum, ones, tanh, zeros
from numpy.random import randn
from skdata import mnist
from autodiff import fmin_sgd, fmin_l_bfgs_b
from utils import show_filters
def main():
    """Train a one-vs-all linear SVM on MNIST: SGD warm start, then L-BFGS.

    NOTE(review): Python 2 only (print statements, integer division).
    """
    # -- top-level parameters of this script
    dtype = 'float32'  # XXX
    n_examples = 50000
    online_batch_size = 1
    online_epochs = 2
    batch_epochs = 30
    lbfgs_m = 20

    # -- load and prepare the data set
    data_view = mnist.views.OfficialVectorClassification(x_dtype=dtype)
    n_classes = 10
    x = data_view.train.x[:n_examples]
    y = data_view.train.y[:n_examples]
    # One-vs-all targets: -1 everywhere, +1 at the true class column.
    y1 = -1 * ones((len(y), n_classes)).astype(dtype)
    y1[arange(len(y)), y] = 1

    # --initialize the SVM model
    w = zeros((x.shape[1], n_classes), dtype=dtype)
    b = zeros(n_classes, dtype=dtype)

    def svm(ww, bb, xx=x, yy=y1):
        # -- one vs. all linear SVM loss (mean hinge loss per class, summed)
        margin = yy * (dot(xx, ww) + bb)
        hinge = maximum(0, 1 - margin)
        cost = hinge.mean(axis=0).sum()
        return cost

    # -- stage-1 optimization by stochastic gradient descent
    print 'Starting SGD'
    n_batches = n_examples / online_batch_size  # Py2 integer division
    w, b = fmin_sgd(svm, (w, b),
                streams={
                    'xx': x.reshape((n_batches, online_batch_size, x.shape[1])),
                    'yy': y1.reshape((n_batches, online_batch_size, y1.shape[1]))},
                loops=online_epochs,
                stepsize=0.001,
                print_interval=10000,
                )

    print 'SGD complete, about to start L-BFGS'
    # Visualise the per-class weight vectors as 28x28 filter images.
    show_filters(w.T, (28, 28), (2, 5,))

    # -- stage-2 optimization by L-BFGS
    print 'Starting L-BFGS'
    w, b = fmin_l_bfgs_b(svm, (w, b),
                maxfun=batch_epochs,
                iprint=1,
                m=lbfgs_m)

    print 'L-BFGS complete'
    show_filters(w.T, (28, 28), (2, 5,))


if __name__ == '__main__':
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    sys.exit(main())
|
[
"james.bergstra@gmail.com"
] |
james.bergstra@gmail.com
|
9039e65c812006506a37289b4b5417f47fc945cf
|
ecf62aae48e02420cd99008f58c4725c6da56d22
|
/models/review.py
|
094c86885b823cd171680a2c1fc29a9485940188
|
[] |
no_license
|
ThibautBernard/AirBnB_clone
|
e3110415acd98b56134928eee0d2befb6bd68a25
|
d495dd85add4332880eacf00b338704c2799d3e5
|
refs/heads/main
| 2023-03-08T15:51:46.968249
| 2021-03-03T15:58:29
| 2021-03-03T15:58:29
| 337,568,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
#!/usr/bin/python3
"""Defines the Review model for the AirBnB clone."""
from models.base_model import BaseModel


class Review(BaseModel):
    """A review written by a user about a place.

    FIX: the original descriptive string sat between the import and the
    class as a no-op statement (neither a module nor a class docstring);
    it is now a proper docstring with the grammar corrected.
    """

    # id of the reviewed Place
    place_id = ""
    # id of the authoring User
    user_id = ""
    # free-form review text
    text = ""

    def __init__(self, *args, **kwargs):
        """Delegate all initialisation to BaseModel."""
        super().__init__(*args, **kwargs)
|
[
"thibautbernard@sfr.fr"
] |
thibautbernard@sfr.fr
|
db81a9b45a6e7f46cba1807fbdb51d5e43c86df0
|
65b4522c04c2be071c2d42095956fe950fe1cebe
|
/lib/viscojapan/plots/plot_L_curve.py
|
b89df84bae00664d50f081bdee6e2e51759c6520
|
[] |
no_license
|
geodesy/viscojapan
|
ac0cd93f7a2134cd2651623b94879dcc21c0c46a
|
03e70265b56eb5994e73bcb6066f0be338e42f27
|
refs/heads/master
| 2021-03-03T18:19:07.779601
| 2015-07-16T03:50:49
| 2015-07-16T03:50:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
import glob
import h5py
from pylab import loglog, xlabel, ylabel, grid, text
__all__=['plot_L_from_res_h5','plot_L']
def plot_L_from_res_h5(pathname, x_key, y_key, **kwargs):
    """Plot an L-curve on log-log axes from a glob of result HDF5 files.

    Reads the ``x_key`` and ``y_key`` datasets from every file matching
    ``pathname`` (processed in sorted order), plots y against x with
    ``loglog`` and returns the plot handle.  Extra kwargs are forwarded
    to ``loglog``.
    """
    flist = glob.glob(pathname)
    flist = sorted(flist)
    xs = []
    ys = []
    for f in flist:
        with h5py.File(f,'r') as fid:
            x = fid[x_key][...]
            y = fid[y_key][...]
        xs.append(x)
        ys.append(y)
    handle = loglog(xs,ys, **kwargs)
    xlabel(x_key)
    ylabel(y_key)
    grid('on')
    return handle

def plot_L(nres,nsol,alphas=None,lanos=None,
           label=None,color='blue'):
    '''
    Plot an L-curve (solution roughness vs. residual norm) on log-log axes.

    nres - residual norms array
    nsol - solution (roughness) norms array, same length as nres
    alphas - regularization parameters array
    lanos - array that indicates which alphas pairs are labeled.
        None means label every two.
    label - L-curve label
    '''
    assert len(nres)==len(nsol)
    if alphas is not None:
        assert len(nres)==len(alphas)
    # plot L-curve
    loglog(nres,nsol,'o',label=label,color=color)
    if alphas is not None:
        if lanos is None:
            # default: annotate every second point
            lanos = range(0,len(alphas),2)
        for ano in lanos:
            # annotation format: "<index>/<alpha value>"
            text(nres[ano],nsol[ano],'%d/%.2G'%(ano,alphas[ano]),color='red')
    xlabel('Residual Norm ($\log_{10}{||Gm-d||_2}$)')
    ylabel('Solution Roughness ($\log_{10}{||Lm||_2}$)')
    grid('on')
|
[
"zy31415@gmail.com"
] |
zy31415@gmail.com
|
e263b3bbee16c780f42e47780248423245375d51
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_frolicking.py
|
ef11ef8bcd650fd92541f71619da24e6b40dcd57
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
from xai.brain.wordbase.nouns._frolic import _FROLIC


# class header (auto-generated wordbase entry)
class _FROLICKING(_FROLIC, ):
    """Noun entry for 'frolicking'; behavior is inherited from _FROLIC."""
    def __init__(self,):
        _FROLIC.__init__(self)
        self.name = "FROLICKING"   # surface form
        self.specie = 'nouns'      # part-of-speech bucket
        self.basic = "frolic"      # lemma / base form
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
838d7b35de18b5ea779615da2b03c44eab3be0f5
|
1cbcf8660d3ea833b0a9aa3d36fe07839bc5cfc5
|
/apps/sources/reqparsers/enum.py
|
c1cbff7b7ef84aa38e0efda18fd5cdbe51f2f9ab
|
[] |
no_license
|
zhanghe06/migration_project
|
f77776969907740494281ac6d7485f35d4765115
|
0264b292873b211bfeca0d645cc41abc9efe883f
|
refs/heads/master
| 2022-12-12T10:55:43.475939
| 2019-09-29T09:19:13
| 2019-09-29T09:19:13
| 185,584,884
| 0
| 1
| null | 2022-12-08T05:04:58
| 2019-05-08T10:31:57
|
Python
|
UTF-8
|
Python
| false
| false
| 306
|
py
|
#!/usr/bin/env python
# encoding: utf-8

"""
@author: zhanghe
@software: PyCharm
@file: enum.py
@time: 2019-04-26 15:55
"""

from __future__ import unicode_literals

# Structure keys identifying the "enum" section in migration configs.
structure_key_item = 'enum'
structure_key_items = 'enums'

# Chinese display names ("enumeration type"); runtime values, left as-is.
structure_key_item_cn = '枚举类型'
structure_key_items_cn = '枚举类型'
|
[
"zhang_he06@163.com"
] |
zhang_he06@163.com
|
20429d1f27e52eeb5e975b6e89b687a9a43d7777
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/wookayin_tensorflow-talk-debugging/tensorflow-talk-debugging-master/codes/40-mnist-name.py
|
76816d57890463ede3dc3542f2f265757740f1bb
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
import tensorflow as tf
import tensorflow.contrib.layers as layers

from datetime import datetime

# MNIST input data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# NOTE(review): Python 2 print statements and integer division are used below.

def multilayer_perceptron(x):
    """Two-hidden-layer MLP over flattened 28x28 MNIST images; returns logits."""
    W_fc1 = tf.Variable(tf.random_normal([784, 256], mean=0, stddev=1))
    # ???????
    # NOTE(review): [0] * 256 creates an integer-typed variable, which does
    # not combine cleanly with the float32 matmul result — presumably the
    # deliberate bug this debugging example demonstrates; a float zeros
    # initializer would be the fix.  Confirm before changing.
    b_fc1 = tf.Variable([0] * 256)
    fc1 = tf.nn.xw_plus_b(x, W_fc1, b_fc1)
    fc2 = layers.fully_connected(fc1, 256, activation_fn=tf.nn.relu, scope='fc2')
    out = layers.fully_connected(fc2, 10, activation_fn=None, scope='out')
    return out

# build model, loss, and train op
x = tf.placeholder(tf.float32, [None, 784], name='placeholder_x')
y = tf.placeholder(tf.float32, [None, 10], name='placeholder_y')
pred = multilayer_perceptron(x)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

def train(session):
    """Run 10 epochs of minibatch Adam training, then print test accuracy."""
    batch_size = 200
    session.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(10):
        epoch_loss = 0.0
        batch_steps = mnist.train.num_examples / batch_size  # Py2 int division
        for i in range(batch_steps):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, c = session.run([train_op, loss],
                               feed_dict={x: batch_x, y: batch_y})
            epoch_loss += c / batch_steps
        print "[%s] Epoch %02d, Loss = %.6f" % (datetime.now(), epoch, epoch_loss)
    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})

def main():
    """Open a single-GPU session (memory growth enabled) and train."""
    with tf.Session(config=tf.ConfigProto(
            gpu_options=tf.GPUOptions(allow_growth=True),
            device_count={'GPU': 1})) as session:
        train(session)

if __name__ == '__main__':
    main()
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
e00f6197eedaeabfbe42d2c14ed4cc0528ae47e6
|
67e817ca139ca039bd9eee5b1b789e5510119e83
|
/Tree/[508]Most Frequent Subtree Sum.py
|
d1d33c1d3ec9ef186d0feb9017ac84a00713155f
|
[] |
no_license
|
dstch/my_leetcode
|
0dc41e7a2526c2d85b6b9b6602ac53f7a6ba9273
|
48a8c77e81cd49a75278551048028c492ec62994
|
refs/heads/master
| 2021-07-25T21:30:41.705258
| 2021-06-06T08:58:29
| 2021-06-06T08:58:29
| 164,360,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
#
# Given the root of a tree, you are asked to find the most frequent subtree sum.
# The subtree sum of a node is defined as the sum of all the node values formed b
# y the subtree rooted at that node (including the node itself). So what is the mo
# st frequent subtree sum value? If there is a tie, return all the values with the
# highest frequency in any order.
#
#
# Examples 1
# Input:
#
# 5
# / \
# 2 -3
#
# return [2, -3, 4], since all the values happen only once, return all of them i
# n any order.
#
#
# Examples 2
# Input:
#
# 5
# / \
# 2 -5
#
# return [2], since 2 happens twice, however -5 only occur once.
#
#
# Note:
# You may assume the sum of values in any subtree is in the range of 32-bit sign
# ed integer.
# Related Topics Hash Table Tree
# 👍 840 👎 143
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def findFrequentTreeSum(self, root: "TreeNode") -> "List[int]":
        """Return all subtree sums that occur with maximal frequency.

        Fixes over the original:
        - no longer overwrites every ``node.val`` while traversing
          (the input tree is left untouched);
        - removes the leftover debug ``print(dic)``;
        - uses string annotations so the module imports even without the
          (commented-out) ``TreeNode``/``List`` definitions.
        """
        counts = {}  # subtree sum -> occurrence count

        def subtree_sum(node):
            # Post-order: a node's sum is its value plus both child sums.
            if node is None:
                return 0
            total = node.val + subtree_sum(node.left) + subtree_sum(node.right)
            counts[total] = counts.get(total, 0) + 1
            return total

        subtree_sum(root)
        if not counts:
            # Empty tree: there are no subtree sums at all.
            return []
        best = max(counts.values())
        return [s for s, c in counts.items() if c == best]
# leetcode submit region end(Prohibit modification and deletion)
|
[
"dstch@163.com"
] |
dstch@163.com
|
7d7cf6f0f0807e1d13c773d7fd9c579e4981bccd
|
3bfaf1de118ecb3863b89737a935c32d7bcf9d28
|
/Leetcode/range-sum-query-immutable/source.py
|
d0628d3d2202ca61a51883566eace81843919629
|
[] |
no_license
|
ndjman7/Algorithm
|
13db942c6228c2d55e5c0cdf997db834cf617988
|
a4bd428e51a2dcebc81214564ac3fd967175f7ae
|
refs/heads/master
| 2021-06-21T12:41:42.006005
| 2020-12-25T07:47:52
| 2020-12-26T12:43:15
| 163,792,061
| 2
| 0
| null | 2019-09-03T04:35:15
| 2019-01-02T03:57:05
|
C++
|
UTF-8
|
Python
| false
| false
| 537
|
py
|
class NumArray(object):
    """Immutable range-sum queries backed by a prefix-sum table."""

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        # cache[k] holds the sum of nums[0:k]; cache[0] is the empty sum.
        running = 0
        prefix = [0]
        for value in nums:
            running += value
            prefix.append(running)
        self.cache = prefix

    def sumRange(self, i, j):
        """
        :type i: int
        :type j: int
        :rtype: int
        """
        # Inclusive range sum nums[i..j] as a difference of two prefixes.
        return self.cache[j + 1] - self.cache[i]
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
|
[
"ndjman7@gmail.com"
] |
ndjman7@gmail.com
|
6e7b2f202fd14008d5f076ad4fbac305307205ba
|
2d358ffb51f03cc64cc2da0f684b0928aebe139c
|
/test7/myapp/models.py
|
833b515eca90cae41e62460be8e577d84dc608ad
|
[] |
no_license
|
853695319/learningdjango
|
195ffabdbd3a5b6bc4386cbb678504c0d2cd0095
|
d2aac1117bb2ca31e4f247a9d206adcf3a9f39a2
|
refs/heads/master
| 2020-05-03T04:59:16.094900
| 2019-04-23T06:25:02
| 2019-04-23T06:25:02
| 178,437,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
from django.db import models
from django.contrib.auth.models import User


class Note(models.Model):
    """A titled, timestamped note body owned by a user."""

    # FIX: on_delete is required from Django 2.0 (and accepted as a kwarg
    # long before); CASCADE matches the pre-2.0 implicit default of
    # deleting a user's notes along with the user.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    pub_date = models.DateTimeField()
    title = models.CharField(max_length=200)
    body = models.TextField()

    def __unicode__(self):  # Python 2 display name (admin/shell)
        return self.title

    # Python 3 compatibility: same human-readable representation.
    __str__ = __unicode__
|
[
"853695319@qq.com"
] |
853695319@qq.com
|
50e2de4f73782ca8217362cece9dcdc7a601d9ce
|
e1891d21594e17431abfcbbc36f3b479cc3eb5d3
|
/blog/models.py
|
6bd8baaea251d83aa5a832c354cda7a972d1e99d
|
[] |
no_license
|
Ankrate/venezoproject
|
01978d72332cf642637333715f695f6c6d04d7a7
|
a26ccd270573420d29ba39fd37f2b9f572d010c3
|
refs/heads/master
| 2020-07-07T11:15:36.953266
| 2019-08-26T12:40:05
| 2019-08-26T12:40:05
| 202,558,469
| 0
| 0
| null | 2019-08-15T14:42:57
| 2019-08-15T14:42:57
| null |
UTF-8
|
Python
| false
| false
| 1,780
|
py
|
from django.db import models
import os, random
from django.db.models.signals import pre_save
from .utils import unique_slug_generator
def get_filename_ext(filepath):
base_name = os.path.basename(filepath)
name, ext = os.path.splitext(base_name)
return name, ext
def upload_image_path(instance, filename):
print (instance)
print (filename)
name, ext = get_filename_ext(filename)
new_filename = random.randint(1, 234982304)
final_filename = '{}{}'.format(new_filename, ext)
return f'blog/{new_filename}/{final_filename}'
class BlogCategories(models.Model):
title = models.CharField(max_length=2555)
slug = models.SlugField(blank=True, unique=True, max_length=255)
class Meta:
verbose_name_plural = 'Категории блога'
def __str__(self):
return self.title
class BlogModel(models.Model):
title = models.CharField(max_length=120)
slug = models.SlugField(blank=True, unique=True, max_length=255)
description = models.TextField()
short_desc = models.TextField(blank=True)
image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
category = models.ForeignKey(BlogCategories,related_name='category', on_delete=models.CASCADE, default=1)
publish = models.DateField(auto_now=True)
class Meta:
verbose_name_plural = 'Статьи'
def __str__(self):
return self.title
def blog_pre_save_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = unique_slug_generator(instance)
pre_save.connect(blog_pre_save_receiver, sender=BlogModel)
pre_save.connect(blog_pre_save_receiver, sender=BlogCategories)
|
[
"angara99@gmail.com"
] |
angara99@gmail.com
|
4fa9358de28f9b8aa414f45aa1e563eb13718b7a
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/124_1.py
|
1860506797a3534ef6306aab0ebc6ad7919b518a
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,000
|
py
|
Python | Intersection in Tuple Records Data
Sometimes, while working with data, we may have a problem in which we require
to find the matching records between two lists that we receive. This is a very
common problem and records usually occur as a tuple. Let’s discuss certain
ways in which this problem can be solved.
**Method #1 : Using list comprehension**
List comprehension can opt as method to perform this task in one line rather
than running a loop to find the common element. In this, we just iterate for
single list and check if any element occurs in other one.
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Intersection in Tuple Records Data
# Using list comprehension
# Initializing lists
test_list1 = [('gfg', 1), ('is', 2), ('best',
3)]
test_list2 = [('i', 3), ('love', 4), ('gfg', 1)]
# printing original lists
print("The original list 1 is : " + str(test_list1))
print("The original list 2 is : " + str(test_list2))
# Intersection in Tuple Records Data
# Using list comprehension
res = [ele1 for ele1 in test_list1
for ele2 in test_list2 if ele1 == ele2]
# printing result
print("The Intersection of data records is : " + str(res))
---
__
__
**Output :**
The original list 1 is : [('gfg', 1), ('is', 2), ('best', 3)]
The original list 2 is : [('i', 3), ('love', 4), ('gfg', 1)]
The Intersection of data records is : [('gfg', 1)]
**Method #2 : Usingset.intersection()**
This task can also be performed in smaller way using the generic set
intersection. In this, we first convert the list of records to a set and then
perform its intersection using intersection().
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Intersection in Tuple Records Data
# Using set.intersection()
# Initializing lists
test_list1 = [('gfg', 1), ('is', 2), ('best',
3)]
test_list2 = [('i', 3), ('love', 4), ('gfg', 1)]
# printing original lists
print("The original list 1 is : " + str(test_list1))
print("The original list 2 is : " + str(test_list2))
# Intersection in Tuple Records Data
# set.intersection()
res = list(set(test_list1).intersection(set(test_list2)))
# printing result
print("The Intersection of data records is : " + str(res))
---
__
__
**Output :**
The original list 1 is : [('gfg', 1), ('is', 2), ('best', 3)]
The original list 2 is : [('i', 3), ('love', 4), ('gfg', 1)]
The Intersection of data records is : [('gfg', 1)]
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
a767ea85226a3a33df945842b2bedd5921f415aa
|
a537ac145d19a1a6cfb1ee3556144292df4ebe29
|
/django_libs/tests/test_app/models.py
|
0859a44e00cd78b6ad4899f9cf438e081b81b6e3
|
[
"MIT"
] |
permissive
|
papasax/django-libs
|
80e0d122ec2c41ebbafddb11ffc69bd2794efeec
|
734c7b3506d8aab78f91dae10eda4c150be18240
|
refs/heads/master
| 2021-01-09T09:01:01.400301
| 2014-12-04T21:12:09
| 2014-12-04T21:12:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
"""Models for the ``test_app`` app."""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from simple_translation.translation_pool import translation_pool
from ...models_mixins import (
SimpleTranslationMixin,
SimpleTranslationPublishedManager,
)
class DummyProfile(SimpleTranslationMixin, models.Model):
"""Just a dummy profile model for testing purposes."""
user = models.ForeignKey('auth.User')
dummy_field = models.CharField(
verbose_name=_('Dummy Field'),
max_length=128,
)
objects = SimpleTranslationPublishedManager()
class DummyProfileTranslation(models.Model):
"""Just a translation of the dummy profile."""
dummy_translation = models.CharField(max_length=128)
is_published = models.BooleanField(default=True)
language = models.CharField(max_length=8, default='en')
dummyprofile = models.ForeignKey(DummyProfile)
translation_pool.register_translation(DummyProfile, DummyProfileTranslation)
|
[
"daniel.kaufhold@bitmazk.com"
] |
daniel.kaufhold@bitmazk.com
|
95495b389e7e95436f048fe3e1e335059fb2bd5d
|
748c4ba7058336eb2d09b413066d21582e26d71b
|
/course_catalog/migrations/0041_rename_course_run.py
|
64d107c8459b49ba3d72ecb2957b228b3bd05eb5
|
[
"BSD-3-Clause"
] |
permissive
|
mitodl/open-discussions
|
6dbb8ae2843263889634849ddd9096f74536b78e
|
ba7442482da97d463302658c0aac989567ee1241
|
refs/heads/master
| 2023-08-10T02:54:45.706067
| 2023-08-01T17:05:36
| 2023-08-01T17:05:36
| 93,760,926
| 13
| 3
|
BSD-3-Clause
| 2023-08-01T17:05:40
| 2017-06-08T14:46:35
|
Python
|
UTF-8
|
Python
| false
| false
| 380
|
py
|
# Generated by Django 2.1.11 on 2019-10-02 17:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
("course_catalog", "0040_add_program_published"),
]
operations = [
migrations.RenameModel(old_name="CourseRun", new_name="LearningResourceRun")
]
|
[
"noreply@github.com"
] |
mitodl.noreply@github.com
|
4c6ed8f33aa431abae76a14dd95bb5c435bb4a2f
|
39385e706c34202539ee8ee1089ebc4faa7e15c5
|
/inference_server/inference_server_pb2_grpc.py
|
9325a8429cd3ccb4e86f0e55f8a3001b70159127
|
[] |
no_license
|
paulhendricks/inference-server
|
f7845d8aeab13f95dd1ce069c6740fc80af6ca87
|
bdf6ccc0e2559b2fef8ed8a02cb0b6cfbbfaba63
|
refs/heads/master
| 2020-03-17T22:22:12.580704
| 2018-05-18T20:54:00
| 2018-05-18T20:54:00
| 134,000,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import inference_server_pb2 as inference__server__pb2
class InferenceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Compute = channel.unary_unary(
'/inference_server.Inference/Compute',
request_serializer=inference__server__pb2.Input.SerializeToString,
response_deserializer=inference__server__pb2.Output.FromString,
)
class InferenceServicer(object):
# missing associated documentation comment in .proto file
pass
def Compute(self, request, context):
"""Sends a greeting
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_InferenceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Compute': grpc.unary_unary_rpc_method_handler(
servicer.Compute,
request_deserializer=inference__server__pb2.Input.FromString,
response_serializer=inference__server__pb2.Output.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'inference_server.Inference', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
[
"paul.hendricks.2013@owu.edu"
] |
paul.hendricks.2013@owu.edu
|
d25ee342daa07ce418636d6017598ceccbc395a2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03086/s109673720.py
|
9c53994051ea917600e7062df13c6995566eab3f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
s=input()
n=len(s)
res=0
for i in range(n):
if s[i]=='A' or s[i]=='C' or s[i]=='G' or s[i]=='T':
ans=1
j=i+1
while j<n:
if s[j]=='A' or s[j]=='C' or s[j]=='G' or s[j]=='T':
ans+=1
j+=1
else:
break
else:
continue
res=max(res,ans)
print(res)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
57447872fefc81ffb2c0125733fb53f138cc9502
|
d4e9fd6dd51d29ad374b460a2cfbd467502ede7d
|
/ros2param/ros2param/command/param.py
|
1aa9026af3542f670f8f3224a40aaf22089810ad
|
[
"Apache-2.0"
] |
permissive
|
ros2/ros2cli
|
3f7b93ff44d18b2292a50d3b6ff119494142328b
|
351ef3c7442f49013d84084dea23fe399517690f
|
refs/heads/rolling
| 2023-08-07T03:53:23.635067
| 2023-08-03T19:50:28
| 2023-08-03T19:50:28
| 93,568,427
| 142
| 157
|
Apache-2.0
| 2023-09-14T07:36:46
| 2017-06-06T22:13:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2cli.command import add_subparsers_on_demand
from ros2cli.command import CommandExtension
class ParamCommand(CommandExtension):
"""Various param related sub-commands."""
def add_arguments(self, parser, cli_name, *, argv=None):
self._subparser = parser
# add arguments and sub-commands of verbs
add_subparsers_on_demand(
parser, cli_name, '_verb', 'ros2param.verb', required=False,
argv=argv)
def main(self, *, parser, args):
if not hasattr(args, '_verb'):
# in case no verb was passed
self._subparser.print_help()
return 0
extension = getattr(args, '_verb')
# call the verb's main method
return extension.main(args=args)
|
[
"noreply@github.com"
] |
ros2.noreply@github.com
|
ff786bb5396ad92d10ed243592117497f6cf1e1c
|
c071eb46184635818e8349ce9c2a78d6c6e460fc
|
/system/python_stubs/-745935208/_signal.py
|
46398c329673fba8df3fa9b203f853e8f6dd22a6
|
[] |
no_license
|
sidbmw/PyCharm-Settings
|
a71bc594c83829a1522e215155686381b8ac5c6e
|
083f9fe945ee5358346e5d86b17130d521d1b954
|
refs/heads/master
| 2020-04-05T14:24:03.216082
| 2018-12-28T02:29:29
| 2018-12-28T02:29:29
| 156,927,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,547
|
py
|
# encoding: utf-8
# module _signal
# from (built-in)
# by generator 1.146
"""
This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame.
"""
# no imports
# Variables with simple values
CTRL_BREAK_EVENT = 1
CTRL_C_EVENT = 0
NSIG = 23
SIGABRT = 22
SIGBREAK = 21
SIGFPE = 8
SIGILL = 4
SIGINT = 2
SIGSEGV = 11
SIGTERM = 15
SIG_DFL = 0
SIG_IGN = 1
# functions
def default_int_handler(*more): # real signature unknown; restored from __doc__
"""
default_int_handler(...)
The default handler for SIGINT installed by Python.
It raises KeyboardInterrupt.
"""
pass
def getsignal(*args, **kwargs): # real signature unknown
"""
Return the current action for the given signal.
The return value can be:
SIG_IGN -- if the signal is being ignored
SIG_DFL -- if the default action for the signal is in effect
None -- if an unknown handler is in effect
anything else -- the callable Python object used as a handler
"""
pass
def set_wakeup_fd(fd, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
"""
set_wakeup_fd(fd, *, warn_on_full_buffer=True) -> fd
Sets the fd to be written to (with the signal number) when a signal
comes in. A library can use this to wakeup select or poll.
The previous fd or -1 is returned.
The fd must be non-blocking.
"""
pass
def signal(): # real signature unknown; restored from __doc__
"""
Set the action for the given signal.
The action can be SIG_DFL, SIG_IGN, or a callable Python object.
The previous action is returned. See getsignal() for possible return values.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame.
"""
pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
# variables with complex values
__spec__ = None # (!) real value is ''
|
[
"siddharthnatamai@gmail.com"
] |
siddharthnatamai@gmail.com
|
37a1db073986ca0971ba4944632aafaec7a0d5ff
|
f57529f95a0fd10676f46063fdcd273fb5a81427
|
/boj/05001-06000/5063.py
|
6a2433d4269d1553b50fb5ef00f2d4236113033c
|
[] |
no_license
|
hoyasmh/PS
|
a9b83b0044e483586590c9b7c6bf8a77236b67e7
|
6bbaa0ce77b2726f6af782af049d73720820f761
|
refs/heads/master
| 2023-04-23T10:43:27.349785
| 2021-05-17T13:43:53
| 2021-05-17T13:43:53
| 311,239,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
n=int(input())
for i in range(n):
r,e,c=map(int, input().split())
a=e-c
if a>r:
print('advertise')
elif a==r:
print('does not matter')
else:
print('do not advertise')
|
[
"hoyasmh@gmail.com"
] |
hoyasmh@gmail.com
|
600a02249547f25a443efccb2f227a4daf743e72
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03626/s691421903.py
|
12b73d2e8e8b33454998cc794473feec516fdf85
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
# https://atcoder.jp/contests/abc071/tasks/arc081_b
# 横ドミノと縦ドミノの場合わけを考える
# 横ドミノ→横ドミノの場合 : いままでの通り*3通り
# 横ドミノ→縦ドミノ : いままで*1通り ok
# 縦ドミノ→横ドミノ : いままで*2通り ok
# 縦ドミノ→縦ドミノ : いままで*2通り ok
MOD = 10**9+7
N = int(input())
S1 = input()
S2 = input()
pre1 = S1[0]
pre2 = S2[0]
if pre1 != pre2:
ans = 6
else:
ans = 3
for s1, s2 in zip(S1[1:], S2[1:]):
if pre1 == s1 and pre2 == s2:
pass
elif pre1 != pre2 and s1 != s2:
# 横→横
ans *= 3
elif pre1 != pre2 and s1 == s2:
# 横→縦
pass
elif pre1 == pre2 and s1 != s2:
# 縦→横
ans *= 2
elif pre1 == pre2 and s1 == s2:
# 縦→縦
ans *= 2
if ans >= MOD:
ans %= MOD
pre1, pre2 = s1, s2
print(ans % MOD)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
511cd08228a358eb20555f4016aa0f102b17db57
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2/HazelParr/pancake_revenge.py
|
0d9c90b6f19e36f7eff6120670502de3018d5d1b
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 381
|
py
|
T = int(input())
for case in range(T):
x = input()
count = 0
while "-" in x:
new_string = ""
unchanged = ""
y = x.rfind("-")
fragment = x[:y+1]
unchanged = x[y+1:]
for i in fragment:
if i == "-":
new_string += "+"
else:
new_string += "-"
count += 1
x = new_string + unchanged
print("Case #{}: {}".format(case+1, count))
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
8cbdf7ab2ec66f8f1e82cb4824f60c32e935e855
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/120_design_patterns/001_SOLID_design_principles/_exercises/_exercises/isp/ISP/Book/IBook.py
|
01dad5411393ea75f180ee3e25c50e409867ecae
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 220
|
py
|
# ___ ISP.IL.. ______ IL..
#
#
# c_ IBookIL..
#
# ?p..
# ___ pages -> ?
# r_ N...
#
# ??.?
# ___ pages num_pages ?
# r_ N...
#
# ?p..
# ___ title __ ?
# r_ N...
#
# ??.?
# ___ title titleName ?
# r_ N...
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
f6b1a79848c4fc34a82d807b82b44e98bececaf5
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2320/60716/267286.py
|
b7837b56224bdc4c298a9aada04e063f0c2c204d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
strs = input()
k = int(input())
if k==1:
lists = list()
lists.append(strs)
for i in range(len(strs)):
strlist = list(strs)
k = strlist.pop(0)
strlist.append(k)
strs = ''.join(strlist)
lists.append(strs)
lists.sort()
print(lists[0])
else:
strlist = list(strs)
strlist.sort()
strs = ''.join(strlist)
print(strs)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
391f37b3390c8466064f8f93b5a3f7eba2852de0
|
cdd79cef15bdf6a0b9098e27028bbe38607bc288
|
/AOJ/Vol02/0241_Quaternion Multiplication.py
|
a94a35629d177665c61f8dcc76a162305205fbfc
|
[] |
no_license
|
nord2sudjp/atcoder
|
ee35a3eb35717485dc62627172de24c9dac102fb
|
6b1cc5102a615492cc7ff8a33813bbb954641782
|
refs/heads/master
| 2023-08-25T11:27:14.205593
| 2021-09-27T05:43:04
| 2021-09-27T05:43:04
| 302,855,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
# Volume2-0241 Quaternion Multiplication
#l1=[1,2,3,4]
#l2=[7,6,7,8]
while True:
N=int(input())
if N==0:break
for _ in range(N):
*t,=list(map(int,input().split()))
l1=t[0:4]
l2=t[4:]
s=[x*y for x in l1 for y in l2]
ans=[0]*4 #1,i,j,k
ans[0]+=s[0]
ans[1]+=s[1]+s[4]
ans[2]+=s[2]+s[8]
ans[3]+=s[3]+s[12]
ans[0]+=-1*s[5]-1*s[10]-1*s[15]
ans[1]+=s[11]-1*s[14]
ans[2]+=-1*s[7]+s[13]
ans[3]+=s[6]-1*s[9]
print(' '.join(map(str,ans)))
|
[
"nord2sudjp@gmail.com"
] |
nord2sudjp@gmail.com
|
e6c627876e58af236e4193f1e7495a258b610ed7
|
2f38331b8a0bc8867859d853d352f9f5cc9cd1b5
|
/day07/code/value.py
|
ae8c35d75226626552aef586887bad597c2a649a
|
[] |
no_license
|
Chenhuaqi6/python_net
|
aaf66a15a2f438e1f3fc67f338abd15e2bbfd6a3
|
56efd53bbaa1212a86c65e9cd3b29d2f5f30b752
|
refs/heads/master
| 2020-04-09T08:30:20.817513
| 2019-01-19T06:51:41
| 2019-01-19T06:51:41
| 160,196,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
from multiprocessing import Process,Value
import time
import random
#创建共享内存
money = Value("i",5000)
#挣钱
def boy():
for i in range(30):
time.sleep(0.2)
money.value += random.randint(1,1500)
def girl():
for i in range(30):
time.sleep(0.15)
money.value -= random.randint(100,1200)
b = Process(target = boy)
g = Process(target = girl)
b.start()
g.start()
b.join()
g.join()
print("一个月余额:",money.value)
|
[
"466940759@qq.com"
] |
466940759@qq.com
|
4621879f40c0fc877a782f9a6e4748cf4c1db4cc
|
077a17b286bdd6c427c325f196eb6e16b30c257e
|
/00_BofVar-unit-tests/05_32/remenissions-work/exploit-BofFunc-28.py
|
a140ada92f16b731fc6c68505ca91b55c8fc744a
|
[] |
no_license
|
KurSh/remenissions_test
|
626daf6e923459b44b82521aa4cb944aad0dbced
|
9dec8085b62a446f7562adfeccf70f8bfcdbb738
|
refs/heads/master
| 2023-07-08T20:25:04.823318
| 2020-10-05T06:45:16
| 2020-10-05T06:45:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
from pwn import *
import time
import sys
import signal
import sf
target = process("./chall-test_BofVar-05-x86")
gdb.attach(target, execute="verify_exploit")
bof_payload = sf.BufferOverflow(arch=32)
bof_payload.set_input_start(0x66)
bof_payload.add_int32(0x34, 0xdeab)
bof_payload.add_int32(0x30, 0xbeef)
bof_payload.add_int32(0x2c, 0xfacadf)
bof_payload.add_int32(0x28, 0xbef0)
bof_payload.add_int32(0x24, 0xfacade)
bof_payload.add_int32(0x20, 0xdeac)
bof_payload.set_ret(0x8048456)
payload = bof_payload.generate_payload()
target.sendline(payload)
# Exploit Verification starts here 15935728
def handler(signum, frame):
raise Exception("Timed out")
def check_verification_done():
while True:
if os.path.exists("pwned") or os.path.exists("rip"):
sys.exit(0)
signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
try:
while True:
check_verification_done()
except Exception:
print("Exploit timed out")
|
[
"ryancmeinke@gmail.com"
] |
ryancmeinke@gmail.com
|
4ce830497d5d62e49c120012bdfb1463222c1714
|
0b406d2c041c76d9ef8789539e0e3af9a50e3613
|
/Extract_refactor/Ocr/lib/BusinessImagen.py
|
1efc3e6dc179167f58ae293994b0d864ef57c812
|
[] |
no_license
|
aise17/ExtractPdf
|
221b47c5f0e75a823284b4f52981917962042592
|
7e1bfbc759cb7473d727574e5df78eaaac9fa8a4
|
refs/heads/master
| 2022-02-26T06:39:14.265795
| 2019-06-04T15:01:39
| 2019-06-04T15:01:39
| 184,154,301
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
import glob
# de aki cojeremos la calse Image de PIL que es necesaria para pasarsela a tresseract
from PIL import Image
# con treseract extaeremos eltexto de la imagen png pasada por escala de grises
import pytesseract
# se usea OpenCv para aplicar escala de grises sobre imagen
import cv2
# usamos sistema para crear una imagen/archivo temporal y eliminarla por su PID
import os
from Ocr.Conf.Config import configuracion
from Extract_refactor.settings import MEDIA_URL , IMAGENES_PATH, JPG_PATH
class BusinessImagen():
def configurarEscalaDeGrisesDefecto(self,image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return gray
def configurarEscalaDeGrisesBlur(self, image):
gray = cv2.medianBlur(self.configurarEscalaDeGrisesDefecto(image), 3)
return gray
def configurarEscalaDeGrisesThresh(self, image):
gray = cv2.threshold(self.configurarEscalaDeGrisesDefecto(image), 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
return gray
def configuracionEscalaDeColoresThresBinary(self, image):
gray = cv2.adaptiveThreshold(self.configurarEscalaDeGrisesDefecto(image), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, 11, 2)
return gray
def contarImagenesGeneradas(self):
"""
cuenta el numero de archivos acabados en jpg que existen en la carpeta output
:return: numero de imagenes
"""
number_of_images = len(glob.glob( JPG_PATH + configuracion['rutas_de_todas_imagenes']))
print(" [+] NUMERO DE PAGINAS -> " + number_of_images.__str__())
return number_of_images
def cargaImagen (self, count):
"""
carga las imagenes generadas aprtir del pdf
:param count: para iterar entreimagenessi son mas de una
:return: debulve una imagen tipo OpenCV
"""
if os.path.exists(JPG_PATH + 'image_name.jpg'):
image = cv2.imread(JPG_PATH + 'image_name.jpg')
else:
image = cv2.imread(JPG_PATH + 'image_name-' + count.__str__() + '.jpg')
return image
def aplicarEcalaDeGrises(self, gray):
"""
escribimos la imagen procesada(en escala de grises) en el disco como una imagen/fichero temporal,
y sobre este aplicamos openCV
:param gray:
:return: ruta filename temporal creado
"""
filename = "{}.jpg".format(os.getpid())
cv2.imwrite(filename, gray)
#cv2.imshow('output', gray)
#cv2.waitKey(0)
return filename
def aplicarORC(self, filename):
"""
cargamos la imagen con la variable tipo Image de PIL/Pillow,
y se aplica el ORC
:param filename: ruta de imagen temporal
:return: str texto extraido de imagen con tresseract-orc
"""
text = pytesseract.image_to_string(Image.open(filename))
# y eliminamos la imagen temporal
os.remove(filename)
return text
def borrarImagenesCreadas(self):
for imagen in glob.glob(JPG_PATH +configuracion['rutas_de_todas_imagenes']):
os.remove(imagen)
|
[
"sergio.martinez-g@hotmail.com"
] |
sergio.martinez-g@hotmail.com
|
bbefdb7f7b0c7af6bc87b4dd1079b37651970d18
|
8fc653ed827dc185cc92196826c94e01c0a532c4
|
/setup.py
|
90e957663dd75dd44cbb3931a9637eb2385ae74f
|
[
"MIT"
] |
permissive
|
Deepakdubey90/prometheus_flask_exporter
|
825dd1d1b8f4d81c7c78d9b9d2107b17f673748d
|
ae613e94573a28c72ef19dcd1372cc52fa07b5a7
|
refs/heads/master
| 2020-04-08T19:36:43.364482
| 2018-11-13T11:21:35
| 2018-11-13T11:21:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
from setuptools import setup
with open('README.md') as readme:
long_description = readme.read()
setup(
name='prometheus_flask_exporter',
packages=['prometheus_flask_exporter'],
version='0.4.0',
description='Prometheus metrics exporter for Flask',
long_description=long_description,
long_description_content_type='text/markdown',
license='MIT',
author='Viktor Adam',
author_email='rycus86@gmail.com',
url='https://github.com/rycus86/prometheus_flask_exporter',
download_url='https://github.com/rycus86/prometheus_flask_exporter/archive/0.4.0.tar.gz',
keywords=['prometheus', 'flask', 'monitoring', 'exporter'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=['prometheus_client', 'flask'],
)
|
[
"rycus86@gmail.com"
] |
rycus86@gmail.com
|
e26e6b3fb181566bc0a2c8b7ecbf18f53dc42fef
|
e27333261b8e579564016c71d2061cc33972a8b8
|
/development_codes/Backend/.history/BERTimplementation_20210810215300.py
|
e2cbc16e68d4af0fc027546c697ad4f250821dcd
|
[] |
no_license
|
Dustyik/NewsTweet_InformationRetrieval
|
882e63dd20bc9101cbf48afa6c3302febf1989b1
|
d9a6d92b51c288f5bcd21ea1cc54772910fa58f7
|
refs/heads/master
| 2023-07-01T09:12:53.215563
| 2021-08-12T08:28:33
| 2021-08-12T08:28:33
| 382,780,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,561
|
py
|
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from tfidfImplementation import *
from utils import QueryParsers
#First segment involves retrieving K documents with tf-idf
#Second segment involves reranking them with a BERT encoder
K = 100
#BERT_MODEL = 'bert-base-nli-mean-tokens'
BERT_MODEL = "paraphrase-multilingual-mpnet-base-v2"
class BERTmodel:
    """Two-stage tweet retrieval: tf-idf narrows to K candidates,
    then a BERT encoder re-ranks them by cosine similarity."""

    def __init__(self, tweets_data):
        self.tweets_data = tweets_data
        self.cosineSimilarity = CosineSimilarity(tweets_data, return_size=K)
        self.BERT_model = SentenceTransformer(BERT_MODEL)

    def tfidf_retrieve_K_tweets(self, article_id, article_title):
        """Stage 1: top-K tweets for the article title by tf-idf cosine similarity."""
        return self.cosineSimilarity.query(query_id=article_id,
                                           query_text=article_title)

    def return_BERT_query(self, article_id, article_title):
        """Stage 2: re-rank the tf-idf candidates by BERT embedding similarity.

        Returns the candidate frame sorted by descending 'BERT_similarity',
        with the per-tweet embeddings kept in 'vector_embedding'.
        """
        candidates = self.tfidf_retrieve_K_tweets(article_id, article_title)
        stemmed_query = " ".join(QueryParsers(article_title).query)
        # Encode the query once; reshape to a 1-row matrix for cosine_similarity.
        query_matrix = np.array(self.BERT_model.encode(stemmed_query)).reshape(1, -1)

        def embed_tweet(row):
            return self.BERT_model.encode(row.clean_text)

        def score_against_query(row):
            tweet_matrix = np.array(row.vector_embedding).reshape(1, -1)
            return cosine_similarity(query_matrix, tweet_matrix).item()

        candidates['vector_embedding'] = candidates.apply(embed_tweet, axis=1)
        candidates["BERT_similarity"] = candidates.apply(score_against_query, axis=1)
        candidates.sort_values(by='BERT_similarity', ascending=False, inplace=True)
        return candidates
|
[
"chiayik_tan@mymail.sutd.edu.sg"
] |
chiayik_tan@mymail.sutd.edu.sg
|
fc2adf8051fdd9f97147f4dc02075974b54ab7ac
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03265/s414771268.py
|
b56306c65e375123788a4b9e6db496b3f1d196d4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
def remaining_square_vertices(x1, y1, x2, y2):
    """Given two adjacent square vertices (x1, y1) -> (x2, y2), listed
    counter-clockwise, return the other two vertices (x3, y3, x4, y4).

    Rotating the edge vector 90 degrees counter-clockwise gives (-dy, dx);
    walking that from each given vertex yields the remaining corners.
    """
    dx = x2 - x1
    dy = y2 - y1
    x3 = x2 - dy
    y3 = y2 + dx
    x4 = x3 - dx
    y4 = y3 - dy
    return x3, y3, x4, y4


# Guarded so the module can be imported (e.g. for testing) without
# blocking on stdin; running as a script behaves exactly as before.
if __name__ == '__main__':
    x1, y1, x2, y2 = map(int, input().split())
    x3, y3, x4, y4 = remaining_square_vertices(x1, y1, x2, y2)
    print(f'{x3} {y3} {x4} {y4}')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a22223b26b8737255728236d842d299cecf4eb12
|
d91a0186cec0452a8eb54fd6fabe0ef9e75cd738
|
/chapter_8/exercise_8.16/build_prof_module.py
|
301a4e69f18d34e27417fae0a4179c61e1060dee
|
[] |
no_license
|
MaximZolotukhin/erik_metiz
|
31a6f5146b8bb58b8f04a6b9635b36a67830e52a
|
8afde60aa2bddd6858a5f7a7189169a82bde4322
|
refs/heads/main
| 2023-05-03T07:39:06.731413
| 2021-05-30T19:04:31
| 2021-05-30T19:04:31
| 361,544,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
# Accepting an unlimited number of keyword arguments
def build_profile(firs_name, last_name, **user_info):
    """Build a dictionary describing a user.

    Arbitrary extra keyword arguments are kept as-is; the first and last
    names are stored under the 'firs_name' and 'last_name' keys.
    """
    profile = dict(user_info)
    profile['firs_name'] = firs_name
    profile['last_name'] = last_name
    return profile
|
[
"ScorpionEvil777@gmail.com"
] |
ScorpionEvil777@gmail.com
|
407d88f8bd05212b374fa5dd67bf6d0bcceb9d30
|
4c5d113b19bf8d55d2d94fe7dc08fd90e0152174
|
/thor/constants.py
|
1787df00d88af014488203ee7972eb248d049a43
|
[
"BSD-3-Clause"
] |
permissive
|
swipswaps/thor
|
f4b2b956fbd71c3fa4a84d457ff67f158d9e9c21
|
d3d1dcbe86f67a62c90b4cde3fc577e414825cf2
|
refs/heads/master
| 2023-04-05T11:48:31.884619
| 2021-02-12T19:38:23
| 2021-02-12T19:38:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
import numpy as np
class Constants:
    """Astrodynamic constants expressed in AU / solar-mass / day units.

    Values are taken from the JPL DE431/DE430 ephemerides where cited
    inline; derived members (C, OBLIQUITY, the transforms) are computed
    from the primary constants above them, so declaration order matters.
    """
    # km to au
    KM_P_AU = 149597870.700
    # seconds to days
    S_P_DAY = 86400.0
    # Speed of light: AU per day (173.14463267424034) (299792.458 km/s -- DE431/DE430)
    C = 299792.458 / KM_P_AU * S_P_DAY
    # Gravitational constant: AU**3 / M_sun / d**2 (0.295912208285591100E-3 -- DE431/DE430)
    G = 0.295912208285591100E-3
    # Solar Mass: M_sun (1.0)
    M_SUN = 1.0
    # Earth Equatorial Radius: km (6378.1363 km -- DE431/DE430)
    R_EARTH = 6378.1363 / KM_P_AU
    # Mean Obliquity at J2000: radians (84381.448 arcseconds -- DE431/DE430)
    OBLIQUITY = 84381.448 * np.pi / (180.0 * 3600.0)
    # Transformation matrix from Equatorial J2000 to Ecliptic J2000
    # (rotation about the x-axis by the mean obliquity).
    TRANSFORM_EQ2EC = np.array([
        [1, 0, 0],
        [0, np.cos(OBLIQUITY), np.sin(OBLIQUITY)],
        [0, -np.sin(OBLIQUITY), np.cos(OBLIQUITY)
    ]])
    # Transformation matrix from Ecliptic J2000 to Equatorial J2000
    # (inverse of the above; for a rotation matrix that is the transpose).
    TRANSFORM_EC2EQ = TRANSFORM_EQ2EC.T
|
[
"moeyensj@gmail.com"
] |
moeyensj@gmail.com
|
2dd3b4102b27ed2302263574898acc47681bba6c
|
43eb7f8581a8dbfa1298b4e6d84fc7b7a552e335
|
/python/kserve/test/test_v1beta1_explainer_config.py
|
462733a4fe7179cd230f7ac00ba92e086e952e08
|
[
"Apache-2.0"
] |
permissive
|
Suresh-Nakkeran/kserve
|
c2d114f7258a70b4c8ddeb8ee8c584d4eee0f81b
|
d3910e0fc6af4bf73156a53bd912d6e4acc87533
|
refs/heads/master
| 2023-07-29T00:17:28.900100
| 2021-09-11T08:04:54
| 2021-09-11T08:04:54
| 406,243,335
| 0
| 0
|
Apache-2.0
| 2021-09-14T05:59:05
| 2021-09-14T05:59:04
| null |
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KServe
Python SDK for KServe # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kserve
from kserve.models.v1beta1_explainer_config import V1beta1ExplainerConfig # noqa: E501
from kserve.rest import ApiException
# NOTE: auto-generated by openapi-generator; manual edits will be lost on
# regeneration, so only comments are added here.
class TestV1beta1ExplainerConfig(unittest.TestCase):
    """V1beta1ExplainerConfig unit test stubs"""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Test V1beta1ExplainerConfig
        include_option is a boolean, when False only required
        params are included, when True both required and
        optional params are included """
        # model = kserve.models.v1beta1_explainer_config.V1beta1ExplainerConfig()  # noqa: E501
        if include_optional :
            return V1beta1ExplainerConfig(
                default_image_version = '0',
                image = '0'
            )
        else :
            return V1beta1ExplainerConfig(
                default_image_version = '0',
                image = '0',
            )
    def testV1beta1ExplainerConfig(self):
        """Test V1beta1ExplainerConfig"""
        # Smoke test: construction must succeed for both variants.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
Suresh-Nakkeran.noreply@github.com
|
6331b3fa3fff6a07d5467b20340d9d1d30e4fe9b
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/atomic/unsignedByte/Schema+Instance/NISTXML-SV-IV-atomic-unsignedByte-minExclusive-4-2.py
|
ad0db34b2b85a1754a789abff5f0a8024ff7d358
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 302
|
py
|
from output.models.nist_data.atomic.unsigned_byte.schema_instance.nistschema_sv_iv_atomic_unsigned_byte_min_exclusive_4_xsd.nistschema_sv_iv_atomic_unsigned_byte_min_exclusive_4 import NistschemaSvIvAtomicUnsignedByteMinExclusive4
# Sample instance for the NIST unsignedByte minExclusive facet test.
# 218 is presumably above the schema's minExclusive bound (schema not
# visible here -- confirm against the referenced XSD).
obj = NistschemaSvIvAtomicUnsignedByteMinExclusive4(
    value=218
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
7a92b6df84eaf024ce5f084a8fbb89c734db180b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_229/ch74_2020_04_13_03_09_30_010178.py
|
593012b482da142f910dee4af2f450b410ca8283
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
def conta_bigramas(string):
    """Count occurrences of each bigram (pair of adjacent characters).

    Returns a dict mapping every two-character substring of `string`
    to the number of times it occurs. The original looped over
    `string and x in string`, which raises NameError (`x` undefined);
    this iterates over adjacent index pairs instead.
    """
    dic = dict()
    # Each index i starts one bigram: string[i] followed by string[i + 1].
    for i in range(len(string) - 1):
        bigrama = string[i:i + 2]
        if bigrama in dic:
            dic[bigrama] += 1
        else:
            dic[bigrama] = 1
    return dic
print(conta_bigramas("banana nanica"))
|
[
"you@example.com"
] |
you@example.com
|
424a7ef7d48763ef6c952dd34adea36b3238cc13
|
23f3349e8b50f0cb3e461bbd65c1ea8dec792d0b
|
/2_semestr/encryption_1.py
|
beb9439133bbf54535c99776ead5297b83c186b0
|
[] |
no_license
|
JacobLutin/Python_bmstu
|
d17866dbab0e74f0f9d600c4dbd9d53eb5c5b7be
|
66fd8679de7556978b9cd1e9fd8646a8d7d6daa8
|
refs/heads/master
| 2020-05-29T14:40:09.310602
| 2017-03-27T05:18:58
| 2017-03-27T05:18:58
| 64,742,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,367
|
py
|
def rotate(Tb, N):
    """Rotate the N x N matrix Tb 90 degrees clockwise, in place, and return it.

    Bug fix: the original indexed the module-level global `n` instead of
    the parameter `N`, so it only worked (by accident) when called with
    the global grid size and raised NameError otherwise. All indices now
    use N.
    """
    # Walk the concentric rings; each 4-cycle of cells is permuted once.
    for i in range(0, N // 2):
        for j in range(i, N - 1 - i):
            tmp = Tb[i][j]
            Tb[i][j] = Tb[N - j - 1][i]
            Tb[N - j - 1][i] = Tb[N - i - 1][N - j - 1]
            Tb[N - i - 1][N - j - 1] = Tb[j][N - i - 1]
            Tb[j][N - i - 1] = tmp
    return Tb
def PrintTable(Tb, N):
    """Print the first N rows/columns of Tb, cells tab-separated
    (with a trailing tab per row), followed by a blank line."""
    for row in range(N):
        line = "\t".join(str(Tb[row][col]) for col in range(N))
        print(line + "\t")
    print()
def shifravanie(mask, out, sent, k):
    """Write characters of `sent`, starting at index k, into `out` at every
    cell where `mask` is 1 (row-major order). Returns (out, next k).

    Bug fix: the original iterated over the module-level global `n`; the
    grid size is now derived from the mask itself, so the function also
    works standalone and for any square mask.
    """
    size = len(mask)
    for i in range(size):
        for j in range(size):
            if mask[i][j] == 1:
                out[i][j] = sent[k]
                k += 1
    return out, k
"""
def decimalToBinary(num):
while num > 1:
num = num // 2
def decode(arr, n):
matrix = []
for i in arr:
"""
import random
# Rotating-grille (Cardan grille) cipher demo: build a random grille,
# then fill a ciphertext grid through its holes over four 90-degree turns.
# Grid size of the cipher table.
n = int(input("Введите размер шифровальной таблицы: "))
# Build an n x n table of zeros.
Table = []
for i in range(n):
    Table.append([])
    for j in range(n):
        Table[i].append(0)
# Label every 4-cell rotational orbit with the same number c, so that a
# 90-degree rotation maps each orbit onto itself.
c = 1
for i in range(n // 2):
    for j in range(n // 2):
        Table[i][j] = c
        Table[j][n - i - 1] = c
        Table[n - i - 1][n - j - 1] = c
        Table[n - j - 1][i] = c
        c = c + 1
c = c - 1
# Randomly pick exactly one cell from each orbit to be a grille hole
# (temporarily marked -1).
block = []
while (len(block) != (n // 2) ** 2):
    i = random.randint(0, n - 1)
    j = random.randint(0, n - 1)
    flag = True
    # Reject the cell if it is already a hole or its orbit is taken.
    for k in range(len(block)):
        if (Table[i][j] == -1) or (Table[i][j] == block[k]):
            flag = False
            break
    if (flag == True):
        block.append(Table[i][j])
        Table[i][j] = -1
# Convert to a 0/1 mask: 1 marks a hole of the grille.
for i in range(n):
    for j in range(n):
        if (Table[i][j] != -1):
            Table[i][j] = 0
        else:
            Table[i][j] = 1
PrintTable(Table,n)
# Pack each mask row into an integer (row read as an n-bit binary number);
# together these integers form the key.
key = []
for i in range(n):
    m = 0
    for j in range(n):
        m += Table[i][j] * 2 ** (n - j - 1)
    key.append(m)
print()
# Plaintext. NOTE(review): presumably must contain exactly n*n characters
# to fill the grid over four rotations -- the length is not checked here.
sentense = "серегаяобьяснютебекакэтоработает"
print(len(sentense))
k = 0
# Empty n x n ciphertext grid.
Shifr = []
for i in range(n):
    Shifr.append([])
    for j in range(n):
        Shifr[i].append(0)
# Four passes: write through the holes, rotate the grille, repeat.
Shifr,k=shifravanie(Table,Shifr,sentense,k)
Table=rotate(Table,n)
Shifr,k=shifravanie(Table,Shifr,sentense,k)
Table=rotate(Table,n)
Shifr,k=shifravanie(Table,Shifr,sentense,k)
Table=rotate(Table,n)
Shifr,k=shifravanie(Table,Shifr,sentense,k)
Table=rotate(Table,n)
PrintTable(Table,n)
PrintTable(Shifr,n)
print()
print(key)
|
[
"jacoblutin@gmail.com"
] |
jacoblutin@gmail.com
|
0fbe0e50be80a501c819d446dd212e8cea341c8d
|
8862d671654ed336f1de2895323e4cf76f0855d7
|
/syft/mpc/utils.py
|
3a4a7d9b250c7487e661dab920df36ddfed39296
|
[
"Apache-2.0"
] |
permissive
|
Vinohith/PySyft
|
f0f29b000af586faca88756533079a4bfea17ff1
|
1921efeeda2c7b0bf93f17a33ddf59f8020fa653
|
refs/heads/master
| 2020-03-31T14:24:27.596173
| 2018-10-09T16:03:43
| 2018-10-09T16:03:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
# Memo of previously computed inverses: cache[a][m] -> modular inverse of a mod m.
cache = {}


def egcd(a, b):
    """Extended Euclidean algorithm.

    Returns (g, x, y) such that g = gcd(a, b) and a*x + b*y = g.
    """
    if a == 0:
        return (b, 0, 1)
    g, y, x = egcd(b % a, a)
    return (g, x - (b // a) * y, y)


def modinv(a, m):
    """Return the multiplicative inverse of `a` modulo `m`.

    The result satisfies (x * modinv(a, m)) % m == (x / a) for any
    integer x in [0, m). Results are memoised in the module-level
    `cache`. Raises Exception when gcd(a, m) != 1 (no inverse exists).
    """
    per_a = cache.setdefault(a, {})
    if m in per_a:
        return per_a[m]
    g, x, _ = egcd(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    per_a[m] = x % m
    return per_a[m]
|
[
"liamtrask@gmail.com"
] |
liamtrask@gmail.com
|
9e77404a90a4c116a9c72e7dd494b94705ede353
|
ffc02736617d5bb4308427b3df5e43811601cea0
|
/examples/run_curl.py
|
b1dfee4e9ef691aaa9a75caf82946d6d611dde11
|
[
"MIT"
] |
permissive
|
weihancool/tf2rl
|
4315dd94f8f924f15085f26a9434f6824aa3736c
|
0ef45d4a32a177f14fb579c9c2332f71404a9595
|
refs/heads/master
| 2023-05-02T17:00:05.775610
| 2021-05-29T03:32:52
| 2021-05-29T03:32:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,618
|
py
|
import dmc2gym
from tf2rl.algos.curl_sac import CURLSAC
from tf2rl.envs.dmc_wrapper import DMCWrapper
from tf2rl.experiments.trainer import Trainer
def main():
    """Train or evaluate CURL-SAC on a DeepMind Control suite task rendered
    from pixels (via dmc2gym), using tf2rl's Trainer loop."""
    # Map of short CLI name -> [domain, task, action_repeat].
    # NOTE(review): the per-task action_repeat values presumably follow the
    # CURL paper's settings -- confirm against the paper.
    dm_envs = {
        'finger': ['finger', 'spin', 2],
        'cartpole': ['cartpole', 'balance', 4],
        'reacher': ['reacher', 'easy', 4],
        'cheetah': ['cheetah', 'run', 4],
        'walker': ['walker', 'walk', 2],
        'ball': ['ball_in_cup', 'catch', 4],
        'humanoid': ['humanoid', 'stand', 4],
        'bring_ball': ['manipulator', 'bring_ball', 4],
        'bring_peg': ['manipulator', 'bring_peg', 4],
        'insert_ball': ['manipulator', 'insert_ball', 4],
        'insert_peg': ['manipulator', 'insert_peg', 4]}
    # Compose the CLI from Trainer's and CURLSAC's argument definitions.
    parser = Trainer.get_argument()
    parser = CURLSAC.get_argument(parser)
    parser.add_argument('--env-name', type=str, default="cartpole", choices=dm_envs.keys())
    parser.add_argument('--seed', type=int, default=1)
    parser.set_defaults(batch_size=256)
    parser.set_defaults(n_warmup=10000)
    parser.set_defaults(max_steps=3e6)
    parser.set_defaults(save_summary_interval=100)
    args = parser.parse_args()
    domain_name, task_name, action_repeat = dm_envs[args.env_name]
    # Frames are rendered at 100x100 and stacked 3 deep (3 RGB channels each);
    # the policy consumes 84x84 crops. NOTE(review): the crop/resize is
    # presumably done inside CURLSAC -- confirm.
    original_obs_shape = (100, 100, 9)
    input_obs_shape = (84, 84, 9)
    def make_env():
        # Pixel observations, frame stacking k=3, channels-last throughout.
        return DMCWrapper(
            dmc2gym.make(
                domain_name=domain_name,
                task_name=task_name,
                seed=args.seed,
                visualize_reward=False,
                from_pixels=True,
                height=100,
                width=100,
                frame_skip=action_repeat,
                channels_first=False),
            obs_shape=original_obs_shape,
            k=3,
            channel_first=False)
    env = make_env()
    test_env = make_env()
    # see Table 3 of CURL paper
    lr_sac = lr_curl = 2e-4 if args.env_name == "cheetah" else 1e-3
    policy = CURLSAC(
        obs_shape=input_obs_shape,
        action_dim=env.action_space.high.size,
        gpu=args.gpu,
        memory_capacity=int(1e5),
        n_warmup=int(1e3),
        max_action=env.action_space.high[0],
        batch_size=512,
        actor_units=(1024, 1024),
        critic_units=(1024, 1024),
        lr_sac=lr_sac,
        lr_curl=lr_curl,
        lr_alpha=1e-4,
        tau=0.01,
        init_temperature=0.1,
        auto_alpha=True,
        stop_q_grad=args.stop_q_grad)
    trainer = Trainer(policy, env, args, test_env=test_env)
    if args.evaluate:
        trainer.evaluate_policy_continuously()
    else:
        trainer()
if __name__ == "__main__":
    main()
|
[
"dev.ohtakei@gmail.com"
] |
dev.ohtakei@gmail.com
|
ea79b41539481557905fbd47f8df65758118b68d
|
96ec8ea87fb2cfdd2d850a0471c9820f92152847
|
/九章算法/递归/Fibonacci easy.py
|
ed586d5e4c2c29806c786838bc6a997aa82f867e
|
[] |
no_license
|
bitterengsci/algorithm
|
ae0b9159fd21cc30c9865f981f9c18cf9c6898d7
|
bf70d038b70c51edc6ddd6bfef1720fb5f9f2567
|
refs/heads/master
| 2023-08-10T10:22:18.774232
| 2023-07-31T21:04:11
| 2023-07-31T21:04:11
| 186,261,880
| 95
| 46
| null | 2023-07-31T21:04:12
| 2019-05-12T13:57:27
|
Python
|
UTF-8
|
Python
| false
| false
| 323
|
py
|
from functools import lru_cache
class Solution:
    """
    @param n: an integer
    @return: an integer f(n)
    """
    def fibonacci(self, n):
        """Return the n-th Fibonacci number, 1-indexed: f(1)=0, f(2)=f(3)=1.

        Rewritten iteratively (O(n) time, O(1) space). The original used
        @lru_cache on an instance method, which keys the cache on `self`:
        that keeps every Solution instance alive for the cache's lifetime
        and re-memoises per instance (ruff B019).
        """
        a, b = 0, 1
        for _ in range(n - 1):
            a, b = b, a + b
        return a
|
[
"yanran2012@gmail.com"
] |
yanran2012@gmail.com
|
d7366dbf8bbfbc57e036cf38cc8c864998245935
|
97a192ac8a3feca408bb3f0ad746a8004e6bfcb7
|
/to_do_list/venv/bin/pip3.6
|
e29e4a64c8ccf31bea1aab4c20a66f68917835e0
|
[
"MIT",
"Python-2.0"
] |
permissive
|
DitooAZ/Python-Games
|
e46aed297c2e2ab2a5ca9869241a711d2e15f6e2
|
587cb499c57437acbe052d9eb5fb8d48272735e9
|
refs/heads/master
| 2023-03-21T00:34:28.405176
| 2021-01-30T15:25:22
| 2021-01-30T15:25:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
6
|
#!/home/afroz/PycharmProjects/to_do_list/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
# Auto-generated setuptools console-script shim: resolves the 'pip3.6'
# entry point of the pinned pip distribution and invokes it.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0]: strip the '-script.py(w)' / '.exe' suffixes left by
    # platform launchers so pip reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
    )
|
[
"aaaanchakure@gmail.com"
] |
aaaanchakure@gmail.com
|
a547055c3a6c7346b003d025b49edfef8cd3e2b8
|
329d8c2e0a259ad4880774c84ca4f6b28bbb641c
|
/bangali_19/configs/augs/v1.py
|
2ccdd35a4f447281decfd6fdc7264984bca77b04
|
[] |
no_license
|
dodler/kgl
|
1d3eeb6032b74afb761abe5fa8620325594d5a75
|
b17525299e98d41da6f6631bd796084097e8a94e
|
refs/heads/master
| 2021-06-24T17:26:38.683242
| 2021-06-20T08:14:23
| 2021-06-20T08:14:23
| 192,477,506
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
# Experiment configuration consumed by the training harness elsewhere in
# the project (keys are read by name; values below are left unchanged).
config = {
    "arch": "multi-head",
    "backbone": "se_resnext50_32x4d",
    "pretrained": True,
    # NOTE(review): presumably toggles a BatchNorm on the input -- confirm
    # against the model builder that reads this key.
    "in-bn": True,
    'opt': 'sgd',
    'loss_aggregate_fn': 'mean',
    'schedule': 'cosine_annealing_warm_restarts',
    # T_0: period (in epochs) of the first warm restart cycle.
    'T_0': 6,
    'lr': 0.1,
    # Dotted module paths to augmentation pipelines, resolved at load time.
    'train_aug': 'augmentations.geom.v1',
    'valid_aug': 'augmentations.geom.v0',
}
|
[
"tntlagf93@mail.ru"
] |
tntlagf93@mail.ru
|
f26abbd82e77317f1c957bdf7e3267f5c65f2d83
|
833e69b32f9bf9f9ac746ac46851cedae6366e63
|
/hta_expense_management/models/expense_request.py
|
415674635f4aa5001ca3aadb44016a0887ff5dc5
|
[] |
no_license
|
halltech-ci/odoo_15
|
2984d3ac5dbd446f2fb8ef49dd37ea53e71a0f71
|
8e587e38535ccf8fa10fd42be1bc75d957e63311
|
refs/heads/main
| 2023-08-23T14:48:10.712042
| 2021-11-06T20:14:46
| 2021-11-06T20:14:46
| 414,481,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
class ExpenseRequest(models.Model):
    _name = 'expense.request'
    _description = 'Custom expense request'
    # Chatter (messages) and scheduled activities on the form view.
    _inherit = ['mail.thread', 'mail.activity.mixin']
    _order = 'date desc'
    @api.model
    def _default_employee_id(self):
        # Employee record linked to the currently logged-in user.
        return self.env.user.employee_id
    @api.model
    def _get_default_requested_by(self):
        return self.env['res.users'].browse(self.env.uid)
    def _get_default_name(self):
        # Next reference from the 'expense.request.code' ir.sequence.
        # NOTE(review): unlike the defaults above this one is not
        # @api.model-decorated -- confirm that is intentional.
        return self.env["ir.sequence"].next_by_code("expense.request.code")
    name = fields.Char(default=_get_default_name, readonly=True)
    description = fields.Char('Description', required=True)
    # Workflow state; tracking=True logs changes in the chatter.
    state = fields.Selection(selection=[
        ('draft', 'Draft'),
        ('submit', 'Submitted'),
        ('validate', 'Validate'),
        ('to_approve', 'To Approve'),
        ('approve', 'Approved'),
        ('post', 'Paid'),
        ('refuse', 'Refused'),
        ('cancel', 'Cancelled')
    ], string='Status', index=True, readonly=True, tracking=True, copy=False, default='draft', required=True, help='Expense Report State')
|
[
"maurice.atche@halltech-africa.com"
] |
maurice.atche@halltech-africa.com
|
e97f08ac73a48b46ed2e88f1aa02f9e54c37b37e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03549/s657113138.py
|
7bc42c0eb4301b72eea8570f2ec52dbe884e2fce
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
n,m = map(int,input().split())
# Time taken by one full attempt: 1900 ms for each of the m hard cases,
# 100 ms for each of the remaining n - m cases.
total = m*1900 + (n-m)*100
# Denominator of the probability that all m hard cases pass (each 1/2).
# NOTE(review): presumably the expected total time simplifies to
# total * 2**m, which is what is printed -- confirm against the problem.
prob_all = 2**m
print(total * prob_all)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
aec3ae45025c14557e08868f995361392ecd97e0
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/tools/binary_size/print_trybot_sizes.py
|
3dfbbc52800a48e7e37c53d5d46648cf12e7eaa3
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 5,713
|
py
|
#!/usr/bin/env python3
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints android-binary-size result for a given commit or commit range."""
import argparse
import collections
import concurrent.futures
import csv
import json
import os
import posixpath
import re
import subprocess
import sys
# Max number of commits to show when given a range and no -n parameter.
_COMMIT_LIMIT = 200
# Commit ranges where size bot was giving invalid results.
_BAD_COMMIT_RANGES = [
range(1045024, 1045552), # https://crbug.com/1361952
]
_COMMIT_RE = re.compile(r'^commit (?:(?!^commit).)*', re.DOTALL | re.MULTILINE)
_MAIN_FIELDS_RE = re.compile(
r'^commit (\S+).*?'
r'^Date:\s+(.*?)$.*?'
r'^ (\S.*?)$', re.DOTALL | re.MULTILINE)
_REVIEW_RE = re.compile(r'^ Reviewed-on: (\S+)', re.MULTILINE)
_CRREV_RE = re.compile(r'^ Cr-Commit-Position:.*?(\d+)', re.MULTILINE)
_GERRIT_RE = re.compile(r'https://([^/]+)/c/(.*?)/\+/(\d+)')
_CommitInfo = collections.namedtuple(
'_CommitInfo', 'git_hash date subject review_url cr_position')
def _parse_commit(text):
  """Extract a _CommitInfo from one `git log` commit entry."""
  match = _MAIN_FIELDS_RE.match(text)
  git_hash, date, subject = match.groups()
  # Last trailer wins; fall back to '' / 0 when the trailer is absent.
  review_matches = _REVIEW_RE.findall(text)
  review_url = review_matches[-1] if review_matches else ''
  position_matches = _CRREV_RE.findall(text)
  cr_position = int(position_matches[-1]) if position_matches else 0
  return _CommitInfo(git_hash, date, subject, review_url, cr_position)
def _git_log(git_log_args):
  """Run `git log` with the given extra args and parse each commit entry."""
  cmd = ['git', 'log']
  single_revision = len(git_log_args) == 1 and '..' not in git_log_args[0]
  caller_limited = any(arg.startswith('-n') for arg in git_log_args)
  if single_revision:
    # A lone revision (no "..") means exactly one commit, not a range.
    cmd.append('-n1')
  elif not caller_limited:
    # Cap the commit count unless the caller supplied their own -n.
    cmd.append(f'-n{_COMMIT_LIMIT}')
  cmd.extend(git_log_args)
  log_output = subprocess.check_output(cmd, encoding='utf8')
  commits = [_parse_commit(entry) for entry in _COMMIT_RE.findall(log_output)]
  if len(commits) == _COMMIT_LIMIT:
    sys.stderr.write(
        f'Limiting to {_COMMIT_LIMIT} commits. Use -n## to override\n')
  return commits
def _query_size(review_url, internal):
  """Return the binary-size delta string the size trybot reported for the
  given Gerrit review URL, or a '<...>' placeholder when unavailable."""
  if not review_url:
    return '<missing>'
  m = _GERRIT_RE.match(review_url)
  if not m:
    return '<bad URL>'
  host, project, change_num = m.groups()
  if internal:
    project = 'chrome'
    builder = 'android-internal-binary-size'
  else:
    project = 'chromium'
    builder = 'android-binary-size'
  # Query buildbucket (`bb ls`) for builds of the size builder on this change.
  cmd = ['bb', 'ls', '-json', '-p']
  # Request results for all patchsets, assuming fewer than 30.
  for patchset in range(1, 30):
    cmd += [
        '-predicate',
        """{
      "builder":{"project":"%s","bucket":"try","builder":"%s"},
      "gerrit_changes":[{
        "host":"%s","project":"%s",
        "change":"%s","patchset":"%d"}
      ]}""" % (project, builder, host, project, change_num, patchset)
    ]
  result = subprocess.run(cmd,
                          check=False,
                          stdout=subprocess.PIPE,
                          encoding='utf8')
  if result.returncode:
    return '<missing>'
  # Take the last one that has a size set (output is in reverse order already).
  for json_str in result.stdout.splitlines():
    try:
      obj = json.loads(json_str)
    except json.JSONDecodeError:
      sys.stderr.write(f'Problem JSON:\n{json_str}\n')
      sys.exit(1)
    properties = obj.get('output', {}).get('properties', {})
    listings = properties.get('binary_size_plugin', {}).get('listings', [])
    for listing in listings:
      if listing['name'] == 'Android Binary Size':
        return listing['delta']
  return '<unknown>'
def _maybe_rewrite_crrev(git_log_args):
if len(git_log_args) != 1:
return
values = git_log_args[0].split('..')
if len(values) != 2 or not values[0].isdigit() or not values[1].isdigit():
return
values = [
subprocess.check_output(['git-crrev-parse', v], text=True).rstrip()
for v in values
]
git_log_args[0] = '..'.join(values)
print(f'Converted crrev to commits: {git_log_args[0]}')
def main():
  """Parse args, then print the size-bot delta for each selected commit."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--csv', action='store_true', help='Print as CSV')
  parser.add_argument('--internal',
                      action='store_true',
                      help='Query android-internal-binary-size (Googlers only)')
  # Unrecognized args are forwarded to `git log` (revision range, -n, etc.).
  args, git_log_args = parser.parse_known_args()
  # Ensure user has authenticated.
  result = subprocess.run(['bb', 'auth-info'],
                          check=False,
                          stdout=subprocess.DEVNULL)
  if result.returncode:
    sys.stderr.write('First run: bb auth-login\n')
    sys.exit(1)
  _maybe_rewrite_crrev(git_log_args)
  commit_infos = _git_log(git_log_args)
  if not commit_infos:
    sys.stderr.write('Did not find any commits.\n')
    sys.exit(1)
  print(f'Fetching bot results for {len(commit_infos)} commits...')
  if args.csv:
    print_func = csv.writer(sys.stdout).writerow
  else:
    print_func = lambda v: print('{:<12}{:14}{:12}{:32}{}'.format(*v))
  print_func(('Commit #', 'Git Hash', 'Size', 'Date', 'Subject'))
  num_bad_commits = 0
  # Fan out the (slow) buildbucket queries; print in commit order regardless.
  with concurrent.futures.ThreadPoolExecutor(max_workers=20) as pool:
    sizes = [
        pool.submit(_query_size, info.review_url, args.internal)
        for info in commit_infos
    ]
    for info, size in zip(commit_infos, sizes):
      if any(info.cr_position in r for r in _BAD_COMMIT_RANGES):
        num_bad_commits += 1
      # Normalize e.g. "+12345 bytes" -> "12345".
      size_str = size.result().replace(' bytes', '').lstrip('+')
      crrev_str = info.cr_position or ''
      print_func(
          (crrev_str, info.git_hash[:12], size_str, info.date, info.subject))
  if num_bad_commits:
    print(f'Includes {num_bad_commits} commits from known bad revision range.')
if __name__ == '__main__':
  main()
|
[
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] |
chromium-scoped@luci-project-accounts.iam.gserviceaccount.com
|
9b22cfd35b76e0c6d5d9df02c069057c37a0a9c7
|
51d8f003828d6ee6e6611f0e133b1e35cf400601
|
/dnekcab-eciovni/invoice_api/core/migrations/0005_auto_20180831_1728.py
|
6c7e489ba33b303a972e1f95773abfe4a6348581
|
[] |
no_license
|
tatubola/xpto
|
23b5f7a42c13c7d39eb321e52b9b4b2d1ef76c4c
|
6ed8cec23b06bccb1edf57e6b67af017f9a162d3
|
refs/heads/master
| 2020-04-02T11:05:24.560009
| 2018-10-23T17:41:10
| 2018-10-23T17:41:10
| 154,370,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
# Generated by Django 2.0.8 on 2018-08-31 17:28
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header); only comments added --
    # edits to applied migrations must otherwise be avoided.
    dependencies = [
        ('core', '0004_auto_20180827_1752'),
    ]
    operations = [
        # Make fatura.boleto_url an optional, URL-validated CharField with a
        # localized error message.
        migrations.AlterField(
            model_name='fatura',
            name='boleto_url',
            field=models.CharField(blank=True, error_messages={'invalid': 'Insira uma url válida.'}, max_length=255, null=True, validators=[django.core.validators.URLValidator()]),
        ),
        # Reset servico.data_expiracao to a plain required DateField.
        migrations.AlterField(
            model_name='servico',
            name='data_expiracao',
            field=models.DateField(),
        ),
    ]
|
[
"dmoniz@nic.br"
] |
dmoniz@nic.br
|
210e2ef67d9dbcad596f1621a8653073ca4e2646
|
8168caa4ae066940dfedd788eeb107c5f65532ef
|
/node_modules/jest-haste-map/node_modules/fsevents/build/config.gypi
|
77c829ebd83516aa0e33f98d86df08a9e4e5fdef
|
[
"MIT"
] |
permissive
|
muzamilnazir/keeper
|
099a922068e028ca51b14c9bf85135fc2a509cf1
|
ade15bd80b95f31e640378db8a3ed9a1a2a4ea86
|
refs/heads/main
| 2023-06-18T18:28:01.293667
| 2021-07-07T07:17:52
| 2021-07-07T07:17:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,702
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt68l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "68",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "8",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/mac/Library/Caches/node-gyp/14.17.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/mac/.npm-init.js",
"userconfig": "/Users/mac/.npmrc",
"cidr": "",
"node_version": "14.17.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/mac/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.13 node/v14.17.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/cn/tv09tq6n7r10g948dc0phgyh0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"mac@Macs-MacBook-Pro.local"
] |
mac@Macs-MacBook-Pro.local
|
955597662f9fa120479a85f48ef72fc1c51a6ba7
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractSadhoovysinhumantranslationsWordpressCom.py
|
76bd4a048313ef6e663c631631bd11116a32297d
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 596
|
py
|
def extractSadhoovysinhumantranslationsWordpressCom(item):
'''
Parser for 'sadhoovysinhumantranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
e6b1a7a5d7f6c2ea08dcd31845e29efc0e8c606f
|
020fed990dcab7417f82bde82f19d6beae58b06f
|
/ethiostockdemo/ethiostockdemo/urls.py
|
af60814eaf5fbc6f4ca37c6961e8330427d55c92
|
[] |
no_license
|
TsiyonW/pracdjango
|
2cb27522bf201543eb262e060f70a765d59236e3
|
ef6d319fda2cde3d3c07b9e0162e30a6153cce5e
|
refs/heads/master
| 2021-01-16T09:06:21.008123
| 2020-02-25T20:37:16
| 2020-02-25T20:37:16
| 243,053,765
| 0
| 0
| null | 2020-02-25T20:37:17
| 2020-02-25T17:07:47
|
Python
|
UTF-8
|
Python
| false
| false
| 925
|
py
|
"""ethiostockdemo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from graphene_django.views import GraphQLView
urlpatterns = [
path('admin/', admin.site.urls),
path('graphql/', csrf_exempt(GraphQLView.as_view(graphiql=True))),
]
|
[
"="
] |
=
|
ff1d767a12e4fd97828963a44224b8e3926cfc52
|
34ed92a9593746ccbcb1a02630be1370e8524f98
|
/lib/pints/pints/tests/test_toy_rosenbrock.py
|
b22f4651ddea441cae2d604c240719ec522216c8
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
HOLL95/Cytochrome_SV
|
87b7a680ed59681230f79e1de617621680ea0fa0
|
d02b3469f3ee5a4c85d756053bc87651093abea1
|
refs/heads/master
| 2022-08-01T05:58:16.161510
| 2021-02-01T16:09:31
| 2021-02-01T16:09:31
| 249,424,867
| 0
| 0
| null | 2022-06-22T04:09:11
| 2020-03-23T12:29:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,807
|
py
|
#!/usr/bin/env python3
#
# Tests the Rosenbrock toy problems.
#
# This file is part of PINTS.
# Copyright (c) 2017-2018, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import pints
import pints.toy
import unittest
import numpy as np
class TestRosenbrock(unittest.TestCase):
"""
Tests the Rosenbrock toy problems.
"""
def test_error(self):
f = pints.toy.RosenbrockError()
self.assertEqual(f.n_parameters(), 2)
fx = f([10, 10])
self.assertTrue(np.isscalar(fx))
self.assertEqual(fx, 810081)
xopt = f.optimum()
fopt = f(xopt)
self.assertEqual(fopt, 0)
np.random.seed(1)
for x in np.random.uniform(-5, 5, size=(10, 2)):
self.assertTrue(f(x) > fopt)
def test_log_pdf(self):
f = pints.toy.RosenbrockLogPDF()
self.assertEqual(f.n_parameters(), 2)
fx = f([0.5, 6.0])
self.assertTrue(np.isscalar(fx))
self.assertAlmostEqual(fx, np.log(1.0 / 3307.5))
xopt = f.optimum()
fopt = f(xopt)
self.assertEqual(fopt, 0)
# sensitivity test
l, dl = f.evaluateS1([3, 4])
self.assertEqual(l, -np.log(2505))
self.assertEqual(len(dl), 2)
self.assertEqual(dl[0], float(-6004.0 / 2505.0))
self.assertEqual(dl[1], float(200.0 / 501.0))
# suggested bounds and distance measure
bounds = f.suggested_bounds()
bounds = [[-2, 4], [-1, 12]]
bounds = np.transpose(bounds).tolist()
self.assertTrue(np.array_equal(bounds, f.suggested_bounds()))
x = np.ones((100, 3))
self.assertRaises(ValueError, f.distance, x)
x = np.ones((100, 3, 2))
self.assertRaises(ValueError, f.distance, x)
# there is no simple way to generate samples from Rosenbrock
nsamples = 10000
g = pints.toy.GaussianLogPDF([1, 1], [1, 1])
samples = g.sample(nsamples)
self.assertTrue(f.distance(samples) > 0)
x = np.ones((100, 3))
self.assertRaises(ValueError, f.distance, x)
x = np.ones((100, 2, 2))
self.assertRaises(ValueError, f.distance, x)
# generate samples with mean and variance closer to true values
g1 = pints.toy.GaussianLogPDF([0.86935785, 2.59978086],
[[1.80537968, 2.70257559],
[2.70257559, 8.52658308]])
samples1 = g1.sample(nsamples)
self.assertTrue(f.distance(samples1) > 0)
self.assertTrue(f.distance(samples) > f.distance(samples1))
if __name__ == '__main__':
print('Add -v for more debug output')
import sys
if '-v' in sys.argv:
debug = True
unittest.main()
|
[
"henney@localhost.localdomain"
] |
henney@localhost.localdomain
|
465623bff38a425e2f71d5f8761761b68aabd562
|
51a37b7108f2f69a1377d98f714711af3c32d0df
|
/src/leetcode/P968.py
|
adec6da448f1dd1be7449761ba15ed897b6927e1
|
[] |
no_license
|
stupidchen/leetcode
|
1dd2683ba4b1c0382e9263547d6c623e4979a806
|
72d172ea25777980a49439042dbc39448fcad73d
|
refs/heads/master
| 2022-03-14T21:15:47.263954
| 2022-02-27T15:33:15
| 2022-02-27T15:33:15
| 55,680,865
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
# Definition for a binary tree node.
from functools import lru_cache
INF = 0xffffffff
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def minCameraCover(self, root):
"""
:type root: TreeNode
:rtype: int
"""
@lru_cache(maxsize=None)
def solve(node, t):
if node is None:
return 0
if node.left is None and node.right is None:
if t == 1:
return INF
if t == 0:
return 1
if t == -1:
return 0
l = []
for i in range(3):
l.append(solve(node.left, i - 1))
r = []
for i in range(3):
r.append(solve(node.right, i - 1))
if t == -1:
if node.left is not None and node.right is not None:
return min(min(l[2], l[1]) + min(r[2], r[1]), min(r[2], r[1]) + min(l[2], l[1]))
if node.left is not None:
return min(l[2], l[1])
else:
return min(r[2], r[1])
if t == 0:
return 1 + min(l) + min(r)
if t == 1:
if node.left is not None and node.right is not None:
return min(l[1] + min(r[2], r[1]), r[1] + min(l[2], l[1]))
if node.left is not None:
return l[1]
else:
return r[1]
return min(solve(root, 0), solve(root, 1))
if __name__ == '__main__':
node = TreeNode(1)
node.left = TreeNode(2)
node.left.left = TreeNode(3)
node.left.left.left = TreeNode(4)
node.left.left.left.left = TreeNode(5)
node.left.left.left.left.left = TreeNode(6)
print(Solution().minCameraCover(node))
|
[
"stupidchen@foxmail.com"
] |
stupidchen@foxmail.com
|
e645f221824750ae1df6e085b30a4d11d74f99d1
|
5ed21f38903512ff931cb0527fc0a651a1572127
|
/dag1/live_koding/gjettelek.py
|
b593a9791de1a0e0394b71192b55604fdfbfa7a4
|
[] |
no_license
|
kodeskolen/tekna_agder_h20_2
|
6eb52a21fa2425b82cb88108686cce0079ac71ab
|
16e869ad48b2411b3f2b133f3adbb382863a744d
|
refs/heads/main
| 2023-01-13T16:35:06.870744
| 2020-11-20T13:24:49
| 2020-11-20T13:24:49
| 310,009,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 19:28:47 2020
@author: Marie
"""
# Vi "tenker" på et tall mellom 0 og 100
# Spilleren gjetter et tall
# Dersom gjettet er rett så er spillet over
# Dersom gjettet er for lavt eller høyt, får spilleren beskjed
# Spilleren får gjette på nytt
# Spilleren har begrenset med gjett
from random import randint
riktig_tall = randint(0, 100)
maks_forsøk = 3
gjett = int(input("Gjett et tall mellom 0 og 100: "))
forsøk = 1
while gjett != riktig_tall and forsøk < maks_forsøk:
if gjett < riktig_tall:
print("For lavt!")
else:
print("For høyt!")
gjett = int(input("Gjett igjen: "))
forsøk += 1
if riktig_tall == gjett:
print("Det var riktig! :D")
else:
print(f"Du har brukt opp dine {maks_forsøk} forsøk!")
print("GAME OVER!!")
|
[
"roald.marie@gmail.com"
] |
roald.marie@gmail.com
|
b8e423e679443fc6f005a3baffb6c63992fc9384
|
98420fdd66b8dce46ef88cd34fcace36777fa232
|
/py3/torch_motion_retarget_autoencoder/main_train.py
|
63456fb7a475f29d83de2bf2e04dd5e88bb0e5e9
|
[] |
no_license
|
Daiver/jff
|
f972fe7464f78ba6008a036b697ea3f04b7010a4
|
33d6a781af8d7f6ae60c25e10051977af2fef1b9
|
refs/heads/master
| 2023-04-07T06:33:41.487938
| 2022-05-03T10:07:32
| 2022-05-03T10:07:32
| 12,180,634
| 1
| 1
| null | 2023-04-03T19:25:00
| 2013-08-17T15:03:14
|
C++
|
UTF-8
|
Python
| false
| false
| 3,558
|
py
|
import numpy as np
import cv2
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
import np_draw_tools
from data_generation import make_rectangle_dataset, make_circle_dataset
from models import Encoder, Decoder
from utils import numpy_images_to_torch
def main():
n_samples_to_generate = 1500
epochs = 50
device = "cuda"
batch_size = 8
circle_set = make_circle_dataset(n_samples_to_generate)
rect_set = make_rectangle_dataset(n_samples_to_generate)
train_rect_set = numpy_images_to_torch(rect_set)
train_circle_set = numpy_images_to_torch(circle_set)
print(f"N rect samples {len(train_rect_set)} N circle samples {len(train_circle_set)}")
train_rect_loader = DataLoader(train_rect_set, batch_size=batch_size, shuffle=True, drop_last=True)
val_rect_loader = DataLoader(train_rect_set, batch_size=batch_size * 16, shuffle=False)
train_circle_loader = DataLoader(train_circle_set, batch_size=batch_size, shuffle=True, drop_last=True)
encoder = Encoder().to(device)
decoder_rect = Decoder().to(device)
decoder_circle = Decoder().to(device)
optimizer = optim.Adam(list(encoder.parameters()) + list(decoder_rect.parameters()) + list(decoder_circle.parameters()), lr=1e-4)
# criterion = nn.MSELoss()
# criterion = nn.L1Loss()
criterion = nn.BCELoss()
for epoch_ind in range(epochs):
losses = []
losses_rect = []
losses_circle = []
encoder.train()
decoder_rect.train()
decoder_circle.train()
for sample_rect, sample_circle in zip(train_rect_loader, train_circle_loader):
sample_rect = sample_rect.to(device)
pred_rect = encoder(sample_rect)
pred_rect = decoder_rect(pred_rect)
loss_rect = criterion(pred_rect, sample_rect)
sample_circle = sample_circle.to(device)
pred_circle = encoder(sample_circle)
pred_circle = decoder_circle(pred_circle)
loss_circle = criterion(pred_circle, sample_circle)
loss = loss_rect + loss_circle
losses.append(loss.item())
losses_rect.append(loss_rect.item())
losses_circle.append(loss_circle.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
encoder.eval()
decoder_rect.eval()
decoder_circle.eval()
print(f"{epoch_ind + 1} / {epochs} loss {np.mean(losses)} loss rect {np.mean(losses_rect)} loss circle {np.mean(losses_circle)}")
for sample_rect in val_rect_loader:
sample_rect = sample_rect.to(device)
pred_rect = encoder(sample_rect)
# pred_rect = decoder_rect(pred_rect)
pred_rect = decoder_circle(pred_rect)
pred_rect = (pred_rect.detach().permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
sample_rect = (sample_rect.detach().permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
to_show = []
for p, s in zip(pred_rect, sample_rect):
to_show.append(p)
to_show.append(s)
to_show = np_draw_tools.make_grid(to_show[:64], 8)
cv2.imshow("", to_show)
cv2.waitKey(100)
break
torch.save(encoder.state_dict(), "encoder.pt")
torch.save(decoder_circle.state_dict(), "decoder_circle.pt")
torch.save(decoder_rect.state_dict(), "decoder_rect.pt")
cv2.waitKey()
if __name__ == '__main__':
main()
|
[
"ra22341@ya.ru"
] |
ra22341@ya.ru
|
db035bcd80d85a97d93d283c7d8154a62e1f3161
|
9d530dc15db600c0630bf7f5141a1277e11d7da6
|
/wagtail_shell/test/urls.py
|
e816acb3672e0c9c3b22078e2ca91f8db34363bd
|
[
"BSD-3-Clause"
] |
permissive
|
kaedroho/wagtail-shell
|
a7b549800a6302d2338d79c5472457662b0d01d3
|
cddab026bc3d647c77eac7e31236b662276698af
|
refs/heads/main
| 2023-04-13T10:55:56.916119
| 2021-04-26T17:12:41
| 2021-04-26T17:13:06
| 312,389,122
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
from django.conf.urls import include, url
from django.contrib import admin
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.documents import urls as wagtaildocs_urls
from wagtail.core import urls as wagtail_urls
urlpatterns = [
url(r"^django-admin/", admin.site.urls),
url(r"^admin/", include(wagtailadmin_urls)),
url(r"^documents/", include(wagtaildocs_urls)),
url(r"", include(wagtail_urls)),
]
|
[
"karl@kaed.uk"
] |
karl@kaed.uk
|
caea99874c479e8fff1f0d8d70f1c26b8bf9f39e
|
344e2956b4e2a30a8ef7532d951f96d995d1dd1e
|
/18_mmaction/lib/mmcv/mmcv/ops/__init__.py
|
b38aff92534ddc32fe7d6ee4eb59383b38c688f7
|
[
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"GPL-3.0-only"
] |
permissive
|
karndeepsingh/Monk_Object_Detection
|
e64199705326e4cd65e4b29946cae210a4ef9649
|
425fa50a3236cb9097389646275da06bf9185f6b
|
refs/heads/master
| 2022-12-22T18:26:53.933397
| 2020-09-28T12:49:50
| 2020-09-28T12:49:50
| 299,307,843
| 1
| 1
|
Apache-2.0
| 2020-09-28T12:52:18
| 2020-09-28T12:52:17
| null |
UTF-8
|
Python
| false
| false
| 2,136
|
py
|
from .bbox import bbox_overlaps
from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive
from .cc_attention import CrissCrossAttention
from .corner_pool import CornerPool
from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d
from .deform_roi_pool import (DeformRoIPool, DeformRoIPoolPack,
ModulatedDeformRoIPoolPack, deform_roi_pool)
from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss,
sigmoid_focal_loss, softmax_focal_loss)
from .info import get_compiler_version, get_compiling_cuda_version
from .masked_conv import MaskedConv2d, masked_conv2d
from .modulated_deform_conv import (ModulatedDeformConv2d,
ModulatedDeformConv2dPack,
modulated_deform_conv2d)
from .nms import batched_nms, nms, nms_match, soft_nms
from .point_sample import (SimpleRoIAlign, point_sample,
rel_roi_point_to_rel_img_point)
from .psa_mask import PSAMask
from .roi_align import RoIAlign, roi_align
from .roi_pool import RoIPool, roi_pool
from .saconv import SAConv2d
from .sync_bn import SyncBatchNorm
from .tin_shift import TINShift, tin_shift
from .wrappers import Conv2d, ConvTranspose2d, Linear, MaxPool2d
__all__ = [
'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe',
'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack',
'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack',
'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss',
'SoftmaxFocalLoss', 'sigmoid_focal_loss', 'softmax_focal_loss',
'get_compiler_version', 'get_compiling_cuda_version', 'MaskedConv2d',
'masked_conv2d', 'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack',
'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match',
'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d',
'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask',
'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign',
'SAConv2d', 'TINShift', 'tin_shift'
]
|
[
"abhishek4273@gmail.com"
] |
abhishek4273@gmail.com
|
62923615d10f8b267c51040f2482bd55da9e58cf
|
7615badcbd9cc22a263c5f206e951c8c1e6b3e70
|
/setup.py
|
81eed71015da29a0162829431b4559150308db73
|
[] |
no_license
|
winkidney/PyMonitor
|
216c88140ea942d23e8f3a634e63c5e3052f46c8
|
f772153af217d89b74e5fca2427f3d92ca919f34
|
refs/heads/master
| 2021-01-23T15:29:45.711809
| 2014-09-18T06:06:14
| 2014-09-18T06:06:14
| 24,089,355
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
requires = [
'psutil',
]
setup(name='PyMonitor',
version='0.1',
description='Monitor system status',
classifiers=[
"Programming Language :: Python",
],
author='',
author_email='',
url='',
keywords='system tools',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="none",
)
|
[
"winkidney@gmail.com"
] |
winkidney@gmail.com
|
d3480cfe285646810d63a5bcafcf319224170244
|
d4679b63ff98399a2e2c90a70196ca61be12d5ed
|
/Part_1/Алгоритмы и структуры данных/Homeworks/Sort1/quick_sort_new.py
|
96af6e92a923fca83c9b9828b92291d6e436ae13
|
[] |
no_license
|
akoshel/MADE
|
9a702064f9dd5f89664efed4e76f9a2fb0a94743
|
e0c3aceaf190bb86bae9f8239ae181d5529bc044
|
refs/heads/main
| 2023-04-28T14:25:53.210880
| 2021-05-19T16:23:52
| 2021-05-19T16:23:52
| 328,123,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
from random import randint
list_lenght = int(input())
raw_list = list(map(int, input().split(' ')))
assert(len(raw_list) == list_lenght)
def quick_sort_step(input_list, l, r):
ll = l
i = l
hh = r
x = input_list[l]
while i <= hh:
if input_list[i] < x:
input_list[ll], input_list[i] = input_list[i], input_list[ll]
ll += 1
i += 1
elif input_list[i] > x:
input_list[i], input_list[hh] = input_list[hh], input_list[i]
hh -= 1
else:
i += 1
return ll, hh
def quick_sort(init_list, l, r):
if l < r:
m = randint(l, r)
init_list[m], init_list[l] = init_list[l], init_list[m]
ll, hh = quick_sort_step(init_list, l, r)
quick_sort(init_list, l, ll - 1)
quick_sort(init_list, hh + 1, r)
quick_sort(raw_list, 0, len(raw_list)-1)
print(' '.join(list(map(str, raw_list))))
|
[
"johndoe@example.com"
] |
johndoe@example.com
|
38713d85ffd79716879376e48de891c2aaa7b329
|
13f4a06cd439f579e34bf38406a9d5647fe7a0f3
|
/nn_ns/data_structure/TreeNodeOps/UnbalancedMultiWayTreeNodeOps/IUnbalancedMultiWayTreeNodeOps.py
|
628ddf2735bb46b14ed81b58dd3dae4abfa781b8
|
[] |
no_license
|
edt-yxz-zzd/python3_src
|
43d6c2a8ef2a618f750b59e207a2806132076526
|
41f3a506feffb5f33d4559e5b69717d9bb6303c9
|
refs/heads/master
| 2023-05-12T01:46:28.198286
| 2023-05-01T13:46:32
| 2023-05-01T13:46:32
| 143,530,977
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,174
|
py
|
__all__ = '''
IUnbalancedMultiWayTreeNodeOps
'''.split()
from .abc import not_implemented, override
from ..TreeNodeOps.IOrientedTreeNodeOps__inorder import \
IOrientedTreeNodeOps__inorder
class IUnbalancedMultiWayTreeNodeOps(IOrientedTreeNodeOps__inorder):
'''
assume nonleaf.num_children > 0 since num_entities = num_children-1
assume nonleaf.num_children >= 2 since inorder
entity_position == innode_position
except:
entity_position != innode_last == entity_end
new methods:
`get_entity_at
`_iter_entities_of_nonleaf_
`_iter_reversed_entities_of_nonleaf_
iter_entities_of_nonleaf
iter_reversed_entities_of_nonleaf
get_num_entities_of_nonleaf
calc_num_entities_of_subtree
iter_innode_position_entity_pairs_of_nonleaf
iter_reversed_innode_position_entity_pairs_of_nonleaf
get_innode_entity_begin
get_innode_entity_end
get_innode_entity_begin_or_end
iter_entities_of_subtree
iter_reversed_entities_of_subtree
leaf_to_iter_entities_of_subtree
leaf_to_iter_reversed_entities_of_subtree
'''
__slots__ = ()
@override
def why_not_subtree_ok(ops, self, **kwargs):
# kwargs readonly, should not remove key from it
# i.e. donot override: def is_subtree_ok(ops, self, *, as_root=..., **kwargs)
return (ops.why_not_num_entities_of_nonleaf_ok(self)
+ super().why_not_subtree_ok(self, **kwargs)
)
def why_not_num_entities_of_nonleaf_ok(ops, self):
# num_entities_of_nonleaf = num_children-1
#
is_leaf = ops.is_leaf
unstable_iter_nodes_of_subtree = ops.unstable_iter_nodes_of_subtree
get_num_children = ops.get_num_children
get_num_entities_of_nonleaf = ops.get_num_entities_of_nonleaf
#if ops.is_leaf(self): return ()
for node in unstable_iter_nodes_of_subtree(self):
if is_leaf(node): continue
nonleaf = node; del node
num_children = get_num_children(nonleaf)
num_entities_of_nonleaf = get_num_entities_of_nonleaf(nonleaf)
if num_children != 1+num_entities_of_nonleaf:
return ('num_children != 1+num_entities_of_nonleaf',)
return ()
def get_num_entities_of_nonleaf(ops, self):
assert not ops.is_leaf(self)
return ops.get_num_children(self) - 1
########## require num_children > 0 ################
#def get_num_entities_of_subtree(ops, self):
def calc_num_entities_of_subtree(ops, self):
on_leaf = lambda _: 0
on_nonleaf = ops.get_num_entities_of_nonleaf
combine = lambda a, bs: sum(bs, a)
return ops.bottomup_eval_unoriented_subtree(
self, on_leaf, on_nonleaf, combine)
if ops.is_leaf(self):
return 0
return sum(map(ops.calc_num_entities_of_subtree
, ops.unstable_iter_children(self))
, ops.get_num_entities_of_nonleaf(self))
# nonleaf
@not_implemented
def _iter_entities_of_nonleaf_(ops, self):
# self as node, not as subtree
assert not ops.is_leaf(self)
...
@not_implemented
def _iter_reversed_entities_of_nonleaf_(ops, self):
# self as node, not as subtree
assert not ops.is_leaf(self)
...
def iter_entities_of_nonleaf(ops, self, *, reverse=False):
if not reverse:
f = ops._iter_entities_of_nonleaf_
else:
f = ops._iter_reversed_entities_of_nonleaf_
return f(self)
def iter_reversed_entities_of_nonleaf(ops, self, *, reverse=False):
return ops.iter_entities_of_nonleaf(self, reverse=not reverse)
def iter_innode_position_entity_pairs_of_nonleaf(ops, self, *, reverse=False):
# NOTE: not output innode_last (== child_last == entity_end)
return zip(ops.iter_innode_positions(self, reverse=reverse)
, ops.iter_entities_of_nonleaf(self, reverse=reverse))
def iter_reversed_innode_position_entity_pairs_of_nonleaf(ops, self, *, reverse=False):
# NOTE: not output innode_last (== child_last == entity_end)
return ops.iter_innode_position_entity_pairs_of_nonleaf(self, reverse=not reverse)
# entity_begin == child_begin
# entity_end == child_last
def get_innode_entity_begin_or_end(ops, self, end:bool):
return ops.get_innode_first_or_last_position(self)
def get_innode_entity_begin(ops, self):
return ops.get_innode_first_position(self)
def get_innode_entity_end(ops, self):
return ops.get_innode_last_position(self)
@not_implemented
def get_entity_at(ops, self, entity_position):
# like get_child_at
assert entity_position != ops.get_innode_entity_end(self)
...
def iter_reversed_entities_of_subtree(ops, self, *, reverse=False):
return ops.iter_entities_of_subtree(self, reverse=not reverse)
def iter_entities_of_subtree(ops, self, *, reverse=False):
# reverse ==>> last leaf
last = reverse = bool(reverse)
leaf, depth = ops.get_first_or_last_leaf_ex(self, 0, last)
return ops.leaf_to_iter_entities_of_subtree(leaf, depth, reverse=reverse)
@staticmethod
def __nonleaf_triples2entities(get_entity_at, triples):
# triple = (nonleaf, entity_position, depth)
# get_entity_at = ops.get_entity_at
for nonleaf, entity_position, depth in triples:
yield get_entity_at(nonleaf, entity_position)
def leaf_to_iter_reversed_entities_of_subtree(ops, self, depth, *, reverse=False):
f = ops.leaf_to_iter_entities_of_subtree
return f(self, depth, reverse=not reverse)
def leaf_to_iter_entities_of_subtree(ops, self, depth, *, reverse=False):
assert ops.is_leaf(self)
f = ops.leaf_to_inorder_iter_nonleaf_entity_position_triples
it = f(self, depth, reverse=reverse)
return __class__.__nonleaf_triples2entities(ops.get_entity_at, it)
if __name__ == '__main__':
XXX = IUnbalancedMultiWayTreeNodeOps
from seed.helper.print_methods import print_methods
print_methods(XXX)
|
[
"wuming_zher@zoho.com.cn"
] |
wuming_zher@zoho.com.cn
|
f2c89a5b91a1fc71833013689a89d7bf15352771
|
beea74a2a1f2445b107af411197e8b6300e715e6
|
/supervised_learning/0x12-transformer_apps/1-dataset.py
|
486a2ffdb9d99fe7221d1babf782fa0a793b1816
|
[] |
no_license
|
95ktsmith/holbertonschool-machine_learning
|
0240d8fa8523b06d3353c2bffa74205b84253be8
|
2757c8526290197d45a4de33cda71e686ddcbf1c
|
refs/heads/master
| 2023-07-26T16:02:26.399758
| 2021-09-09T15:57:57
| 2021-09-09T15:57:57
| 310,087,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,150
|
py
|
#!/usr/bin/env python3
""" Dataset """
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
class Dataset:
""" Class to load an prepare a dataset for machine translation """
def __init__(self):
""" Init """
data = tfds.load(
"ted_hrlr_translate/pt_to_en",
as_supervised=True
)
self.data_train = data['train']
self.data_valid = data['validation']
tokenizer_pt, tokenizer_en = self.tokenize_dataset(self.data_train)
self.tokenizer_pt = tokenizer_pt
self.tokenizer_en = tokenizer_en
def tokenize_dataset(self, data):
"""
Creates sub-word tokenizers for a dataset
data: tf.data.Dataset whose examples are formatted as a table (pt, en)
pt: tf.Tensor containing the Portuguese sentence
en: tf.Tensor containing the English sentence
Returns: tokenizer_pt, tokenizer_en
tokenizer_pt: Portuguese tokenizer
tokenizer_en: English tokenizer
"""
encoder = tfds.deprecated.text.SubwordTextEncoder
tokenizer_pt = encoder.build_from_corpus(
(pt.numpy() for pt, en in data),
target_vocab_size=2 ** 15
)
tokenizer_en = encoder.build_from_corpus(
(en.numpy() for pt, en in data),
target_vocab_size=2 ** 15
)
return tokenizer_pt, tokenizer_en
def encode(self, pt, en):
"""
Encodes a translation into tokens
pt: tf.Tensor containing the Portuguese sentence
en: tf.Tensor containing the corresponding English sentence
Returns: pt_tokens, en_tokens
pt_tokens: np.ndarray containing Portuguese tokens
en_tokens: np.ndarray containing the English tokens
"""
pt_tokens = [self.tokenizer_pt.vocab_size]
pt_tokens += self.tokenizer_pt.encode(pt.numpy())
pt_tokens += [pt_tokens[0] + 1]
en_tokens = [self.tokenizer_en.vocab_size]
en_tokens += self.tokenizer_en.encode(en.numpy())
en_tokens += [en_tokens[0] + 1]
return pt_tokens, en_tokens
|
[
"95ktsmith@gmail.com"
] |
95ktsmith@gmail.com
|
6bb4155195ddc4b87cc695213e8d01711e32e57a
|
115d568228ea4dd48bc567fac1afbe90a67e9a8c
|
/LSTM/SegWords/BI-LSTM/Demo4/seqlib.py
|
30ee4fca743daeff75bb34637089947b506e7f4c
|
[] |
no_license
|
sunshinelu/NLPLearnNote
|
6eb6b016ed18602be3a2fe8ce2f1bdb770efb226
|
76cfd64438e8acbf0aadc727675d7b17b63549e3
|
refs/heads/master
| 2020-03-08T07:03:25.652478
| 2018-05-06T14:13:02
| 2018-05-06T14:13:02
| 127,985,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,052
|
py
|
#!/usr/bin/python
#-*-coding:utf-8-*-
'''
Created on 2018-04-25 15:41
@author:wangrs
'''
#1.导入模块包和语料库文件
import codecs
from gensim.models.word2vec import Word2Vec
import numpy as np
import nltk
from nltk.probability import FreqDist
import pandas as pd
from pickle import dump,load
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM,GRU,SimpleRNN
from keras.layers.core import Reshape,Flatten,Dropout,Dense,Activation
from keras.regularizers import l1,l2
from keras.layers.convolutional import Convolution2D,MaxPooling2D,MaxPooling1D
from sklearn.cross_validation import train_test_split
from keras.optimizers import SGD,RMSprop,Adagrad
from keras.utils import np_utils
#2.使用分词语料库生成词向量(模型)
def load_file(input_file): #读单个文本
input_data = codecs.open(input_file,'r',encoding='utf-8')
input_text = input_data.read()
return input_text
#使用gensim的word2vec库
def trainW2V(corpus,epochs = 10,num_features=100,sg=1,min_word_count=1,num_works=4,context=4,sample=1e-5,negative=5):
w2v = Word2Vec(workers=num_works,sample= sample,size=num_features,min_count=min_word_count,window=context)
np.random.shuffle(corpus) #打乱顺序函数
w2v.build_vocab(corpus)
w2v.train(corpus,total_examples=w2v.corpus_count,epochs=epochs)
print("word2vec DONE.")
return w2v
#3.语料预处理
def freq_func(input_text): #nltk输入文本,输出词频
corpus = nltk.Text(input_text)
fdist = FreqDist(corpus)
w = list(fdist.keys())
v = list(fdist.values())
freqpd = pd.DataFrame({'word':w,'freq':v})
freqpd.sort_values(by='freq',ascending=False,inplace=True)
freqpd['idx'] = np.arange(len(v))
return freqpd
#初始化权重
def init_weightlist(w2v,idx2word,word2idx):
init_weight_wv = []
for i in range(len(idx2word)):
init_weight_wv.append(w2v[idx2word[i]])
#定义‘U’为未登录新字,‘P’为两头padding用途,并增加两个相应的向量表示
char_num = len(init_weight_wv)
idx2word[char_num] = 'U'
word2idx['U'] = char_num
idx2word[char_num+1] = 'P'
word2idx['P'] = char_num+1
init_weight_wv.append(np.random.randn(100))
init_weight_wv.append(np.zeros(100))
return init_weight_wv,idx2word,word2idx
def character_tagging(input_file,output_file): #加入标注标签:BMES(B是词首,M是词中,E是词尾,S是单字词)
#带BOM的utf-8编码的txt文件时开头会有一个多余的字符\ufeff,BOM被解码为一个字符\ufeff,如何去掉?
# 修改encoding为utf-8_sig或者utf_8_sig
input_data = codecs.open(input_file,'r',encoding='utf-8_sig')
output_data = codecs.open(output_file,'w',encoding='utf-8')
for line in input_data.readlines():
word_list = line.strip().split()
for word in word_list:
if len(word) == 1:
output_data.write(word+"/S ")
else:
output_data.write(word[0]+'/B ')
for w in word[1:len(word)-1]:
output_data.write(w+"/M ")
output_data.write(word[len(word)-1]+'/E ')
output_data.write('\n')
output_data.close()
input_data.close()
def featContext(sentence,word2idx='',context = 7):
predict_word_num = []
for w in sentence: #文本中的字如果在词典中则转为数字,如果不在则设置为U
if w in word2idx:
predict_word_num.append(word2idx[w])
else:
predict_word_num.append(word2idx['U'])
num = len(predict_word_num) #首尾padding
pad = int((context-1)*0.5)
for i in range(pad):
predict_word_num.insert(0,word2idx['P'])
predict_word_num.append(word2idx['P'])
train_x = []
for i in range(num):
train_x.append(predict_word_num[i:i+context])
return train_x
#4.训练语料
class Lstm_Net(object):
def __init__(self):
self.init_weight=[]
self.batch_size = 128
self.word_dim = 100
self.maxlen = 7
self.hidden_units = 100
self.nb_classes = 0
def buildnet(self):
self.maxfeatures = self.init_weight[0].shape[0] #词典大小
self.model = Sequential()
print('stacking LSTM .....')#使用了堆叠的LSTM架构
self.model.add(Embedding(self.maxfeatures,self.word_dim,input_length=self.maxlen))
self.model.add(LSTM(self.hidden_units,return_sequences=True))
self.model.add(LSTM(self.hidden_units,return_sequences=False))
self.model.add(Dropout(0.5))
self.model.add(Dense(self.nb_classes))
self.model.add(Activation('softmax'))
self.model.compile(loss='categorical_crossentropy',optimizer='adam')
def train(self,modelname):
result= self.model.fit(self.train_X,self.Y_train,batch_size=self.batch_size,epochs=20,validation_data=(self.test_X,self.Y_test))
self.model.save_weights(modelname)
def splitset(self,train_word_num,train_label,train_size=0.9,random_state=1):
self.train_X,self.test_X,train_y,test_y = train_test_split(train_word_num,train_label,train_size=0.9,random_state=1)
print(np.shape(self.train_X))
self.Y_train = np_utils.to_categorical(train_y,self.nb_classes)
print(np.shape(self.Y_train))
self.Y_test = np_utils.to_categorical(test_y,self.nb_classes)
def predict_num(self,input_num,input_txt,label_dict='',num_dict=''):
#根据输入得到标注推断
input_num = np.array(input_num)
predict_prob = self.model.predict_proba(input_num,verbose=False)
predict_label = self.model.predict_classes(input_num,verbose=False)
for i,label in enumerate(predict_label[:-1]):
if i==0: #如果是首字,不可为E,M
predict_prob[i,label_dict['E']] = 0
predict_prob[i,label_dict['M']] = 0
if label == label_dict['B']: #前字为B,后字不可为B,S
predict_prob[i+1, label_dict['B']] = 0
predict_prob[i+1, label_dict['S']] = 0
if label == label_dict['E']: #前字为E,后字不可为M,E
predict_prob[i+1, label_dict['M']] = 0
predict_prob[i+1, label_dict['E']] = 0
if label == label_dict['M']: #前字为M,后字不可为B,S
predict_prob[i+1, label_dict['B']] = 0
predict_prob[i+1, label_dict['S']] = 0
if label == label_dict['S']: # 前字为S,后字不可为M,E
predict_prob[i + 1, label_dict['M']] = 0
predict_prob[i + 1, label_dict['E']] = 0
predict_label[i+1] = predict_prob[i+1].argmax()
predict_label_new = [num_dict[x] for x in predict_label]
result = [w+'/'+label for w,label in zip(input_txt,predict_label_new)]
return ' '.join(result)+'\n'
    def getweights(self,wfname):
        """Load model weights from the file ``wfname``.

        NOTE(review): despite the name, this *loads* weights into the model
        (``load_weights`` returns None in most Keras versions); it does not
        return or fetch weight values.
        """
        return self.model.load_weights(wfname)
|
[
"sunlu900326@yeah.net"
] |
sunlu900326@yeah.net
|
9a8f98740162c5d7d7746170ae5aac8824d90bb8
|
930c207e245c320b108e9699bbbb036260a36d6a
|
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Average_Supply_Air_Flow.py
|
9859e698689ef384cb9feebcee0e03310d17392d
|
[] |
no_license
|
InnovationSE/BRICK-Generated-By-OLGA
|
24d278f543471e1ce622f5f45d9e305790181fff
|
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
|
refs/heads/master
| 2021-07-01T14:13:11.302860
| 2017-09-21T12:44:17
| 2017-09-21T12:44:17
| 104,251,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Supply_Air_Flow import Supply_Air_Flow
class Average_Supply_Air_Flow(Supply_Air_Flow):
    """RDFAlchemy mapping for the Brick 1.0.2 ``Average_Supply_Air_Flow`` class.

    Auto-generated: inherits all behavior from ``Supply_Air_Flow`` and only
    overrides ``rdf_type`` with the Brick IRI for this point type.
    """
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Average_Supply_Air_Flow
|
[
"Andre.Ponnouradjane@non.schneider-electric.com"
] |
Andre.Ponnouradjane@non.schneider-electric.com
|
9f22af2e0e7505b1cc7333dc94157e766abb8b25
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit959.py
|
fdc98c144976bd8c7ca16f4120e60feff1cf388a
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,967
|
py
|
# qubit number=5
# total number=41
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Construct the phase oracle O_f^± for the boolean function ``f``.

    For every n-bit assignment on which ``f`` returns "1", the qubits that
    are 0 in that assignment are X-flipped, a multi-controlled phase flip
    (mcu1 with lambda = pi, i.e. a CZ generalization) is applied, and the
    flips are undone — so exactly the marked basis states pick up a -1 phase.

    Args:
        n: number of qubits in the oracle register.
        f: callable mapping an n-character bit string to "0" or "1".

    Returns:
        A QuantumCircuit named "Zf" acting on an n-qubit register.
    """
    qr = QuantumRegister(n, "ofc")
    circ = QuantumCircuit(qr, name="Zf")
    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) != "1":
            continue
        # Positions holding '0' are flipped so the controls all read |1>.
        zero_positions = [idx for idx in range(n) if bits[idx] == "0"]
        for idx in zero_positions:
            circ.x(qr[idx])
        # Multi-controlled phase of pi == multi-controlled Z (needs >= 2 qubits).
        if n >= 2:
            circ.mcu1(pi, qr[1:], qr[0])
        # Uncompute the basis change.
        for idx in zero_positions:
            circ.x(qr[idx])
    return circ
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the auto-generated benchmark circuit.

    Structure: initial Hadamard layer, one application of the Zf oracle,
    a fixed sequence of extra gates (the ``# number=k`` tags are the
    generator's bookkeeping, not qubit indices), a multi-controlled phase
    flip, X/H diffusion-style layers, and final measurement of all qubits.

    Args:
        n: number of qubits.
        f: boolean oracle function on n-bit strings, passed to build_oracle.

    Returns:
        A QuantumCircuit measuring all n qubits into n classical bits.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    prog.x(input_qubit[2]) # number=26
    Zf = build_oracle(n, f)

    # NOTE(review): `repeat` (the canonical Grover iteration count) is
    # computed but never used — the loop below always runs exactly once.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(1):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])

        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.y(input_qubit[3]) # number=25
        prog.x(input_qubit[0]) # number=9
        prog.h(input_qubit[1]) # number=32
        prog.cz(input_qubit[0],input_qubit[1]) # number=33
        prog.h(input_qubit[1]) # number=34
        prog.h(input_qubit[1]) # number=38
        prog.cz(input_qubit[0],input_qubit[1]) # number=39
        prog.h(input_qubit[1]) # number=40
        prog.x(input_qubit[1]) # number=36
        prog.cx(input_qubit[0],input_qubit[1]) # number=37
        prog.cx(input_qubit[0],input_qubit[1]) # number=30
        prog.cx(input_qubit[0],input_qubit[2]) # number=22
        prog.x(input_qubit[2]) # number=23
        prog.y(input_qubit[3]) # number=27
        prog.cx(input_qubit[0],input_qubit[2]) # number=24
        prog.x(input_qubit[3]) # number=12

        prog.cx(input_qubit[1],input_qubit[2]) # number=31
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])

        prog.x(input_qubit[0]) # number=13
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16

        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20

    prog.h(input_qubit[0])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.h(input_qubit[3])
    # circuit end

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Oracle marks the single bitstring "00000".
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Sample the ideal circuit on the local qasm simulator.
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a fake 5-qubit device to measure post-optimization depth.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Write the counts, the transpiled depth, and the circuit drawing to CSV.
    writefile = open("../data/startQiskit959.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
9ac8fbd2093b050ddc4cfc599c8ec66a84de265d
|
35fdd5b42b47a1dbe6a25f6fc1865f4e48b842a5
|
/evalml/data_checks/class_imbalance_data_check.py
|
9eedc3f25b4b5d9b027a72bcabf93ac8fc2b90fa
|
[
"BSD-3-Clause"
] |
permissive
|
skvorekn/evalml
|
41e5426f9f7d5ad625c21b74336009894c79c7de
|
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
|
refs/heads/main
| 2023-03-27T01:42:07.691406
| 2021-03-19T18:53:43
| 2021-03-19T18:53:43
| 349,555,689
| 0
| 0
|
BSD-3-Clause
| 2021-03-21T14:57:01
| 2021-03-19T21:08:12
| null |
UTF-8
|
Python
| false
| false
| 7,631
|
py
|
from evalml.data_checks import (
DataCheck,
DataCheckError,
DataCheckMessageCode,
DataCheckWarning
)
from evalml.utils import _convert_woodwork_types_wrapper, infer_feature_types
class ClassImbalanceDataCheck(DataCheck):
    """Checks if any target labels are imbalanced beyond a threshold. Use for classification problems"""

    def __init__(self, threshold=0.1, min_samples=100, num_cv_folds=3):
        """Check if any of the target labels are imbalanced, or if the number of values for each target
        are below 2 times the number of cv folds

        Arguments:
            threshold (float): The minimum threshold allowed for class imbalance before a warning is raised.
                This threshold is calculated by comparing the number of samples in each class to the sum of samples in that class and the majority class.
                For example, a multiclass case with [900, 900, 100] samples per classes 0, 1, and 2, respectively,
                would have a 0.10 threshold for class 2 (100 / (900 + 100)). Defaults to 0.10.
            min_samples (int): The minimum number of samples per accepted class. If the minority class is both below the threshold and min_samples,
                then we consider this severely imbalanced. Must be greater than 0. Defaults to 100.
            num_cv_folds (int): The number of cross-validation folds. Must be positive. Choose 0 to ignore this warning.
        """
        if threshold <= 0 or threshold > 0.5:
            raise ValueError("Provided threshold {} is not within the range (0, 0.5]".format(threshold))
        self.threshold = threshold
        if min_samples <= 0:
            raise ValueError("Provided value min_samples {} is not greater than 0".format(min_samples))
        self.min_samples = min_samples
        if num_cv_folds < 0:
            raise ValueError("Provided number of CV folds {} is less than 0".format(num_cv_folds))
        # Every class must be able to appear in each train AND validation
        # split, hence the 2x multiplier on the fold count.
        self.cv_folds = num_cv_folds * 2

    def validate(self, X, y):
        """Checks if any target labels are imbalanced beyond a threshold for binary and multiclass problems
        Ignores NaN values in target labels if they appear.

        Arguments:
            X (ww.DataTable, pd.DataFrame, np.ndarray): Features. Ignored.
            y (ww.DataColumn, pd.Series, np.ndarray): Target labels to check for imbalanced data.

        Returns:
            dict: Dictionary with DataCheckWarnings if imbalance in classes is less than the threshold,
                and DataCheckErrors if the number of values for each target is below 2 * num_cv_folds.

        Example:
            >>> import pandas as pd
            >>> X = pd.DataFrame()
            >>> y = pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
            >>> target_check = ClassImbalanceDataCheck(threshold=0.10)
            >>> assert target_check.validate(X, y) == {"errors": [{"message": "The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: [0]",\
                                                                   "data_check_name": "ClassImbalanceDataCheck",\
                                                                   "level": "error",\
                                                                   "code": "CLASS_IMBALANCE_BELOW_FOLDS",\
                                                                   "details": {"target_values": [0]}}],\
                                                     "warnings": [{"message": "The following labels fall below 10% of the target: [0]",\
                                                                   "data_check_name": "ClassImbalanceDataCheck",\
                                                                   "level": "warning",\
                                                                   "code": "CLASS_IMBALANCE_BELOW_THRESHOLD",\
                                                                   "details": {"target_values": [0]}},\
                                                                  {"message": "The following labels in the target have severe class imbalance because they fall under 10% of the target and have less than 100 samples: [0]",\
                                                                   "data_check_name": "ClassImbalanceDataCheck",\
                                                                   "level": "warning",\
                                                                   "code": "CLASS_IMBALANCE_SEVERE",\
                                                                   "details": {"target_values": [0]}}],\
                                                     "actions": []}
        """
        results = {
            "warnings": [],
            "errors": [],
            "actions": []
        }

        y = infer_feature_types(y)
        y = _convert_woodwork_types_wrapper(y.to_series())

        # value_counts sorts descending, so position 0 is the majority class;
        # NaN labels are dropped by value_counts automatically.
        fold_counts = y.value_counts(normalize=False, sort=True)
        if len(fold_counts) == 0:
            return results
        # search for targets that occur less than twice the number of cv folds first
        below_threshold_folds = fold_counts.where(fold_counts < self.cv_folds).dropna()
        if len(below_threshold_folds):
            below_threshold_values = below_threshold_folds.index.tolist()
            error_msg = "The number of instances of these targets is less than 2 * the number of cross folds = {} instances: {}"
            DataCheck._add_message(DataCheckError(message=error_msg.format(self.cv_folds, below_threshold_values),
                                                  data_check_name=self.name,
                                                  message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
                                                  details={"target_values": below_threshold_values}), results)
        # Ratio of each class's count to (its count + the majority count);
        # the majority class itself gets ratio 0.5, never below the threshold.
        counts = fold_counts / (fold_counts + fold_counts.values[0])
        below_threshold = counts.where(counts < self.threshold).dropna()
        # if there are items that occur less than the threshold, add them to the list of results
        if len(below_threshold):
            below_threshold_values = below_threshold.index.tolist()
            warning_msg = "The following labels fall below {:.0f}% of the target: {}"
            DataCheck._add_message(DataCheckWarning(message=warning_msg.format(self.threshold * 100, below_threshold_values),
                                                    data_check_name=self.name,
                                                    message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
                                                    details={"target_values": below_threshold_values}), results)
        # "Severe" = below the ratio threshold AND fewer than min_samples rows.
        # `v in below_threshold` tests membership in the Series *index*,
        # which holds the label values.
        sample_counts = fold_counts.where(fold_counts < self.min_samples).dropna()
        if len(below_threshold) and len(sample_counts):
            sample_count_values = sample_counts.index.tolist()
            severe_imbalance = [v for v in sample_count_values if v in below_threshold]
            warning_msg = "The following labels in the target have severe class imbalance because they fall under {:.0f}% of the target and have less than {} samples: {}"
            DataCheck._add_message(DataCheckWarning(message=warning_msg.format(self.threshold * 100, self.min_samples, severe_imbalance),
                                                    data_check_name=self.name,
                                                    message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
                                                    details={"target_values": severe_imbalance}), results)
        return results
|
[
"noreply@github.com"
] |
skvorekn.noreply@github.com
|
5bcd1a408337e34fde01241f6fa33a12fd231a0c
|
da497ddf926b8791f3812c79543120215822216b
|
/icsbep/pu-sol-therm-007/openmc/case-6/generate_materials.py
|
9bf69b784be27e86c3ab8ef8378f2f525260bcdb
|
[] |
no_license
|
mit-crpg/benchmarks
|
55f38e569699554d07df254103e2f828dc5b4ff8
|
58e15679ec684b9e2f552df58099e3648b5708cc
|
refs/heads/master
| 2022-05-17T12:27:45.590757
| 2022-05-09T15:07:00
| 2022-05-09T15:07:00
| 2,704,358
| 23
| 30
| null | 2019-11-11T16:35:27
| 2011-11-03T19:04:29
|
Python
|
UTF-8
|
Python
| false
| false
| 926
|
py
|
import openmc


def _make_material(mat_id, name, nuclides=(), elements=(), thermal=None):
    """Build one openmc.Material: set name/density, then add the given
    (nuclide, atom-density) and (element, atom-density) pairs in order,
    plus an optional S(a,b) thermal scattering table."""
    material = openmc.Material(mat_id)
    material.name = name
    material.set_density('sum')
    for nuclide, density in nuclides:
        material.add_nuclide(nuclide, density)
    for element, density in elements:
        material.add_element(element, density)
    if thermal is not None:
        material.add_s_alpha_beta(thermal)
    return material


mats = openmc.Materials()

# Fissile solution (atom densities in atoms/b-cm).
mats.append(_make_material(
    1, "Plutonium nitrate solution",
    nuclides=[
        ('Pu238', 1.5406e-08),
        ('Pu239', 2.4294e-04),
        ('Pu240', 1.1886e-05),
        ('Pu241', 7.7338e-07),
        ('Pu242', 2.2727e-08),
        ('N14', 1.2577e-03),
        ('H1', 6.3655e-02),
        ('O16', 3.5483e-02),
    ],
    thermal='c_H_in_H2O'))

# Tank / vessel structural material.
mats.append(_make_material(
    2, "304L stainless steel",
    elements=[
        ('Fe', 5.9355e-02),
        ('Cr', 1.7428e-02),
        ('Ni', 7.7203e-03),
        ('Mn', 1.7363e-03),
    ]))

# Reflector water.
mats.append(_make_material(
    3, "Water at 25 C",
    nuclides=[
        ('H1', 6.6655e-02),
        ('O16', 3.3327e-02),
    ],
    thermal='c_H_in_H2O'))

mats.export_to_xml()
|
[
"paul.k.romano@gmail.com"
] |
paul.k.romano@gmail.com
|
f7bdc9446ef34ddd93a77d2b6caff30f2bd83d5c
|
26536ad8f07242ea5411a02117adc80462cc1173
|
/ssseg/modules/models/nonlocalnet/nonlocalnet.py
|
6c52ff731e0d8a7bb1f2e4ed9300222c35906f67
|
[
"MIT"
] |
permissive
|
yawudede/sssegmentation
|
451b34c7e383b61d74b483c3048c0ed760821956
|
b7fb5bd955a59cda0cfa20ac0c51aea67bfe0e30
|
refs/heads/main
| 2023-01-30T20:28:10.976883
| 2020-12-16T08:45:49
| 2020-12-16T08:45:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,243
|
py
|
'''
Function:
Implementation of NonLocalNet
Author:
Zhenchao Jin
'''
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...backbones import *
from ..base import BaseModel
from .nonlocalblock import NonLocal2d
'''NonLocalNet'''
class NonLocalNet(BaseModel):
    """Segmentation model that inserts a Non-Local (self-attention) context
    block between the backbone's last stage and the decoder; an auxiliary
    decoder supervises the third backbone stage during training."""
    def __init__(self, cfg, **kwargs):
        """Build all sub-modules from the config dict ``cfg`` (expects keys
        'nonlocal', 'decoder', 'auxiliary', 'num_classes', and optionally
        'is_freeze_normlayer')."""
        super(NonLocalNet, self).__init__(cfg, **kwargs)
        align_corners, norm_cfg, act_cfg = self.align_corners, self.norm_cfg, self.act_cfg
        # build non-local block: 3x3 conv projection, the attention block
        # itself, then another 3x3 conv
        nl_cfg = cfg['nonlocal']
        self.conv_before_nl = nn.Sequential(
            nn.Conv2d(nl_cfg['in_channels'], nl_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),
            BuildNormalizationLayer(norm_cfg['type'], (nl_cfg['out_channels'], norm_cfg['opts'])),
            BuildActivation(act_cfg['type'], **act_cfg['opts']),
        )
        self.nl_block = NonLocal2d(
            in_channels=nl_cfg['out_channels'],
            reduction=nl_cfg['reduction'],
            use_scale=nl_cfg['use_scale'],
            mode=nl_cfg['mode'],
            norm_cfg=copy.deepcopy(norm_cfg),
            act_cfg=copy.deepcopy(act_cfg),
        )
        self.conv_after_nl = nn.Sequential(
            nn.Conv2d(nl_cfg['out_channels'], nl_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),
            BuildNormalizationLayer(norm_cfg['type'], (nl_cfg['out_channels'], norm_cfg['opts'])),
            BuildActivation(act_cfg['type'], **act_cfg['opts']),
        )
        # build decoder: consumes backbone features concatenated with the
        # non-local output (see forward), ends in a 1x1 classifier conv
        decoder_cfg = cfg['decoder']
        self.decoder = nn.Sequential(
            nn.Conv2d(decoder_cfg['in_channels'], decoder_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),
            BuildNormalizationLayer(norm_cfg['type'], (decoder_cfg['out_channels'], norm_cfg['opts'])),
            BuildActivation(act_cfg['type'], **act_cfg['opts']),
            nn.Dropout2d(decoder_cfg['dropout']),
            nn.Conv2d(decoder_cfg['out_channels'], cfg['num_classes'], kernel_size=1, stride=1, padding=0)
        )
        # build auxiliary decoder (train-time deep supervision on stage 3)
        auxiliary_cfg = cfg['auxiliary']
        self.auxiliary_decoder = nn.Sequential(
            nn.Conv2d(auxiliary_cfg['in_channels'], auxiliary_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),
            BuildNormalizationLayer(norm_cfg['type'], (auxiliary_cfg['out_channels'], norm_cfg['opts'])),
            BuildActivation(act_cfg['type'], **act_cfg['opts']),
            nn.Dropout2d(auxiliary_cfg['dropout']),
            nn.Conv2d(auxiliary_cfg['out_channels'], cfg['num_classes'], kernel_size=1, stride=1, padding=0)
        )
        # freeze normalization layer if necessary
        if cfg.get('is_freeze_normlayer', False): self.freezenormlayer()
    '''forward'''
    def forward(self, x, targets=None, losses_cfg=None):
        """Run the network.

        In TRAIN mode, upsamples both heads' logits to the input size and
        returns the loss dict from ``calculatelosses``; otherwise returns the
        main head's logits at feature resolution (not upsampled).
        """
        h, w = x.size(2), x.size(3)
        # feed to backbone network
        x1, x2, x3, x4 = self.backbone_net(x)
        # feed to non-local block
        feats = self.conv_before_nl(x4)
        feats = self.nl_block(feats)
        feats = self.conv_after_nl(feats)
        # feed to decoder (skip-style concat of raw stage-4 features with
        # the attention-refined features along the channel dim)
        feats = torch.cat([x4, feats], dim=1)
        preds = self.decoder(feats)
        # feed to auxiliary decoder and return according to the mode
        if self.mode == 'TRAIN':
            preds = F.interpolate(preds, size=(h, w), mode='bilinear', align_corners=self.align_corners)
            preds_aux = self.auxiliary_decoder(x3)
            preds_aux = F.interpolate(preds_aux, size=(h, w), mode='bilinear', align_corners=self.align_corners)
            return self.calculatelosses(
                predictions={'loss_cls': preds, 'loss_aux': preds_aux}, 
                targets=targets, 
                losses_cfg=losses_cfg
            )
        return preds
    '''return all layers'''
    def alllayers(self):
        """Expose named sub-modules (e.g. for per-layer learning rates)."""
        return {
            'backbone_net': self.backbone_net,
            'conv_before_nl': self.conv_before_nl,
            'nl_block': self.nl_block,
            'conv_after_nl': self.conv_after_nl,
            'decoder': self.decoder,
            'auxiliary_decoder': self.auxiliary_decoder
        }
|
[
"1159254961@qq.com"
] |
1159254961@qq.com
|
94a022a9f21e7f396b42b3a6c186a2c6f0e4cf76
|
4015291afebfd346da3fee4b1d5a775882b5b461
|
/packages/models-library/src/models_library/services_ui.py
|
2933c09a2120995bb48d1c780f35c5ca452fbded
|
[
"MIT"
] |
permissive
|
pcrespov/osparc-simcore
|
3a8a6b5252038542f515c7e90d983ac6f1fb4de7
|
eb5e00bc2cf4acfe81f5dc422a5e50a4646c9596
|
refs/heads/master
| 2023-08-06T04:33:38.594066
| 2023-07-12T09:47:00
| 2023-07-12T09:47:00
| 130,357,545
| 0
| 1
|
MIT
| 2023-04-18T08:04:27
| 2018-04-20T12:10:41
|
Python
|
UTF-8
|
Python
| false
| false
| 895
|
py
|
from enum import Enum
from typing import Union
from pydantic import BaseModel, Extra, Field
from pydantic.types import PositiveInt
class WidgetType(str, Enum):
    """Kinds of UI widgets a service input property can request."""
    TextArea = "TextArea"
    SelectBox = "SelectBox"
class TextArea(BaseModel):
    """Settings for a multi-line text-input widget."""
    min_height: PositiveInt = Field(
        ..., alias="minHeight", description="minimum Height of the textarea"
    )

    class Config:
        # Reject unknown fields so malformed widget specs fail validation.
        extra = Extra.forbid
class Structure(BaseModel):
    """One selectable option: the stored value (`key`) plus its display label."""
    key: Union[str, bool, float]
    label: str

    class Config:
        # Reject unknown fields so malformed widget specs fail validation.
        extra = Extra.forbid
class SelectBox(BaseModel):
    """Settings for a drop-down widget; requires at least one option."""
    structure: list[Structure] = Field(..., min_items=1)

    class Config:
        # Reject unknown fields so malformed widget specs fail validation.
        extra = Extra.forbid
class Widget(BaseModel):
    """Widget requested for a service input: its type plus type-specific details."""
    widget_type: WidgetType = Field(
        ..., alias="type", description="type of the property"
    )
    # NOTE(review): pydantic v1 picks the first Union member that validates;
    # `details` is presumably expected to match `widget_type` — not enforced here.
    details: Union[TextArea, SelectBox]

    class Config:
        # Reject unknown fields so malformed widget specs fail validation.
        extra = Extra.forbid
|
[
"noreply@github.com"
] |
pcrespov.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.