blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M โ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 โ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 โ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c6ef5ab0efd64646da6bb20b1a0a4dbe7ed9d52 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03427/s581758715.py | e0f6f58c743a818c6bcf68751f6112a9ffbc4167 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | N = input().strip()
# N is read as a string on the (fused) line above; compare the digit sum
# of N itself against the candidate built from (leading digit - 1)
# followed by all nines, and keep the larger of the two in cmax.
k = len(N)
cmax = sum(int(digit) for digit in N)
if k > 1:
    # e.g. N = "325" -> candidate is 2 + 9 + 9
    candidate = (int(N[0]) - 1) + 9 * (k - 1)
    cmax = max(cmax, candidate)
print(cmax) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
024d7c9d22a9d10759f9603ccacb53f3aac364fd | 0104add04cd6da515e2ccb2c27e44bc6693f9bcf | /Yurii_Khomych/l_1_functions/currying_2.py | c3cd7ebafac18d91b79596981f4c1728f815323c | [] | no_license | YuriiKhomych/ITEA-advanced | c96c3cf9b279caf62fefcd41faf543cee7534626 | 90bc47733c07b5b866aa3a14aa12a169f5df289c | refs/heads/master | 2022-12-09T20:38:23.607426 | 2019-12-22T17:30:59 | 2019-12-22T17:30:59 | 209,354,034 | 0 | 9 | null | 2022-12-08T03:04:04 | 2019-09-18T16:23:12 | Python | UTF-8 | Python | false | false | 854 | py |
def change(func_1, func_2, func_3):
    """Compose three one-argument functions.

    The returned callable applies func_3 first, then func_2, then
    func_1 — i.e. it behaves like ``func_1(func_2(func_3(arg)))``.
    """
    def composed(arg):
        intermediate = func_3(arg)
        intermediate = func_2(intermediate)
        return func_1(intermediate)
    return composed
def kilometer2meter(dist):
    """Convert a distance from kilometres to metres."""
    meters_per_kilometer = 1000
    return dist * meters_per_kilometer
def meter2centimeter(dist):
    """Convert a distance from metres to centimetres."""
    centimeters_per_meter = 100
    return dist * centimeters_per_meter
def centimeter2feet(dist):
    """Convert a distance from centimetres to feet (1 ft = 30.48 cm)."""
    centimeters_per_foot = 30.48
    return dist / centimeters_per_foot
# Convert 565 km to feet three equivalent ways.
# Way 1: explicit intermediate variables, one conversion per step.
kilometer2meter_result = kilometer2meter(565)
meter2centimeter_result = meter2centimeter(kilometer2meter_result)
centimeter2feet_result = centimeter2feet(meter2centimeter_result)
# Way 2: the same pipeline written as one nested call.
centimeter2feet_result = centimeter2feet(meter2centimeter(kilometer2meter(565)))
# Way 3: build the pipeline once with change() and reuse it.
transform = change(centimeter2feet, meter2centimeter, kilometer2meter)
e = transform(565)
print(e)
# Compose-and-apply in a single expression.
result = change(centimeter2feet, meter2centimeter, kilometer2meter)(565)
| [
"yuriykhomich@gmail.com"
] | yuriykhomich@gmail.com |
f2061a6cc57c32db6bff0cdc2dfda4b0d2a2a292 | 5d027f4d32fc503212a824355ef45295e6df90b5 | /Homework/HW-Scheduler/HW-Scheduler/scheduler.py | 59e134b93c656a04b20b187861469354df0c6331 | [] | no_license | M1c17/OP_three_easy_steps | c99fa0a9214e42e8b44df14e84125c034b9cb1f3 | ea6190c55358d027169e7911bebd7aa6f2b56dff | refs/heads/master | 2022-12-18T22:25:52.055978 | 2020-09-15T03:10:19 | 2020-09-15T03:10:19 | 295,588,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,799 | py | #! /usr/bin/env python
import sys
from optparse import OptionParser
import random
# Command-line options: RNG seed, the size/length of a randomly generated
# workload, an explicit comma-separated job list, the scheduling policy,
# the RR time slice, and -c to print the computed answers instead of
# leaving them as an exercise.
parser = OptionParser()
parser.add_option("-s", "--seed", default=0, help="the random seed",
                  action="store", type="int", dest="seed")
parser.add_option("-j", "--jobs", default=3, help="number of jobs in the system",
                  action="store", type="int", dest="jobs")
parser.add_option("-l", "--jlist", default="", help="instead of random jobs, provide a comma-separated list of run times",
                  action="store", type="string", dest="jlist")
parser.add_option("-m", "--maxlen", default=10, help="max length of job",
                  action="store", type="int", dest="maxlen")
parser.add_option("-p", "--policy", default="FIFO", help="sched policy to use: SJF, FIFO, RR",
                  action="store", type="string", dest="policy")
parser.add_option("-q", "--quantum", help="length of time slice for RR policy", default=1,
                  action="store", type="int", dest="quantum")
parser.add_option("-c", help="compute answers for me", action="store_true", default=False, dest="solve")
(options, args) = parser.parse_args()
# Seed the RNG and echo the effective arguments so a run is reproducible.
random.seed(options.seed)
print('ARG policy', options.policy)
if options.jlist == '':
    # Randomly generated workload: report the generator parameters.
    print('ARG jobs', options.jobs)
    print('ARG maxlen', options.maxlen)
    print('ARG seed', options.seed)
else:
    # Explicit workload supplied with -l.
    print('ARG jlist', options.jlist)
print('')
print('Here is the job list, with the run time of each job: ')

import operator

# joblist holds [job number, run time] pairs, in arrival order.
joblist = []
if options.jlist == '':
    # Generate `jobs` random run times, each in [1, maxlen].
    for jobnum in range(0,options.jobs):
        runtime = int(options.maxlen * random.random()) + 1
        joblist.append([jobnum, runtime])
        print(' Job', jobnum, '( length = ' + str(runtime) + ' )')
else:
    # Parse the user-supplied comma-separated run-time list.
    jobnum = 0
    for runtime in options.jlist.split(','):
        joblist.append([jobnum, float(runtime)])
        jobnum += 1
    for job in joblist:
        print(' Job', job[0], '( length = ' + str(job[1]) + ' )')
print('\n')
if options.solve == True:
    print('** Solutions **\n')
    # SJF is implemented as FIFO over a runtime-sorted job list.
    if options.policy == 'SJF':
        joblist = sorted(joblist, key=operator.itemgetter(1))
        options.policy = 'FIFO'
    if options.policy == 'FIFO':
        # Non-preemptive: run each job to completion, in list order.
        thetime = 0
        print('Execution trace:')
        for job in joblist:
            print(' [ time %3d ] Run job %d for %.2f secs ( DONE at %.2f )' % (thetime, job[0], job[1], thetime + job[1]))
            thetime += job[1]
        # For a non-preemptive policy: response == wait == start time,
        # turnaround == start time + run time.
        print('\nFinal statistics:')
        t = 0.0
        count = 0
        turnaroundSum = 0.0
        waitSum = 0.0
        responseSum = 0.0
        for tmp in joblist:
            jobnum = tmp[0]
            runtime = tmp[1]
            response = t
            turnaround = t + runtime
            wait = t
            print(' Job %3d -- Response: %3.2f Turnaround %3.2f Wait %3.2f' % (jobnum, response, turnaround, wait))
            responseSum += response
            turnaroundSum += turnaround
            waitSum += wait
            t += runtime
            count = count + 1
        print('\n Average -- Response: %3.2f Turnaround %3.2f Wait %3.2f\n' % (responseSum/count, turnaroundSum/count, waitSum/count))
    if options.policy == 'RR':
        # Round robin: cycle through the ready queue, giving each job at
        # most one quantum per turn until all jobs finish.
        print('Execution trace:')
        turnaround = {}
        response = {}
        lastran = {}   # time each job last left the CPU (to accumulate wait)
        wait = {}
        quantum = float(options.quantum)
        jobcount = len(joblist)
        for i in range(0,jobcount):
            lastran[i] = 0.0
            wait[i] = 0.0
            turnaround[i] = 0.0
            response[i] = -1   # -1 marks "has not run yet"
        runlist = []
        for e in joblist:
            runlist.append(e)
        thetime = 0.0
        while jobcount > 0:
            # print '%d jobs remaining' % jobcount
            job = runlist.pop(0)
            jobnum = job[0]
            runtime = float(job[1])
            if response[jobnum] == -1:
                # First time this job gets the CPU.
                response[jobnum] = thetime
            currwait = thetime - lastran[jobnum]
            wait[jobnum] += currwait
            if runtime > quantum:
                # Needs more time: run one quantum, requeue the remainder.
                runtime -= quantum
                ranfor = quantum
                print(' [ time %3d ] Run job %3d for %.2f secs' % (thetime, jobnum, ranfor))
                runlist.append([jobnum, runtime])
            else:
                # The job finishes within this slice.
                ranfor = runtime;
                print(' [ time %3d ] Run job %3d for %.2f secs ( DONE at %.2f )' % (thetime, jobnum, ranfor, thetime + ranfor))
                turnaround[jobnum] = thetime + ranfor
                jobcount -= 1
            thetime += ranfor
            lastran[jobnum] = thetime
        print('\nFinal statistics:')
        turnaroundSum = 0.0
        waitSum = 0.0
        responseSum = 0.0
        for i in range(0,len(joblist)):
            turnaroundSum += turnaround[i]
            responseSum += response[i]
            waitSum += wait[i]
            print(' Job %3d -- Response: %3.2f Turnaround %3.2f Wait %3.2f' % (i, response[i], turnaround[i], wait[i]))
        count = len(joblist)
        print('\n Average -- Response: %3.2f Turnaround %3.2f Wait %3.2f\n' % (responseSum/count, turnaroundSum/count, waitSum/count))
    # Unknown policy: report the mistake and bail out.
    if options.policy != 'FIFO' and options.policy != 'SJF' and options.policy != 'RR':
        print('Error: Policy', options.policy, 'is not available.')
        sys.exit(0)
else:
    # Quiz mode (no -c): ask the reader to compute the numbers by hand.
    print('Compute the turnaround time, response time, and wait time for each job.')
    print('When you are done, run this program again, with the same arguments,')
    print('but with -c, which will thus provide you with the answers. You can use')
    print('-s <somenumber> or your own job list (-l 10,15,20 for example)')
    print('to generate different problems for yourself.')
    print('')
| [
"pictor117@gmail.com"
] | pictor117@gmail.com |
f940347b0a46dad50d1d229095586c5651621a8f | 397e125e94f4f139f2bf5055824d81f24b8b1757 | /ABC/138/E.py | 90a94533e38572141fe743f2f69457c94250fa59 | [] | no_license | tails1434/Atcoder | ecbab6ee238e3f225551297db961b1b502841fa4 | e7c7fed36be46bbaaf020a70997842240ba98d62 | refs/heads/master | 2021-07-07T00:31:49.235625 | 2020-09-30T01:42:01 | 2020-09-30T01:42:01 | 189,009,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | from collections import defaultdict
from bisect import bisect_right
def main(S=None, T=None):
    """Find the smallest 1-based index i such that T is a subsequence of
    the first i characters of S repeated infinitely.

    Prints and returns that index, or -1 if some character of T never
    occurs in S.  S and T default to being read from stdin, preserving
    the original script behaviour; passing them explicitly makes the
    solver reusable (and testable) as a library function — returning -1
    instead of calling exit() is what makes that possible.
    """
    if S is None:
        S = input()
    if T is None:
        T = input()
    # positions[c] = sorted (1-based) indices at which c occurs in S.
    positions = defaultdict(list)
    for i, ch in enumerate(S):
        positions[ch].append(i + 1)
    pre = 0          # position consumed so far in the current copy of S
    loop = 0         # number of whole copies of S already consumed
    len_S = len(S)
    for t in T:
        if not positions[t]:
            # t never appears in S, so T can never be matched.
            print(-1)
            return -1
        # First occurrence of t strictly after position `pre`.
        index = bisect_right(positions[t], pre)
        if index > len(positions[t]) - 1:
            # Nothing left in this copy of S: wrap to the next copy.
            pre = 0
            loop += 1
            index = bisect_right(positions[t], pre)
        pre = positions[t][index]
    ans = loop * len_S + pre
    print(ans)
    return ans


if __name__ == "__main__":
    main()
"sososo1333@gmail.com"
] | sososo1333@gmail.com |
dbd2baaba12c468d6326baad9fc89420ad6d9071 | 388556baa0c2ee53d8767ae8a4bce18c03124488 | /Chapter11/0017_difference_between_abstraction_encapsulation.py | 0a946d6f97028efc1ea5384b7d8c3e1eb976d848 | [] | no_license | 8563a236e65cede7b14220e65c70ad5718144a3/introduction-python-programming-solutions | 6e2e7c8cf8babc3c63f75d8d5e987f4dbc018269 | f21d70ae2062cc2d5d3a2fefce81a2a3b4ea3bfd | refs/heads/master | 2022-12-10T04:24:56.364629 | 2020-07-01T11:34:01 | 2020-07-01T11:34:01 | 294,878,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | """
Program 11.13
Demonstrate the Difference between Abstraction and Encapsulation
"""
class foo:
    """Tiny demo class: stores two operands and exposes their sum."""

    def __init__(self, a, b):
        """Remember the two addends on the instance."""
        self.b = b
        self.a = a

    def add(self):
        """Return the sum of the stored operands."""
        total = self.a + self.b
        return total
def main():
    """Build a foo(3, 4) instance and print the sum of its operands."""
    demo = foo(3, 4)
    print(demo.add())


if __name__ == "__main__":
    main()
| [
"warren.jitsing@gmail.com"
] | warren.jitsing@gmail.com |
9af8fb8a7ef155427b16305335e2b3c950d6b53b | 61050d0d7f0c0a60474e4e85d30be4e5ea7c6b04 | /content/components/dom-access/job.odb | 3c76a64ef3a81dd1ef07fb2572060e156f42bdc7 | [] | no_license | danse-inelastic/vnf | 8173f06f32b4a2fa2b71fddfe0fecf9c19e05e9a | be989448577f14f424aca4ce852c7198304ca57b | refs/heads/master | 2021-01-22T01:06:00.294100 | 2015-05-02T23:25:45 | 2015-05-02T23:25:45 | 34,947,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,779 | odb | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2009 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from vnf.components.DOMAccessor import DOMAccessor as base
class Accessor(base):
    """DOM accessor for job records.

    Wraps the vnf ORM/clerk machinery to fetch, label-filter, page and
    reset 'job' table records.  NOTE(review): relies on self.director,
    self.db and getRecordByID provided by the DOMAccessor base class —
    their exact contracts are not visible in this file.
    """

    def __init__(self):
        # Bind this accessor to the 'job' table name.
        super(Accessor, self).__init__('job')
        return

    def _getOrm(self):
        """Return the clerk's ORM, forcing computation's ORM init first."""
        orm = self.director.clerk.orm
        # job depends on computation, let us just reuse orm initialization from computation
        # (accessing .orm is done purely for its initialization side effect)
        self.director.retrieveDOMAccessor('computation').orm
        return orm
    orm = property(_getOrm)

    def getJobRecord(self, id):
        """Fetch the job row with the given id."""
        # make sure orm is initd
        orm = self.orm
        #
        return self.getRecordByID(JobTable, id)

    def getJobLabel(self, id):
        """Return a human-readable label, e.g. "Job 7(my run)"."""
        r = self.getJobRecord(id)
        t = 'Job %s' % id
        if r.short_description:
            t += '(%s)' % r.short_description
        return t

    def resetJob(self, id):
        "reset job to 'initial' status, which allows someone to resubmit the job"
        r = self.getJobRecord(id)
        db = self.db
        # itasks: mark every task that benefits from this job as failed
        from vnf.dom.ITask import ITask
        tasks = r.getReferences(db, ITask, 'beneficiary')
        for task in tasks:
            task.state = 'failed'
            db.updateRecord(task)
            continue
        # job itself
        r.state = 'submissionfailed'
        db.updateRecord(r)
        # also reset the computation so that the job retrieval looks like failed
        computation = r.computation.dereference(db)
        domaccess = self.director.retrieveDOMAccessor('computation')
        domaccess.resetResultRetrievalTask(
            type=computation.getTableName(),
            id=computation.id)
        return

    def countJobs(self, filter=None, label=None, mine=False):
        """Count the jobs matching the same criteria as getJobIDs."""
        q = self.makeQuery(filter=filter, label=label, mine=mine)
        return q.alias('tocount').count().execute().fetchone()[0]

    def getJobIDs(
        self,
        filter=None, order_by=None, reverse_order=None, slice=None,
        label=None, mine=False,
        ):
        """Return the ids of matching jobs, optionally ordered and paged.

        NOTE(review): `filter` and `slice` shadow Python builtins; kept
        for interface compatibility.  `slice` is a (start, stop) pair.
        """
        db = self.db
        q = self.makeQuery(filter=filter, label=label, mine=mine)
        if order_by:
            q = q.order_by(order_by)
        if slice:
            if reverse_order:
                # Page from the end: translate the window before slicing.
                n = self.countJobs(filter=filter, label=label, mine=mine)
                slice = n-slice[1], n-slice[0]
            q = sqlalchemy.select(
                [q.alias('toslice')],
                limit = slice[1]-slice[0],
                offset = slice[0])
        ret = q.execute().fetchall()
        if reverse_order:
            ret.reverse()
        return [i.id for i in ret]

    def getJobRecords(
        self,
        filter=None, order_by=None, reverse_order=None, slice=None,
        label=None, mine=False,
        ):
        """Like getJobIDs, but materialize each id into a full record."""
        ids = self.getJobIDs(
            filter=filter, order_by=order_by, reverse_order=reverse_order, slice=slice,
            label=label, mine=mine,
            )
        return map(self.getJobRecord, ids)

    def makeQuery(self, filter=None, label=None, mine=False):
        """Build the base job selection query.

        `label` and `filter` are mutually exclusive; `mine` restricts to
        jobs created by the current user, otherwise public-or-owned
        records are selected.
        """
        if label:
            if filter: raise RuntimeError
            return self.makeLabeledQuery(label, mine=mine)
        db = self.db
        st = db._tablemap.TableToSATable(JobTable)
        cols = [
            st.c.id.label('id'),
            st.c.short_description.label('short_description'),
            st.c.state.label('state'),
            st.c.time_start.label('time_start'),
            st.c.creator,
            st.c.globalpointer,
            ]
        username = self.director.sentry.username
        if mine:
            where = st.c.creator == username
            q = sqlalchemy.select(cols, where)
        else:
            from vnf.utils.query.accesscontrol import select_public_or_owned_records
            q = select_public_or_owned_records(cols, st, username, db)
        if filter:
            # Wrap the base query so the extra filter applies to its output.
            q = sqlalchemy.select([q.alias('jobs')], whereclause=filter)
        return q

    def makeLabeledQuery(self, label, mine=False):
        """Build a job query joined against the label table.

        Common labels match by name only; other labels must also target
        the 'job' table.  The join condition compares the job's global
        pointer with the label's entity reference.
        """
        mastertablename = 'job'
        db = self.db
        sL = db._tablemap.TableToSATable(Label)
        if label in common_labels:
            whereclause="labelname='%s'" % (label,)
        else:
            whereclause="labelname='%s' and targettable='%s'" % (
                label, mastertablename)
        labelq = sqlalchemy.select(
            [sL.c.entity.label('entity'),
             sL.c.labelname.label('label'),
             ],
            whereclause=whereclause,
            ).alias('labelq')
        st = db._tablemap.TableToSATable(JobTable)
        cols = [
            st.c.id.label('id'),
            st.c.short_description.label('short_description'),
            st.c.state.label('state'),
            st.c.time_start.label('time_start'),
            labelq.c.entity.label('gptr'),
            ]
        # where = st.c.globalpointer==labelq.c.entity
        where = 'globalpointer=labelq.entity'
        if mine:
            # Further restrict to the current user's jobs.
            username = self.director.sentry.username
            mine = "creator='%s'" % username
            where = '%s and %s' % (where, mine)
        q = sqlalchemy.select(cols, whereclause = where)
        return q
from vnf.dom.Label import Label, common_labels
from vnf.dom.Job import Job as JobTable
from dsaw.db.VersatileReference import global_pointer
import sqlalchemy
def accessor():
    """Factory used by the framework to obtain a job DOM accessor."""
    return Accessor()
# version
__id__ = "$Id$"
# End of file
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
8885c933ea131e903feb51fe6f4dbc4537a88af7 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/qos_send_receive_info.py | 7b4a4548e2c3a10bc21c875febcbe1d7b61af84f | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,634 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class QosSendReceiveInfo:
    """Generated huaweicloud SDK model holding QoS metric series
    (bitrate, latency, jitter, packet loss, resolution, frame rate).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names that must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'bitrate': 'list[QosDataNoThrElement]',
        'latency': 'list[QosDataElement]',
        'jitter': 'list[QosDataElement]',
        'packet_loss_max': 'list[QosDataElement]',
        'resolution': 'list[QosDataNoThrElement]',
        'frame': 'list[QosDataNoThrElement]'
    }

    attribute_map = {
        'bitrate': 'bitrate',
        'latency': 'latency',
        'jitter': 'jitter',
        'packet_loss_max': 'packet_loss_max',
        'resolution': 'resolution',
        'frame': 'frame'
    }

    def __init__(self, bitrate=None, latency=None, jitter=None, packet_loss_max=None, resolution=None, frame=None):
        """QosSendReceiveInfo - a model defined in huaweicloud sdk"""
        self._bitrate = None
        self._latency = None
        self._jitter = None
        self._packet_loss_max = None
        self._resolution = None
        self._frame = None
        self.discriminator = None

        # Only assign attributes explicitly provided by the caller.
        if bitrate is not None:
            self.bitrate = bitrate
        if latency is not None:
            self.latency = latency
        if jitter is not None:
            self.jitter = jitter
        if packet_loss_max is not None:
            self.packet_loss_max = packet_loss_max
        if resolution is not None:
            self.resolution = resolution
        if frame is not None:
            self.frame = frame

    @property
    def bitrate(self):
        """Gets the bitrate of this QosSendReceiveInfo.

        Bitrate in kbps; carries no threshold alarms. Valid when qosType = audio/video/screen.

        :return: The bitrate of this QosSendReceiveInfo.
        :rtype: list[QosDataNoThrElement]
        """
        return self._bitrate

    @bitrate.setter
    def bitrate(self, bitrate):
        """Sets the bitrate of this QosSendReceiveInfo.

        Bitrate in kbps; carries no threshold alarms. Valid when qosType = audio/video/screen.

        :param bitrate: The bitrate of this QosSendReceiveInfo.
        :type: list[QosDataNoThrElement]
        """
        self._bitrate = bitrate

    @property
    def latency(self):
        """Gets the latency of this QosSendReceiveInfo.

        Latency in milliseconds; includes threshold alarms. Valid when qosType = audio/video/screen.

        :return: The latency of this QosSendReceiveInfo.
        :rtype: list[QosDataElement]
        """
        return self._latency

    @latency.setter
    def latency(self, latency):
        """Sets the latency of this QosSendReceiveInfo.

        Latency in milliseconds; includes threshold alarms. Valid when qosType = audio/video/screen.

        :param latency: The latency of this QosSendReceiveInfo.
        :type: list[QosDataElement]
        """
        self._latency = latency

    @property
    def jitter(self):
        """Gets the jitter of this QosSendReceiveInfo.

        Jitter in milliseconds; includes threshold alarms. Valid when qosType = audio/video/screen.

        :return: The jitter of this QosSendReceiveInfo.
        :rtype: list[QosDataElement]
        """
        return self._jitter

    @jitter.setter
    def jitter(self, jitter):
        """Sets the jitter of this QosSendReceiveInfo.

        Jitter in milliseconds; includes threshold alarms. Valid when qosType = audio/video/screen.

        :param jitter: The jitter of this QosSendReceiveInfo.
        :type: list[QosDataElement]
        """
        self._jitter = jitter

    @property
    def packet_loss_max(self):
        """Gets the packet_loss_max of this QosSendReceiveInfo.

        Maximum packet loss rate, in percent; includes threshold alarms. Valid when qosType = audio/video/screen.

        :return: The packet_loss_max of this QosSendReceiveInfo.
        :rtype: list[QosDataElement]
        """
        return self._packet_loss_max

    @packet_loss_max.setter
    def packet_loss_max(self, packet_loss_max):
        """Sets the packet_loss_max of this QosSendReceiveInfo.

        Maximum packet loss rate, in percent; includes threshold alarms. Valid when qosType = audio/video/screen.

        :param packet_loss_max: The packet_loss_max of this QosSendReceiveInfo.
        :type: list[QosDataElement]
        """
        self._packet_loss_max = packet_loss_max

    @property
    def resolution(self):
        """Gets the resolution of this QosSendReceiveInfo.

        Resolution; carries no threshold alarms. Valid when qosType = video/screen.

        :return: The resolution of this QosSendReceiveInfo.
        :rtype: list[QosDataNoThrElement]
        """
        return self._resolution

    @resolution.setter
    def resolution(self, resolution):
        """Sets the resolution of this QosSendReceiveInfo.

        Resolution; carries no threshold alarms. Valid when qosType = video/screen.

        :param resolution: The resolution of this QosSendReceiveInfo.
        :type: list[QosDataNoThrElement]
        """
        self._resolution = resolution

    @property
    def frame(self):
        """Gets the frame of this QosSendReceiveInfo.

        Frame rate in fps; carries no threshold alarms. Valid when qosType = video/screen.

        :return: The frame of this QosSendReceiveInfo.
        :rtype: list[QosDataNoThrElement]
        """
        return self._frame

    @frame.setter
    def frame(self, frame):
        """Sets the frame of this QosSendReceiveInfo.

        Frame rate in fps; carries no threshold alarms. Valid when qosType = video/screen.

        :param frame: The frame of this QosSendReceiveInfo.
        :type: list[QosDataNoThrElement]
        """
        self._frame = frame

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively converting models,
        # lists and dicts; mask values listed in sensitive_list.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 so non-ASCII fields serialize.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, QosSendReceiveInfo):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
db126458e075c35c3a9606d6dc1f54e18b85536e | d4442db5a7ab9db2b04fef640a9864f3fba54758 | /src/python/WMCore/RequestManager/RequestDB/Oracle/Group/GetGroupFromAssoc.py | 3549961c0b5eea9816bb43e00f98608494e976f0 | [] | no_license | stuartw/WMCore | fa25ff19ab5058a635d35d3c58a0ac56a3e079a1 | 38c39c43f7237fd316930839674ac9be3c0ee8cc | refs/heads/master | 2021-01-18T07:18:18.324604 | 2012-10-18T22:30:34 | 2012-10-18T22:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from WMCore.RequestManager.RequestDB.MySQL.Group.GetGroupFromAssoc import GetGroupFromAssoc as GetGroupFromAssocMySQL
class GetGroupFromAssoc(GetGroupFromAssocMySQL):
    """Oracle variant of the GetGroupFromAssoc DAO.

    Inherits the MySQL implementation unchanged — no Oracle-specific
    overrides are defined here (NOTE(review): presumably the MySQL SQL
    is Oracle-compatible; confirm against the MySQL DAO).
    """
    pass
"metson@4525493e-7705-40b1-a816-d608a930855b"
] | metson@4525493e-7705-40b1-a816-d608a930855b |
dfcf388075f9499cfdaf3e385b9bec1af4308eb3 | 3aef4825c5f2366f2e551cdfa54b88c034b0b4f4 | /tutorials/2_tensorflow_old/matplotlibTUT/plt14_3d.py | d742a0cd4768afe2bb32c874c3cc31368aaf5fd1 | [
"MIT"
] | permissive | wull566/tensorflow_demo | 4a65cbe1bdda7430ab1c3883889501a62258d8a6 | c2c45050867cb056b8193eb53466d26b80b0ec13 | refs/heads/master | 2020-04-06T17:34:05.912164 | 2018-11-15T07:41:47 | 2018-11-15T07:41:48 | 157,665,187 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | # View more 3_python 2_tensorflow_old on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 14 - 3d
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
Tutorial reference:
http://www.3_python-course.eu/matplotlib_multiple_figures.php
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
# X, Y value
X = np.arange(-4, 4, 0.25)
Y = np.arange(-4, 4, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X ** 2 + Y ** 2)
# height value
Z = np.sin(R)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))
"""
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 2D arrays
*rstride* Array row stride (step size), defaults to 10
*cstride* Array column stride (step size), defaults to 10
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*facecolors* Face colors for the individual patches
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
"""
# I think this is different from plt12_contours
ax.contourf(X, Y, Z, zdir='z', offset=-2, cmap=plt.get_cmap('rainbow'))
"""
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the filled contour
on this position in plane normal to zdir
========== ================================================
"""
ax.set_zlim(-2, 2)
plt.show()
| [
"vicleo566@163.com"
] | vicleo566@163.com |
09948ecbf8dce75fc191482b02c52f34414e2dd2 | eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e | /atcoder/AGC/agc037/agc037_a.py | 0a81674be640dbd4e2a9a68795fce2f0fa83a027 | [] | no_license | reo11/AtCoder | 4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9 | 69c6d67f05cb9190d8fb07204488cd7ce4d0bed2 | refs/heads/master | 2023-08-28T10:54:50.859288 | 2023-08-22T18:52:47 | 2023-08-22T18:52:47 | 162,085,118 | 4 | 0 | null | 2023-07-01T14:17:28 | 2018-12-17T06:31:10 | Python | UTF-8 | Python | false | false | 205 | py | s = str(input())
# Greedily partition s (read on the fused line above) into the maximum
# number of consecutive pieces where no two adjacent pieces are equal:
# grow the current piece until it differs from the previous one.
count = 1            # the first character always forms the first piece
pre = s[0]           # the piece most recently committed
now = ""             # the piece currently being grown
for ch in s[1:]:
    now += ch
    if now == pre:
        continue     # identical to the previous piece — keep extending
    count += 1
    pre, now = now, ""
print(count)
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
3948a716848ac63fa942f4ff68789df1d13eec70 | 342ec51a35eef43fe1bafa31bdf8f0c9ef956cd9 | /comlib.py | 2ef51158ec9e1743618a2ad4d19ab834c1795910 | [
"MIT"
] | permissive | Strangemother/python-simple-tts-stt | a60ff7ce4e4b9dd58a3a906c7a8c266b0dc6bb2a | e7ac38e795b32f55367a58107d86bf04ea906f0c | refs/heads/master | 2020-03-21T10:18:41.407181 | 2019-01-02T07:32:24 | 2019-01-02T07:32:24 | 138,444,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import speech
import comtypes.client # Importing comtypes.client will make the gen subpackage
import os
# Fail fast on non-Windows hosts: SAPI is a Windows-only COM service.
# A plain check replaces the previous `try: assert ... except:` pattern,
# which (a) silently disappeared under `python -O` (asserts are stripped)
# and (b) used a bare `except:` that swallowed unrelated errors.
if os.name != 'nt':
    raise RuntimeError("Windows is required.")
# The comtypes `gen` subpackage is generated lazily; if SpeechLib has not
# been generated yet, instantiating the SAPI COM objects triggers comtypes
# to create it, after which the import succeeds.
try:
    from comtypes.gen import SpeechLib  # comtypes
except ImportError:
    # Generate the SpeechLib lib and any associated files
    engine = comtypes.client.CreateObject("SAPI.SpVoice")
    stream = comtypes.client.CreateObject("SAPI.SpFileStream")
    from comtypes.gen import SpeechLib
| [
"jay@strangemother.com"
] | jay@strangemother.com |
5c82f1295336f62ee18d4ca09a43169108808919 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007-EOL/applications/hardware/bluez-utils/actions.py | 998201a4580feb5fd60d2b947e9349576906fa8d | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    """Configure the bluez-utils tree: everything enabled, PIE hardening
    on; init scripts, sdpd and hidd excluded; state under /var."""
    autotools.configure("--enable-all \
                         --enable-pie \
                         --disable-initscripts \
                         --disable-sdpd \
                         --disable-hidd \
                         --localstatedir=/var")
def build():
    """Build with the default make target."""
    autotools.make()
def install():
    """Install into the package image, then hand-place the docs, udev
    rule and the extra tools the upstream install target leaves out."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())

    # move bluetooth rules into correct place
    pisitools.domove("/etc/udev/bluetooth.rules", "/etc/udev/rules.d", "40-bluetooth.rules")

    pisitools.dodoc("AUTHORS", "ChangeLog", "README")

    # optional bluetooth utils
    pisitools.dobin("daemon/passkey-agent")
    pisitools.dobin("daemon/auth-agent")
    pisitools.dosbin("tools/hcisecfilter")
    pisitools.dosbin("tools/ppporc")

    # bluez test
    pisitools.dobin("test/hsmicro")
    pisitools.dobin("test/hsplay")
    pisitools.dobin("test/hstest")
    pisitools.dobin("test/attest")
    pisitools.dobin("test/apitest")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
07896960fbc3c364f8fa514f19481ea4d06edca5 | ae10b60cb92a69146bfb05ef5dde735a0aa45d4b | /examples/New Functions/Example distance 1.py | 2d8b3d0afa43d6027b21ee429f3c43c74def211d | [
"MIT"
] | permissive | kantel/nodebox-pyobjc | 471cea4c5d7f1c239c490323186458a74edcc214 | 068ba64c87d607522a240ab60c3ba14f869f6222 | refs/heads/master | 2021-08-14T18:32:57.995445 | 2017-11-16T13:42:23 | 2017-11-16T13:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import math
def params(val, name):
    """NodeBox slider callback: store the new x/y value and redraw.

    NodeBox passes the slider's current value and the variable name; the
    module-level x/y globals (created by the var() declarations in this
    sketch) are updated here before re-drawing the triangle.
    """
    global x, y
    # global scope needs updating.
    val = int(round(val,0))
    if name == "x":
        x = int(val)
    else:
        y = int(val)
    triangle(x, y)
def triangle(x, y):
    """Draw a right triangle with legs x and y and label its sides.

    The right angle sits at (100, 160); the horizontal leg extends x
    pixels, the vertical leg y pixels.  Uses NodeBox drawing primitives
    (stroke, beginpath, text, distance, ...) from the sketch namespace.
    """
    x0, y0 = 100, 160
    x1, y1 = x0 + x, y0      # end of the horizontal leg
    x2, y2 = x0, y0 + y      # end of the vertical leg
    # draw a triangle
    stroke(0.2)
    nofill()
    strokewidth(2)
    autoclosepath(True)      # close the path back to (x0, y0)
    beginpath(x0, y0)
    lineto(x1, y1)
    lineto(x2, y2)
    endpath()
    # labels
    fill(0)
    lx,ly = x0 + (x/2.0), y0 - 10
    text("x", lx, ly)
    lx,ly = x0 - 15, y0 + (y / 2.0)
    text("y", lx, ly)
    lx,ly = x0, y0 -130
    text("x = %i" % x, lx, ly)
    lx,ly = x0, y0 -100
    text("y = %i" % y, lx, ly)
    # hypotenuse length via NodeBox's distance() helper
    d = round(distance(x1, y1, x2, y2), 4)
    lx,ly = x0, y0 -70
    text("hypotenuse ≈ %.4f" % d, lx, ly)
# Interactive sliders: moving either one calls params(), which redraws.
var("x", NUMBER, default=50, min=10, max=300, handler=params)
var("y", NUMBER, default=50, min=10, max=300, handler=params)
# Initial draw with the slider defaults.
triangle(x,y)
| [
"karstenwo@web.de"
] | karstenwo@web.de |
9216428587f42a0e67dbf4b9393da0b0e71f9cdc | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/apimanagement/v20201201/get_content_item.py | 839b7064a377f6a60900dac782fc801a0b858684 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 3,523 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetContentItemResult',
'AwaitableGetContentItemResult',
'get_content_item',
]
@pulumi.output_type
class GetContentItemResult:
    """
    Content type contract details.

    Auto-generated output type: fields are validated in __init__ and
    stored/read through pulumi.set/pulumi.get.
    """
    def __init__(__self__, id=None, name=None, properties=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> Any:
        """
        Properties of the content item.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetContentItemResult(GetContentItemResult):
    """Awaitable wrapper so the result can be consumed with `await`."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # `if False: yield` makes this a generator function that never
        # yields, so awaiting completes immediately with the result.
        if False:
            yield self
        return GetContentItemResult(
            id=self.id,
            name=self.name,
            properties=self.properties,
            type=self.type)
def get_content_item(content_item_id: Optional[str] = None,
                     content_type_id: Optional[str] = None,
                     resource_group_name: Optional[str] = None,
                     service_name: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContentItemResult:
    """
    Fetch an API Management content item (content type contract details).

    :param str content_item_id: Content item identifier.
    :param str content_type_id: Content type identifier.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    __args__ = {
        'contentItemId': content_item_id,
        'contentTypeId': content_type_id,
        'resourceGroupName': resource_group_name,
        'serviceName': service_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('azure-native:apimanagement/v20201201:getContentItem', __args__, opts=opts, typ=GetContentItemResult).value
    # Re-wrap the plain invoke result in its awaitable counterpart.
    field_names = ('id', 'name', 'properties', 'type')
    return AwaitableGetContentItemResult(
        **{field: getattr(raw, field) for field in field_names})
| [
"noreply@github.com"
] | morrell.noreply@github.com |
c23e4d616602f79d9d239eacb7e8558f1639d2ff | e59602b7e17fafff70240700138bbe54ced28739 | /PythonSimpleVisualiser.py | 152c3204cf353fff3ddeab2c45ec43bdd5770b29 | [] | no_license | TestSubjector/CompGeometry2 | ddc3dae8517e45d419e7057f2d905ad5d95d67e7 | 3b7f30302c837d883132290789cd84305f0e0b10 | refs/heads/master | 2022-03-27T13:37:46.115463 | 2019-04-03T18:18:48 | 2019-04-03T18:18:48 | 177,393,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,173 | py | ## Takes input from a text file and plots all points.
## Connects the points in order read and connects last point to first one.
import argparse
import matplotlib.pyplot as plt
def getArgParser():
''' Returns ArgParser object to handle command-line args'''
parser = argparse.ArgumentParser()
parser.add_argument("filepath",default="./input.ch",nargs="?",help="path of the input file")
return parser
if __name__ == '__main__':
parser = getArgParser()
args = parser.parse_args()
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
inputfile = open(args.filepath,"r")
storeType = inputfile.readline();
# print(storeType)
numberOfPoints, ch_indices = [int(a) for a in inputfile.readline().split()]
# print(numberOfPoints)
# print(ch_indices)
points = []
for i in range(numberOfPoints):
x,y,z = inputfile.readline().split()
points.append([float(x), float(y)])
if storeType == "CHG\n" or storeType == "CHJ\n":
ch_indices = [element for element in map(int,inputfile.readline().split())]
for point in points:
plt.plot(point[0],point[1],'bo')
for i in range(len(ch_indices)-1):
plt.plot([points[ch_indices[i]][0],points[ch_indices[i+1]][0]],[points[ch_indices[i]][1],points[ch_indices[i+1]][1]],'g-')
plt.plot([points[ch_indices[0]][0],points[ch_indices[len(ch_indices)-1]][0]],[points[ch_indices[0]][1],points[ch_indices[len(ch_indices)-1]][1]],'g-')
plt.show()
if storeType == "CHK\n":
ch_points = [element for element in map(str,inputfile.readline().split())]
ch_indices = []
for item in ch_points:
x,y = item.split(",")
ch_indices.append([float(x), float(y)])
for point in points:
plt.plot(point[0],point[1],'bo')
for i in range(len(ch_indices)-1):
plt.plot([ch_indices[i][0], ch_indices[i+1][0]],[ch_indices[i][1], ch_indices[i+1][1]],'g-')
plt.plot([ch_indices[0][0], ch_indices[len(ch_indices)-1][0]], [ch_indices[0][1], ch_indices[len(ch_indices)-1][1]],'g-')
plt.show()
inputfile.close()
| [
"f2015845@hyderabad.bits-pilani.ac.in"
] | f2015845@hyderabad.bits-pilani.ac.in |
95621f79a92dc69d3ca8f9ac1482bf81b28cb8fa | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/web/v20181101/get_web_app_function.py | f035db040d58335774ba7aec281ac9825034db4b | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,319 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetWebAppFunctionResult',
'AwaitableGetWebAppFunctionResult',
'get_web_app_function',
]
@pulumi.output_type
class GetWebAppFunctionResult:
    """
    Web Job Information.

    NOTE(review): generated by the Pulumi SDK generator (see file header);
    ``@pulumi.output_type`` presumably introspects the properties below, so
    do not restructure this class by hand.
    """
    # ``type`` deliberately shadows the builtin: parameter names must match
    # the provider's field names.
    def __init__(__self__, config=None, config_href=None, files=None, function_app_id=None, href=None, kind=None, name=None, script_href=None, script_root_path_href=None, secrets_file_href=None, test_data=None, type=None):
        # Each field is validated only when truthy, then stored through
        # pulumi.set rather than plain attribute assignment.
        if config and not isinstance(config, dict):
            raise TypeError("Expected argument 'config' to be a dict")
        pulumi.set(__self__, "config", config)
        if config_href and not isinstance(config_href, str):
            raise TypeError("Expected argument 'config_href' to be a str")
        pulumi.set(__self__, "config_href", config_href)
        if files and not isinstance(files, dict):
            raise TypeError("Expected argument 'files' to be a dict")
        pulumi.set(__self__, "files", files)
        if function_app_id and not isinstance(function_app_id, str):
            raise TypeError("Expected argument 'function_app_id' to be a str")
        pulumi.set(__self__, "function_app_id", function_app_id)
        if href and not isinstance(href, str):
            raise TypeError("Expected argument 'href' to be a str")
        pulumi.set(__self__, "href", href)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if script_href and not isinstance(script_href, str):
            raise TypeError("Expected argument 'script_href' to be a str")
        pulumi.set(__self__, "script_href", script_href)
        if script_root_path_href and not isinstance(script_root_path_href, str):
            raise TypeError("Expected argument 'script_root_path_href' to be a str")
        pulumi.set(__self__, "script_root_path_href", script_root_path_href)
        if secrets_file_href and not isinstance(secrets_file_href, str):
            raise TypeError("Expected argument 'secrets_file_href' to be a str")
        pulumi.set(__self__, "secrets_file_href", secrets_file_href)
        if test_data and not isinstance(test_data, str):
            raise TypeError("Expected argument 'test_data' to be a str")
        pulumi.set(__self__, "test_data", test_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def config(self) -> Optional[Mapping[str, Any]]:
        """
        Config information.
        """
        return pulumi.get(self, "config")
    @property
    @pulumi.getter(name="configHref")
    def config_href(self) -> Optional[str]:
        """
        Config URI.
        """
        return pulumi.get(self, "config_href")
    @property
    @pulumi.getter
    def files(self) -> Optional[Mapping[str, str]]:
        """
        File list.
        """
        return pulumi.get(self, "files")
    @property
    @pulumi.getter(name="functionAppId")
    def function_app_id(self) -> Optional[str]:
        """
        Function App ID.
        """
        return pulumi.get(self, "function_app_id")
    @property
    @pulumi.getter
    def href(self) -> Optional[str]:
        """
        Function URI.
        """
        return pulumi.get(self, "href")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="scriptHref")
    def script_href(self) -> Optional[str]:
        """
        Script URI.
        """
        return pulumi.get(self, "script_href")
    @property
    @pulumi.getter(name="scriptRootPathHref")
    def script_root_path_href(self) -> Optional[str]:
        """
        Script root path URI.
        """
        return pulumi.get(self, "script_root_path_href")
    @property
    @pulumi.getter(name="secretsFileHref")
    def secrets_file_href(self) -> Optional[str]:
        """
        Secrets file URI.
        """
        return pulumi.get(self, "secrets_file_href")
    @property
    @pulumi.getter(name="testData")
    def test_data(self) -> Optional[str]:
        """
        Test data used when testing via the Azure Portal.
        """
        return pulumi.get(self, "test_data")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetWebAppFunctionResult(GetWebAppFunctionResult):
    """Awaitable wrapper so the lookup result can be used with ``await``."""
    def __await__(self):
        # Yielding from an empty tuple keeps this method a generator (which
        # is what the awaitable protocol requires) without ever producing a
        # value; the plain result object is returned immediately.
        yield from ()
        field_names = ('config', 'config_href', 'files', 'function_app_id',
                       'href', 'kind', 'name', 'script_href',
                       'script_root_path_href', 'secrets_file_href',
                       'test_data', 'type')
        return GetWebAppFunctionResult(
            **{field: getattr(self, field) for field in field_names})
def get_web_app_function(function_name: Optional[str] = None,
                         name: Optional[str] = None,
                         resource_group_name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppFunctionResult:
    """
    Use this data source to access information about an existing resource.

    :param str function_name: Function name.
    :param str name: Site name.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    __args__ = {
        'functionName': function_name,
        'name': name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('azure-nextgen:web/v20181101:getWebAppFunction', __args__, opts=opts, typ=GetWebAppFunctionResult).value
    # Re-wrap the plain invoke result in its awaitable counterpart.
    field_names = ('config', 'config_href', 'files', 'function_app_id',
                   'href', 'kind', 'name', 'script_href',
                   'script_root_path_href', 'secrets_file_href',
                   'test_data', 'type')
    return AwaitableGetWebAppFunctionResult(
        **{field: getattr(raw, field) for field in field_names})
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
5fbbbba6af3dfcfca89cb54da6713158b0a6ecbd | 4e60e8a46354bef6e851e77d8df4964d35f5e53f | /main.py | ae35090fa53b38726ed25a70d0f2454551d2dee5 | [] | no_license | cq146637/DockerManagerPlatform | cbae4154ad66eac01772ddd902d7f70b62a2d856 | 9c509fb8dca6633ed3afdc92d4e6491b5d13e322 | refs/heads/master | 2021-04-09T13:58:14.117752 | 2018-03-19T13:41:04 | 2018-03-19T13:41:04 | 125,712,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | # -*- coding: utf-8 -*-
__author__ = 'CQ'
import os
import sys
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from settings import settings
from url.urls import urls
from tornado.options import define, options, parse_command_line
# Register the --port command-line option (tornado.options).
define("port", default=8888, help="run on the given port", type=int)
# Make sibling project modules importable when the file is run as a script.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
if __name__ == "__main__":
    # Build the application from the project's URL map and settings.
    application = tornado.web.Application(handlers=urls, **settings)
    # Parse --port (and friends) after construction, as the original did.
    parse_command_line()
    print('The service is already running on port %s ...' % options.port)
    server = tornado.httpserver.HTTPServer(application)
    server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
"1016025625@qq.com"
] | 1016025625@qq.com |
080f45148a5f23811232fe76aaf7c83e197d9bfb | e8eb2cecee1ebc47455917fa11a58e7b5a912b74 | /python_lessons/python_advanced_02_tic_tac_toe/app/model/game.py | c5b61a7de11ddc51661518c76607cd7bc169970c | [] | no_license | cyr1z/python_education | ad0f9e116536a5583a12e05efe41ee173639ea9c | 37b2edbccf6f96c59c14cabf4bf749a3ec0f503d | refs/heads/main | 2023-06-08T10:24:26.217582 | 2021-06-27T17:02:05 | 2021-06-27T17:02:05 | 359,467,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | """
Game module.
"""
from app.model.table import GameTable
from app.view.table_view import TableView
class Game:
    """
    Game play class: drives one tic-tac-toe match between two players
    over a GameTable.
    """
    def __init__(self, numbers_map, player1, player2):
        self.table = GameTable(numbers_map)
        # Keep turn order explicit: player1 always moves first.
        self.players = [player1, player2]

    def play_step(self, player):
        """
        Render the board, ask *player* for a move and apply it.

        :param player: Player whose turn it is
        :return: dict describing the outcome from the table's
                 choice_handler (empty/falsy while the game continues)
        """
        print(TableView(**self.table.choices))
        number = player.get_choice(self.table)
        return self.table.choice_handler(number, player)

    def iteration(self) -> dict:
        """
        Run one game step for each player in turn, stopping early as soon
        as a step reports a result.

        :return: dict describing the finished state, or an empty dict if
                 the game is still in progress
                 (the original docstring wrongly advertised a bool)
        """
        request = {}
        for player in self.players:
            request = self.play_step(player)
            if request:
                break
        return request
| [
"cyr@zolotarev.pp.ua"
] | cyr@zolotarev.pp.ua |
093b8ea7593c9c1b921e251e644de80e43c8a9f9 | ae12996324ff89489ded4c10163f7ff9919d080b | /LeetCodePython/DesignaTextEditor.py | 3f9a03e8ddcfd31fcc7ef5243000df664c94c4e6 | [] | no_license | DeanHe/Practice | 31f1f2522f3e7a35dc57f6c1ae74487ad044e2df | 3230cda09ad345f71bb1537cb66124ec051de3a5 | refs/heads/master | 2023-07-05T20:31:33.033409 | 2023-07-01T18:02:32 | 2023-07-01T18:02:32 | 149,399,927 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,725 | py | """
Design a text editor with a cursor that can do the following:
Add text to where the cursor is.
Delete text from where the cursor is (simulating the backspace key).
Move the cursor either left or right.
When deleting text, only characters to the left of the cursor will be deleted. The cursor will also remain within the actual text and cannot be moved beyond it. More formally, we have that 0 <= cursor.position <= currentText.length always holds.
Implement the TextEditor class:
TextEditor() Initializes the object with empty text.
void addText(string text) Appends text to where the cursor is. The cursor ends to the right of text.
int deleteText(int k) Deletes k characters to the left of the cursor. Returns the number of characters actually deleted.
string cursorLeft(int k) Moves the cursor to the left k times. Returns the last min(10, len) characters to the left of the cursor, where len is the number of characters to the left of the cursor.
string cursorRight(int k) Moves the cursor to the right k times. Returns the last min(10, len) characters to the left of the cursor, where len is the number of characters to the left of the cursor.
Example 1:
Input
["TextEditor", "addText", "deleteText", "addText", "cursorRight", "cursorLeft", "deleteText", "cursorLeft", "cursorRight"]
[[], ["leetcode"], [4], ["practice"], [3], [8], [10], [2], [6]]
Output
[null, null, 4, null, "etpractice", "leet", 4, "", "practi"]
Explanation
TextEditor textEditor = new TextEditor(); // The current text is "|". (The '|' character represents the cursor)
textEditor.addText("leetcode"); // The current text is "leetcode|".
textEditor.deleteText(4); // return 4
// The current text is "leet|".
// 4 characters were deleted.
textEditor.addText("practice"); // The current text is "leetpractice|".
textEditor.cursorRight(3); // return "etpractice"
// The current text is "leetpractice|".
// The cursor cannot be moved beyond the actual text and thus did not move.
// "etpractice" is the last 10 characters to the left of the cursor.
textEditor.cursorLeft(8); // return "leet"
// The current text is "leet|practice".
// "leet" is the last min(10, 4) = 4 characters to the left of the cursor.
textEditor.deleteText(10); // return 4
// The current text is "|practice".
// Only 4 characters were deleted.
textEditor.cursorLeft(2); // return ""
// The current text is "|practice".
// The cursor cannot be moved beyond the actual text and thus did not move.
// "" is the last min(10, 0) = 0 characters to the left of the cursor.
textEditor.cursorRight(6); // return "practi"
// The current text is "practi|ce".
// "practi" is the last min(10, 6) = 6 characters to the left of the cursor.
Constraints:
1 <= text.length, k <= 40
text consists of lowercase English letters.
At most 2 * 104 calls in total will be made to addText, deleteText, cursorLeft and cursorRight.
hint:
1 Making changes in the middle of some data structures is generally harder than changing the front/back of the same data structure.
2 Can you partition your data structure (text with cursor) into two parts, such that each part changes only near its ends?
3 Can you think of a data structure that supports efficient removals/additions to the front/back?
4 Try to solve the problem with two deques by maintaining the prefix and the suffix separately.
"""
class TextEditor:
    """Text editor with a cursor, backed by two character stacks.

    ``_left`` holds the characters before the cursor (in order) and
    ``_right`` holds the characters after the cursor in *reverse* order
    (top of the stack = character immediately right of the cursor).
    Every operation is O(k) in the amount of text touched, instead of the
    O(len(text)) string slicing of the naive single-string approach.
    """

    def __init__(self):
        self._left = []   # chars to the left of the cursor, in order
        self._right = []  # chars to the right of the cursor, reversed

    def addText(self, text: str) -> None:
        """Insert ``text`` at the cursor; the cursor ends right of it."""
        self._left.extend(text)

    def deleteText(self, k: int) -> int:
        """Delete up to ``k`` chars left of the cursor; return the count."""
        deleted = min(k, len(self._left))
        if deleted:  # guard: del lst[-0:] would clear the whole list
            del self._left[-deleted:]
        return deleted

    def _tail(self) -> str:
        """Return the last min(10, len) characters left of the cursor."""
        return ''.join(self._left[-10:])

    def cursorLeft(self, k: int) -> str:
        """Move the cursor up to ``k`` positions left; return the tail."""
        for _ in range(min(k, len(self._left))):
            self._right.append(self._left.pop())
        return self._tail()

    def cursorRight(self, k: int) -> str:
        """Move the cursor up to ``k`` positions right; return the tail."""
        for _ in range(min(k, len(self._right))):
            self._left.append(self._right.pop())
        return self._tail()
# Your TextEditor object will be instantiated and called as such:
# obj = TextEditor()
# obj.addText(text)
# param_2 = obj.deleteText(k)
# param_3 = obj.cursorLeft(k)
# param_4 = obj.cursorRight(k) | [
"tengda.he@gmail.com"
] | tengda.he@gmail.com |
983d4615eb75145cbb024178a9271475c82399be | 29a38674cd5cda4880d539ee235ea118b750571e | /tests/flit_cli/flit_update/tst_badconfig.py | 495501f01a3766375523bcada2ceebbb585b41c1 | [] | no_license | hbrunie/FLiT | 1f0be509f776b86380b215b30e7b365921dd3425 | 836bc3cd2befebc9cfe20926fd855b1f5d29ae17 | refs/heads/master | 2020-07-09T20:26:40.644089 | 2019-04-10T18:47:25 | 2019-04-10T18:47:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,702 | py | # -- LICENSE BEGIN --
#
# Copyright (c) 2015-2018, Lawrence Livermore National Security, LLC.
#
# Produced at the Lawrence Livermore National Laboratory
#
# Written by
# Michael Bentley (mikebentley15@gmail.com),
# Geof Sawaya (fredricflinstone@gmail.com),
# and Ian Briggs (ian.briggs@utah.edu)
# under the direction of
# Ganesh Gopalakrishnan
# and Dong H. Ahn.
#
# LLNL-CODE-743137
#
# All rights reserved.
#
# This file is part of FLiT. For details, see
# https://pruners.github.io/flit
# Please also read
# https://github.com/PRUNERS/FLiT/blob/master/LICENSE
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the disclaimer below.
#
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the disclaimer
# (as noted below) in the documentation and/or other materials
# provided with the distribution.
#
# - Neither the name of the LLNS/LLNL nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
# SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# Additional BSD Notice
#
# 1. This notice is required to be provided under our contract
# with the U.S. Department of Energy (DOE). This work was
# produced at Lawrence Livermore National Laboratory under
# Contract No. DE-AC52-07NA27344 with the DOE.
#
# 2. Neither the United States Government nor Lawrence Livermore
# National Security, LLC nor any of their employees, makes any
# warranty, express or implied, or assumes any liability or
# responsibility for the accuracy, completeness, or usefulness of
# any information, apparatus, product, or process disclosed, or
# represents that its use would not infringe privately-owned
# rights.
#
# 3. Also, reference herein to any specific commercial products,
# process, or services by trade name, trademark, manufacturer or
# otherwise does not necessarily constitute or imply its
# endorsement, recommendation, or favoring by the United States
# Government or Lawrence Livermore National Security, LLC. The
# views and opinions of authors expressed herein do not
# necessarily state or reflect those of the United States
# Government or Lawrence Livermore National Security, LLC, and
# shall not be used for advertising or product endorsement
# purposes.
#
# -- LICENSE END --
'''
Tests error cases in the configuration file, such as specifying more than one of a certain type of compiler.
>>> from io import StringIO
>>> import os
>>> import shutil
>>> from tst_common_funcs import runconfig
>>> configstr = \\
... '[dev_build]\\n' \\
... 'compiler_name = \\'name-does-not-exist\\'\\n'
>>> runconfig(configstr)
Traceback (most recent call last):
...
AssertionError: Compiler name name-does-not-exist not found
>>> configstr = \\
... '[ground_truth]\\n' \\
... 'compiler_name = \\'another-name-that-does-not-exist\\'\\n'
>>> runconfig(configstr)
Traceback (most recent call last):
...
AssertionError: Compiler name another-name-that-does-not-exist not found
>>> runconfig('[compiler]\\n')
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml improperly configured, needs [[compiler]] section
>>> runconfig('[[compiler]]\\n')
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: compiler "{}" is missing the "name" field
>>> runconfig('[[compiler]]\\n'
... 'name = \\'hello\\'\\n')
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: compiler "{'name': 'hello'}" is missing the "type" field
>>> runconfig('[[compiler]]\\n'
... 'name = \\'hello\\'\\n'
... 'type = \\'gcc\\'\\n') # doctest:+ELLIPSIS
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: compiler "{...}" is missing the "binary" field
>>> runconfig('[[compiler]]\\n'
... 'binary = \\'my-special-compiler\\'\\n'
... 'name = \\'hello\\'\\n'
... 'type = \\'my-unsupported-type\\'\\n')
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: unsupported compiler type "my-unsupported-type"
>>> runconfig('[[compiler]]\\n'
... 'binary = \\'gcc\\'\\n'
... 'name = \\'gcc\\'\\n'
... 'type = \\'gcc\\'\\n'
... '\\n'
... '[[compiler]]\\n'
... 'binary = \\'gcc-2\\'\\n'
... 'name = \\'gcc-2\\'\\n'
... 'type = \\'gcc\\'\\n'
... )
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: cannot have multiple compilers of the same type (gcc)
>>> runconfig('[[compiler]]\\n'
... 'binary = \\'gcc\\'\\n'
... 'name = \\'gcc\\'\\n'
... 'type = \\'gcc\\'\\n'
... '\\n'
... '[[compiler]]\\n'
... 'binary = \\'gcc-2\\'\\n'
... 'name = \\'gcc\\'\\n'
... 'type = \\'clang\\'\\n'
... )
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: cannot have multiple compilers of the same name (gcc)
'''
# Test setup before the docstring is run.
import sys
# The shared test harness lives two directories up.  Import it with a
# temporary sys.path entry so the doctests above run with the original
# import path.
before_path = sys.path[:]          # snapshot, restored below
sys.path.append('../..')
import test_harness as th
sys.path = before_path             # undo the temporary path tweak
if __name__ == '__main__':
    # Run the module docstring's doctests; exit non-zero on any failure.
    from doctest import testmod
    result = testmod()
    sys.exit(result.failed)
| [
"mikebentley15@gmail.com"
] | mikebentley15@gmail.com |
b255fdf34e22a4165490cdca3b6a1c6e64f12f1d | 8d2a124753905fb0455f624b7c76792c32fac070 | /pytnon-month01/month01-shibw-notes/day08-shibw/demo02-ๅฝๆฐๅฎๅไผ ้ๆนๅผ.py | 8552c39f1fd88ff02da5f27af317be220d475eb2 | [] | no_license | Jeremy277/exercise | f38e4f19aae074c804d265f6a1c49709fd2cae15 | a72dd82eb2424e4ae18e2f3e9cc66fc4762ec8fa | refs/heads/master | 2020-07-27T09:14:00.286145 | 2019-09-17T11:31:44 | 2019-09-17T11:31:44 | 209,041,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | '''
ๅฝๆฐไผ ๅ
ๅฎๅไผ ้ๆนๅผ
'''
def fun01(a, b, c):  # formal parameters a, b, c
    """Print each of the three positional parameters on its own line."""
    for value in (a, b, c):
        print(value)
# Positional arguments: actual arguments bind to formal parameters by
# position, so changing the argument order changes the result.
# fun01(10,20,30)
# fun01(30,10,20)

# Sequence unpacking: ``*`` spreads the elements of a sequence (list,
# string, tuple, ...) over the formal parameters in order.
list01 = [10,20,30]
fun01(*list01)
# str01 = 'abcd'
# fun01(*str01)  # error: four items for three parameters

# Keyword arguments: each value binds to the parameter with the matching
# name, so the order of the arguments no longer matters.
# fun01(a=10,b=20,c=30)
# fun01(c=30,a=10,b=20)
# fun01(a=10,b=20,d=40)  # error: no parameter named ``d``

# Dict unpacking: ``**`` spreads key/value pairs as keyword arguments;
# every key must match a formal parameter name.
dict01 = {'a':10,'b':20,'c':30}
# a = 10 , b = 20 , c = 30
fun01(**dict01)
# Keys of the dict must match the formal parameter names:
# dict01 = {'a':10,'e':20,'d':30}
# fun01(**dict01)  # error: unexpected keyword argument

# Mixed use: the language requires positional arguments to come before
# keyword arguments.
# fun01(10,20,c=30)
# fun01(c=30,b=20,10)  # SyntaxError: positional argument after keyword
# fun01(10,c=30,b=20)
| [
"13572093824@163.com"
] | 13572093824@163.com |
4153e5a0a7053f1238faf6b9925f4b00dfa351d3 | 3f4f2bb867bf46818802c87f2f321a593f68aa90 | /smile/bin/cftp | f013e4cd473d18741239a41ee15b8ee6fe9ecd80 | [] | no_license | bopopescu/Dentist | 56f5d3af4dc7464544fbfc73773c7f21a825212d | 0122a91c1f0d3d9da125234a8758dea802cd38f0 | refs/heads/master | 2022-11-23T12:42:23.434740 | 2016-09-19T15:42:36 | 2016-09-19T15:42:36 | 282,608,405 | 0 | 0 | null | 2020-07-26T08:30:16 | 2020-07-26T08:30:16 | null | UTF-8 | Python | false | false | 358 | #!/SHARED-THINGS/ONGOING/We.smile/smile/bin/python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os
extra = os.path.dirname(os.path.dirname(sys.argv[0]))
sys.path.insert(0, extra)
try:
import _preamble
except ImportError:
sys.exc_clear()
sys.path.remove(extra)
from twisted.conch.scripts.cftp import run
run()
| [
"jamaalaraheem@gmail.com"
] | jamaalaraheem@gmail.com | |
ee3a35c8ea73fb5fd4cbd78944d7dd7318dfa7b2 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/pydev/pydev_tests_python/resources/_debugger_case_set_next_statement.py | 145f36d596c3dbca63ea3726dfa41035f960a000 | [
"EPL-1.0",
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 169 | py | def method():
a = 1
print('call %s' % (a,))
a = 2
print('call %s' % (a,))
a = 3
if __name__ == '__main__':
method()
print('TEST SUCEEDED!')
| [
"Elizaveta.Shashkova@jetbrains.com"
] | Elizaveta.Shashkova@jetbrains.com |
1da139e0f1926e0aebc7e93d592604dcfb4edf72 | 8d920a35fda0ba351a6fb5e7d6cb2b570d6f1ec6 | /grp_ejecucion_presupuestal/__openerp__.py | 21ef1b0627f49b703a741c5e18df87a2f6e4ca67 | [] | no_license | suningwz/odoo-coreuy | afeb661a1c6bd16e7804f2bd7df9ebe9dda7bab8 | d723860324e3d914a0a44bac14dd83eceefc96fe | refs/heads/master | 2020-09-14T22:43:17.884212 | 2019-07-03T13:48:50 | 2019-07-03T13:48:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Enterprise Management Solution
# GRP Estado Uruguay
# Copyright (C) 2017 Quanam (ATEL SA., Uruguay)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'GRP - Ejecuciรณn presupuestal SIIF',
'version': '1.0',
'author': 'Quanam',
'website': 'www.quanam.com',
'category': '',
'images': [],
'depends': ['grp_factura_siif'],
'description': """
GRP - Vista ejecuciรณn presupuestal SIIF aรฑos futuros
""",
'demo': [],
'data': [
'security/ir.model.access.csv',
'report/grp_ejecucion_presupuestal_siif_view.xml',
'report/grp_ejecucion_presupuestal_siif_documentos_view.xml',
],
'installable': True,
'auto_install': False,
}
| [
"lcontreras@sofis.com.uy"
] | lcontreras@sofis.com.uy |
cf72b166ab3a1fcf4523e0f20acf976d34f5e402 | ab8187626aa68c1f92301db78e9f8b0c4b088554 | /TwoPointer/75_h.py | a8a07b4bde84c80b9ccb625d2074edf0ebb7ed68 | [] | no_license | khj68/algorithm | 2818f87671019f9f2305ec761fd226e737f12025 | efebe142b9b52e966e0436be3b87fb32b4f7ea32 | refs/heads/master | 2023-04-25T02:33:13.403943 | 2021-05-04T03:09:38 | 2021-05-04T03:09:38 | 287,733,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
zeroP, twoP = 0, len(nums)-1
i = 0
while i <= twoP:
try:
while nums[zeroP] == 0: zeroP += 1
while nums[twoP] == 2: twoP -= 1
except:
print('error')
return
# print(zeroP, twoP)
if nums[i] == 0 and i > zeroP:
nums[i], nums[zeroP] = nums[zeroP], nums[i]
elif nums[i] == 2 and i < twoP:
nums[i], nums[twoP] = nums[twoP], nums[i]
else:
i += 1
| [
"maga40@naver.com"
] | maga40@naver.com |
51b14fca6562f512f53b9bd7533a151120bba916 | 128090f08a541eaf52a39bd811147e16fbcd2ef5 | /certificate/hooks.py | 9fd93618a5e0d2acdf2aa4969765c66f2e5bb01a | [
"MIT"
] | permissive | hrgadeha/certificate | e92d420773d2bdfafa641fb1239a38f21db54ee4 | b742679f0002f63a6afd4950b9f20903f9c8dc4b | refs/heads/master | 2020-03-31T12:59:00.530854 | 2018-10-23T07:13:23 | 2018-10-23T07:13:23 | 152,236,987 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,849 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
# Frappe app metadata for the "certificate" app (read by bench/frappe at
# install time and shown in the desk's app listing).
app_name = "certificate"
app_title = "Certificate"
app_publisher = "Hardik Gadesha"
app_description = "Custom Certificate for Employee and Company"
app_icon = "octicon octicon-file-directory"  # desk icon (octicon class name)
app_color = "grey"
app_email = "hardikgadesha@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/certificate/css/certificate.css"
# app_include_js = "/assets/certificate/js/certificate.js"
# include js, css files in header of web template
# web_include_css = "/assets/certificate/css/certificate.css"
# web_include_js = "/assets/certificate/js/certificate.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "certificate.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "certificate.install.before_install"
# after_install = "certificate.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "certificate.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# Run both certificate mailers once a day at midnight (cron "0 0 * * *").
scheduler_events = {
    "cron": {
        "0 0 * * *": [
            # Mails due employee certificates.
            "certificate.certificate.doctype.certificate.certificate.emp_cert_mail",
            # Mails due company certificates.
            "certificate.certificate.doctype.certificate.certificate.company_cert_mail"
        ]
    }
}
# Testing
# -------
# before_tests = "certificate.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "certificate.event.get_events"
# }
| [
"you@example.com"
] | you@example.com |
e69e9f01a37105c49a7d07b7d0f7bf4f565436b7 | 8e3a02a5e104a14a1aa3ba3ba0f05596a9f73757 | /examples/test_gevent.py | 4789c8a0d20458c547189e187018ec4dcc5f1168 | [
"MIT"
] | permissive | jengdal/restkit | 9f114f0f7ded7217cb0c9d405dd1af469c9a918a | e32ff0c3d72415c998353644313bbc02805faa6c | refs/heads/master | 2021-01-18T06:02:55.463294 | 2012-07-19T10:39:32 | 2012-07-19T10:39:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | import timeit
# Monkey-patch the stdlib (sockets, DNS, ...) so blocking I/O yields to gevent.
from gevent import monkey; monkey.patch_all()
import gevent
from restkit import *
from restkit.conn import Connection
from socketpool import ConnectionPool
#set_logging("debug")
# Shared HTTP connection pool backed by gevent-cooperative sockets.
pool = ConnectionPool(factory=Connection, backend="gevent")
urls = [
    "http://yahoo.fr",
    "http://google.com",
    "http://friendpaste.com",
    "http://benoitc.io",
    "http://couchdb.apache.org"]
# Duplicate the URL list 10x to get a meaningful concurrent workload.
allurls = []
for i in range(10):
    allurls.extend(urls)
def fetch(u):
    # GET one URL through the shared pool; report status and body length.
    r = request(u, follow_redirect=True, pool=pool)
    print "RESULT: %s: %s (%s)" % (u, r.status, len(r.body_string()))
def extract():
    # Spawn one greenlet per URL and wait for all of them to finish.
    jobs = [gevent.spawn(fetch, url) for url in allurls]
    gevent.joinall(jobs)
# Time a single concurrent crawl of every URL (Python 2 script).
t = timeit.Timer(stmt=extract)
print "%.2f s" % t.timeit(number=1)
| [
"bchesneau@gmail.com"
] | bchesneau@gmail.com |
80b5d59d19dcfbe334c10f0bf90f3ced0118346d | 1804187f39dd6004250933b35ba9ce24297f32a5 | /practice_8_13.py | a00c9a29c89dc4a2064682f2e727369ac0a1010a | [] | no_license | xiaomengxiangjia/Python | ecd2e3e8576364f15482669cb75b52b8790543f5 | 7f52a33d7956068d26347cf34d35c953b945a635 | refs/heads/master | 2020-03-20T23:01:09.981928 | 2018-08-23T09:04:53 | 2018-08-27T05:46:38 | 137,825,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | def build_profile(first, last, **user_info):
    # Start the profile with the two required name fields.
    profile = {}
    profile['first_name'] = first
    profile['last_name'] = last
    # Fold every extra keyword argument into the profile as-is.
    for key, value in user_info.items():
        profile[key] = value
    return profile
user_profile = build_profile('benjiming','frankling',
location = 'chengdu',
filed = 'computer science and technology',
dream = 'study')
print(user_profile)
| [
"645334483@qq.com"
] | 645334483@qq.com |
1b7d543aa7cbb7b19a285e7d31a77ff3a6c069a8 | dbfc8ca4dbdef6002b0738dd4c30d569eb9e36c3 | /test/metadata/inbound/plugins/test_plugins_1000_0006_srtm.py | 2d729241c31eb65866a3e9474c3784cc474aaa64 | [] | no_license | GISdeveloper2017/imetadata | da32e35215cc024a2e5d244ee8afc375c296550d | 58516401a054ff0d25bfb244810a37838c4c8cf6 | refs/heads/master | 2023-03-26T06:38:28.721553 | 2021-03-06T09:32:06 | 2021-03-06T09:32:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | # -*- coding: utf-8 -*-
# @Time : 2020/12/4 09:05
# @Author : ็่ฅฟไบ
# @File : test_plugins_aaa.py
import allure
import pytest
import settings
from imetadata.base.c_file import CFile
from imetadata.base.c_utils import CUtils
from imetadata.business.metadata.base.fileinfo.c_dmFilePathInfoEx import CDMFilePathInfoEx
from imetadata.business.metadata.base.plugins.c_plugins import CPlugins
from imetadata.business.metadata.inbound.plugins.dir.plugins_1000_0006_srtm import plugins_1000_0006_srtm
from test.metadata.inbound.plugins.plugins_test_base import Plugins_Test_Base
@allure.feature("ๅคฉๆดฅๆต็ปSRTM") # ๆจกๅๆ ้ข
class Test_plugins_1000_0006_srtm(Plugins_Test_Base):
    """Inbound-plugin tests for the SRTM directory plugin (id 1000_0006)."""
    def create_plugins(self, file_info: CDMFilePathInfoEx = None) -> CPlugins:
        # Factory hook used by the base test class to build the plugin under test.
        return plugins_1000_0006_srtm(file_info)
    def test_file_info_list(self):
        # Fixture description: one directory named 'srtm' under 202008/ that
        # the plugin is expected to recognize.
        return [
            {
                self.Name_Test_File_Type: self.FileType_Dir,
                self.Name_Test_file_path: '202008{0}srtm'.format(CFile.sep()),
                self.Name_Test_object_confirm: self.Object_Confirm_IKnown,
                self.Name_Test_object_name: 'srtm'
            }
        ]
    def init_before_test(self):
        # Derive the test-data directories from the plugin's catalog title and
        # the configured test-data root in settings.
        plugins_info = self.create_plugins().get_information()
        plugins_catalog = CUtils.dict_value_by_name(plugins_info, CPlugins.Plugins_Info_Catalog_Title, '')
        self._test_file_root_path = settings.application.xpath_one(self.Path_Setting_Dir_Test_Data, '')
        self._test_file_parent_path = CFile.join_file(
            settings.application.xpath_one(self.Path_Setting_Dir_Test_Data, ''),
            plugins_catalog
        )
if __name__ == '__main__':
    pytest.main()
| [
"18437918096@163.COM"
] | 18437918096@163.COM |
7b795505b76f7218da50a6a9bf53221773d8cfae | 4738be4be8cda375e33ef606dbe82998d6e60bef | /common_nlp/word_histogram_comparison.py | 9cb68293cb5cf094917ad90f252498d2c754186e | [
"MIT"
] | permissive | Arieugon/Pesquisas | 39723d6ee642d50708f4a883b8d13faf5d018c3c | 87e3923c571d44774c36d4bc54e444cb1003b43b | refs/heads/master | 2023-01-21T10:23:53.645736 | 2020-12-02T19:23:48 | 2020-12-02T19:23:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | from scipy.spatial import distance as dist
try:
from textNormalization import textNormalization
except:
from common_nlp.textNormalization import textNormalization
import numpy as np
class word_histogram_comparison():
    """Compare texts by the distance between their word-frequency histograms.

    Distances come from scipy.spatial.distance; texts are turned into
    histograms by the project's textNormalization helper.  Items handed to
    the compare_* methods are (id, text) pairs.
    """
    def __init__(self):
        # Name -> scipy distance function.  NOTE: "Chebysev" is a historical
        # misspelling of "Chebyshev"; kept because callers key on it.
        self.scipy_methods = {
            "Euclidean": dist.euclidean,
            "Manhattan": dist.cityblock,
            "Chebysev": dist.chebyshev,
        }
    def compare_two_hist(self, histA, histB, method):
        """Distance between two equal-length histogram vectors under `method`."""
        return method(histA, histB)
    def compare_all_all(self, texts, method):
        """Compare each text (except the last) against every other text.

        Returns {text_id: {other_id: distance}}; textA[0] is the text's id.
        """
        results = {}
        for i in range(len(texts) - 1):
            textA = texts[i]
            other_texts = texts[:i] + texts[i + 1:]
            results[textA[0]] = self.compare_one_all(textA, other_texts, method)
        return results
    def compare_one_all(self, textA, texts, method):
        """Compare one (id, text) pair against a list of (id, text) pairs.

        Only words present in *both* histograms contribute to the distance.
        Returns {other_id: distance}.
        """
        txt_nrm = textNormalization()
        results = {}
        id_A, text_A = textA
        histA = txt_nrm.text_to_hist(text_A)
        for id_t, text_t in texts:
            hist_t = txt_nrm.text_to_hist(text_t)
            common_counts_A = []
            common_counts_t = []
            for word, count in histA.items():
                if word in hist_t:
                    common_counts_A.append(count)
                    common_counts_t.append(hist_t[word])
            results[id_t] = method(common_counts_A, common_counts_t)
        return results
    def texts_to_mean_hist(self, texts, method):
        """Mean word histogram over `texts` ({word: count / len(texts)}).

        `method` is unused but kept for interface compatibility with the
        other comparison methods.  Assumes text_to_hist returns a Counter
        (or Counter-addable mapping) -- TODO confirm against textNormalization.
        """
        # Fix: Counter was used here without ever being imported (NameError).
        from collections import Counter
        aux_hist = Counter()
        txt_nrm = textNormalization()
        for t in texts:
            aux_hist += txt_nrm.text_to_hist(t)
        texts_size = len(texts)
        return {word: count / texts_size for word, count in dict(aux_hist).items()}
    def mean_hist_dist_texts(self, texts, method):
        """Mean and standard deviation of each text's average distance to the rest.

        NOTE(review): the final mean divides by len(texts) although only
        len(texts) - 1 per-text averages are accumulated -- preserved as-is.
        """
        mean_hist = 0
        sd_hist = []
        for i in range(len(texts) - 1):
            textA = texts[i]
            other_texts = texts[:i] + texts[i + 1:]
            mean_aux = 0
            for other in other_texts:
                mean_aux += self.compare_two_hist(textA, other, method)
            mean_aux = mean_aux / len(other_texts)
            mean_hist += mean_aux
            sd_hist.append(mean_aux)
        return (mean_hist / len(texts), np.std(sd_hist))
| [
"danilopcarlotti@gmail.com"
] | danilopcarlotti@gmail.com |
72565b8fc05ad482fa524703a4d1c515750710d6 | b66bf5a58584b45c76b9d0c5bf828a3400ecbe04 | /week-03/day-1/hello_dictionary.py | 4019dd86864bc5aa97b59c9b4cbf5f586bf557d3 | [] | no_license | greenfox-velox/szepnapot | 1196dcb4be297f12af7953221c27cd1a5924cfaa | 41c3825b920b25e20b3691a1680da7c10820a718 | refs/heads/master | 2020-12-21T08:11:41.252889 | 2016-08-13T10:07:15 | 2016-08-13T10:07:15 | 58,042,932 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | import requests
import re
from bs4 import BeautifulSoup
# Fetch the "say hello in 20 languages" article and parse it with BeautifulSoup.
r = requests.get('http://pocketcultures.com/2008/10/30/say-hello-in-20-languages/')
raw_html = r.text
soup = BeautifulSoup(raw_html, 'html.parser')
# The greetings are rendered inside <strong> tags.
strongs = soup("strong")
GREETINGS = {}  # intended hello -> language mapping (never populated below)
hellos = r'\d+\.\s([A-Z]+\s?[A-Z]+.).?'  # e.g. "1. HELLO" -> captures the greeting
language = r'\d+.+\โ\s([A-Za-z\s()]+)'
for i in strongs:
    # Each <strong> tag's visible text.
    i = i.text
    print(i)
    # Greeting/country extraction below was left unfinished by the author.
    # if i[0].isdigit():
    #     hello_parts = re.match(hellos, i).group(1)
    #     # hy = (''.join(hello_parts)).capitalize()
    #     print(hello_parts)
    #     # country = re.match(language, i).group(0)
    #     # print(country)
| [
"silentpocok@gmail.com"
] | silentpocok@gmail.com |
97070ee6a3ede702c56f5d8433c185084a2ec962 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2321.py | cef4f2742fcb1a85496d41097792834162b31ccd | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from heapq import *
t = int(input())  # number of test cases, one per line after this
for i in range(1, t + 1):
    # N stalls and `people` occupants entering one at a time.
    N, people = [int(s) for s in input().split(" ")]
    # Min-heap storing (N - gap_size), so popping yields the largest gap.
    h = []
    heappush(h, 0)
    while people:
        people -= 1
        biggestSpace = N - heappop(h)
        if biggestSpace % 2 == 1:
            # Odd gap splits into two equal halves of size biggestSpace // 2.
            if people == 0:
                result = [int(biggestSpace/2)]
            heappush(h, N - int(biggestSpace/2))
            heappush(h, N - int(biggestSpace/2))
        else:
            # Even gap splits into halves of size (biggestSpace//2 - 1) and biggestSpace//2.
            if people == 0:
                result = [(int(biggestSpace/2) - 1), int(biggestSpace/2)]
            heappush(h, N - (int(biggestSpace/2) - 1))
            heappush(h, N - int(biggestSpace/2))
print("Case #{}: {} {}".format(i, max(result), min(result))) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
d924bb3f2ebc12d7519019fe62a01ce3fb6635e0 | dac960516a8b99ec7f0727282c4a9f1e58dcaa22 | /python/samples/v1_x/list_performance_report.py | cc73d135bcc413e5a63cb99be4fec1ea3518e208 | [
"Apache-2.0"
] | permissive | Baldri/googleads-adxbuyer-examples | 948da55e981cb85bfda1e4027beb482f29d1d87a | 285469fe1fff28416d0477c22e59746525694988 | refs/heads/master | 2023-02-22T18:50:29.166146 | 2020-04-17T20:52:22 | 2020-04-17T20:52:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,479 | py | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example lists the given account's Performance Report."""
import argparse
import pprint
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from googleapiclient.errors import HttpError
import samples_util
DEFAULT_ACCOUNT_ID = 'INSERT_ACCOUNT_ID'
DEFAULT_END_DATE_TIME = 'INSERT_END_DATE_TIME_HERE' # YYYY-MM-DD
DEFAULT_START_DATE_TIME = 'INSERT_START_DATE_TIME_HERE' # YYYY-MM-DD
DEFAULT_MAX_PAGE_SIZE = samples_util.MAX_PAGE_SIZE
def main(ad_exchange_buyer, account_id, start_date_time, end_date_time,
         max_results):
  """Retrieves and pretty-prints the account's performance report.

  Args:
    ad_exchange_buyer: the authorized adexchangebuyer service object.
    account_id: integer id of the account the report is retrieved for.
    start_date_time: report start, ISO 8601 timestamp in UTC (YYYY-MM-DD).
    end_date_time: report end, ISO 8601 timestamp in UTC (YYYY-MM-DD).
    max_results: maximum number of entries returned on one result page.
  """
  try:
    # Construct and execute the request.
    report = ad_exchange_buyer.performanceReport().list(
        accountId=account_id,
        startDateTime=start_date_time,
        endDateTime=end_date_time,
        maxResults=max_results).execute()
    print('Successfully retrieved the report.')
    pprint.pprint(report)
  except HttpError as e:
    # Surface API errors (bad account id, auth problems, ...) without a traceback.
    print(e)
if __name__ == '__main__':
# Optional arguments; overrides default values if set.
parser = argparse.ArgumentParser(description='Retrieves list of performance '
'metrics.')
parser.add_argument('-a', '--account_id', required=False, type=int,
default=DEFAULT_ACCOUNT_ID,
help=('The integer id of the account you\'re retrieving '
'the report for.'))
parser.add_argument('-s', '--start_date_time', required=False,
default=DEFAULT_START_DATE_TIME,
help=('The start time of the report in ISO 8601 '
'timestamp format using UTC. (YYYY-MM-DD)'))
parser.add_argument('-e', '--end_date_time', required=False,
default=DEFAULT_END_DATE_TIME,
help=('The end time of the report in ISO 8601 timestamp '
'format using UTC. (YYYY-MM-DD)'))
parser.add_argument('-m', '--max_results', required=False, type=int,
default=DEFAULT_MAX_PAGE_SIZE,
help=('The maximum number of entries returned on one '
'result page.'))
args = parser.parse_args()
if args.account_id:
ACCOUNT_ID = args.account_id
START_DATE_TIME = args.start_date_time
END_DATE_TIME = args.end_date_time
MAX_RESULTS = args.max_results
else:
ACCOUNT_ID = int('INSERT_ACCOUNT_ID')
START_DATE_TIME = 'YYYY-MM-DD' # Insert startDateTime here.
END_DATE_TIME = 'YYYY-MM-DD' # Insert endDateTime here.
MAX_RESULTS = samples_util.MAX_PAGE_SIZE
try:
service = samples_util.GetService()
except IOError as ex:
print(f'Unable to create adexchangebuyer service - {ex}')
print('Did you specify the key file in samples_util.py?')
sys.exit(1)
main(service, ACCOUNT_ID, START_DATE_TIME, END_DATE_TIME, MAX_RESULTS)
| [
"msaniscalchi@users.noreply.github.com"
] | msaniscalchi@users.noreply.github.com |
63cccd1521fb66bffc03c4ee7187a82d5af2de60 | 4c10305652193f7b1df8af4dfe28742910f07fcf | /hw/ip/otbn/util/rig/snippet_gen.py | 50d736d4072bbf9555a1219d46d06987be6eb924 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jake-ke/opentitan | fc162fd3ec2dc3dff7cec6745379ea5aa3d7a5e0 | a7b16226ce13752896a71399910e39c7a5bda88a | refs/heads/master | 2023-07-05T12:38:49.186899 | 2021-01-21T22:38:13 | 2021-01-22T02:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,802 | py | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''A module defining a base class for a snippet generator.
The generators in the ./gens/ subdirectory all derive from this class. To
actually generate some snippets, use the wrapper in snippet_gens.py.
'''
from typing import Callable, Optional, Tuple
from shared.insn_yaml import Insn, InsnsFile
from .program import Program
from .model import Model
from .snippet import Snippet
# A continuation type that allows a generator to recursively generate some more
# stuff.
GenCont = Callable[[Model, Program], Optional[Tuple[Snippet, Model]]]
# The return type of a single generator. This is a tuple (snippet, model).
# snippet is a generated snippet. If the program is done (i.e. every execution
# ends with ecall) then model is None. Otherwise it is a Model object
# representing the state of the processor after executing the code in the
# snippet(s).
GenRet = Tuple[Snippet, Optional[Model]]
class SnippetGen:
'''A parameterised sequence of instructions
These can be added to the instructions generated so far for a given random
binary.
'''
def gen(self,
cont: GenCont,
model: Model,
program: Program) -> Optional[GenRet]:
'''Try to generate instructions for this type of snippet.
On success, inserts the instructions into program, updates the model,
and returns a GenRet tuple. See comment above the type definition for
more information.
On failure, leaves program and model unchanged and returns None. There
should always be at least one snippet generator with positive weight
(see pick_weight below) that succeeds unconditionally. This will be the
ecall generator. Failure is interpreted as "this snippet won't work
with the current program state", but the generator may be retried
later.
The cont argument is a continuation, used to call out to more
generators in order to do recursive generation. It takes a (mutable)
model and program and picks a sequence of instructions. The paths
through the generated code don't terminate with an ECALL but instead
end up at the resulting model.pc.
'''
raise NotImplementedError('gen not implemented by subclass')
def pick_weight(self,
model: Model,
program: Program) -> float:
'''Pick a weight by which to multiply this generator's default weight
This is called for each generator before we start trying to generate a
snippet for a given program and model state. This can be used to
disable a generator when we know it won't work (if model.fuel is too
small, for example).
It can also be used to alter weights depending on where we are in the
program. For example, a generator that generates ecall to end the
program could decrease its weight when size is large, to avoid
generating tiny programs by accident.
The default implementation always returns 1.0.
'''
return 1.0
def _get_named_insn(self, insns_file: InsnsFile, mnemonic: str) -> Insn:
'''Get an instruction from insns_file by mnemonic
This is used for specialized snippets that need to generate a specific
instruction and wraps the error handling for when someone has removed
the instruction from the file.
'''
insn = insns_file.mnemonic_to_insn.get(mnemonic.lower())
if insn is None:
raise RuntimeError('No {} instruction in instructions file.'
.format(mnemonic.upper()))
return insn
| [
"rswarbrick@gmail.com"
] | rswarbrick@gmail.com |
4860167b800f06c9498077979e99c03567633e94 | 9737a5e2cfe5521bb9731a356a7639d0dc3692de | /Exercises/week_2_netmiko/exercise6d.py | 88af3c6a3aa08b2bb55ebe967bd9c0ba844d1c91 | [] | no_license | akushnirubc/pyneta | 5c53cbcf42e2450ce6a2d7e6591d671661e84ba0 | ee68205c0b91974ea1cd79b8c06c36ae083fb02c | refs/heads/main | 2023-06-18T18:02:56.242732 | 2021-07-13T21:43:51 | 2021-07-13T21:43:51 | 358,647,513 | 0 | 0 | null | 2021-05-24T21:39:18 | 2021-04-16T15:45:34 | JavaScript | UTF-8 | Python | false | false | 1,272 | py | # Using SSH and netmiko connect to the Cisco4 router. In your device definition, specify both an 'secret' and a 'session_log'. Your device definition should look as follows:
# password = getpass()
# device = {
# "host": "cisco4.lasthop.io",
# "username": "pyclass",
# "password": password,
# "secret": password,
# "device_type": "cisco_ios",
# "session_log": "my_output.txt",
# }
# Execute the following sequence of events using Netmiko:
# Use the write_channel() method to send the 'disable' command down the SSH channel.
# Note, write_channel is a low level method so it requires that you add a newline to the end of your 'disable' command.
from netmiko import ConnectHandler
from getpass import getpass
# Prompt interactively so the password never appears in source or shell history.
password = getpass()
device = {
    "host": "cisco4.lasthop.io",
    "username": "pyclass",
    "password": password,
    "secret": password,
    "device_type": "cisco_ios",
    # Netmiko writes a transcript of the whole SSH session to this file.
    "session_log": "my_output.txt",
}
net_connect = ConnectHandler(**device)
print("\nExit priviledged exec (disable), Current prompt")
# write_channel() is low-level: the trailing "\n" must be supplied explicitly.
net_connect.write_channel("disable\n")
print ("\n>>>>>>>")
# print("Config mode check: {}".format(net_connect.exit_config_mode())
print("Current Prompt: {}".format(net_connect.find_prompt()))
print()
net_connect.disconnect() | [
"alex.kushnir@ubc.ca"
] | alex.kushnir@ubc.ca |
40355038bf589f532800b1282ca963ededb8e481 | bf0c13d412a7021b299c5e0622e63e72172cf725 | /week1/Hackerrank/easy/finding_percentage.py | 559579e9933496e2810d3b9115a6d038eedc180f | [] | no_license | Alibek120699/BFDjango | 765e734e925041947f607a1d15228309dfa3e647 | eac06c317551c561ffccb44750862972ae11dea3 | refs/heads/master | 2022-12-01T15:49:39.402815 | 2020-04-19T21:09:39 | 2020-04-19T21:09:39 | 233,657,360 | 0 | 0 | null | 2022-11-22T05:49:56 | 2020-01-13T17:50:13 | Python | UTF-8 | Python | false | false | 377 | py | if __name__ == '__main__':
    # Number of student records to read.
    n = int(input())
    student_marks = {}
    for _ in range(n):
        # Each line: a name followed by that student's scores.
        name, *line = input().split()
        scores = list(map(float, line))
        student_marks[name] = scores
    query_name = input()
    marks = student_marks[query_name]
    # Average the queried student's scores.
    total = 0
    for i in marks:
        total += i
    res = total/len(marks)
    # Print with exactly two decimal places.
    print('%.2f' % res)
| [
"sayakalibek1@gmail.com"
] | sayakalibek1@gmail.com |
4a95586f5e04b017913bbc56e25ca916b195e870 | 9322c270beaf1019328bf14c836d167145d45946 | /raoteh/sampler/_sample_mjp.py | fdab6bbbe2bc9dda67fb576bd4a2b27627ccec95 | [] | no_license | argriffing/raoteh | 13d198665a7a3968aad8d41ddad12c08d36d57b4 | cdc9cce8fdad0a79dbd90dfcdec6feece8fc931f | refs/heads/master | 2021-01-22T19:41:25.828133 | 2014-03-10T22:25:48 | 2014-03-10T22:25:48 | 10,087,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,172 | py | """
Rao-Teh samples of MJP trajectories on trees.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import networkx as nx
from raoteh.sampler import _util, _mjp
__all__ = []
#TODO move more stuff from _sampler.py into this module
def resample_poisson(T, state_to_rate, root=None):
    """
    Sample Poisson events along each edge of a state-annotated trajectory tree.

    Each edge of T is subdivided at Poisson event times drawn with the rate
    associated (via state_to_rate) with the edge's state; the new event nodes
    get fresh integer labels continuing the existing numbering.

    Parameters
    ----------
    T : weighted undirected acyclic networkx graph
        Weighted tree whose edges are annotated with states.
        In other words, this is an MJP trajectory.
    state_to_rate : dict
        Map the state to the expected number of poisson events
        per edge weight.
    root : integer, optional
        Root of the tree.

    Returns
    -------
    T_out : weighted undirected acyclic networkx graph
        Weighted tree without state annotation.
    """
    # If no root was specified then pick one arbitrarily.
    if root is None:
        root = _util.get_first_element(T)
    # Define the next node.
    # Assumes node labels are integers (cf. the `root` parameter doc).
    next_node = max(T) + 1
    # Build the list of weighted edges.
    weighted_edges = []
    for a, b in nx.bfs_edges(T, root):
        weight = T[a][b]['weight']
        state = T[a][b]['state']
        rate = state_to_rate[state]
        prev_node = a
        total_dwell = 0.0
        while True:
            # Draw successive exponential waiting times until the edge is used up.
            dwell = np.random.exponential(scale = 1/rate)
            if total_dwell + dwell > weight:
                break
            total_dwell += dwell
            mid_node = next_node
            next_node += 1
            weighted_edges.append((prev_node, mid_node, dwell))
            prev_node = mid_node
        # Close the edge with the leftover length after the last event.
        weighted_edges.append((prev_node, b, weight - total_dwell))
    # Return the resampled tree with poisson events on the edges.
    T_out = nx.Graph()
    T_out.add_weighted_edges_from(weighted_edges)
    return T_out
def get_uniformized_transition_matrix(Q,
        uniformization_factor=None, omega=None):
    """
    Uniformize a continuous-time rate matrix into a transition matrix.

    Parameters
    ----------
    Q : directed weighted networkx graph
        Rate matrix.
    uniformization_factor : float, optional
        A value greater than 1; may not be combined with omega.
    omega : float, optional
        The uniformization rate.  When absent it is derived as
        uniformization_factor times the largest total exit rate.

    Returns
    -------
    P : directed weighted networkx graph
        Transition probability matrix.
    """
    if (uniformization_factor is not None) and (omega is not None):
        raise ValueError('the uniformization factor and omega should not both be provided')
    # Total exit rate out of each state.
    exit_rates = _mjp.get_total_rates(Q)
    # Derive the uniformization rate when the caller did not supply one.
    if omega is None:
        factor = 2 if uniformization_factor is None else uniformization_factor
        omega = factor * max(exit_rates.values())
    P = nx.DiGraph()
    for state_a in Q:
        if not Q[state_a]:
            # States with no outgoing rates get no transitions at all.
            continue
        # Self-loop absorbs the probability mass not spent on transitions.
        P.add_edge(state_a, state_a, weight=1.0 - exit_rates[state_a] / omega)
        for state_b in Q[state_a]:
            P.add_edge(state_a, state_b, weight=Q[state_a][state_b]['weight'] / omega)
    return P
| [
"argriffi@ncsu.edu"
] | argriffi@ncsu.edu |
d214dc2ec55b92f64a7b41190073cca7608a26c1 | d9a22d4dcdfc0c28176c0e8afd784b30d275597e | /multi/result_commands.py | 714185c8a5cd3ed163af2c3f3d34ece8aa591eaa | [] | no_license | jlec/relax | fda1b3ff77be0afc21c2e6cc52348ae7635cd07a | c317326ddeacd1a1c608128769676899daeae531 | refs/heads/master | 2016-09-08T00:27:57.256090 | 2015-02-10T12:24:55 | 2015-02-10T12:24:55 | 30,596,131 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,870 | py | ###############################################################################
# #
# Copyright (C) 2007 Gary S Thompson (https://gna.org/users/varioustoxins) #
# Copyright (C) 2008-2013 Edward d'Auvergne #
# #
# This file is part of the program relax (http://www.nmr-relax.com). #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
# Module docstring.
"""Module containing command objects sent from the slaves back to the master."""
# Python module imports.
import sys
# multi module imports.
from multi.misc import Result
class Result_command(Result):
"""A general result command - designed to be subclassed by users.
This is a general result command from a Slave command that will have its run() method called on
return to the master processor.
@see: multi.processor.Slave_command.
"""
def __init__(self, processor, completed, memo_id=None):
#TODO: check this method is documnted by its parent
super(Result_command, self).__init__(processor=processor, completed=completed)
self.memo_id = memo_id
def run(self, processor, memo):
"""The run method of the result command.
This method will be called when the result command is processed by the master and should
carry out any actions the slave command needs carried out on the master (e.g. save or
register results).
@see: multi.processor.Processor.
@see: multi.processor.Slave_command.
@see: multi.memo.Memo.
@param processor: The master processor that queued the original Slave_command.
@type processor: Processor instance
@param memo: A memo that was registered when the original slave command was placed on
the queue. This provides local storage on the master.
@type memo: Memo instance or None
"""
pass
class Batched_result_command(Result_command):
    """A batch of result commands returned from a slave in a single message.

    When processed on the master, each contained result command is handed to
    the processor, after replaying the slave's captured stdout/stderr output
    in its original order.
    """
    def __init__(self, processor, result_commands, io_data=None, completed=True):
        super(Batched_result_command, self).__init__(processor=processor, completed=completed)
        self.result_commands = result_commands
        # Store the IO data to print out via the run() method called by the master.
        self.io_data = io_data
    def run(self, processor, batched_memo):
        """The results command to be run by the master.

        @param processor:       The processor instance.
        @type processor:        Processor instance
        @param batched_memo:    The batched memo object (must be None).
        @type batched_memo:     Memo instance or None
        """
        # First check that we are on the master.
        processor.assert_on_master()
        # Unravel the IO stream data on the master in the correct order
        # (stream 0 is stdout, anything else stderr).
        for line, stream in self.io_data:
            if stream == 0:
                sys.stdout.write(line)
            else:
                sys.stderr.write(line)
        # Batched result commands must not carry a memo.
        # Fix: the original duplicated this check, leaving the first msg
        # assignment dead; a single check now raises immediately.
        if batched_memo is not None:
            msg = "batched result commands shouldn't have memo values, memo: " + repr(batched_memo)
            raise ValueError(msg)
        for result_command in self.result_commands:
            processor.process_result(result_command)
class Null_result_command(Result_command):
"""An empty result command.
This command should be returned from slave_command if no other Result_command is returned. This
allows the queue processor to register that the slave processor has completed its processing and
schedule new Slave-commands to it.
"""
def __init__(self, processor, completed=True):
super(Null_result_command, self).__init__(processor=processor, completed=completed)
class Result_exception(Result_command):
"""Return and raise an exception from the salve processor."""
def __init__(self, processor, exception, completed=True):
"""Initialise the result command with an exception.
@param exception: An exception that was raised on the slave processor (note the real
exception will be wrapped in a Capturing_exception.
@type exception: Exception instance
"""
super(Result_exception, self).__init__(processor=processor, completed=completed)
self.exception = exception
def run(self, processor, memo):
"""Raise the exception from the Slave_processor."""
raise self.exception
| [
"bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5"
] | bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5 |
e8bcf51f6b4bea52914dcb421fdbe9d4c297c1e5 | 10b3d1ce02eaa4908dc16ca378ddfb1955b2d625 | /MV3D_TF_release/tests/private/test_save_pretrained_params_from_npy.py | eb62bf9b4a8afd458e9a0bb684262a62725bed00 | [
"MIT",
"BSD-3-Clause"
] | permissive | ZiningWang/Sparse_Pooling | 7281aa0d974849eac8c48faa5ba08519b091ef6e | f46882832d0e2fed5ab4a0af15cead44fd3c6faa | refs/heads/master | 2023-05-26T08:47:16.232822 | 2023-05-20T08:39:11 | 2023-05-20T08:39:11 | 141,640,800 | 56 | 21 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | #This is for transforming the .npy file saved in python2 to python3
import numpy as np
import os
import scipy.io
# Load the pickled dict of layer params; .item() unwraps the 0-d object
# array that np.load returns for a pickled dict.
params = np.load('mscnn_ped_cyc_kitti_trainval_2nd_iter_15000.npy').item()
#make matlab file
# Write the whole param dict as a MATLAB .mat file (one variable per key).
scipy.io.savemat('mscnn_ped_cyc_kitti_trainval_2nd_iter_15000.mat',params)
''' #make txt files(failed)
os.mkdir('npy_params_csv')
for layer_name in params.keys():
layer = params[layer_name]
if type(layer) is dict:
os.mkdir(layer_name)
for layer_param_name in layer:
layer_param = layer[layer_param_name]
np.savetxt(layer_param_name+'.csv',layer_param)
else:
np.savetxt(layer_name+'.csv',layer)
'''
| [
"kiwoo.shin@berkeley.edu"
] | kiwoo.shin@berkeley.edu |
2dd1609af2025fa93e9fc6653d6d33aeb97f9b19 | 493431b109586bc199c0094bb6952b359c30777a | /t/step/test_header_step.py | 22289673b733113ef07729325bf5baffc937a5ad | [] | no_license | phonybone/Rnaseq | 3ec92ba79c6772ffb5ac146ee98dad87663f17e7 | c12d5380db2e36f24b6e5cb84c55a984efdd9cd7 | refs/heads/master | 2020-05-31T21:19:40.175543 | 2011-10-06T00:01:49 | 2011-10-06T00:01:49 | 1,409,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,833 | py | import unittest, os, sys, re
dir=os.path.normpath(os.path.dirname(os.path.abspath(__file__))+"/../..")
sys.path.append(os.path.join(dir+'/lib'))
sys.path.append(os.path.join(dir+'/ext_libs'))
from Rnaseq import *
from RnaseqGlobals import *
from warn import *
class TestHeaderStep(unittest.TestCase):
    """Tests for the 'header' step of the 'link' pipeline fixture."""

    def setUp(self):
        # Bootstrap the globals, then load the fixture readset and build the
        # pipeline for it.
        RnaseqGlobals.initialize(__file__, testing=True)  # not to be confused with sys.argv
        fixture_path = RnaseqGlobals.root_dir() + '/t/fixtures/readsets/readset1.syml'
        self.readset = Readset.load(fixture_path)[0]
        self.pipeline = Pipeline.get_pipeline(name='link', readset=self.readset).load_steps()

    def test_setup(self):
        # Sanity-check that the fixture loaded into the expected objects.
        self.assertEqual(self.readset.name, 'readset1.syml')
        self.assertEqual(self.pipeline.name, 'link')
        self.assertTrue(self.pipeline.context != None)

    def test_readset_exports(self):
        # Every exported readset attribute must appear as a shell 'export'.
        script = self.pipeline.step_with_name('header').sh_script(self.pipeline.context)
        for export_name in self.readset.exports:
            pattern = 'export %s=%s' % (export_name, getattr(self.readset, export_name))
            self.assertRegexpMatches(script, pattern)

    def test_links(self):
        # Every reads file must be symlinked into the working directory.
        script = self.pipeline.step_with_name('header').sh_script(self.pipeline.context)
        for reads_file in self.readset.reads_files:
            pattern = 'ln -fs %s %s' % (reads_file, self.readset.working_dir)
            self.assertRegexpMatches(script, pattern)
# Collect every test method from TestHeaderStep and run it with verbose output.
suite = unittest.TestLoader().loadTestsFromTestCase(TestHeaderStep)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"vcassen@bento.systemsbiology.net"
] | vcassen@bento.systemsbiology.net |
d2239c158fa3aa78bbd5bf3e8a5cb23e8b68c2fc | bfe394e1b7d8a2ff34e37ae65df8cc52070c69d8 | /Source/External/TrainUtility/Source/TrainProcess_FileControl.py | 421e6bb0c5f2eb8f3175960b7bb6fcbe8f89070f | [
"MIT"
] | permissive | Jack-GVDL/PredictModel | bb32d37a5c18a656d5ebed36098ba3fac435fb96 | 20495072fb776c31c4bb5f2ddeecda1b43fcc52e | refs/heads/main | 2023-04-30T05:47:34.364328 | 2021-05-11T09:25:13 | 2021-05-11T09:25:13 | 366,314,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,936 | py | import os
from typing import *
from .TrainProcess import TrainProcess
from .ModelInfo import ModelInfo
from .FileControl import FileControl_Local
class TrainProcess_FileControlBuilder(TrainProcess):
    """Train-process step that creates the run directory and attaches a
    started FileControl_Local to the ModelInfo."""

    def __init__(self):
        super().__init__()
        self.name = "FileControlBuilder"
        # path_base should already exist; path_folder is the new per-run
        # directory to create under it; path_src is their join, set on execute.
        self.path_base: str = ""
        self.path_folder: str = ""
        self.path_src: str = ""

    def __del__(self):
        return

    def setTargetPath(self, base: str, folder: str) -> None:
        """Record where the run directory should be created."""
        self.path_base = base
        self.path_folder = folder

    def execute(self, stage: int, info: ModelInfo, data: Dict) -> None:
        """Create the target directory (if absent), then hand a started
        FileControl_Local rooted there to the model info."""
        self.path_src = os.path.join(self.path_base, self.path_folder)
        if not os.path.isdir(self.path_src):
            os.mkdir(self.path_src)
        control = FileControl_Local()
        control.setLocalRoot(self.path_src)
        control.start()
        info.file_control = control

    def getPrintContent(self, stage: int, info: ModelInfo) -> str:
        return self._getContent_(info)

    def getLogContent(self, stage: int, info: ModelInfo) -> str:
        return self._getContent_(info)

    def _getContent_(self, info: ModelInfo) -> str:
        # Shared text for both print and log output.
        return f"Operation: {self.name}"
class TrainProcess_FileControlUpdater(TrainProcess):
    """Train-process step that refreshes the file control held by ModelInfo."""

    def __init__(self):
        super().__init__()
        self.name = "FileControlUpdater"

    def __del__(self):
        return

    def execute(self, stage: int, info: ModelInfo, data: Dict) -> None:
        # Delegate the actual refresh to the file control owned by the info.
        info.file_control.update()

    def getPrintContent(self, stage: int, info: ModelInfo) -> str:
        return self._getContent_(info)

    def getLogContent(self, stage: int, info: ModelInfo) -> str:
        return self._getContent_(info)

    def _getContent_(self, info: ModelInfo) -> str:
        # Shared text for both print and log output.
        return f"Operation: {self.name}"
| [
"33114105+Jack-GVDL@users.noreply.github.com"
] | 33114105+Jack-GVDL@users.noreply.github.com |
08b3a3fbc165a4633691df1cd0579378c3fa8569 | a62fe37f8d633cbeb75d8cf2487f24e2bb0c13ce | /test/1.1.0/08/EndnoteObjectTest08.py | 73e2862f293a89600750726f5cfc582610fa3a47 | [
"Apache-2.0"
] | permissive | monperrus/cff-converter-python | ddf4e28329c48b0d3db4709de8765dfbfc94ad0b | b7b789a80415c6020e864782b601f21188a149f4 | refs/heads/master | 2020-11-27T10:40:11.447633 | 2019-12-19T15:58:01 | 2019-12-19T15:58:01 | 229,408,045 | 0 | 0 | Apache-2.0 | 2019-12-21T09:54:20 | 2019-12-21T09:54:19 | null | UTF-8 | Python | false | false | 1,468 | py | from cffconvert import EndnoteObject
import unittest
import os
import ruamel.yaml as yaml
class EndnoteObjectTest(unittest.TestCase):
    """Unit tests for EndnoteObject built from the fixture CITATION.cff."""

    def setUp(self):
        # Parse the CITATION.cff that sits next to this test module.
        fixture = os.path.join(os.path.dirname(__file__), "CITATION.cff")
        with open(fixture, "r") as handle:
            cff_object = yaml.safe_load(handle.read())
        self.eo = EndnoteObject(cff_object, initialize_empty=True)

    def test_check_cff_object(self):
        # Must not raise; there is no return value to assert on.
        self.eo.check_cff_object()

    def test_author(self):
        self.eo.add_author()
        self.assertEqual(self.eo.author, '%A Van Zandt, Steven\n%A van Zandt, Steven\n')

    def test_doi(self):
        self.eo.add_doi()
        self.assertIsNone(self.eo.doi)

    def test_keyword(self):
        self.eo.add_keyword()
        self.assertIsNone(self.eo.keyword)

    def test_name(self):
        self.eo.add_name()
        self.assertEqual(self.eo.name, '%T cff-converter-python\n')

    def test_print(self):
        # The serialized record must match the endnote.enw fixture verbatim.
        produced = self.eo.add_all().print()
        fixture = os.path.join(os.path.dirname(__file__), "endnote.enw")
        with open(fixture, "r") as handle:
            self.assertEqual(produced, handle.read())

    def test_url(self):
        self.eo.add_url()
        self.assertIsNone(self.eo.url)

    def test_year(self):
        self.eo.add_year()
        self.assertEqual(self.eo.year, '%D 2018\n')
| [
"j.spaaks@esciencecenter.nl"
] | j.spaaks@esciencecenter.nl |
b30391250e931a835456f54fed3840720a37cb94 | ba0a2b0d2d1534443ea34320675aadfa378457b6 | /Tree/Q776_Split BST.py | cec5e5a356eff9e2668e7a6734676b4d15d9afdc | [] | no_license | Luolingwei/LeetCode | 73abd58af116f3ec59fd6c76f662beb2a413586c | 79d4824879d0faed117eee9d99615cd478432a14 | refs/heads/master | 2021-08-08T17:45:19.215454 | 2021-06-17T17:03:15 | 2021-06-17T17:03:15 | 152,186,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 705 | py |
# Split a BST around value V while preserving BST structure, recursively:
# - if root.val <= V, root and its whole left subtree belong to the "low"
#   tree; split the right subtree and hang its low part back on root.right.
# - otherwise root and its whole right subtree belong to "high"; split the
#   left subtree and hang its high part back on root.left.
class Solution:
    def splitBST(self, root, V: int):
        """Return (low, high): subtrees holding values <= V and > V."""
        if not root:
            return None, None
        if root.val <= V:
            lower, higher = self.splitBST(root.right, V)
            root.right = lower
            return root, higher
        lower, higher = self.splitBST(root.left, V)
        root.left = higher
        return lower, root
"564258080@qq.com"
] | 564258080@qq.com |
289900379592aeb4d1dacc726059621149bf2852 | c1d5c1285793660982813fd49dfb48620bc95b36 | /linode/commands/urls.py | d058c2eaa689673fe8a916325b5892033662ece2 | [
"MIT"
] | permissive | anvetsu/pylinode | 8e74330a8ae5f789cd9c4512efdc9f20aada61b9 | 3b15a153fa0528df203c0013949827ff836759f5 | refs/heads/master | 2020-04-06T14:05:39.106008 | 2018-11-14T10:06:43 | 2018-11-14T10:06:43 | 157,527,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # -*- coding: utf-8 -*-
# Linode API v3 action fragments, appended to the base request URL.

# Instance lifecycle
LINODE_CREATE = '&api_action=linode.create'
LINODE_CLONE = '&api_action=linode.clone'
LINODE_BOOT = '&api_action=linode.boot'
LINODE_LIST = '&api_action=linode.list'
# Disk management
LINODE_DISK_CREATE = '&api_action=linode.disk.create'
LINODE_DISK_FROM_IMAGE = '&api_action=linode.disk.createfromimage'
# Networking
LINODE_IP = '&api_action=linode.ip.list'
# Availability queries (datacenters, distributions, plans)
AVAIL_DATACENTER = '&api_action=avail.datacenters'
AVAIL_DISTRIBUTION = '&api_action=avail.distributions'
AVAIL_PLANS = '&api_action=avail.linodeplans'
# Jobs and configuration profiles
JOB_LIST = '&api_action=linode.job.list'
LINODE_CREATE_CONFIG = '&api_action=linode.config.create'
LINODE_UPDATE_CONFIG='&api_action=linode.config.update'
LINODE_GET_CONFIG='&api_action=linode.config.list'
LINODE_UPDATE='&api_action=linode.update'
# Images and deletion
AVAIL_IMAGE = '&api_action=image.list'
DELETE_LINODE = '&api_action=linode.delete'
| [
"anandpillai@letterboxes.org"
] | anandpillai@letterboxes.org |
0c172bfb2db2013120d8d28c36187938f0d125b1 | afbae53ad471e34eb9918f28cc2e27e1ade6fe93 | /vivo_public_modules/spiders/item_padding_spider.py | 4a418287c83ef08a9839f1c20550b6cb9809452d | [] | no_license | wangsanshi123/spiders | 5436431feec8d9591b1b533c7c6da261142403bd | 1b42878b694fabc65a02228662ffdf819e5dcc71 | refs/heads/master | 2020-03-19T13:49:31.161232 | 2018-06-08T09:24:10 | 2018-06-08T09:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,472 | py | # -*- coding: utf-8 -*-
'''
Author: (original author name garbled by a character-encoding error in this copy)
Date: 2018.04.17
Version: 1.0

The database schema requires every scraped record to be padded with some
fields that are filled in directly (website_id, website_type, crawl_time,
etc.) and some that must be computed (model_id, user_id, content_id,
refer_id, etc.). Since every spider needs this padding step, this module
provides a spider base class that performs it in one place.
'''
import hashlib
import datetime
import inspect
from scrapy import Item
from scrapy import Spider
class ItemPaddingSpider(Spider):
    '''
    Spider base class that fills in the shared and derived item fields
    (website_id, website_type, crawl_time, refer_id, and the computed
    model_id / user_id / content_id hashes), so that individual spiders
    do not each re-implement this bookkeeping.
    '''
    def __init__(self, name=None, **kwargs):
        '''
        Declare which source fields feed each computed *_id field.
        :param self: the instance itself
        :param name: spider name, forwarded by scrapy
        :param kwargs: keyword arguments used to update self.__dict__
        :return None
        '''
        super(ItemPaddingSpider, self).__init__(name, **kwargs)
        self.id_field_list = dict()
        # The ORDER of each list matters: the id is the md5 of the field
        # values concatenated in this order, so a different order yields a
        # different string and breaks de-duplication.
        # model_id depends on website_id; model_name is required. When that
        # is not enough to tell two models apart, ram and rom may also be
        # supplied; items without them can omit the fields or set them to ''.
        self.id_field_list['model_id'] = ['website_id', 'model_name', 'ram', 'rom']
        # user_id depends on website_id; user_name is required and together
        # they are enough to distinguish two users.
        self.id_field_list['user_id'] = ['website_id', 'user_name']
        # content_id depends on website_id; main_body is required. user_name,
        # date and time are included when the crawled site provides them; if
        # timestamps exist, date/time are datetime.date and datetime.time-like
        # values respectively.
        self.id_field_list['content_id'] = ['website_id', 'main_body', 'user_name', 'date', 'time']
        # website_id and website_type must be set by concrete subclasses.
        self.website_id = None
        self.website_type = None
    def padding_item(self, item, refer_id):
        '''
        Fill in the bookkeeping fields of *item* and return it, so each
        spider does not have to do this itself.
        :param self: the instance itself
        :param item: item to pad; may be a dict subclass or a scrapy.Item
                     subclass (scrapy.Item fields cannot simply be assigned
                     without declaring a Field, so it is converted to a dict)
        :param refer_id: id of the content this item was derived from; it is
                         unrelated to the item itself
        :return: the fully padded item (always a plain dict)
        :raise: AttributeError when required fields such as website_type,
                website_id or user_name are missing
        '''
        # Creating new fields on a scrapy.Item instance is too cumbersome,
        # so when item is an Item subclass convert it to a plain dict first.
        if Item in inspect.getmro(item.__class__):
            item = {field: item[field] for field in item}
        if not self.website_id or not self.website_type:
            raise AttributeError('Error: spider object do not have necessary attributes.')
        # Fields every record must carry.
        item['refer_id'] = refer_id
        item['crawl_time'] = datetime.datetime.now()
        item['website_type'] = self.website_type
        item['website_id'] = self.website_id
        # Check the item contains at least one of the essential fields.
        # The item is considered usable when any one of the three fields can
        # contribute a record: a model-info item may not yet have content or
        # user data, a user-info item may lack content and model data, and a
        # content item may lack user and model data. Since "has any data at
        # all" is hard to verify precisely, stay alert to avoid data loss.
        meet_id_condition = False
        for field in item:
            if field in ['main_body', 'user_name', 'model_name']:
                meet_id_condition = True
                break
        # If none of the three essential fields is present there is nothing
        # worth writing to the database: raise and bail out.
        if not meet_id_condition:
            raise AttributeError('Error: item does not have necessary field of database.')
        for id_field in self.id_field_list:
            # Generate model_id, user_id and content_id.
            valid_field_num = 0
            id_component = ''
            for field in self.id_field_list[id_field]:
                if field in item:
                    valid_field_num += 1
                    id_component += str(item[field])
            # website_id + model_name / website_id + user_name /
            # website_id + main_body: at least two valid fields must be
            # present before the hash is a meaningful *_id value.
            if valid_field_num > 1:
                item[id_field] = hashlib.md5(id_component.encode('utf8')).hexdigest()
        return item
    def parse(self, response):
        '''
        Parse the response data. This base class is abstract: concrete
        spiders must override this method.
        :param self: the instance itself
        :param response: the response returned by the Scrapy framework
        :return: item
        :raise: NotImplementedError, always, since this class must not be
                used directly
        '''
        raise NotImplementedError
| [
"118319592@qq.com"
] | 118319592@qq.com |
2b242c84ce23da01b9930b418607811c97947727 | 2bb607a8b92cc4014d9e3e4368019a20442fd6ac | /TestRunner.py | 05a21e6e452422bb9d4f6b6da7240b9b68e9a135 | [] | no_license | 164461130/selenium-UI-Python- | 55c47d4dddf2181fb96afcb36d378428fa50681d | 2bf7e65558c825ffd0aef541a14107de27b3de10 | refs/heads/master | 2020-04-14T11:24:20.932244 | 2018-12-12T10:05:50 | 2018-12-12T10:05:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # -*- encoding: utf-8 -*-
# @Time : 2017/12/25 14:31
# @Author : mike.liu
# @File : TestRunner.py
import unittest
from testcase.test_chromeTest import BrowserTest
if __name__ == '__main__':
    suite = unittest.TestSuite()
    # BUG FIX: the original called test_firefox('test_chrome'), but
    # 'test_firefox' is an undefined name here (only BrowserTest is
    # imported), so the script crashed with NameError. Both test methods
    # live on BrowserTest, so build each case from that class.
    suite.addTest(BrowserTest('test_chrome'))
    suite.addTest(BrowserTest('test_firefox'))
    # Run the collected test cases.
    runner = unittest.TextTestRunner()
    runner.run(suite)
"mike.liu@jinfuzi.com"
] | mike.liu@jinfuzi.com |
7db5a055b747a9055b13854037975cec49b0449a | 960dd60c263cea329e27584b03bb430b025fe05a | /venv/bin/gunicorn | f81ebcc4a2bdfc6d083fcd3c5c9594788335a80b | [] | no_license | RuchiBhardwaj/covid_pipeline | 18b3c0ae5836487b150ad112d86e312544d19f9d | f21a98593383caed532b9e7178e70172984cd635 | refs/heads/master | 2022-12-04T09:02:47.076901 | 2020-06-08T14:12:18 | 2020-06-08T14:12:18 | 268,835,744 | 0 | 2 | null | 2022-11-27T19:32:17 | 2020-06-02T15:17:20 | Python | UTF-8 | Python | false | false | 267 | #!/home/nineleaps/PycharmProjects/COVID19_Airflow/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes (-script.py / .exe) from argv[0] so
    # gunicorn sees its canonical program name, then hand off to its CLI and
    # propagate the exit code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"ruchi.bhardwaj@nineleaps.com"
] | ruchi.bhardwaj@nineleaps.com | |
da7b85fc67c7bfadaab88cc1238a1038576a4cad | bdda06ffbbd49c63a84fbc3bb26a55842d2b2d0a | /mysite/settings.py | 8de5512eefee0dd550c30e69c67f04291c4e0276 | [] | no_license | todqabb/my-first-blog | f820bef4321e06b20fa1e91dc9c9c5d0aaa7cd82 | 67b4dd4b442094ba372e5e2d36fff005159850f7 | refs/heads/master | 2021-01-20T05:22:08.840767 | 2017-08-25T20:22:16 | 2017-08-25T20:22:16 | 101,439,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,200 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'ko)ab0)2k5w-6vsct0j--9sp3811_gsa#l_8xe3q04c@f3u4l7'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Local development plus any pythonanywhere.com subdomain.
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Europe/Budapest'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"you@example.com"
] | you@example.com |
1117dbbdc89478fcc9b3f3855a551f66657818bc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/30/usersdata/73/9601/submittedfiles/atividade.py | ac54c0c02c33500bfc7f846ac89beb1e400e1c04 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
# BUG FIX: in Python 3, input() returns a string; the original passed it
# straight to range(), which raised TypeError. Convert to int explicitly.
n = int(input('digite o valor de n:'))
soma = 0
# Accumulate the series 1/n + 2/(n-1) + ... + n/1.
for i in range(1, n + 1):
    soma = soma + i / (n + 1 - i)
print('%.5f' % soma)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b025942a885c51b7cba68983e10da3229b4c6dd2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03474/s139736591.py | 35fd766d01c43ff1779773d47e9f0d978ed03fe3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | a,b = map(int,input().split())
s = list(input())
# Count how many characters match the expected postal-code layout:
# a digits, one '-', then b digits. Every position that matches adds one.
digit_chars = list("0123456789")
matches = 0
for idx in range(a):
    if s[idx] in digit_chars:
        matches += 1
if s[a] == "-":
    matches += 1
for idx in range(b):
    if s[idx + a + 1] in digit_chars:
        matches += 1
# The code is valid only when all a + b + 1 positions matched.
print("Yes" if matches == a + b + 1 else "No")
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8d3edda1cc082f4a48eafa8db8ef84b771cd96ae | c73b9c944deb7d0c564dcefc3fe30fbf0f69e47d | /pipeline/pipeline/backend.py | 5a51e23c0d2fbe7572724f72d786690d3045cecf | [
"MIT"
] | permissive | atgenomix/hail | 46e3edcea33a86184462f6ef0f354ea0239cd0dc | ceb85fc87544b5dceabe64213e3c5acaaae6a05e | refs/heads/master | 2021-06-19T07:48:42.503150 | 2019-02-22T04:09:30 | 2019-02-22T04:09:30 | 171,826,476 | 1 | 0 | MIT | 2019-02-21T07:57:38 | 2019-02-21T07:57:37 | null | UTF-8 | Python | false | false | 2,906 | py | import abc
import os
import subprocess as sp
from .resource import Resource, ResourceGroup
from .utils import get_sha, escape_string
class Backend:
    """Abstract execution backend for a Pipeline.

    Concrete backends decide where temporary files live, how the generated
    script is executed, and how files are copied.

    NOTE(review): without ``metaclass=abc.ABCMeta`` (or subclassing
    ``abc.ABC``) these @abc.abstractmethod markers are not enforced at
    instantiation time -- confirm whether enforcement was intended.
    """

    @abc.abstractmethod
    def tmp_dir(self):
        """Return a fresh temporary directory path for a pipeline run."""
        return

    @abc.abstractmethod
    def run(self, pipeline, dry_run, verbose, bg, delete_on_exit):
        """Execute (or merely print, for a dry run) the pipeline's script."""
        return

    @abc.abstractmethod
    def copy(self, src, dest):
        """Return the shell command that copies *src* to *dest*."""
        return
class LocalBackend(Backend):
    """Backend that renders the pipeline to a bash script and runs it locally.

    The script cds into a per-run temporary directory; each task's commands
    run either directly or inside the task's docker image with that
    directory bind-mounted.
    """

    def __init__(self, tmp_dir='/tmp/'):
        self._tmp_dir = tmp_dir

    def run(self, pipeline, dry_run, verbose, bg, delete_on_exit):
        """Render *pipeline* and execute it.

        dry_run: print the script instead of executing it.
        verbose: also echo each command while running (bash -x).
        bg: background execution (not implemented yet; runs blocking).
        delete_on_exit: remove the temporary directory afterwards.
        """
        tmpdir = self.tmp_dir()

        script = ['#!/bin/bash',
                  # BUG FIX: the original expression `'set -e' + 'x' if verbose else ''`
                  # parses as `('set -ex') if verbose else ''` because the
                  # conditional expression binds looser than `+`, so
                  # non-verbose runs executed WITHOUT `set -e` and did not
                  # abort on the first failing command.
                  'set -e' + ('x' if verbose else ''),
                  '\n',
                  '# change cd to tmp directory',
                  f"cd {tmpdir}",
                  '\n']

        def define_resource(r):
            # Resources may be referenced by name; resolve to the object first,
            # then emit a shell variable assignment for its value/root path.
            if isinstance(r, str):
                r = pipeline._resource_map[r]
            if isinstance(r, Resource):
                assert r._value is not None
                init = f"{r._uid}={escape_string(r._value)}"
            else:
                assert isinstance(r, ResourceGroup)
                init = f"{r._uid}={escape_string(r._root)}"
            return init

        for task in pipeline._tasks:
            script.append(f"# {task._uid} {task._label if task._label else ''}")
            resource_defs = [define_resource(r) for _, r in task._resources.items()]
            if task._docker:
                # Inline the resource definitions ahead of the joined command
                # and run the whole thing in the task's image, sharing tmpdir.
                defs = '; '.join(resource_defs) + '; ' if resource_defs else ''
                cmd = "&& ".join(task._command)
                image = task._docker
                script += [f"docker run "
                           f"-v {tmpdir}:{tmpdir} "
                           f"-w {tmpdir} "
                           f"{image} /bin/bash "
                           f"-c {escape_string(defs + cmd)}",
                           '\n']
            else:
                script += resource_defs
                script += task._command + ['\n']
        script = "\n".join(script)

        if dry_run:
            print(script)
        else:
            try:
                sp.check_output(script, shell=True)  # FIXME: implement non-blocking (bg = True)
            except sp.CalledProcessError as e:
                print(e.output)
                raise e
            finally:
                if delete_on_exit:
                    sp.run(f'rm -r {tmpdir}', shell=True)

    def tmp_dir(self):
        """Create and return a unique pipeline.<sha>/ directory under _tmp_dir."""
        def _get_random_name():
            directory = self._tmp_dir + '/pipeline.{}/'.format(get_sha(8))
            if os.path.isdir(directory):
                # Extremely unlikely collision: retry with a fresh sha.
                return _get_random_name()
            os.makedirs(directory, exist_ok=True)
            return directory
        return _get_random_name()

    def copy(self, src, dest):  # FIXME: symbolic links? support gsutil?
        """Return the shell command copying *src* to *dest*."""
        return f"cp {src} {dest}"
| [
"daniel.zidan.king@gmail.com"
] | daniel.zidan.king@gmail.com |
19fadc6bd0dcb196c68d7e3ac27319302057be8f | e349a8dba0356f9ba252df905f563944750d989a | /scripts/flix.py | ea8aa9f3358bb44e41dedfe2f0bbe0bfb1387917 | [
"MIT"
] | permissive | jaebradley/flix | 49ebe12ae0b53c8c004d403d424478997085e397 | adc02c2f08d01e1acd6f18065be70a8c87e71e55 | refs/heads/master | 2022-12-12T20:15:18.697554 | 2017-06-13T12:38:17 | 2017-06-13T12:38:17 | 93,294,294 | 1 | 0 | MIT | 2022-12-07T23:58:05 | 2017-06-04T06:17:12 | Python | UTF-8 | Python | false | false | 1,547 | py | import click
from data.exceptions import InvalidDateException
from data.time import get_date
from data.services import fetch_parsed_theater_data
from tables.builders import build_table
# Accepted month inputs: three-letter abbreviations or numeric strings 1-12.
MONTH_CHOICES = [
    "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec",
    "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"
]
# Accepted day inputs: weekday abbreviations or day-of-month strings 1-31.
DAY_CHOICES = ["mon", "tue", "wed", "thu", "fri", "sat", "sun",
               "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15",
               "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31"]
@click.command()
@click.option("-n", "--name")
@click.option("-t", "--tomorrow", is_flag=True)
@click.option("-m", "--month", type=click.Choice(MONTH_CHOICES))
@click.option("-d", "--day", type=click.Choice(DAY_CHOICES))
@click.option("-l", "--limit", default="2", type=click.Choice(["1", "2", "3", "4", "5"]))
def flix(name, tomorrow, month, day, limit):
    # CLI entry point: resolve the requested date, fetch showtimes, and
    # print them as a table. (Intentionally no docstring: click would turn
    # it into the command's --help text.)
    try:
        try:
            # Resolve the date from the mutually-optional flags.
            date = get_date(use_tomorrow=tomorrow, month=month, day=day)
        except InvalidDateException:
            click.echo("Invalid date inputs")
            return
        movie_presentations = fetch_parsed_theater_data(start_date=date, movie_name=name, limit=limit)
        if len(movie_presentations.movie_presentations_mapping.keys()) > 0:
            click.echo(build_table(movie_presentations))
        else:
            click.echo("No flix found")
    except Exception:
        # Catch-all so the CLI degrades to a friendly message.
        # NOTE(review): this hides the underlying error entirely -- consider
        # logging the exception before echoing.
        click.echo("Unable to show any flix")
| [
"jae.b.bradley@gmail.com"
] | jae.b.bradley@gmail.com |
2bb2717658bda0e645599dbf83db02d5fce1ebde | 3c934c97bd5748237ac8963c8be779a7d77be629 | /maximumConsecOne.py | 7c8085c860f9b9639124b62b613e6cbd31879ac1 | [] | no_license | Franktian/leetcode | 2b0d0280d18e3401b9f337f027c5d70f26237f02 | 98e7852ba144cefbdb02f705651b1519155ee4d6 | refs/heads/master | 2021-06-12T15:23:09.733650 | 2020-06-17T23:09:18 | 2020-06-17T23:09:18 | 128,710,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | def findMaxConsecutiveOnes(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
res = 0
curr = 0
for n in nums:
if n == 1:
curr += 1
else:
curr = 0
if curr >= res:
res = curr
return res
| [
"tianyawen201209@hotmail.com"
] | tianyawen201209@hotmail.com |
2b0c2dda599c2b9101d263bb3608cffc7638a3e8 | 757e3de38040878588bfcc846ec87a34740313a3 | /cap_07_iteracao/Lista_Fรกbio_03_em_for/fabio_iteracao_Q05_fatorial.py | a7ef9b3b83716cd9d39aef8b5d30c39c52b8871b | [] | no_license | Emanuelvss13/ifpi-ads-algoritimos2020 | c0a4a76ce3c41ae945f1ba31719eb68a539a9c9c | ac693feb1eee67f7c816b2ed34d44f3fd939653d | refs/heads/master | 2021-03-06T05:32:37.040171 | 2021-02-03T23:46:24 | 2021-02-03T23:46:24 | 246,182,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | def main():
    # Read the number whose factorial will be computed.
    numero = int(input('Digite um nรบmero: '))
    fatorial = 1
    # Keep the original value for the final message; the loop mutates numero.
    numero_print =numero
    # Multiply numero * (numero-1) * ... * 1, decrementing as we go.
    for i in range(numero):
        fatorial = fatorial * numero
        numero -= 1
    print(f'O fatorial de {numero_print} รฉ : {fatorial}')


main()
"noreply@github.com"
] | Emanuelvss13.noreply@github.com |
8c42e5185cd19ce89eb94c84bb3c322b1804aa6a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_histories.py | 17f1b20f237ac5840ec05fd67dc1b6cdf2391ec7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
#calss header
class _HISTORIES():
def __init__(self,):
self.name = "HISTORIES"
self.definitions = history
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['history']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
cced9e47a43e84015736e4fd09d4ccfba0f30baf | fc5734ad9b0dc154b3a36ec2f5d848b3d693473f | /solutions/Hash Tables/dot_product_of_two_sparse_vectors.py | 896d3a57d9ce44a518484cdc2456a6631b02371e | [
"MIT"
] | permissive | aimdarx/data-structures-and-algorithms | 8e51ec2144b6e0c413bc7ef0c46aba749fd70a99 | 1659887b843c5d20ee84a24df152fb4f763db757 | refs/heads/master | 2023-08-28T12:00:33.073788 | 2021-11-07T08:31:28 | 2021-11-07T08:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | """
Dot Product of Two Sparse Vectors:
Given two sparse vectors, compute their dot product.
Implement class SparseVector:
SparseVector(nums) Initializes the object with the vector nums
dotProduct(vec) Compute the dot product between the instance of SparseVector and vec
A sparse vector is a vector that has mostly zero values, you should store the sparse vector efficiently and compute the dot product between two SparseVector.
Follow up: What if only one of the vectors is sparse?
Example 1:
Input: nums1 = [1,0,0,2,3], nums2 = [0,3,0,4,0]
Output: 8
Explanation: v1 = SparseVector(nums1) , v2 = SparseVector(nums2)
v1.dotProduct(v2) = 1*0 + 0*3 + 0*0 + 2*4 + 3*0 = 8
Example 2:
Input: nums1 = [0,1,0,0,0], nums2 = [0,0,0,0,2]
Output: 0
Explanation: v1 = SparseVector(nums1) , v2 = SparseVector(nums2)
v1.dotProduct(v2) = 0*0 + 1*0 + 0*0 + 0*0 + 0*2 = 0
Example 3:
Input: nums1 = [0,1,0,0,2,0,0], nums2 = [1,0,0,0,3,0,4]
Output: 6
https://leetcode.com/problems/dot-product-of-two-sparse-vectors
"""
class SparseVector:
    """Space-efficient vector that stores only its nonzero entries."""

    def __init__(self, nums):
        # Keep just the nonzero values, keyed by their index.
        self.non_zero = {index: value for index, value in enumerate(nums) if value != 0}

    # Return the dotProduct of two sparse vectors
    def dotProduct(self, vec: 'SparseVector'):
        # Only indices present in BOTH sparse maps contribute to the sum.
        return sum(value * vec.non_zero[index]
                   for index, value in self.non_zero.items()
                   if index in vec.non_zero)
# Your SparseVector object will be instantiated and called as such:
# v1 = SparseVector(nums1)
# v2 = SparseVector(nums2)
# ans = v1.dotProduct(v2)
| [
"noreply@github.com"
] | aimdarx.noreply@github.com |
e485686f7b3e2f193e4c57591b0c968748577699 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/portal/v20181001/get_console.py | ddcd939b6a17147c3b434fa54036381df2a55ba9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public surface of this generated module.
__all__ = [
    'GetConsoleResult',
    'AwaitableGetConsoleResult',
    'get_console',
]
@pulumi.output_type
class GetConsoleResult:
    """
    Cloud shell console
    """
    def __init__(__self__, properties=None):
        # Validate the generated field before registering it on the output type.
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ConsolePropertiesResponse':
        """
        Cloud shell console properties.
        """
        return pulumi.get(self, "properties")
class AwaitableGetConsoleResult(GetConsoleResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator (as the
        # awaitable protocol requires) without ever actually suspending.
        if False:
            yield self
        return GetConsoleResult(
            properties=self.properties)
def get_console(console_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConsoleResult:
    """
    Cloud shell console


    :param str console_name: The name of the console
    :param opts: invoke options; when omitted, a default InvokeOptions with
                 the SDK's own version is used.
    """
    __args__ = dict()
    __args__['consoleName'] = console_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Delegate to the pulumi runtime RPC and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:portal/v20181001:getConsole', __args__, opts=opts, typ=GetConsoleResult).value

    return AwaitableGetConsoleResult(
        properties=__ret__.properties)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
0b8906daed72ba0039fe0761e437da2e6a1ea053 | 312fe86a9540b7bfabcaadd5f20ba107755b195b | /playbooks/monitoring/kibana/docs_compare.py | f421ce5d3d53e5c2befc2e6f379b42c935ca6489 | [
"Apache-2.0"
] | permissive | isabella232/elastic-stack-testing | fa623cce484ecf870c5e90da6b401b3f4c2ce296 | 1526ab4b4ca187dc5f0eb81be2fed058fc556082 | refs/heads/master | 2023-01-05T13:26:22.430049 | 2020-11-09T18:19:44 | 2020-11-09T18:19:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | '''
Usage:
python docs_compare.py /path/to/legacy/docs /path/to/metricbeat/docs
'''
from docs_compare_util import check_parity
import sys
# Fields Metricbeat documents are allowed to lack relative to legacy docs.
allowed_deletions_from_metricbeat_docs_extra = [
    # 'path.to.field'
    'kibana_stats.response_times.max',
    'kibana_stats.response_times.average'
]

# Usage properties that do not report consistently; they are overwritten
# with the Metricbeat values before comparison (see filter_kibana_usage_stats).
unused_kibana_usage_properties = [
    "apm",
    "localization",
    "lens",
    "actions",
    "alerts"
]
def handle_special_case_kibana_settings(legacy_doc, metricbeat_doc):
    """Drop a null default_admin_email from the legacy settings doc.

    Legacy collection indexes kibana_settings.xpack.default_admin_email as
    null, while Metricbeat simply omits the field; removing the null entry
    makes the two documents comparable.
    """
    settings = legacy_doc["kibana_settings"]
    email_present = "xpack" in settings and "default_admin_email" in settings["xpack"]
    if email_present and settings["xpack"]["default_admin_email"] is None:
        settings["xpack"].pop("default_admin_email")
def handle_special_case_kibana_stats(legacy_doc, metricbeat_doc):
    """Copy fields known to differ from the legacy doc into the Metricbeat
    doc (in place) so they cannot fail the parity check."""
    legacy_stats = legacy_doc["kibana_stats"]
    mb_stats = metricbeat_doc["kibana_stats"]
    # Special case this until we have resolution on
    # https://github.com/elastic/kibana/pull/70677#issuecomment-662531529
    for field in ("total", "averageDuration"):
        mb_stats["usage"]["search"][field] = legacy_stats["usage"]["search"][field]
    # Special case for https://github.com/elastic/kibana/pull/76730
    # To be removed if/when https://github.com/elastic/beats/issues/21092
    # is resolved.
    for field in ("cpuacct", "cpu"):
        mb_stats["os"][field] = legacy_stats["os"][field]
def filter_kibana_usage_stats(legacy_doc, metricbeat_doc):
    """Overwrite unused usage properties in the legacy doc (in place) with
    the Metricbeat values so they cannot produce spurious diffs."""
    legacy_usage = legacy_doc["kibana_stats"]["usage"]
    metricbeat_usage = metricbeat_doc["kibana_stats"]["usage"]
    for prop in unused_kibana_usage_properties:
        legacy_usage[prop] = metricbeat_usage[prop]
def handle_special_cases(doc_type, legacy_doc, metricbeat_doc):
    """Apply per-document-type fixups (in place) before comparing docs."""
    if doc_type == "kibana_settings":
        handle_special_case_kibana_settings(legacy_doc, metricbeat_doc)
    elif doc_type == "kibana_stats":
        # Lens, Actions, and other usage stats might not report consistently:
        #   https://github.com/elastic/kibana/issues/80983
        #   https://github.com/elastic/kibana/issues/80986
        #   https://github.com/elastic/kibana/issues/81944
        # so we filter out whatever we don't use (or that might change).
        filter_kibana_usage_stats(legacy_doc, metricbeat_doc)
        handle_special_case_kibana_stats(legacy_doc, metricbeat_doc)
check_parity(handle_special_cases, allowed_deletions_from_metricbeat_docs_extra=allowed_deletions_from_metricbeat_docs_extra)
| [
"noreply@github.com"
] | isabella232.noreply@github.com |
d0f72f2c74e04492606256a0d23728e050e02a0e | 7a91286a3636ad9606d5933619401cc45bee1b10 | /lib/banking/balance.py | db0904c2b45181d536718a1e76a3f25b00e70e80 | [] | no_license | phriscage/banking_application | d063bbb24c3e1061c2110c1aace1d67ae53e70cc | cc120437213a25e1864db14509c6d143b65d8102 | refs/heads/master | 2019-01-02T04:00:30.034945 | 2015-01-31T03:50:42 | 2015-01-31T03:50:42 | 30,099,098 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | """
balance class
"""
class Balance(object):
    """Keeps a running account total with add/subtract operations."""
    def __init__(self):
        """Start the balance at zero."""
        self.total = 0

    def add(self, value):
        """Add value to the total.

        Args:
            value (int): numeric value (coerced with int())
        """
        self.total = self.total + int(value)

    def subtract(self, value):
        """Subtract value from the total.

        Args:
            value (int): numeric value (coerced with int())
        """
        self.total = self.total - int(value)
| [
"phriscage@gmail.com"
] | phriscage@gmail.com |
019082fa9d75ecfb96a0de970631cd8ecb05eaeb | 98ad5d08eda9dbce55947b53dc8e1627e7be5404 | /backend/blog_api/serializers.py | e57bb6df05843dd07c9c6292467f59f623977218 | [] | no_license | mhkmcp/blog-react-djagno | 651a9f1f65f0c54804ceb8ea51cddf0587ef2c81 | 6d0904a08311590b8d07a99c6c02d8f50bbaa0de | refs/heads/main | 2023-08-03T16:28:52.666182 | 2021-09-20T15:00:48 | 2021-09-20T15:00:48 | 408,421,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from rest_framework import serializers
from blog.models import Post
class PostSerializer(serializers.ModelSerializer):
    """Serializer exposing the listed public fields of a blog Post."""
    class Meta:
        model = Post
        fields = ('id', 'title', 'excerpt', 'content', 'author', 'status')
"mhkmcp@yahoo.com"
] | mhkmcp@yahoo.com |
5ae88cd77491d8b24fad6d607239ecf25badf6b8 | 31764c3903bd7b7cde4649860eb843bc8545095d | /books/Language/python/geatpy/code/A02_DTLZ1.py | 8161e8d8fd83f130fdf31b1d442681d18bd7f173 | [] | no_license | madokast/madokast.github.io.old | bf3aa967ee7ccdba99ce7d667e02f8672ae8b00e | 3af6570401c9f7f11932cc3bac79f4979507c79b | refs/heads/master | 2023-06-20T07:52:50.119738 | 2021-07-22T01:35:12 | 2021-07-22T01:35:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,670 | py | # -*- coding: utf-8 -*-
""" QuickStart """
import numpy as np
import geatpy as ea
# ่ชๅฎไน้ฎ้ข็ฑป
class MyProblem(ea.Problem):  # inherit from geatpy's Problem base class
    """DTLZ1 benchmark problem definition for geatpy.

    NOTE: the original inline comments had been mangled (split across
    lines, which made the file syntactically invalid); they are restored
    here in English.  The code tokens are unchanged.
    """
    def __init__(self, M):  # M: number of objectives (the driver uses 3)
        name = 'DTLZ1'  # problem name (may be chosen freely)
        # Min/max marker per objective: 1 minimises, -1 maximises.
        maxormins = [1] * M
        Dim = M + 4  # number of decision variables
        # Decision-variable types: 0 = real-valued, 1 = integer.
        varTypes = np.array([0] * Dim)
        lb = [0] * Dim  # lower bounds of the decision variables
        ub = [1] * Dim  # upper bounds of the decision variables
        # Bound-inclusion flags: 1 = bound included, 0 = excluded.
        lbin = [1] * Dim
        ubin = [1] * Dim
        # Let the parent constructor finish the instantiation.
        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)
    def aimFunc(self, pop):  # objective function: maps pop.Phen to pop.ObjV
        Vars = pop.Phen  # decision-variable matrix (one row per individual)
        XM = Vars[:,(self.M-1):]
        g = 100 * (self.Dim - self.M + 1 + np.sum(((XM - 0.5)**2 - np.cos(20 * np.pi * (XM - 0.5))), 1, keepdims = True))
        ones_metrix = np.ones((Vars.shape[0], 1))
        f = 0.5 * np.fliplr(np.cumprod(np.hstack([ones_metrix, Vars[:,:self.M-1]]), 1)) * np.hstack([ones_metrix, 1 - Vars[:, range(self.M - 2, -1, -1)]]) * np.tile(1 + g, (1, self.M))
        pop.ObjV = f  # assign the computed objective values to the population
    def calReferObjV(self):  # compute the global Pareto-optimal reference set
        # 10000 reference points uniformly distributed on the unit simplex.
        uniformPoint, ans = ea.crtup(self.M, 10000)
        globalBestObjV = uniformPoint / 2
        return globalBestObjV
# Driver script.
# NOTE: the original Chinese inline comments (and one print string) had been
# mangled by line splits that made the file syntactically invalid; comments
# are restored in English, runtime strings are kept as in the original.
"""===============================ๅฎไพๅ้ฎ้ขๅฏน่ฑก=============================="""
M = 3  # number of objectives
problem = MyProblem(M)  # instantiate the problem object
"""==================================็ง็พค่ฎพ็ฝฎ================================="""
Encoding = 'RI'  # chromosome encoding scheme
NIND = 100  # population size
Field = ea.crtfld(Encoding, problem.varTypes, problem.ranges, problem.borders)  # create the field descriptor
# Instantiate the population object (only the container; the population
# itself is not initialised yet).
population = ea.Population(Encoding, Field, NIND)
"""================================็ฎๆณๅๆฐ่ฎพ็ฝฎ==============================="""
myAlgorithm = ea.moea_NSGA3_templet(problem, population)  # instantiate an NSGA-III algorithm template
myAlgorithm.MAXGEN = 500  # maximum number of generations
myAlgorithm.logTras = 1  # log every N generations (0 disables logging)
myAlgorithm.verbose = True  # whether to print log information
myAlgorithm.drawing = 0  # plotting mode: 0 none, 1 final result, 2 animated
"""==========================่ฐ็จ็ฎๆณๆจกๆฟ่ฟ่ก็ง็พค่ฟๅ=========================
่ฐ็จrunๆง่ก็ฎๆณๆจกๆฟ๏ผๅพๅฐๅธ็ดฏๆๆไผ่งฃ้NDSetไปฅๅๆๅไธไปฃ็ง็พคใNDSetๆฏไธไธช็ง็พค็ฑปPopulation็ๅฏน่ฑกใ
NDSet.ObjVไธบๆไผ่งฃไธชไฝ็็ฎๆ ๅฝๆฐๅผ๏ผNDSet.Phenไธบๅฏนๅบ็ๅณ็ญๅ้ๅผใ
่ฏฆ่งPopulation.pyไธญๅ
ณไบ็ง็พค็ฑป็ๅฎไนใ
"""
# Run the algorithm template: returns the non-dominated solution set and
# the final generation's population.
[NDSet, population] = myAlgorithm.run()
# NDSet.save()  # persist the non-dominated population to files
"""==================================่พๅบ็ปๆ=============================="""
print('็จๆถ๏ผ%f ็ง' % myAlgorithm.passTime)
print('่ฏไปทๆฌกๆฐ๏ผ%d ๆฌก' % myAlgorithm.evalsNum)
print('้ๆฏ้ไธชไฝๆฐ๏ผ%d ไธช' % NDSet.sizes) if NDSet.sizes != 0 else print('ๆฒกๆๆพๅฐๅฏ่ก่งฃ๏ผ')
# if myAlgorithm.log is not None and NDSet.sizes != 0:
#     print('GD', myAlgorithm.log['gd'][-1])
#     print('IGD', myAlgorithm.log['igd'][-1])
#     print('HV', myAlgorithm.log['hv'][-1])
#     print('Spacing', myAlgorithm.log['spacing'][-1])
# """=========================่ฟๅ่ฟ็จๆๆ ่ฟฝ่ธชๅๆ========================="""
# metricName = [['igd'], ['hv']]
# Metrics = np.array([myAlgorithm.log[metricName[i][0]] for i in range(len(metricName))]).T
# Plot the metric-tracking analysis:
# ea.trcplot(Metrics, labels=metricName, titles=metricName)
"578562554@qq.com"
] | 578562554@qq.com |
ff27dbd756b4dde221c7ca61cb5f0e3da88cfb58 | a60bc58d17720a2f1e2f8778146248c01adf8a5a | /post_subjects.py | 3221347af689853f3f40fd00b7bd68bace9316dc | [] | no_license | meau/bentley_scripts | 08bdbc159939731966bdeda396c93022212f922d | e17abff89a775b2b7273661e92b849b85e6a1f8d | refs/heads/master | 2021-01-17T05:46:24.838505 | 2015-09-21T16:17:25 | 2015-09-21T16:17:25 | 42,587,433 | 1 | 0 | null | 2015-09-16T13:03:26 | 2015-09-16T13:03:25 | Python | UTF-8 | Python | false | false | 1,925 | py | """
curl -H "X-ArchivesSpace-Session:$TOKEN" -d '{"source":"lcsh","vocabulary":"/vocabularies/1","terms":[{"term":"Cheese", "term_type":"topical","vocabulary":"/vocabularies/1"},{"term":"Michigan","term_type":"geographic","vocabulary":"/vocabularies/1"}]}' http://localhost:8089/subjects
"""
import requests
import json
import csv
baseURL = 'http://localhost:8089'
user='admin'
password='admin'
# Authenticate against the local ArchivesSpace backend and keep the session
# token for subsequent requests.
auth = requests.post(baseURL + '/users/'+user+'/login?password='+password).json()
session = auth["session"]
headers = {'X-ArchivesSpace-Session':session}
subjects_csv = 'C:/Users/Public/Documents/aspace_subjects.csv'
"""
subject_ids = requests.get(baseURL+'/subjects?all_ids=true').json()
for i in subject_ids:
    subject_json = requests.get(baseURL+'/subjects/'+str(i)).json()
    print subject_json['title'], subject_json['uri']
"""
# Each CSV row: column 1 holds the source; terms start at column index 3
# and come in (term, term_type) pairs.  NOTE(review): this is Python 2
# style ('rb' mode for csv, print statement below) -- confirm interpreter.
with open(subjects_csv,'rb') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        row_indexes = len(row) - 1
        source = row[1]
        terms_list = []
        for row_num in range(3,row_indexes + 1, 2):
            term = row[row_num]
            term_type = row[row_num+1]
            terms_dict = {}
            terms_dict["term"] = term
            terms_dict["term_type"] = term_type
            terms_dict["vocabulary"] = "/vocabularies/1"
            terms_list.append(terms_dict)
        data = json.dumps({"source":source,"vocabulary":"/vocabularies/1","terms":[i for i in terms_list]})
        # POST the assembled subject; on success append its URI to the row
        # and record it in the output CSV.
        subjects = requests.post(baseURL+'/subjects', headers=headers, data=data).json()
        if 'status' in subjects:
            if subjects['status'] == 'Created':
                subject_uri = subjects['uri']
                row.append(subject_uri)
                with open('C:/Users/Public/Documents/posted_subjects.csv','ab') as csv_out:
                    writer = csv.writer(csv_out)
                    writer.writerow(row)
        print subjects
"djpillen@umich.edu"
] | djpillen@umich.edu |
bce301e020d3e0032f55ca6472c9e2c8cf63c121 | aa54fd5cafc65d18ceac52097237482cec27f674 | /planetary_system_stacker/Test_programs/count_lines_of_code.py | 5f56284c52c4e38aa219fd0850afce7de4024311 | [] | no_license | Rolf-Hempel/PlanetarySystemStacker | 84f6934e6748177fb1aca20b54392dee5c3f2e3c | 304952a8ac8e991e111e3fe2dba95a6ca4304b4e | refs/heads/master | 2023-07-20T04:11:06.663774 | 2023-07-17T15:20:15 | 2023-07-17T15:20:15 | 148,365,620 | 228 | 34 | null | 2023-09-01T16:33:05 | 2018-09-11T19:00:13 | Python | UTF-8 | Python | false | false | 1,442 | py | # -*- coding: utf-8; -*-
import os
def countlines(start, lines=0, header=True, begin_start=None):
    """Recursively count the lines of all non-GUI ``.py`` files under *start*.

    A ``.py`` file is skipped when a sibling ``.ui`` file with the same stem
    exists (GUI code).  A running table is printed: lines added per file and
    the cumulative total.

    :param start: directory to scan
    :param lines: running total carried through the recursion
    :param header: print the table header (top-level call only)
    :param begin_start: root directory used to print paths relative to it
    :return: total number of lines counted so far
    """
    if header:
        print('{:>10} |{:>10} | {:<20}'.format('ADDED', 'TOTAL', 'FILE'))
        print('{:->11}|{:->11}|{:->20}'.format('', '', ''))
    # Snapshot the directory once; the original listed it twice, which could
    # observe different contents if the directory changed in between.
    entries = [os.path.join(start, entry) for entry in os.listdir(start)]
    for thing in entries:
        # Only count .py files without a matching .ui file.
        if os.path.isfile(thing) and not os.path.isfile(thing[:-3] + '.ui'):
            if thing.endswith('.py'):
                with open(thing, 'r') as f:
                    # Count lazily instead of loading the whole file.
                    newlines = sum(1 for _ in f)
                lines += newlines
                if begin_start is not None:
                    reldir_of_thing = '.' + thing.replace(begin_start, '')
                else:
                    reldir_of_thing = '.' + thing.replace(start, '')
                print('{:>10} |{:>10} | {:<20}'.format(
                    newlines, lines, reldir_of_thing))
    for thing in entries:
        if os.path.isdir(thing):
            lines = countlines(thing, lines, header=False, begin_start=start)
    return lines
# Count all non-GUI Python code under the project tree and keep the total.
directory = r'D:\SW-Development\Python\PlanetarySystemStacker\planetary_system_stacker'
lines = countlines(directory)
| [
"rolf6419@gmx.de"
] | rolf6419@gmx.de |
6d907810b817e314675bd275c3313d21111ee5a3 | 781e2692049e87a4256320c76e82a19be257a05d | /intervention/results/control_120802_1449613710_39_13.75.py | cb40cf5e2e0afecd0c7c8544b6db900f76291663 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 227 | py |
def num_common_letters(goal_word, guess):
cache = set('')
counter = 0
for i in goal_word:
if i not in cache:
cache.add(i)
for i in guess:
if i in cache:
counter += 1
cache.remove(i)
return counter | [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
869ca60250a2409dbfa492cac38092f9ed2dd74c | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /srm/651-675/660/Coversta.py | 3322d7823065e870dd37931908d60355b24de092 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | #SRM660 Div1 Easy
import math,string,itertools as it,fractions,heapq,collections as collect,re,array,bisect,random,sys
class Coversta:
def place(self, a, x, y):
n, m, k = len(a), len(a[0]), len(x)
a = [map(int, list(a[i])) for i in xrange(n)]
dwr = zip(x, y)
scores = []
for w in xrange(n):
for r in xrange(m):
tmp = 0
for dw, dr in dwr:
nw, nr = w + dw, r + dr
if 0 <= nw < n and 0 <= nr < m:
tmp += a[nw][nr]
scores.append([tmp, w, r])
scores = sorted(scores, key = lambda z:z[0], reverse = True)
ans = 0
for i in xrange(min(k * k + 1, n * m)):
s, w, r = scores[i]
cover1 = set((w + dw, r + dr) for dw, dr in dwr)
for j in xrange(i + 1, min(k * k + 1, n * m)):
ss, ww, rr = scores[j]
cover2 = set((ww + dw, rr + dr) for dw, dr in dwr)
tmp = s + ss
for p, q in cover1 & cover2:
if 0 <= p < n and 0 <= q < m:
tmp -= a[p][q]
ans = max(ans, tmp)
return ans
| [
"roiti46@gmail.com"
] | roiti46@gmail.com |
4edc855bd5476a2b9bcab34e8779b09af4dec769 | 485816a0a8b86818e4f2cefec517e6316e2252d6 | /posthog/api/plugin_log_entry.py | 45e70682d86c77170aeadcbe9881f5cb8d48ac7f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abhijitghate/posthog | 3647443274aee6431e7fecf6902644a9fa7eb9d8 | 68dc4d2730600efb00d3708fb7fba70d85612760 | refs/heads/master | 2023-04-19T15:17:25.033992 | 2021-05-13T09:48:59 | 2021-05-13T09:48:59 | 279,130,099 | 1 | 0 | MIT | 2020-07-12T19:04:15 | 2020-07-12T19:04:14 | null | UTF-8 | Python | false | false | 2,275 | py | from typing import Optional
from django.utils import timezone
from rest_framework import exceptions, generics, mixins, request, serializers, status, viewsets
from rest_framework.permissions import IsAuthenticated
from posthog.api.plugin import PluginOwnershipPermission, PluginsAccessLevelPermission
from posthog.api.routing import StructuredViewSetMixin
from posthog.models.plugin import PluginLogEntry, fetch_plugin_log_entries
from posthog.permissions import ProjectMembershipNecessaryPermissions
class PluginLogEntrySerializer(serializers.ModelSerializer):
    """Read-only serializer for PluginLogEntry rows (all fields read-only)."""
    class Meta:
        model = PluginLogEntry
        fields = ["id", "team_id", "plugin_id", "timestamp", "source", "type", "message", "instance_id"]
        read_only_fields = fields
class PluginLogEntryViewSet(StructuredViewSetMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
    """Lists log entries of a plugin config, filtered by query parameters
    (``limit``, ``after``, ``before``, ``search``)."""

    serializer_class = PluginLogEntrySerializer
    permission_classes = [
        IsAuthenticated,
        ProjectMembershipNecessaryPermissions,
        PluginsAccessLevelPermission,
        PluginOwnershipPermission,
    ]

    def _parse_limit(self) -> Optional[int]:
        # Missing/empty values mean "no limit"; anything else must be an int.
        raw = self.request.GET.get("limit")
        if not raw:
            return None
        try:
            return int(raw)
        except ValueError:
            raise exceptions.ValidationError("Query param limit must be omitted or an integer!")

    def _parse_timestamp(self, param: str) -> Optional[timezone.datetime]:
        # ISO 8601 with an optional trailing "Z", which fromisoformat rejects.
        raw = self.request.GET.get(param)
        if raw is None:
            return None
        return timezone.datetime.fromisoformat(raw.replace("Z", "+00:00"))

    def get_queryset(self):
        limit = self._parse_limit()
        after = self._parse_timestamp("after")
        before = self._parse_timestamp("before")
        parents_query_dict = self.get_parents_query_dict()
        return fetch_plugin_log_entries(
            team_id=parents_query_dict["team_id"],
            plugin_config_id=parents_query_dict["plugin_config_id"],
            after=after,
            before=before,
            search=self.request.GET.get("search"),
            limit=limit,
        )
| [
"noreply@github.com"
] | abhijitghate.noreply@github.com |
3049a52f0e3824b7e9183c34e6fbbd82deb72726 | ecf1ce6f8b592f76c7b7c253608c1264ae0676a3 | /days/day023/bite_022_Write_a_decorator_with_argument/test_bite_022.py | cb84a294a8948552c5e32652a8d195cd737cdd88 | [] | permissive | alex-vegan/100daysofcode-with-python-course | 94e99880a50ac412e398ad209ed53796f253641f | b6c12316abe18274b7963371b8f0ed2fd549ef07 | refs/heads/master | 2021-07-20T23:05:59.721661 | 2019-01-21T16:18:25 | 2019-01-21T16:18:25 | 150,115,516 | 0 | 0 | MIT | 2018-09-24T14:28:16 | 2018-09-24T14:28:15 | null | UTF-8 | Python | false | false | 248 | py | from bite_022 import make_html
def test_make_html():
    """make_html('tag') should wrap the decorated function's output in
    <tag>...</tag>, nesting from the innermost decorator outwards."""
    def get_text(text='I code with PyBites'):
        return text

    # Apply the decorators explicitly instead of with @-syntax.
    get_text = make_html('p')(make_html('strong')(get_text))
    assert get_text() == '<p><strong>I code with PyBites</strong></p>'
| [
"alex-vegan@outlook.com"
] | alex-vegan@outlook.com |
ddba2951e6d7f604b3af9174b0cd0fd2bc89d4e9 | ecc4e8c9794c8ecddfd3774a396d4a06881c1245 | /test/docker_test.py | 72d60addf7437caf5bb641bfc13d1b97b21c6640 | [
"MIT"
] | permissive | cheminfo-py/crystal_analysis_webservice | d244bd01523c1edfc428458e6dea42b063e3edbd | 444c91adb645aa99401dbb3d8f30d8b98e2343c5 | refs/heads/master | 2023-01-10T20:58:34.389784 | 2020-11-19T07:06:27 | 2020-11-19T07:06:27 | 303,760,806 | 0 | 0 | MIT | 2020-11-19T06:40:05 | 2020-10-13T16:17:15 | Python | UTF-8 | Python | false | false | 670 | py | # -*- coding: utf-8 -*-
"""Test if the API responds in the Docker image"""
import sys
import requests
import os
import json
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
# Smoke-test: the /version/ endpoint of the containerised service must
# answer with a JSON object containing a "version" key.
r = requests.get("http://localhost:8091/version/")
keys = r.json().keys()
if "version" in keys:
    print("OK")
else:
    print("error")
    sys.exit(1)
# Post the HKUST-1 structure file and check the expected topology ("tbo")
# is reported back.
with open(os.path.join(THIS_DIR, "HKUST-1.cif"), "r") as fh:
    content = fh.read()
r = requests.post(
    "http://localhost:8091/topology/", data=json.dumps({"fileContent": content})
)
response = r.json()
if response["rcsrName"] == "tbo":
    print("tbo found for HKUST")
    sys.exit(0)
else:
    print("error")
    sys.exit(1)
| [
"kevin.jablonka@epfl.ch"
] | kevin.jablonka@epfl.ch |
b67776dddfb11a06e921bfc4315136dadb462d10 | e3d6f803beece2ecc2cde8de795fdd20291213ff | /nova/tests/functional/notification_sample_tests/test_aggregate.py | 61a4a20565b9da0c0ca69ba8232bfea12032390e | [
"Apache-2.0"
] | permissive | panguan737/nova | 437c1adb81f3e9ef82c28ad957144623db13ba52 | 0d177185a439baa228b42c948cab4e934d6ac7b8 | refs/heads/main | 2023-01-07T00:08:44.069599 | 2020-11-01T14:00:42 | 2020-11-01T14:00:42 | 309,332,719 | 0 | 0 | Apache-2.0 | 2020-11-02T10:17:13 | 2020-11-02T10:17:13 | null | UTF-8 | Python | false | false | 4,047 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
class TestAggregateNotificationSample(
        notification_sample_base.NotificationSampleTestBase):
    """Verify versioned aggregate notifications against the sample payloads."""
    def test_aggregate_create_delete(self):
        # Creating an aggregate must emit create-start/create-end, and
        # deleting it delete-start/delete-end, in that order.
        aggregate_req = {
            "aggregate": {
                "name": "my-aggregate",
                "availability_zone": "nova"}}
        aggregate = self.admin_api.post_aggregate(aggregate_req)
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'aggregate-create-start',
            replacements={
                'uuid': aggregate['uuid']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'aggregate-create-end',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
        self.admin_api.delete_aggregate(aggregate['id'])
        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'aggregate-delete-start',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
        self._verify_notification(
            'aggregate-delete-end',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
    def test_aggregate_add_remove_host(self):
        # Adding then removing a host must emit add_host-start/end and
        # remove_host-start/end; the create notifications are cleared by
        # fake_notifier.reset() below.
        aggregate_req = {
            "aggregate": {
                "name": "my-aggregate",
                "availability_zone": "nova"}}
        aggregate = self.admin_api.post_aggregate(aggregate_req)
        fake_notifier.reset()
        add_host_req = {
            "add_host": {
                "host": "compute"
            }
        }
        self.admin_api.post_aggregate_action(aggregate['id'], add_host_req)
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'aggregate-add_host-start',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'aggregate-add_host-end',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
        remove_host_req = {
            "remove_host": {
                "host": "compute"
            }
        }
        self.admin_api.post_aggregate_action(aggregate['id'], remove_host_req)
        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'aggregate-remove_host-start',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
        self._verify_notification(
            'aggregate-remove_host-end',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
        self.admin_api.delete_aggregate(aggregate['id'])
| [
"147360410@qq.com"
] | 147360410@qq.com |
37c3c598b02fad75e178c54e31aae90a0d649116 | 8f0ce1be6cc093d962c64179eec99c7ccc20ffc4 | /fabrication/migrations/0007_fabricationprojectpage_items.py | f585382451b27a14eb873908ea552ac48e3a6fb3 | [] | no_license | dmeehan/futuregreenstudio | cf5e12c6ead8f0c7023ba09d5868749888068b72 | e6e2b7f7ffa2ed251d21e6b1d07573ab4f70782f | refs/heads/master | 2023-08-30T20:12:24.814970 | 2023-08-28T14:55:26 | 2023-08-28T14:55:26 | 89,943,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-07-26 04:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the optional `items` char field to
    # FabricationProjectPage.
    dependencies = [
        ('fabrication', '0006_auto_20210726_0440'),
    ]
    operations = [
        migrations.AddField(
            model_name='fabricationprojectpage',
            name='items',
            field=models.CharField(blank=True, max_length=250),
        ),
    ]
| [
"dmeehan@gmail.com"
] | dmeehan@gmail.com |
eab1cf90ff876af4f0683c8f49fa175a879378ac | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /delivery/tests/__init__.py | 8a969170bf17e9fd0f8173deea33b83546ba2af9 | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing detailsself.
from . import test_delivery_cost, test_delivery_stock_move
| [
"50145400+gilbertp7@users.noreply.github.com"
] | 50145400+gilbertp7@users.noreply.github.com |
aef82e74b3d77b46a8daf0528299813a33884d69 | 1180c0bfe29959d95f3c131e6e839950e528d4ee | /05/bmiraski/similar_tweeters.py | 370a3f3daee4af7e42956a3ee85a630156a63fbe | [] | no_license | pybites/challenges | e3e461accd8e7f890aee8007ba5070086ef983fc | 02b77652d0901e6e06cb9b1e7cb3e59c675445c2 | refs/heads/community | 2023-08-20T18:19:02.982214 | 2022-11-17T09:23:31 | 2022-11-17T09:23:31 | 78,264,928 | 764 | 3,115 | null | 2023-07-21T05:58:19 | 2017-01-07T07:17:50 | Jupyter Notebook | UTF-8 | Python | false | false | 2,284 | py | """Compare tweet similarity from two users."""
from nltk.corpus import stopwords
import spacy
from string import ascii_lowercase
from string import digits
from string import punctuation
import sys
import usertweets
# English stop-word set, built once at import time (requires the NLTK
# stopwords corpus to be available).
stoplist = set(stopwords.words('english'))
# spaCy English model used below for the document similarity computation.
nlp = spacy.load('en_core_web_md')
def similar_tweeters(user1, user2):
    """Output similarity value for two different users based on 200 tweets."""
    # NOTE(review): assumes UserTweets fetches each user's tweets into a
    # `_tweets` attribute -- confirm against the usertweets module.
    u1 = usertweets.UserTweets(user1)
    u2 = usertweets.UserTweets(user2)
    # Reduce each user's tweets to a de-duplicated bag of "important" words.
    tt_u1 = " ".join(_word_set(
        _remove_solo_words(_get_important_tweet_words(u1))))
    tt_u2 = " ".join(_word_set(
        _remove_solo_words(_get_important_tweet_words(u2))))
    # Compare the two word bags with spaCy's document similarity score.
    doc1 = nlp(tt_u1)
    doc2 = nlp(tt_u2)
    sim = doc1.similarity(doc2)
    print(sim)
def _get_important_tweet_words(user):
    """Return a list of important words in user's tweets.

    Lower-cases every word, strips everything but ASCII letters, then
    drops URLs, stop words and words shorter than three characters.
    """
    cleaned_tweets = []
    for tweet in user._tweets:
        words = []
        for raw_word in tweet.text.lower().split():
            # Keep lowercase ASCII letters only (punctuation, digits and
            # any other characters are discarded).
            words.append("".join(ch for ch in raw_word if ch in ascii_lowercase))
        cleaned_tweets.append(words)
    return [[word for word in words if not word.startswith('http') and
             word not in stoplist and len(word) >= 3]
            for words in cleaned_tweets]
def _remove_solo_words(tt):
"""Remove words occurring once in the array."""
from collections import defaultdict
frequency = defaultdict(int)
for tweet in tt:
for token in tweet:
frequency[token] += 1
return [[token for token in tweet if frequency[token] > 1]
for tweet in tt]
def _word_set(tt):
"""Return a single list of all the words from array."""
words = []
for tweet in tt:
for word in tweet:
words.append(word)
return words
if __name__ == "__main__":
if len(sys.argv) < 3:
print('Usage: {} <user1> <user2>'.format(sys.argv[0]))
sys.exit(1)
user1, user2 = sys.argv[1:3]
similar_tweeters(user1, user2)
| [
"pybites@projects.bobbelderbos.com"
] | pybites@projects.bobbelderbos.com |
0eb176292acf6b719ee3c1745b86634560ae1707 | 54da94dce244ab659c8036cafcdc1b326fbfe490 | /datoteke-s-predavanj/2016-17/02-rekurzija/rekurzija-b.py | aaf7cba7342ff8211217d6cb04f8b72ed61abd67 | [] | no_license | jakamrak/uvod-v-programiranje | 640b2738164e2026308d7e60f1478659df79cc40 | 3c05290f4f23b384ad9063880fffe208c08fc599 | refs/heads/master | 2022-07-17T16:50:18.563453 | 2020-05-18T13:54:13 | 2020-05-18T13:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | def fibonacci(n):
'''Vrne n-ti ฤlen zaporedja 1, 1, 2, 3, 5, 8, ...'''
if n == 0:
return 1
elif n == 1:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
def splosni_fibonacci(n, a, b):
    """Return the n-th term of the sequence a, b, a + b, a + 2b, 2a + 3b, ..."""
    # Iterative form of the tail-recursive original: each step shifts the
    # window (a, b) one position along the sequence.
    for _ in range(n):
        a, b = b, a + b
    return a
def vsota_stevil(n):
    """Return the sum 1 + 2 + ... + n.

    Uses the closed form n * (n + 1) / 2: the recursive version hits
    Python's recursion limit for n around 1000 and larger.
    NOTE: this function is re-defined again below; at import time the
    later definition wins.
    """
    return n * (n + 1) // 2
def vsota_stevil(n):
    """Return the sum 1 + 2 + ... + n (recursively)."""
    # Conditional expression instead of an if/else statement.
    return 0 if n == 0 else vsota_stevil(n - 1) + n
def gcd(m, n):
    """Return the greatest common divisor of m and n (Euclidean algorithm)."""
    # Iterative form of the recursive original.
    while n != 0:
        m, n = n, m % n
    return m
def poisci_kvadratni_koren(n, a, b, eps):
    """Find the square root of n on the interval [a, b] to within eps."""
    # Iterative bisection instead of the recursive original.
    while eps <= b - a:
        mid = (a + b) / 2
        if mid ** 2 < n:
            a = mid
        else:
            b = mid
    return (a + b) / 2
def poisci_koren(n, k, a, b, eps):
    """Find the k-th root of n on the interval [a, b] to within eps."""
    # Iterative bisection instead of the recursive original.
    while eps <= b - a:
        mid = (a + b) / 2
        if mid ** k < n:
            a = mid
        else:
            b = mid
    return (a + b) / 2
def poisci_niclo(f, a, b, eps):
    """Find a zero of the function f on the interval [a, b] to within eps."""
    # Iterative bisection: keep the half-interval where the sign changes.
    while eps <= b - a:
        mid = (a + b) / 2
        if f(a) * f(mid) > 0:
            a = mid
        else:
            b = mid
    return (a + b) / 2
| [
"matija@pretnar.info"
] | matija@pretnar.info |
f655b27fab13fb11eed3d2d3b61c3cec82c6c715 | e1dd0997239951d4d459b1ba0229493512b0b331 | /mds_py/mds-env/lib/python3.11/site-packages/cleo/loaders/factory_command_loader.py | f47ae186ab1d41b16e0ae4a0c34e9357ad678bb2 | [] | no_license | alexmy21/Octopus | bd17777cf66654c1e7959654f63ca82b716865b5 | 7844ec616376ec6cd9c1a8b73dbcad9c729557ae | refs/heads/master | 2022-12-22T22:42:29.473433 | 2022-12-21T16:52:09 | 2022-12-21T16:52:09 | 61,543,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | from typing import Callable
from typing import Dict
from typing import List
from cleo.commands.command import Command
from ..exceptions import CommandNotFoundException
from .command_loader import CommandLoader
class FactoryCommandLoader(CommandLoader):
    """Command loader that builds commands lazily from factory callables."""

    def __init__(self, factories: Dict[str, Callable]) -> None:
        self._factories = factories

    @property
    def names(self) -> List[str]:
        # The registered command names, in insertion order.
        return [command_name for command_name in self._factories]

    def has(self, name: str) -> bool:
        return name in self._factories

    def get(self, name: str) -> Command:
        if name not in self._factories:
            raise CommandNotFoundException(name)
        return self._factories[name]()
| [
"alex.mylnikov@hitachivantara.com"
] | alex.mylnikov@hitachivantara.com |
eb20013d020a237284de499bd0d8625c37888b89 | bebba3fb1dfc13a2220f06997c4bc8da42ef8e87 | /smashlib/ipy3x/core/error.py | fdce2fe18b4821b059eb0b9aa031ff4c40673968 | [
"MIT"
] | permissive | mattvonrocketstein/smash | b48b93c3419637f615c7ac3386b04ae756e1fadc | 98acdc27ab72ca80d9a7f63a54c0d52f126a8009 | refs/heads/master | 2021-01-18T23:23:59.340206 | 2016-07-14T01:28:17 | 2016-07-14T01:28:17 | 2,813,958 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | # encoding: utf-8
"""
Global exception classes for IPython.core.
Authors:
* Brian Granger
* Fernando Perez
* Min Ragan-Kelley
Notes
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Exception classes
#-----------------------------------------------------------------------------
class IPythonCoreError(Exception):
pass
class TryNext(IPythonCoreError):
"""Try next hook exception.
Raise this in your hook function to indicate that the next hook handler
should be used to handle the operation.
"""
class UsageError(IPythonCoreError):
"""Error in magic function arguments, etc.
Something that probably won't warrant a full traceback, but should
nevertheless interrupt a macro / batch file.
"""
class StdinNotImplementedError(IPythonCoreError, NotImplementedError):
"""raw_input was requested in a context where it is not supported
For use in IPython kernels, where only some frontends may support
stdin requests.
"""
class InputRejected(Exception):
"""Input rejected by ast transformer.
Raise this in your NodeTransformer to indicate that InteractiveShell should
not execute the supplied input.
"""
| [
"matthewvonrocketstein@gmail-dot-com"
] | matthewvonrocketstein@gmail-dot-com |
966167169ec32aff8bf1faaf037b3ff555065e29 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /8oNKM4osgxYyrFtGL_19.py | 71e9c04b8fa2dd16ba98bba2536e6efa1c716407 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py |
def multiply(l):
    """Return a list of len(l) lists, where the i-th inner list contains
    l[i] repeated len(l) times.

    Example: multiply([1, 2]) -> [[1, 1], [2, 2]]
    """
    size = len(l)
    # [w] * size repeats the element; same reference semantics as the
    # original append-in-a-while loop (elements are not copied).
    return [[w] * size for w in l]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
14f0ad0a9d89f796416f432792c730a988d1b86d | 8acba76d7e7787c0b2c8270552ef5bc7eb0c7549 | /backend/todo/models.py | 10f3336c4b31594c2c1e77ee6f14a3858b46b33a | [] | no_license | harishtm/djreact | a99e91723fa92b52a5d467f01235ec0fc8eb02bb | 6bec0b89db10b255581a5a9dd1f440e40e8bf784 | refs/heads/master | 2023-01-29T04:44:28.276494 | 2020-03-13T06:54:49 | 2020-03-13T06:54:49 | 201,904,114 | 0 | 0 | null | 2023-01-04T06:57:14 | 2019-08-12T09:58:03 | JavaScript | UTF-8 | Python | false | false | 268 | py | from django.db import models
# Create your models here.
class Todo(models.Model):
    """A single todo item persisted via Django's ORM."""
    # Short, required title (max 255 chars).
    title = models.CharField(max_length=255)
    # Free-form longer description.
    description = models.TextField()
    # Completion flag; new items start as not completed.
    completed = models.BooleanField(default=False)

    def __str__(self):
        # Human-readable representation (admin, shell, templates).
        return self.title
| [
"you@example.com"
] | you@example.com |
d63abbb48211e4004a4441ab8f36cb5cc3a048f2 | 5182897b2f107f4fd919af59c6762d66c9be5f1d | /.history/src/Matriz_esferica_20200707115539.py | 2a05f90d9392c0a9967f9cb280e6bcd2b930b4d3 | [
"MIT"
] | permissive | eduardodut/Trabalho_final_estatistica_cd | 422b7e702f96291f522bcc68d2e961d80d328c14 | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | refs/heads/master | 2022-11-23T03:14:05.493054 | 2020-07-16T23:49:26 | 2020-07-16T23:49:26 | 277,867,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | import numpy as np
import random
#regras de movimentacao
class Matriz_esferica():
    """Toroidal ("spherical") square grid of side *tamanho*: indices that
    run off one edge wrap around to the opposite edge."""

    def __init__(self, tamanho):
        self.tamanho = tamanho

    def get_vizinho_esquerda(self, ponto):
        pass

    def get_vizinho_direita(self, ponto):
        pass

    def get_vizinho_cima(self, ponto):
        pass

    def get_vizinho_baixo(self, ponto):
        pass

    def checa_limite_matriz(self, indice):
        # Wrap the index onto [0, tamanho). The original branchy version
        # was off by one on both sides (e.g. tamanho mapped to
        # tamanho - 1 instead of 0, and -1 mapped to tamanho + 1);
        # modulo handles every case, including steps larger than the grid.
        return indice % self.tamanho

    def passo(self, indice_x: int, indice_y: int, tamanho_max_passo: int = 1):
        """Return a random neighbouring cell of (indice_x, indice_y),
        moving at most *tamanho_max_passo* cells on each axis.

        The default of 1 preserves the old two-argument call form. The
        two previous ``passo`` definitions were merged: the second one
        shadowed the first and raised TypeError when called (it passed
        ``self`` twice to a 3-parameter method).
        """
        passo_x = random.randint(-tamanho_max_passo, tamanho_max_passo)
        passo_y = random.randint(-tamanho_max_passo, tamanho_max_passo)
        return (self.checa_limite_matriz(indice_x + passo_x),
                self.checa_limite_matriz(indice_y + passo_y))


matriz = Matriz_esferica(10)
matriz.passo(0, 0)
print(matriz.passo(0, 0))
"eduardo_dut@edu.unifor.br"
] | eduardo_dut@edu.unifor.br |
4171668175165d80aa71d270e8c75e6894a51269 | f654f5f07dd8109c0ee31ba89dd4804e6b288343 | /src/programy/clients/xmpp.py | 22f1b1c3347d45f3276d071306235db382006fa7 | [
"MIT"
] | permissive | sprinteroz/program-y | 3d1f5f28e4f3be770705d4bef15410b8b78f19da | 454c6bde225dce7c3fb01c549d46249248caf7b5 | refs/heads/master | 2021-01-19T16:05:25.636700 | 2017-08-22T03:56:33 | 2017-08-22T03:56:33 | 100,986,551 | 1 | 0 | null | 2017-08-21T19:43:43 | 2017-08-21T19:43:43 | null | UTF-8 | Python | false | false | 3,522 | py | """
Copyright (c) 2016-17 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import sleekxmpp
from programy.clients.client import BotClient
from programy.config.sections.client.xmpp import XmppConfiguration
class XmppClient(sleekxmpp.ClientXMPP):
    """Thin sleekxmpp client that forwards incoming chat messages to the
    bot and replies with its answer."""

    def __init__(self, bot_client, jid, password):
        # Keep a handle on the owning bot client so message() can query
        # the bot for answers.
        self.bot_client = bot_client
        sleekxmpp.ClientXMPP.__init__(self, jid, password)
        self.add_event_handler("session_start", self.start)
        self.add_event_handler("message", self.message)

    def start(self, event):
        # Standard XMPP session bootstrap: announce presence, fetch roster.
        self.send_presence()
        self.get_roster()

    def message(self, msg):
        # Only answer direct chat messages; the sender JID doubles as the
        # bot's per-user conversation id.
        if msg['type'] in ('chat', 'normal'):
            question = msg['body']
            userid = msg['from']
            response = self.bot_client.bot.ask_question(userid, question)
            msg.reply(response).send()
class XmppBotClient(BotClient):
    """programy BotClient that serves the bot over XMPP via sleekxmpp.

    Credentials come from the bot's license keys; server/port and the
    optional XEP plugin switches come from the client configuration.
    """

    def __init__(self, argument_parser=None):
        BotClient.__init__(self, argument_parser)

    def set_environment(self, env='xmpp'):
        # Expose the client type to the brain as the "env" property.
        self.bot.brain.properties.add_property("env", env)

    def get_client_configuration(self):
        return XmppConfiguration()

    def run(self):
        logging.debug("%s App Running.."%self.bot.brain.properties.predicate("env"))
        username = self.bot.license_keys.get_key("XMPP_USERNAME")
        password = self.bot.license_keys.get_key("XMPP_PASSWORD")
        server = self.configuration.client_configuration.server
        port = self.configuration.client_configuration.port
        self._client = XmppClient(self, username, password)
        # Register optional XMPP extensions when enabled in configuration.
        if self.configuration.client_configuration.xep_0030 is True:
            self._client.register_plugin('xep_0030')
        if self.configuration.client_configuration.xep_0004 is True:
            self._client.register_plugin('xep_0004')
        if self.configuration.client_configuration.xep_0060 is True:
            self._client.register_plugin('xep_0060')
        if self.configuration.client_configuration.xep_0199 is True:
            self._client.register_plugin('xep_0199')
        if self._client.connect((server, port)):
            print("Connected, running...")
            # process(block=True) runs the event loop until disconnect.
            self._client.process(block=True)
        else:
            print("Failed to connect, exiting...")
if __name__ == '__main__':

    # Entry point when executed directly: build the client and block in
    # its run loop.
    def run():
        print("Loading Xmpp client, please wait. See log output for progress...")
        xmpp_app = XmppBotClient()
        xmpp_app.run()

    run()
"keith@keithsterling.com"
] | keith@keithsterling.com |
183383359d833fb0a64220802c19ab444a1e24ba | d8e4ffa9daaeca95cf52d3a9e3ca1cff865357bf | /section 7/5. ๋์ ๋ถ๋ฐฐํ๊ธฐ/aa.py | 04a5b8df849fde24fe520978eff8954f916f87e2 | [] | no_license | mike-bskim/coding_test | ed4e3790b5b41ed62b922ba9d04149d1cd335c84 | 430445c3ad0ddb4435a5fd244ba3a86677cd00ff | refs/heads/master | 2023-05-06T00:04:24.090152 | 2021-05-13T19:33:01 | 2021-05-13T19:33:01 | 365,245,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | import sys
# sys.stdin=open('in1.txt', 'rt')
# input=sys.stdin.readline
def DFS(L):
    """Assign coin L to each of the three people in turn (exhaustive
    search); at depth n, record in global ``min1`` the smallest
    max-min spread among assignments whose three totals are all
    distinct."""
    global min1
    # if max(money)> total/2:  # pruning disabled: when one pile is far
    #                          # larger than the others this shortcut
    #                          # caused a logic error (translated note)
    #     return
    if L == n:
        dif = max(money) - min(money)
        if (dif) < min1:
            # Only accept splits where the three totals are pairwise
            # different (the set collapses duplicates).
            tmp = set(money)
            if len(tmp) == 3:
                min1 = dif
                # print(money, min1)
    else:
        for i in range(3):
            money[i]+=coin[L]
            DFS(L+1)
            money[i]-=coin[L]


if __name__ == '__main__':
    # Input: n, then n coin values, one per line on stdin.
    n = int(input())
    coin=list()
    money=[0]*3          # running totals for the three people
    min1=2147000000      # sentinel "infinity"
    x=list()             # NOTE(review): unused in the visible code
    for _ in range(n):
        coin.append(int(input()))
    total=sum(coin)
    # print(n, coin, total)
    DFS(0)
    print(min1)
| [
"mike-bskim@gmail.com"
] | mike-bskim@gmail.com |
e27a4dbde2694d7944a84c853f4374204a7e4703 | cc896a233121c4dc158210fae0588e7bdb63b9ff | /rtcbproj/rtcb/authentication/schema.py | b5af07989b4abed79691dda4ba9e29b3de4ded4c | [
"Apache-2.0"
] | permissive | arsenico13/rtcb-backend | 2b99a0fbb6a07bb0a9ef6652603c6e914ccd4525 | f097eae54a12ba4f3983869fef627ea1d55a37d1 | refs/heads/master | 2023-04-07T04:30:33.874569 | 2020-10-27T00:06:16 | 2020-10-27T00:06:16 | 151,688,197 | 0 | 0 | null | 2018-10-05T07:54:18 | 2018-10-05T07:54:17 | null | UTF-8 | Python | false | false | 554 | py | import graphene
from graphene_django import DjangoObjectType
from .models import User as UserModel
# GraphQL enum for a user's role, using the single-letter DB codes.
class ChoicesRoles(graphene.Enum):
    Defender = 'D'
    Striker = 'S'


# GraphQL type wrapping the User model; the password field is excluded.
class User(DjangoObjectType):
    role = ChoicesRoles()

    class Meta:
        model = UserModel
        interfaces = (graphene.Node, )
        exclude_fields = ['password']

    # Expose the raw database primary key alongside the Relay node id.
    id_db = graphene.ID()

    def resolve_id_db(self, info, **input):
        """ Return the database ID """
        return self.id

    def resolve_role(self, info, **kwargs):
        return self.role
| [
"andrea.cecchi85@gmail.com"
] | andrea.cecchi85@gmail.com |
6c856cdbc50ea95865105c3aa077d96b3ea9ad24 | 56ab1f0aa30dc0357e84fa11640c9353fd5deeed | /set7/52/set7_pro52.py | 01d24fe9296d4e88110b36a1d56cb8993b0cb214 | [] | no_license | zjwzcn07/cryptopals | c9c11a7627222291e7f5c7a2cfffc4a55b548dc5 | d44c9e8b8f1527e244de6a5425b8d4f46ef98e49 | refs/heads/master | 2021-06-10T13:35:48.517737 | 2016-11-30T11:29:59 | 2016-11-30T11:29:59 | 74,258,995 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | import hmac
import sys
def bad_hash_block(msg,iv):
    """Deliberately weak compression function: the bit-string expansion
    (via str2bin) of the HMAC hex digest of *msg* keyed by *iv*,
    truncated to 16 bits.

    NOTE(review): Python 2 only -- on Python 3, ``hmac.new`` requires
    ``bytes`` (and an explicit digestmod since 3.8). The rest of this
    module (``xrange``, print statements, ``dict.has_key``) is likewise
    Python 2.
    """
    mac = str2bin(hmac.new(iv,msg).hexdigest())[:16]
    #print mac
    return mac
def str2bin(string):
    """Return *string* as a concatenation of fixed-width 8-bit binary
    codes, one per character (e.g. 'A' -> '01000001')."""
    # format(..., '08b') zero-pads each code point to 8 bits, replacing
    # the original manual bin()/left-pad bookkeeping, which was also
    # quadratic due to repeated string concatenation.
    return ''.join(format(ord(ch), '08b') for ch in string)
def padding(msg):
    """Pad *msg* so its total length is a multiple of 16.

    Layout: msg + chr(1) + zero characters + chr(original length).
    The zero-run length was previously computed as (over_pad - 2) % 16,
    which only produced a 16-multiple when len(msg) %% 16 == 0;
    (14 - over_pad) % 16 is correct for every residue (pad is always
    between 2 and 17 characters).
    """
    over_pad = len(msg) % 16
    # NOTE(review): chr(len(msg)) still assumes the length fits in a
    # single character code, as in the original.
    msg = msg + chr(1) + chr(0) * ((14 - over_pad) % 16) + chr(len(msg))
    return msg
def Merkle_Damgard(msg, iv):
    """Merkle-Damgard construction over 16-character blocks: pad *msg*,
    then chain bad_hash_block over consecutive blocks, threading each
    digest through as the next IV. Returns (and prints) the final digest.

    Bug fixed: the original iterated over a generator of
    ``list.append(...)`` results, i.e. it hashed ``None`` for every
    block; this version hashes the actual message slices.
    """
    # NOTE(review): the original comment claimed a 16-byte IV, but the
    # code only left-pads it to 8 characters -- preserved as-is.
    if len(iv) < 8:
        iv = '0' * (8 - len(iv)) + iv
    msg = padding(msg)
    for start in range(0, len(msg), 16):
        iv = bad_hash_block(msg[start:start + 16], iv)
        print(iv)
    return iv
def find_a_collision(iv):
    """Birthday-search for two distinct 16-character messages with the
    same bad_hash_block digest under *iv*.

    Each candidate is derived from a 16-bit counter: bit i selects the
    lower- or upper-case letter at position i. Returns
    (digest, message_1, message_2). With 2**16 candidates and only
    16-bit digests a collision is overwhelmingly likely; if none is
    found the process exits.
    """
    H = {}
    collision_space1 = "abcdefghijklmnop"
    collision_space2 = "ABCDEFGHIJKLMNOP"
    for x in xrange(0,2**16):
        choice_base = bin(x)[2:]
        if(len(choice_base)<16):
            choice_base = '0'*(16 - len(choice_base)) + choice_base
        test_string = ''
        # NOTE(review): the inner loop variable ``x`` shadows the outer
        # counter; harmless here (the counter is re-derived on the next
        # iteration) but worth renaming.
        for index,x in enumerate(choice_base):
            if (x == '0'):
                test_string = test_string + collision_space1[index]
            else:
                test_string = test_string + collision_space2[index]
        h = bad_hash_block(test_string,iv)
        if(H.has_key(h)):
            #print (h,test_string,H[h])
            return (h,test_string,H[h])
        H[h] = test_string
    print "cant find a collision"
    sys.exit(0)
def extend_collision(iv, collision):
    """Extend every known colliding prefix with one freshly found
    single-block collision, doubling the number of colliding messages."""
    digest, block_a, block_b = find_a_collision(iv)
    extended = []
    for prefix in collision:
        extended.append(prefix + block_a)
        extended.append(prefix + block_b)
    return digest, extended
def Merkle_Damgard_collision(iv,num):
    """Build 2**(num+1) colliding messages (Joux multicollision): find
    one single-block collision, then extend it *num* times, doubling
    the set on each extension."""
    collision = []
    iv,co1,co2 = find_a_collision(iv)
    collision = [co1,co2]
    print "The first collision:"
    print collision
    for i in xrange(0,num):
        iv,collision = extend_collision(iv,collision)
        print "The " + str(i + 1) + " times extend:"
        print collision
        print len(collision)
    return collision
if __name__ == '__main__':
    # 16-character zero IV; build 2**4 = 16 colliding messages and then
    # hash each one to show they all reach the same final digest.
    iv = "0000000000000000"
    #Merkle_Damgard("aaaaaaa",iv)
    collision = Merkle_Damgard_collision(iv,3)
    for x in collision:
        Merkle_Damgard(x,iv)
"1106911190@qq.com"
] | 1106911190@qq.com |
21ce8f7efc7d9750a693501c0a68db78036b44c3 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/_operations_mixin.py | 5631cad2bfb87a3b4ce5a3932a1c06cdb5d2fd42 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 3,410 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._serialization import Serializer, Deserializer
from io import IOBase
from typing import Any, IO, Optional, Union
from . import models as _models
class SubscriptionClientOperationsMixin(object):
    """Multi-api-version dispatcher: routes each operation to the
    generated operations class for the negotiated API version."""

    def check_resource_name(
        self,
        resource_name_definition: Optional[Union[_models.ResourceName, IO]] = None,
        **kwargs: Any
    ) -> _models.CheckResourceNameResult:
        """Checks resource name validity.

        A resource name is valid if it is not a reserved word, does not contains a reserved word and
        does not start with a reserved word.

        :param resource_name_definition: Resource object with values for resource name and resource
         type. Is either a ResourceName type or a IO type. Default value is None.
        :type resource_name_definition:
         ~azure.mgmt.resource.subscriptions.v2022_12_01.models.ResourceName or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CheckResourceNameResult or the result of cls(response)
        :rtype: ~azure.mgmt.resource.subscriptions.v2022_12_01.models.CheckResourceNameResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pick the operations class for the negotiated API version; each
        # version lives in its own generated sub-package.
        api_version = self._get_api_version('check_resource_name')
        if api_version == '2016-06-01':
            from .v2016_06_01.operations import SubscriptionClientOperationsMixin as OperationClass
        elif api_version == '2018-06-01':
            from .v2018_06_01.operations import SubscriptionClientOperationsMixin as OperationClass
        elif api_version == '2019-06-01':
            from .v2019_06_01.operations import SubscriptionClientOperationsMixin as OperationClass
        elif api_version == '2019-11-01':
            from .v2019_11_01.operations import SubscriptionClientOperationsMixin as OperationClass
        elif api_version == '2021-01-01':
            from .v2021_01_01.operations import SubscriptionClientOperationsMixin as OperationClass
        elif api_version == '2022-12-01':
            from .v2022_12_01.operations import SubscriptionClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'check_resource_name'".format(api_version))
        # Build a throwaway instance of the version-specific mixin wired
        # to this client's transport/config, then delegate the call.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._config.api_version = api_version
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.check_resource_name(resource_name_definition, **kwargs)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
73d0d1b1c8b75b5f329b134be4f9f52dae01419b | bc5dd7be84a43ec53f8e4215761badb9b61a13ad | /kurs_2/vertualenv/Lib/site-packages/django/contrib/messages/storage/base.py | d1f9b274323aa72fc4ab49364e624f39becd2095 | [] | no_license | MaximMak/DL_Academy_Lessons | ef4758be02e43954748031ac95c970077f71cd7e | 427576859657e88fd81683494397af3df920c674 | refs/heads/master | 2023-01-29T19:53:11.650096 | 2020-12-13T21:40:58 | 2020-12-13T21:40:58 | 276,397,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,679 | py | from django.conf import settings
from django.contrib.messages import constants, utils
LEVEL_TAGS = utils.get_level_tags()
class Message:
    """A single user-facing notification.

    Instances can be serialized by any of the supported message storage
    classes (typically session- or cookie-based) and later rendered in a
    view or template.
    """

    def __init__(self, level, message, extra_tags=None):
        self.level = int(level)
        self.message = message
        self.extra_tags = extra_tags

    def _prepare(self):
        """Coerce ``message`` and ``extra_tags`` to plain ``str`` before
        serialization, since either may be a lazy translation object."""
        self.message = str(self.message)
        if self.extra_tags is not None:
            self.extra_tags = str(self.extra_tags)

    def __eq__(self, other):
        if not isinstance(other, Message):
            return NotImplemented
        return (self.level, self.message) == (other.level, other.message)

    def __str__(self):
        return str(self.message)

    @property
    def tags(self):
        # Combine optional extra tags with the level tag, skipping blanks.
        candidates = [self.extra_tags, self.level_tag]
        return ' '.join(tag for tag in candidates if tag)

    @property
    def level_tag(self):
        return LEVEL_TAGS.get(self.level, '')
class BaseStorage:
    """
    This is the base backend for temporary message storage.

    This is not a complete class; to be a usable storage backend, it must be
    subclassed and the two methods ``_get`` and ``_store`` overridden.
    """

    def __init__(self, request, *args, **kwargs):
        self.request = request
        self._queued_messages = []
        # ``used`` flips to True once the messages have been iterated
        # (i.e. displayed); ``added_new`` once add() queues a message.
        self.used = False
        self.added_new = False
        super().__init__(*args, **kwargs)

    def __len__(self):
        return len(self._loaded_messages) + len(self._queued_messages)

    def __iter__(self):
        # Iteration consumes the storage: mark it used and merge any
        # queued messages into the loaded list before yielding them.
        self.used = True
        if self._queued_messages:
            self._loaded_messages.extend(self._queued_messages)
            self._queued_messages = []
        return iter(self._loaded_messages)

    def __contains__(self, item):
        return item in self._loaded_messages or item in self._queued_messages

    @property
    def _loaded_messages(self):
        """
        Return a list of loaded messages, retrieving them first if they have
        not been loaded yet.
        """
        if not hasattr(self, '_loaded_data'):
            messages, all_retrieved = self._get()
            self._loaded_data = messages or []
        return self._loaded_data

    def _get(self, *args, **kwargs):
        """
        Retrieve a list of stored messages. Return a tuple of the messages
        and a flag indicating whether or not all the messages originally
        intended to be stored in this storage were, in fact, stored and
        retrieved; e.g., ``(messages, all_retrieved)``.

        **This method must be implemented by a subclass.**

        If it is possible to tell if the backend was not used (as opposed to
        just containing no messages) then ``None`` should be returned in
        place of ``messages``.
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _get() method')

    def _store(self, messages, response, *args, **kwargs):
        """
        Store a list of messages and return a list of any messages which could
        not be stored.

        One type of object must be able to be stored, ``Message``.

        **This method must be implemented by a subclass.**
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _store() method')

    def _prepare_messages(self, messages):
        """
        Prepare a list of messages for storage.
        """
        for message in messages:
            message._prepare()

    def update(self, response):
        """
        Store all unread messages.

        If the backend has yet to be iterated, store previously stored messages
        again. Otherwise, only store messages added after the last iteration.
        """
        self._prepare_messages(self._queued_messages)
        if self.used:
            return self._store(self._queued_messages, response)
        elif self.added_new:
            messages = self._loaded_messages + self._queued_messages
            return self._store(messages, response)

    def add(self, level, message, extra_tags=''):
        """
        Queue a message to be stored.

        The message is only queued if it contained something and its level is
        not less than the recording level (``self.level``).
        """
        if not message:
            return
        # Check that the message level is not less than the recording level.
        level = int(level)
        if level < self.level:
            return
        # Add the message.
        self.added_new = True
        message = Message(level, message, extra_tags=extra_tags)
        self._queued_messages.append(message)

    def _get_level(self):
        """
        Return the minimum recorded level.

        The default level is the ``MESSAGE_LEVEL`` setting. If this is
        not found, the ``INFO`` level is used.
        """
        if not hasattr(self, '_level'):
            self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
        return self._level

    def _set_level(self, value=None):
        """
        Set a custom minimum recorded level.

        If set to ``None``, the default level will be used (see the
        ``_get_level`` method).
        """
        if value is None and hasattr(self, '_level'):
            del self._level
        else:
            self._level = int(value)

    # ``level`` is a read/write/delete property; assigning None restores
    # the default from settings (the setter doubles as the deleter).
    level = property(_get_level, _set_level, _set_level)
"54116778+MaximMak@users.noreply.github.com"
] | 54116778+MaximMak@users.noreply.github.com |
4e6c5b0a5c1fc42f8b453fe5a38fd77ed0d32ef8 | 1f8d05b774271e6c9321b23093baa1dda3dae650 | /utils_app/website_settings.py | 1ad5af2e18bf0beded6e8b31a84024909eaa29ea | [] | no_license | pockerman/python_lms | 5ca2f0d90692bf6c9bc7c00d0ffd59db084ead3f | 327dbbd9e5ad96c143c8abe60cf8a2cbc3d5dac0 | refs/heads/master | 2020-07-14T01:07:27.826127 | 2019-08-29T16:08:37 | 2019-08-29T16:08:37 | 205,197,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py |
# Title shown on the site's index page.
site_index_title='Ustdy'
# Labels for the top navigation bar, in display order.
navigation_elements = ["Join","Services","Courses","Contact"]
# Button captions rendered on the main page.
main_page_buttons =["Courses Catalog","Request Info","Tests Catalog","Library","Learn More"]
# Page <title> values, parallel to navigation_elements.
nav_page_title=["Ustdy-Join","Ustdy-Services","Ustdy-Courses","Ustdy-Contact"]
| [
"a.giavaras@gmail.com"
] | a.giavaras@gmail.com |
6216ac8e2467c29bdefb4a44159e403691e89f88 | 9a5438bdb8e84d0167ddea5458a7f729fdd54121 | /MetaDataApi/tests/test_utils/test_json_utils.py | d364134e98feae48c1bf4083a71f2715fcc53c32 | [] | no_license | Grusinator/MetaDataApi | 740fd2be4cb97b670f827a071a0ac8c50f79f8ff | 081f881c735466ed1dbbd68646b821299c5168f8 | refs/heads/master | 2023-07-25T23:58:22.179717 | 2020-03-15T09:36:05 | 2020-03-15T09:36:05 | 149,087,967 | 5 | 1 | null | 2023-07-25T15:39:12 | 2018-09-17T07:45:09 | CSS | UTF-8 | Python | false | false | 366 | py | import django
from django.test import TransactionTestCase
class TestJsonUtils(TransactionTestCase):
    """Test scaffolding for the JSON utilities module."""
    # Django requires an explicit setup() when running tests in PTVS
    @classmethod
    def setUpClass(cls):
        super(TestJsonUtils, cls).setUpClass()
        django.setup()

    def test_identify_json_data_sample(self):
        # NOTE(review): placeholder assertion only -- no JSON utility is
        # exercised yet.
        self.assertEqual(1 + 1, 2)
| [
"grusinator@gmail.com"
] | grusinator@gmail.com |
c8b71e3b91a871f2dc99627693d550ff4be408fe | e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67 | /azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/network/v2017_09_01/models/express_route_circuits_routes_table_summary_list_result.py | db62dab03a898b918438cd3d316dd87a3d2c32b5 | [] | no_license | EnjoyLifeFund/macHighSierra-cellars | 59051e496ed0e68d14e0d5d91367a2c92c95e1fb | 49a477d42f081e52f4c5bdd39535156a2df52d09 | refs/heads/master | 2022-12-25T19:28:29.992466 | 2017-10-10T13:00:08 | 2017-10-10T13:00:08 | 96,081,471 | 3 | 1 | null | 2022-12-17T02:26:21 | 2017-07-03T07:17:34 | null | UTF-8 | Python | false | false | 1,251 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableSummaryListResult(Model):
    """Response for ListRoutesTable associated with the Express Route Circuits
    API.

    :param value: A list of the routes table.
    :type value: list of :class:`ExpressRouteCircuitRoutesTableSummary
     <azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitRoutesTableSummary>`
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    # msrest (de)serialization map: wire key and type for each attribute.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTableSummary]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, value=None, next_link=None):
        self.value = value
        self.next_link = next_link
| [
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
1d5a9de4a037605a35872b62a9cd7204b3ab99ae | a12d46e439d81ba0a60776f46eb60b1a158b8ac1 | /pfrl/utils/__init__.py | d632886211326ad92635ea4bae4dc51d21237a3a | [
"MIT"
] | permissive | TMats/pfrl | 1b2a529aec210b0e3064182433797460a64f79f5 | 9c591657a5433b8f23c589e01751408ee5d1dde5 | refs/heads/master | 2022-12-21T01:27:07.625802 | 2020-10-07T02:32:20 | 2020-10-07T02:32:20 | 284,470,130 | 0 | 0 | MIT | 2020-08-02T13:46:29 | 2020-08-02T13:46:28 | null | UTF-8 | Python | false | false | 548 | py | from pfrl.utils.batch_states import batch_states # NOQA
from pfrl.utils.conjugate_gradient import conjugate_gradient # NOQA
from pfrl.utils import env_modifiers # NOQA
from pfrl.utils.is_return_code_zero import is_return_code_zero # NOQA
from pfrl.utils.random_seed import set_random_seed # NOQA
from pfrl.utils.pretrained_models import download_model # NOQA
from pfrl.utils.contexts import evaluating # NOQA
from pfrl.utils.stoppable_thread import StoppableThread # NOQA
from pfrl.utils.clip_l2_grad_norm import clip_l2_grad_norm_ # NOQA
| [
"muupan@gmail.com"
] | muupan@gmail.com |
96c96efe9ecb296cc2033b388891cae9edfce3eb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_stooges.py | 1d4b04a9490a2094070e8d4dab9ac1b03b79dfd3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.nouns._stooge import _STOOGE
#calss header
class _STOOGES(_STOOGE, ):
    """Noun entry for "stooges": inherits behavior from _STOOGE and
    overrides only the identifying metadata."""
    def __init__(self,):
        _STOOGE.__init__(self)
        self.name = "STOOGES"      # surface form (upper-cased key)
        self.specie = 'nouns'      # part-of-speech bucket
        self.basic = "stooge"      # base (singular) form
        self.jsondata = {}         # no extra dictionary payload
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ea00ac9cf0ba38ce3d14ec9351fb5ce1d8ba4246 | a72f39b82966cd6e2a3673851433ce7db550429a | /openchat/openchat/models/imagemodel.py | f1310a355504ba28f45bdb8db17b6ed4d232bf6c | [
"Apache-2.0"
] | permissive | linxi1158/iMIX | 85841d6b95e1d99ed421a1ac3667658e49cae6fc | af87a17275f02c94932bb2e29f132a84db812002 | refs/heads/master | 2023-06-09T23:37:46.534031 | 2021-06-30T12:09:42 | 2021-06-30T12:09:42 | 381,608,650 | 0 | 0 | Apache-2.0 | 2021-06-30T07:08:40 | 2021-06-30T07:08:39 | null | UTF-8 | Python | false | false | 2,534 | py | import torch
from openchat.models.base_model import BaseModel
import openchat.config as cfg
from openchat.utils.prepare_image import detect_objects_on_single_image
from openchat.utils.transforms import build_transforms
import cv2
import sys
sys.path.insert(0, '/home/datasets/mix_data/openchat/scene_graph_benchmark-main')
class LxmertBot(BaseModel):
    """LXMERT-based VQA bot: detects objects in the image for *image_id*
    with a scene-graph detector, feeds region features plus the user's
    question to LXMERT, and returns the top-scoring answer string."""

    def __init__(self, env, device, max_context_length):
        super().__init__('imagemodel', env)
        # self.model = MobileNetV2(num_classes=5)
        self.devices = device.lower()
        self.max_context_length = max_context_length
        # self.tokenizer = .from_pretrained()
        # NOTE(review): self.tokenizer is never assigned (line above is
        # commented out), yet predict() calls self.tokenizer.encode when
        # prior history exists -- that path would raise AttributeError.
        self.eos = '</s><s>'
        self.lxmert_model = torch.load(cfg.lxmert_weight_path)
        self.transforms = build_transforms()
        self.detect_model = torch.load(cfg.detect_weight_path)
        # self.model.to(device)

    @torch.no_grad()
    def predict(self, image_id: str, text: str) -> str:
        torch.cuda.empty_cache()
        input_ids_list: list = []
        num_of_stacked_tokens: int = 0
        print(text)
        # First question for this image: initialise its history slot.
        if image_id not in self.env.histories.keys():
            self.env.clear(image_id, text)
        # Walk the dialogue history newest-first, keeping turns until the
        # token budget (max_context_length) would be exceeded.
        user_histories = reversed(self.env.histories[image_id]['user'])
        bot_histories = reversed(self.env.histories[image_id]['bot'])
        for user, bot in zip(user_histories, bot_histories):
            user_tokens = self.tokenizer.encode(user, return_tensors='pt')
            bot_tokens = self.tokenizer.encode(bot, return_tensors='pt')
            num_of_stacked_tokens += user_tokens.shape[-1] + bot_tokens.shape[-1]
            if num_of_stacked_tokens <= self.max_context_length:
                input_ids_list.append(bot_tokens)
                input_ids_list.append(user_tokens)
            else:
                break
        # NOTE(review): input_ids_list is built but never consumed below.
        img_path = cfg.image_path + image_id
        img = cv2.imread(img_path)
        dets = detect_objects_on_single_image(self.detect_model, self.transforms, img)
        # Batch of one: stack per-detection features/boxes, add batch dim.
        data = {}
        data['feats'] = torch.stack([det['features'] for det in dets]).unsqueeze(dim=0)
        data['boxes'] = torch.stack([torch.tensor(det['rect'], dtype=torch.float32) for det in dets]).unsqueeze(dim=0)
        feats = data['feats'].to('cuda')
        boxes = data['boxes'].to('cuda')
        sent = [text]
        output_dict = self.lxmert_model.model(feats, boxes, sent)
        # Highest-scoring answer index, mapped through the answer table.
        max_score = output_dict['scores'].argmax(dim=-1)
        print(max_score)
        ans = cfg.answer_table[max_score]
        return ans
| [
"hsslab.inspur@gmail.com"
] | hsslab.inspur@gmail.com |
0b25ae4c71c7ade397cad1698cadef5233b1b817 | ecb7e109a62f6a2a130e3320ed1fb580ba4fc2de | /reference-code/lambda/cm-premembers-backend/premembers/check/logic/check_asc/asc_item_16_logic.py | 8cec4f13cbcf037fbbe58d663cd87397596d2cb4 | [] | no_license | nisheeth84/prjs_sample | df732bc1eb58bc4fd4da6e76e6d59a2e81f53204 | 3fb10823ca4c0eb3cd92bcd2d5d4abc8d59436d9 | refs/heads/master | 2022-12-25T22:44:14.767803 | 2020-10-07T14:55:52 | 2020-10-07T14:55:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,861 | py | import inspect
from premembers.repository.const import CheckResult
from premembers.common import common_utils, S3Utils
from premembers.const.const import CommonConst
from premembers.exception.pm_exceptions import PmError
from premembers.common import FileUtils, date_utils, aws_common
from premembers.check.logic.check_asc import asc_item_common_logic
LOG_DELIVERY_URI = "http://acs.amazonaws.com/groups/s3/LogDelivery"
def check_asc_item_16_01(trace_id, check_history_id, organization_id,
                         project_id, aws_account, session, result_json_path):
    """ASC item 16-01: flag S3 buckets that neither grant the LogDelivery
    group any ACL permission nor have server access logging enabled.

    Returns a CheckResult code; detected buckets are written to
    *result_json_path* on S3.

    NOTE(review): multi-byte Japanese comments and log-string literals in
    the original had been broken across lines (syntax errors); they were
    restored/translated to English here without changing the logic.
    """
    cw_logger = common_utils.begin_cw_logger(trace_id, __name__,
                                             inspect.currentframe())
    check_results = []
    is_authorized = True
    # Create an S3 client from the session credentials.
    try:
        s3_client = S3Utils.get_s3_client(trace_id, session, aws_account,
                                          is_cw_logger=True)
    except PmError as e:
        raise common_utils.write_log_pm_error(e, cw_logger)
    # Fetch the list of S3 buckets for the account.
    try:
        list_buckets = asc_item_common_logic.get_list_buckets(
            trace_id, check_history_id, organization_id, project_id, s3_client,
            aws_account)
    except PmError as e:
        return CheckResult.Error
    for bucket in list_buckets["Buckets"]:
        bucket_name = bucket['Name']
        region_name = None
        try:
            # Resolve the bucket's region (None means us-east-1).
            region_name = S3Utils.get_bucket_location(trace_id, s3_client,
                                                      bucket_name, aws_account,
                                                      is_cw_logger=True)
            if region_name is None:
                region_name = CommonConst.US_EAST_REGION
            # Fetch (or reuse the cached) ACL for this bucket.
            bucket_acl = get_bucket_acl(
                trace_id, check_history_id, organization_id, project_id,
                aws_account, region_name, bucket_name, s3_client)
            # Fetch the bucket's server access logging configuration.
            bucket_logging = S3Utils.get_bucket_logging(
                trace_id, aws_account, s3_client, bucket_name, region_name,
                is_cw_logger=True)
        except PmError as e:
            if e.cause_error.response['Error'][
                    'Code'] in CommonConst.S3_SKIP_EXCEPTION:
                # Access denied (or similar): record the bucket as
                # "unauthorized" and continue with the remaining buckets.
                # NOTE(review): the trailing commas make the next two
                # values 1-tuples, exactly as in the original -- confirm
                # downstream intent before "fixing".
                error_operation = e.cause_error.operation_name,
                error_code = e.cause_error.response['Error']['Code'],
                error_message = e.cause_error.response['Error']['Message']
                if region_name is None:
                    region_name = CommonConst.ERROR
                check_results.append(
                    asc_item_common_logic.get_error_authorized_result(
                        region_name, bucket_name, error_operation, error_code,
                        error_message))
                is_authorized = False
                continue
            else:
                return CheckResult.Error
        # Persist the raw logging info to S3 (resource information file).
        try:
            s3_file_name = CommonConst.PATH_CHECK_RAW.format(
                check_history_id, organization_id, project_id, aws_account,
                "ASC/S3_ClientLogging_" + region_name + "_" + bucket_name +
                ".json")
            FileUtils.upload_json(trace_id, "S3_CHECK_BUCKET", bucket_logging,
                                  s3_file_name, is_cw_logger=True)
        except PmError as e:
            cw_logger.error("[%s] Failed to save the S3 bucket logging "
                            "information. (%s/%s)", aws_account,
                            region_name, bucket_name)
            return CheckResult.Error
        # Evaluate the check for this bucket.
        bucket_abnormity = True
        try:
            # Check-1: does the ACL grant the LogDelivery group any
            # permission on this bucket?
            for grant in bucket_acl["Grants"]:
                if (common_utils.check_key("URI", grant['Grantee']) and
                        grant['Grantee']["URI"] == LOG_DELIVERY_URI):
                    bucket_abnormity = False
                    break
            # Check-2: server access logging is not enabled.
            if bucket_abnormity is True and len(bucket_logging) == 0:
                result = {
                    'Region': region_name,
                    'Level': CommonConst.LEVEL_CODE_21,
                    'DetectionItem': {
                        'BucketName': bucket_name
                    }
                }
                check_results.append(result)
        except Exception as e:
            cw_logger.error("[%s] An error occurred during the check. "
                            "(%s/%s)", aws_account, region_name, bucket_name)
            return CheckResult.Error
    # Export the aggregated result file CHECK_ASC_ITEM_16_01.json.
    try:
        current_date = date_utils.get_current_date_by_format(
            date_utils.PATTERN_YYYYMMDDHHMMSS)
        check_asc_item_16_01 = {
            'AWSAccount': aws_account,
            'CheckResults': check_results,
            'DateTime': current_date
        }
        FileUtils.upload_s3(trace_id, check_asc_item_16_01, result_json_path,
                            format_json=True, is_cw_logger=True)
    except Exception as e:
        cw_logger.error("[%s] Failed to save the check result JSON file.",
                        aws_account)
        return CheckResult.Error
    # Final verdict: any authorization failure dominates, then detections.
    if is_authorized is False:
        return CheckResult.Error
    if len(check_results) > 0:
        return CheckResult.CriticalDefect
    return CheckResult.Normal
def get_bucket_acl(trace_id, check_history_id, organization_id, project_id,
                   aws_account, region_name, bucket_name, s3_client):
    """Return the ACL of *bucket_name*, preferring a JSON copy cached in S3.

    On a cache hit the stored JSON is read back; on a miss the ACL is fetched
    live via ``S3Utils.get_bucket_acl``. In both cases the ACL is (re)uploaded
    to the cache location afterwards. ``PmError`` from read/fetch is logged
    and re-raised; an upload failure is only logged (best-effort) and the ACL
    is still returned.
    """
    pm_logger = common_utils.begin_logger(trace_id, __name__,
                                          inspect.currentframe())
    # Cache key under the per-check "raw" path for this account/region/bucket.
    s3_file_name = CommonConst.PATH_CHECK_RAW.format(
        check_history_id, organization_id, project_id, aws_account,
        "ASC/S3_ACL_" + region_name + "_" + bucket_name + ".json")
    # Resource lookup: use the cached JSON in S3 when it already exists.
    if (aws_common.check_exists_file_s3(trace_id, "S3_CHECK_BUCKET",
                                        s3_file_name,
                                        is_cw_logger=True)) is True:
        try:
            bucket_acl = FileUtils.read_json(trace_id, "S3_CHECK_BUCKET",
                                             s3_file_name, is_cw_logger=True)
        except PmError as e:
            raise common_utils.write_log_pm_error(e, pm_logger)
    else:
        # Cache miss: fetch the ACL directly from the bucket.
        try:
            bucket_acl = S3Utils.get_bucket_acl(
                trace_id, s3_client, bucket_name, aws_account, region_name,
                is_cw_logger=True)
        except PmError as e:
            raise common_utils.write_log_pm_error(e, pm_logger)
    # Save the fetched ACL back to S3 (note: this also runs on the cache-hit
    # path, refreshing the stored copy).
    try:
        FileUtils.upload_json(trace_id, "S3_CHECK_BUCKET", bucket_acl,
                              s3_file_name, is_cw_logger=True)
    except PmError as e:
        # Best-effort persistence: failure is logged ("failed to save the S3
        # bucket ACL info to S3") but the ACL is still returned to the caller.
        pm_logger.error("[%s] S3バケットACL情報のS3保存に失敗しました（%s）/（%s）",
                        aws_account, region_name, bucket_name)
    return bucket_acl
| [
"phamkhachoabk@gmail.com"
] | phamkhachoabk@gmail.com |
fad8cd1b400beabb441d442165b3a8bf0df6762a | cbf9f600374d7510988632d7dba145c8ff0cd1f0 | /virtual/XMLProContest/07/b.py | 35635e38d107de7b0178f53764dc410a73266496 | [] | no_license | sakakazu2468/AtCoder_py | d0945d03ad562474e40e413abcec39ded61e6855 | 34bdf39ee9647e7aee17e48c928ce5288a1bfaa5 | refs/heads/master | 2022-04-27T18:32:28.825004 | 2022-04-21T07:27:00 | 2022-04-21T07:27:00 | 225,844,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | s = input()
def judge(s):
    """Return "AC" when *s* satisfies the "AcCepted" rules, else "WA".

    Conditions (AtCoder ABC104 B):
      * the first character is an uppercase 'A',
      * exactly one 'C' occurs between the third character and the
        second-to-last character (inclusive),
      * every other character is lowercase — equivalently the string has
        exactly two characters outside 'a'..'z' (the 'A' and the 'C').
    """
    # Guard against empty input instead of raising IndexError.
    if not s or s[0] != "A":
        return "WA"
    # Exactly one 'C' strictly inside s[2:len(s)-1].
    if s[2:len(s) - 1].count("C") != 1:
        return "WA"
    # Count characters outside 'a'..'z'; this mirrors the original ord()
    # range check, so digits/symbols also count as "not lowercase".
    non_lower = sum(1 for ch in s if not ("a" <= ch <= "z"))
    return "AC" if non_lower == 2 else "WA"


if __name__ == "__main__":
    # `s` is read from stdin at the top of the script.
    print(judge(s))
| [
"sakakazu2468@icloud.com"
] | sakakazu2468@icloud.com |
a015ebe3fa675abc13b391e986aad5bcb118daa4 | 996bc055b89b5e103f345dd26b0883caffd1c9bc | /openstack/identity/v3/region.py | 1fdd1c20cda5780cde2af826cfa5caf2581b8f71 | [
"Apache-2.0"
] | permissive | starlingx-staging/stx-openstacksdk | be633cedf4740e8788b814eca68039df3de6fe4b | 93d4e95536c4824e2197b2a63a4438ff5c60b653 | refs/heads/master | 2020-03-18T03:00:29.254952 | 2018-11-23T18:27:59 | 2018-11-24T01:17:45 | 134,218,298 | 1 | 4 | Apache-2.0 | 2018-11-24T01:17:46 | 2018-05-21T04:34:40 | Python | UTF-8 | Python | false | false | 1,277 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.identity import identity_service
from openstack import resource2 as resource
class Region(resource.Resource):
    """OpenStack Identity (Keystone) v3 ``region`` resource."""
    resource_key = 'region'
    resources_key = 'regions'
    base_path = '/regions'
    service = identity_service.IdentityService()

    # capabilities: full CRUD plus listing; updates are sent as PATCH.
    allow_create = True
    allow_get = True
    allow_update = True
    allow_delete = True
    allow_list = True
    patch_update = True

    # Properties
    #: User-facing description of the region. *Type: string*
    description = resource.Body('description')
    #: The links for the region resource.
    links = resource.Body('links')
    #: ID of parent region, if any. *Type: string*
    parent_region_id = resource.Body('parent_region_id')
| [
"tengqim@cn.ibm.com"
] | tengqim@cn.ibm.com |
563575c004cb97e9beef32766003f08b7e584b18 | da386754e12ed3e251d5fb9091d9416b9f97edc7 | /sfepy/discrete/common/extmods/cmesh.pxd | 120eceb44f387f87854279f1d9895b7f2a036891 | [
"BSD-3-Clause"
] | permissive | nasseralkmim/sfepy | 5b5642f084b62632c1ca48035e510f27728e25ab | 647f1754bcd4fd103cd19a03ed36cb10ebc8fd15 | refs/heads/master | 2020-04-06T04:57:21.589694 | 2016-08-03T12:38:31 | 2016-08-03T12:38:31 | 65,736,316 | 2 | 1 | null | 2016-08-15T13:58:01 | 2016-08-15T13:58:01 | null | UTF-8 | Python | false | false | 3,893 | pxd | # -*- Mode: Python -*-
"""
C Mesh data structures and functions.
"""
cimport numpy as np
from libc.stdio cimport FILE, stdout
from types cimport uint32, int32, float64, complex128
# --- C-level declarations re-exported to Cython -----------------------------

cdef extern from 'string.h':
    void *memcpy(void *dest, void *src, size_t n)

cdef extern from 'common.h':
    # Custom allocator pair used for C-side mesh memory.
    void *pyalloc(size_t size)
    void pyfree(void *pp)
    # Allocation-tracking helpers (debug/statistics only).
    void mem_statistics(int lineNo, char *funName,
                        char *fileName, char *dirName)
    size_t mem_get_cur_usage()
    size_t mem_get_max_usage()
    size_t mem_get_n_frags()

cdef extern from 'mesh.h':
    # A flat list of entity indices.
    ctypedef struct Indices:
        uint32 *indices
        uint32 num

    # Boolean mask over entities; n_true counts the set entries.
    ctypedef struct Mask:
        char *mask
        uint32 num
        uint32 n_true

    # Vertex coordinates: `num` points of dimension `dim`.
    ctypedef struct MeshGeometry:
        uint32 num
        uint32 dim
        float64 *coors

    # Topological entity counts/orientations and the d1->d2 connectivities
    # (conn is indexed as a flattened 4x4 table — presumably; TODO confirm).
    ctypedef struct MeshTopology:
        uint32 max_dim
        uint32 num[4]
        uint32 *cell_types
        uint32 *face_oris
        uint32 *edge_oris
        MeshConnectivity *conn[16]

    # CSR-like connectivity: `indices` addressed through `offsets`.
    ctypedef struct MeshConnectivity:
        uint32 num
        uint32 n_incident
        uint32 *indices
        uint32 *offsets
        uint32 offset

    # Per-cell-type local edge/face definitions.
    ctypedef struct LocalEntities:
        uint32 num
        MeshConnectivity **edges
        MeshConnectivity **faces

    ctypedef struct Mesh:
        MeshGeometry geometry[1]
        MeshTopology topology[1]
        LocalEntities entities[1]

    # Lifecycle / debugging.
    cdef int32 mesh_init(Mesh *mesh)
    cdef int32 mesh_free(Mesh *mesh)
    cdef int32 mesh_print(Mesh *mesh, FILE *file, int32 header_only)

    # Connectivity allocation / debugging.
    cdef int32 conn_alloc(MeshConnectivity *conn,
                          uint32 num, uint32 n_incident)
    cdef int32 conn_free(MeshConnectivity *conn)
    cdef int32 conn_print(MeshConnectivity *conn, FILE *file)

    # Geometry / connectivity setup.
    cdef int32 mesh_set_coors(Mesh *mesh, float64 *coors, int32 num, int32 dim,
                              int32 tdim)
    cdef int32 mesh_setup_connectivity(Mesh *mesh, int32 d1, int32 d2)
    cdef int32 mesh_free_connectivity(Mesh *mesh, int32 d1, int32 d2)

    # Incidence queries and derived quantities.
    cdef uint32 mesh_count_incident(Mesh *mesh, int32 dim,
                                    Indices *entities, int32 dent)
    cdef int32 mesh_get_incident(Mesh *mesh,
                                 MeshConnectivity *incident, int32 dim,
                                 Indices *entities, int32 dent)
    cdef int32 mesh_get_local_ids(Mesh *mesh, Indices *local_ids,
                                  Indices *entities, int32 dent,
                                  MeshConnectivity *incident, int32 dim)
    cdef int32 mesh_select_complete(Mesh *mesh, Mask *mask, int32 dim,
                                    Indices *entities, int32 dent)
    cdef int32 mesh_get_centroids(Mesh *mesh, float64 *ccoors, int32 dim)
    cdef int32 mesh_get_facet_normals(Mesh *mesh, float64 *normals,
                                      int32 which)
cdef class CConnectivity:
    """
    Notes
    -----
    The memory is allocated/freed in C - this class just wraps NumPy arrays
    around that data without copying.
    """
    # Pointer to the underlying C connectivity structure (owned by C code).
    cdef MeshConnectivity *conn

    # NumPy views over the C-allocated index/offset arrays (no copies made).
    cdef public np.ndarray indices
    cdef public np.ndarray offsets
    cdef public int num, n_incident, offset

    # Bind a C connectivity struct and (re)build the NumPy views above.
    cdef _set_conn(self, MeshConnectivity *conn)
cdef class CMesh:
    # Python-facing wrapper around the C ``Mesh`` struct (geometry, topology
    # and local-entity tables); all heavy data lives on the C side.
    cdef Mesh mesh[1]

    # Geometry and grouping arrays.
    cdef readonly np.ndarray coors
    cdef readonly np.ndarray vertex_groups
    cdef readonly np.ndarray cell_types
    cdef readonly np.ndarray cell_groups # ig for each cell.

    # Connectivities and cached topological entities.
    cdef readonly list conns
    cdef readonly dict entities

    # Basic sizes: coordinates, space dimension, cells, topological dimension.
    cdef readonly int n_coor, dim, n_el, tdim
    cdef readonly np.ndarray num # Numbers of topological entities.

    # Orientation flags of facets. Allocated in C.
    cdef readonly np.ndarray face_oris # Allocated in C.
    cdef readonly np.ndarray edge_oris # Allocated in C.
    cdef readonly np.ndarray facet_oris # face_oris in 3D, edge_oris in 2D

    # Maps (d1, d2) connectivity keys to positions in ``conns``.
    cdef readonly dict key_to_index
| [
"cimrman3@ntc.zcu.cz"
] | cimrman3@ntc.zcu.cz |
7d84de5d28cd4b40bb4268adbdf0cb11c4199569 | 8cb8bfd2dae516612251039e0632173ea1ea4c8a | /modules/analyzes/employment/models.py | 05c8ebbf391dc038ba5a93dbda7e187bdd3c6fba | [] | no_license | nyzsirt/lift-prod | 563cc70700d26a5812a1bce0bd9795998dce6e99 | 9a5f28e49ad5e80e422a5d5efee77a2d0247aa2b | refs/heads/master | 2020-04-22T01:05:42.262876 | 2019-02-09T13:31:15 | 2019-02-09T13:31:15 | 170,003,361 | 1 | 0 | null | 2019-02-10T17:11:50 | 2019-02-10T17:11:50 | null | UTF-8 | Python | false | false | 1,074 | py | import datetime
from mongoengine import Document
from mongoengine import StringField
from mongoengine import DateTimeField
from mongoengine import ObjectIdField
from mongoengine import DecimalField
from mongoengine import ReferenceField
from modules.analyzes.employment.kat_hesap_tipleri.models import KatHesapTipleri
from modules.analyzes.employment.pursantaj_oranlari.models import PursantajOranlari
from modules.organization.models import Organization
class EmploymentAnalysis(Document):
    """MongoDB document holding one employment-analysis record.

    Underscore-prefixed fields are audit/bookkeeping attributes; the
    ``_key_*`` names hold ObjectId references following the project's
    naming convention.
    """
    # Audit fields: creation/modification timestamps and the acting users.
    _created_date = DateTimeField(default=datetime.datetime.utcnow)
    _key_created_user = ObjectIdField()
    _last_modified_date = DateTimeField(default=datetime.datetime.utcnow)
    _key_last_modified_user = ObjectIdField()
    _key_owner_user = ObjectIdField()

    # NOTE(review): stored as a string rather than DateTimeField — confirm
    # the expected date format with the callers.
    analysis_date = StringField(required=True)
    _key_organization = ReferenceField(Organization, required=True)
    # "Kat hesap tipi" (Turkish: floor calculation type) reference.
    _key_kat_hesap_tipi = ReferenceField(KatHesapTipleri, required=True)
    # "Pursantaj oranı" (Turkish: percentage rate) reference.
    _key_pursantaj_orani = ReferenceField(PursantajOranlari, required=True)
    # Unit price ("birim fiyat").
    birim_fiyat = DecimalField(required=True)
| [
"mutlu.erdem@soft-nec.com"
] | mutlu.erdem@soft-nec.com |
36e370f7a59eb4a0f324af58795cd112dc7ceb73 | 9a819fc91e17ef9a44e45cf68e76cf696381d06d | /cdk_examples/cdk.out/asset.50ce0df99f68b771fed9c79fa8d531712d8d56eadb6b19697b42dfa31931f8ff/chardet/cli/chardetect.py | 2f1156136c9af217a753fafdb0dd5b3a7cf52a9b | [] | no_license | Gautam3994/Dark-Knight | aef1d6383e0785130db75e80ed40f544a120579e | 327b2d58851a42da1b707addea73e40fac6a61cc | refs/heads/master | 2022-12-01T11:58:39.857379 | 2020-09-05T18:07:51 | 2020-09-05T18:07:55 | 203,866,327 | 0 | 1 | null | 2022-11-24T09:16:18 | 2019-08-22T20:14:43 | Python | UTF-8 | Python | false | false | 2,764 | py | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from functions.Canary.chardet import __version__
from functions.Canary.chardet import PY2
from functions.Canary.chardet import UniversalDetector
def description_of(lines, name='stdin'):
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    """
    detector = UniversalDetector()
    for raw_line in lines:
        detector.feed(bytearray(raw_line))
        # Stop reading as soon as the detector is confident — particularly
        # useful when a BOM settles the question on the first line.
        if detector.done:
            break
    detector.close()
    verdict = detector.result

    display_name = name
    if PY2:
        # On Python 2 the name is bytes; decode it for display.
        display_name = display_name.decode(sys.getfilesystemencoding(),
                                           'ignore')

    if not verdict['encoding']:
        return '{0}: no result'.format(display_name)
    return '{0}: {1} with confidence {2}'.format(
        display_name, verdict['encoding'], verdict['confidence'])
def main(argv=None):
    """
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    """
    # Get command line arguments
    parser = argparse.ArgumentParser(
        description="Takes one or more file paths and reports their detected \
encodings")
    # Files are opened in binary mode: the detector needs raw bytes. On
    # Python 3, default to stdin's underlying byte buffer for the same reason.
    parser.add_argument('input',
                        help='File whose encoding we would like to determine. \
(default: stdin)',
                        type=argparse.FileType('rb'), nargs='*',
                        default=[sys.stdin if PY2 else sys.stdin.buffer])
    parser.add_argument('--version', action='version',
                        version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)

    for f in args.input:
        # Warn (on stderr, so it doesn't pollute the report) when reading
        # interactively from a terminal.
        if f.isatty():
            print("You are running chardetect interactively. Press " +
                  "CTRL-D twice at the start of a blank line to signal the " +
                  "end of your input. If you want help, run chardetect " +
                  "--help\n", file=sys.stderr)
        print(description_of(f, f.name))


if __name__ == '__main__':
    main()
| [
"gautam3994@gmail.com"
] | gautam3994@gmail.com |
9a0dd6f10c652e2de3589f21c50e5edd3f286226 | 389569a591284a2adcdc38046114e7b1038afd94 | /python-script/trax/remove_duplicate.py | 01e34ae3b7890c0987c7795f288b8816e515b37c | [] | no_license | xytysingle/AnnotationTool | b797daf2fd472f602341b16f24fb1ed9b702aef1 | a217d4376ceee739e0d8c43515c403133982e86e | refs/heads/master | 2020-04-11T18:16:10.438919 | 2019-07-31T10:21:18 | 2019-07-31T10:21:18 | 161,992,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Kong Haiyang
@Date: 2018-05-21 09:40:31
"""
from __future__ import absolute_import, division, print_function
import hashlib
import os
from os import walk
import time
from PIL import Image
from tqdm import tqdm
# Source directory scanned for duplicate .jpg images (leaf dirs only).
path1 = '/Users/lingmou/Desktop/ๅ ๅคด้กน็ฎ/duitou2/'
# Optional second source directory (disabled). The non-ASCII path in the
# original comment was mangled by an encoding round trip:
# path2 = '/home/konghaiyang/kong/scene_classifier/data/...'
tt = time.time()

# Collect every .jpg living in a leaf directory (one with no subdirectories),
# skipping hidden files.
files = []
for dirpath, dirnames, filenames in walk(path1):
    if dirnames:
        continue
    _files = [os.path.join(dirpath, f) for f in os.listdir(dirpath)
              if f.endswith('.jpg') and not f.startswith('.')]
    files.extend(_files)

# (A second walk over path2 was here, intentionally left disabled.)

# Detect duplicates by hashing the decoded pixel bytes (MD5 of im.tobytes()),
# which matches images with identical pixels even if re-encoded differently.
md5_set = set()
files_dict = {}
for f in tqdm(files):
    im = Image.open(f)
    md5 = hashlib.md5(im.tobytes()).hexdigest()
    files_dict.setdefault(md5, []).append(f)
    if md5 in md5_set:
        # Duplicate of an earlier file: print the whole group, drop this
        # entry from the bookkeeping dict, and delete the file from disk.
        # NOTE(review): destructive — the later occurrence is removed.
        print('\n'+'='*20+md5+'='*20)
        for fd in files_dict[md5]:
            print(fd)
        files_dict[md5].remove(f)
        os.remove(f)
        print('Remove {}.'.format(f))
    else:
        md5_set.add(md5)

# Total elapsed seconds.
print(time.time()-tt)
| [
"2463072824@qq.com"
] | 2463072824@qq.com |
d3c4e601c45ab7daa7422417c26f7028a4043b80 | cd78d84441e69c1fc40b6a6e9e235e7cf6882454 | /python/110.balanced_binary_tree.py | e5b1cf1d1f013903993e66591250ea684ad21e5e | [] | no_license | buy/leetcode | 53a12d4e0298284a5a2034c88353d0dc195aa66c | da0e834e3f2e3016396fffc96ef943ab9ec58ea4 | refs/heads/master | 2021-01-13T01:48:01.176632 | 2015-06-14T06:17:17 | 2015-06-14T06:17:17 | 31,863,627 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | # Given a binary tree, determine if it is height-balanced.
# For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {boolean}
    def isBalanced(self, root):
        """Return True iff every node's subtree heights differ by at most 1.

        Single post-order pass (O(n)); the original recomputed subtree
        heights at every node, which is O(n^2) on degenerate trees.
        """
        return self._height_or_fail(root) >= 0

    def _height_or_fail(self, node):
        """Return the height of *node*, or -1 if its subtree is unbalanced."""
        if not node:
            return 0
        left = self._height_or_fail(node.left)
        if left < 0:
            return -1
        right = self._height_or_fail(node.right)
        if right < 0:
            return -1
        if abs(left - right) > 1:
            return -1
        return 1 + max(left, right)

    def getHeight(self, root):
        """Return the height of *root* (0 for an empty tree).

        Kept with its original O(n) recursive form for any external callers.
        """
        if not root:
            return 0
        return 1 + max(self.getHeight(root.left), self.getHeight(root.right))
| [
"cliu@groupon.com"
] | cliu@groupon.com |
74fca21f256479a3865581a72ae58c5bc5d98d3b | 8268afb15bebb260a65c9b4c54ff3a6eb709c7c3 | /denoiser/lib_denoiser.py | 6d5d8e12ffdb9c77dea02f213f5c263bd84144f5 | [] | no_license | edublancas/yass-private | 2dc4b5b520c7effbece6f93bf74d39af918b5e29 | 33e9e83ccd6cc7a333b0199c11cdd81363ced846 | refs/heads/master | 2021-03-24T12:48:12.715117 | 2018-12-24T20:22:29 | 2018-12-24T20:22:29 | 108,217,478 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,548 | py | import numpy as np
from yass.templates import TemplatesProcessor
from yass.templates import util as templates_util
class SpikeIndex:
    """Wraps spike index logic.

    The wrapped array is used throughout as shape (n_spikes, 2): column 0
    holds spike times (sample indices into the recording) and column 1
    holds channel ids.
    """
    def __init__(self, spike_index_array, channel_index=None, geometry=None,
                 neighbors=None):
        # (n_spikes, 2) array: [:, 0] = spike time, [:, 1] = channel id.
        self.arr = spike_index_array
        # channel_index[c] selects the columns (channels) neighboring c —
        # presumably one fixed-length row of neighbor ids per channel;
        # TODO confirm against the probe geometry.
        self.channel_index = channel_index
        self.geometry = geometry
        self.neighbors = neighbors

    def get_spike_index_for_channel(self, channel):
        # Restrict to spikes detected on *channel*; metadata is shared.
        sub = self.arr[self.arr[:, 1] == channel]
        return SpikeIndex(sub, self.channel_index, self.geometry,
                          self.neighbors)

    def get_times_from_channel(self, channel, max_ptp=None, rec=None,
                               waveform_length=51):
        """Spike times on *channel*; if *max_ptp* is given, keep only spikes
        whose peak-to-peak amplitude (computed from *rec*) is <= max_ptp."""
        if max_ptp is None:
            return self.arr[self.arr[:, 1] == channel][:, 0]
        else:
            times = self.arr[self.arr[:, 1] == channel][:, 0]
            # add_offset=True shifts every snippet 20 samples left — see
            # module-level read_waveforms.
            spikes = read_waveforms(rec, times, waveform_length,
                                    random_shift=False,
                                    add_offset=True)
            # NOTE(review): read_waveforms silently drops out-of-range
            # times, so `ptps` can be shorter than `times`, which would
            # misalign (or break) this boolean mask — confirm.
            ptps = templates_util.ptps(spikes)
            return times[ptps <= max_ptp]

    def get_times(self):
        # All spike times (first column).
        return self.arr[:, 0]

    @property
    def shape(self):
        return self.arr.shape

    @property
    def n_unique_channels(self):
        # Number of distinct channels that have at least one spike.
        return len(np.unique(self.arr[:, 1]))

    def count_spikes_per_channel(self):
        """Return a dict mapping channel id -> number of spikes on it."""
        chs, counts = np.unique(self.arr[:, 1], return_counts=True)
        return {ch: count for ch, count in zip(chs, counts)}

    def read_waveforms_from_channel(self, rec, channel, waveform_length=51,
                                    random_shift=False,
                                    only_neighbors=True):
        """Read waveforms from rec using an array of spike times.

        If *channel* is None, snippets are cut around every spike; with
        only_neighbors=True only the channels neighboring *channel* are kept.
        """
        n_obs, n_channels = rec.shape
        if channel is not None:
            idxs = self.get_times_from_channel(channel)
            # print(idxs, idxs.shape)
        else:
            idxs = self.get_times()
            # print(idxs, idxs.shape)
        # Hard-coded 7: assumes every channel_index row lists exactly 7
        # neighbor channels — TODO confirm.
        out = np.empty((len(idxs), waveform_length, 7 if only_neighbors else n_channels))
        half = int((waveform_length - 1)/2)
        for i, idx in enumerate(idxs):
            if random_shift:
                offset = np.random.randint(-half, half)
            else:
                offset = 0
            # NOTE(review): unlike read_waveforms below, there is no bounds
            # check here — spikes near the recording edges will produce
            # short/empty slices and a shape-mismatch error.
            if only_neighbors:
                out[i, :] = rec[idx-half + offset:idx+half+1 + offset, self.channel_index[channel]]
            else:
                out[i, :] = rec[idx-half + offset:idx+half+1 + offset]
        return out
# FIXME: remove duplicated logic
def read_waveforms(rec, times, waveform_length, random_shift=False, add_offset=False):
    """Cut a fixed-length waveform snippet out of *rec* around each spike time.

    Parameters
    ----------
    rec : np.ndarray, shape (n_observations, n_channels)
        Recording to slice.
    times : sequence of int
        Spike times (sample indices) to cut around.
    waveform_length : int
        Samples per snippet; snippets are centered on the (shifted) time.
    random_shift : bool
        If True, jitter each snippet by a random offset in [-3, 3).
    add_offset : bool
        If True, shift every snippet 20 samples to the left first.

    Returns
    -------
    np.ndarray, shape (n_valid, waveform_length, n_channels)
        Snippets for the times that fit entirely inside the recording;
        out-of-range times are dropped (and reported on stdout).
    """
    n_obs = rec.shape[0]
    half = int((waveform_length - 1) / 2)

    snippets = np.empty((len(times), waveform_length, rec.shape[1]))
    keep = np.ones(len(times))

    for j, spike_time in enumerate(times):
        shift = -20 if add_offset else 0
        if random_shift:
            shift += np.random.randint(-3, 3)
        start = spike_time - half + shift
        stop = spike_time + half + 1 + shift
        if start >= 0 and stop <= n_obs:
            snippets[j] = rec[start:stop]
        else:
            # Row j stays uninitialized garbage but is filtered out below.
            keep[j] = 0
            print('invalid time')

    return snippets[keep.astype(bool)]
return out[valid_time.astype(bool)] | [
"fkq8@blancas.io"
] | fkq8@blancas.io |
0130e5fb78d51a55447ac03623a3d816d1707af0 | c1ee8f22ece4fc39cb94fe19832fcba8e45cf5bc | /ํ๋ก๊ทธ๋๋จธ์ค/์ฒด์ก๋ณต.py | 6418eba937f097af343006fbd58235dfaf80fd2e | [] | no_license | JeongHanJun/BOJ | ae6b1c64c5b3226deef2708ae447aa1225333a92 | a865624fb0a9291b68f99af8535f708554fa0b41 | refs/heads/master | 2023-03-31T02:22:58.974437 | 2021-04-02T02:43:57 | 2021-04-02T02:43:57 | 258,809,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | # ์ฒด์ก๋ณต
'''
# testcase #11 ์คํจ ์ฝ๋
def solution(n, lost, reserve):
answer = 0
students = [1] * (n+2)
for i in reserve:
students[i] += 1
for i in lost:
students[i] -= 1
for i in range(1, n+1):
if students[i] == 2:
if students[i-1] == 0:
students[i] -= 1
students[i-1] += 1
if students[i+1] == 0:
students[i] -= 1
students[i+1] += 1
answer = n - students.count(0)
return answer
'''
'''
# ์ฑ๊ณต์ฝ๋ 1
def solution(n, lost, reserve):
for i in reserve:
if i in lost:
lost.remove(i)
reserve.remove(i)# ์ฌ์ ๋ถ์ด ์๋ ํ์์ด ์ฒด์ก๋ณต์ ์์ด๋ฒ๋ ธ์ ๊ฒฝ์ฐ์ ์ค๋ณต์ ์ ์ธ
answer = n - len(lost)# answer ์๋ ์ค๋ณต์์ด ์ ์ฒด ํ์์ค์ ์ฒด์ก๋ณต์ ์์ด ๋ฒ๋ฆฌ์ง ์์ ํ์์๊ฐ ์ ์ฅ๋์์
for i in lost:
if i in reserve:
answer += 1
reserve.remove(i)
elif i-1 in reserve:# ๋จ๋ ํ์์ด ์๊ธฐ ์์ฌ๋์๊ฒ
answer += 1
reserve.remove(i-1)
elif i+1 in reserve:# ๋จ๋ ํ์์ด ์๊ธฐ ๋ท์ฌ๋์๊ฒ
answer += 1
reserve.remove(i+1)
return answer
'''
# ์ฑ๊ณต์ฝ๋ 2 set์ ํ์ฉํ ๋น ๋ฅธ ์ฝ๋
# lost set์ ๋ํด ๋ชจ๋ ์์๋ค์ +0, +1, -1 ์ ์์ฐจ์ ์ผ๋ก ๊ณ์ฐํ๊ณ , ์ฐจ์งํฉ์ผ๋ก ์ค๋ณต์ ๋ฒ๋ฆฌ๋๊ฒ์ ๋ฐ๋ณต
def solution(n, lost, reserve):
    """Return how many of the *n* students can attend gym class.

    Students in *reserve* own a spare uniform and may lend it to an
    adjacent student (number +/- 1) listed in *lost*; a student who both
    lost a uniform and owns a spare keeps the spare. Greedy matching in
    ascending order, preferring the left neighbor, is optimal here.
    """
    # Students appearing in both lists cancel each other out first.
    needs = set(lost) - set(reserve)
    spares = set(reserve) - set(lost)

    # Each lender helps the left neighbor first so the right neighbor
    # remains available for the next lender.
    for lender in sorted(spares):
        if lender - 1 in needs:
            needs.discard(lender - 1)
        elif lender + 1 in needs:
            needs.discard(lender + 1)

    return n - len(needs)
return n - len(lost)
# Sample cases from the problem statement; expected output: 5, 4, 2.
n1 = 5
l1 = [2, 4]
r1 = [1, 3, 5]

n2 = 5
l2 = [2, 4]
r2 = [3]

n3 = 3
l3 = [3]
r3 = [1]

print(solution(n1, l1, r1))
print(solution(n2, l2, r2))
print(solution(n3, l3, r3))
"noreply@github.com"
] | JeongHanJun.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.