Dataset schema (one row per source file):

| column | dtype | length / range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable ⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable ⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable ⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
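Each row below is one source file with these columns filled in. As a minimal sketch (assuming the table is published as a Hugging Face `datasets` dataset; `org/dataset-name` is a placeholder, not the real identifier), the rows could be streamed and filtered on the metadata columns like this:

```python
# Minimal sketch, assuming the rows above are served as a Hugging Face dataset.
# "org/dataset-name" is a placeholder, not the real dataset identifier.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)

for row in ds:
    # Each row carries the columns listed in the schema above.
    if row["license_type"] == "permissive" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
        print(row["content"][:200])  # first 200 characters of the file
        break
```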
7b74ecf9cdd1b909e24e5e576f8de6229d90b73b
|
74cab51fa52be54dfd720ba130f0c0435d99c8cd
|
/xray/scratch/00-VICTRE_pipeline/01-pipeline_codes/x-ray_runs/run_xray_jobs.py
|
9d528cc33e7ea4057dd677b530e765a726b82271
|
[] |
no_license
|
shenghh2015/fda_breast_phantom
|
a57f82fc280783b1c3ca48308565beeff87e6530
|
dfb9d8613ed5cf0e5812a674027a5d200ffc91bd
|
refs/heads/main
| 2023-06-04T10:51:09.736674
| 2021-06-17T19:21:27
| 2021-06-17T19:21:27
| 377,733,048
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
import os
import argparse
#os.chdir('/scratch/00-VICTRE_pipeline/00-base_input_files/x-ray_runs/')
parser = argparse.ArgumentParser()
parser.add_argument("--num_jobs",type=int)
parser.add_argument("--gpu_num",type=int)
args = parser.parse_args()
path = os.getcwd()
print('The current path:{}'.format(path))
os.chdir('/scratch/00-VICTRE_pipeline/01-pipeline_codes/x-ray_runs')
cmd = 'python2 run_xray_alone.py {}'.format(args.gpu_num)
#cmd = 'python2 /scratch/00-VICTRE_pipeline/01-pipeline_codes/x-ray_runs/run_xray_SP.py {}'.format(args.gpu_num)
#nb_iter = 10000
nb_iter = args.num_jobs
for i in range(nb_iter):
    os.system(cmd)
|
[
"shenghh2015@gmail.com"
] |
shenghh2015@gmail.com
|
57e3696503c3443d6e57294c7da7aefd27b261f9
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res/scripts/client/gui/shared/utils/requesters/requestscontroller.py
|
be2c8caecc9dbab9b1fc757b83af66b425b517d0
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788
| 2015-11-18T11:33:37
| 2015-11-18T11:33:37
| 46,414,438
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 5,567
|
py
|
# 2015.11.18 11:57:02 Central Europe (standard time)
# Embedded file name: scripts/client/gui/shared/utils/requesters/RequestsController.py
from functools import partial
import BigWorld
from debug_utils import LOG_ERROR, LOG_DEBUG
from shared_utils import safeCancelCallback
from gui.shared.rq_cooldown import RequestCooldownManager, REQUEST_SCOPE
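# _NoCooldownsManager is a no-op cooldown policy; RequestsController below queues outgoing
# server requests, enforces per-request-type cooldowns, and cancels waiters that time out.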
class _NoCooldownsManager(RequestCooldownManager):
def __init__(self):
super(_NoCooldownsManager, self).__init__(REQUEST_SCOPE.GLOBAL)
def lookupName(self, rqTypeID):
return str(rqTypeID)
def getDefaultCoolDown(self):
return 0.0
class RequestsController(object):
def __init__(self, requester, cooldowns = _NoCooldownsManager()):
self._requester = requester
self._cooldowns = cooldowns
self._waiters = {}
self._rqQueue = []
self._rqCallbackID = None
self._rqCtx = None
self._rqHandler = None
return
def fini(self):
self.stopProcessing()
if self._requester:
self._requester.fini()
self._requester = None
return
def stopProcessing(self):
self._rqQueue = []
self._clearWaiters()
self._clearDelayedRequest()
if self._requester is not None:
self._requester.stopProcessing()
return
def request(self, ctx, callback = lambda *args: None, allowDelay = None):
LOG_DEBUG('Send server request', self.__class__.__name__, ctx, callback, allowDelay)
if allowDelay is None:
allowDelay = bool(self._cooldowns._commonCooldown)
requestType = ctx.getRequestType()
handler = self._getHandlerByRequestType(requestType)
if handler:
cooldown = ctx.getCooldown()
def _doRequest():
self._clearDelayedRequest()
cb = partial(self._callbackWrapper, requestType, callback, cooldown)
if handler(ctx, callback=cb):
self._waiters[requestType] = BigWorld.callback(self._getRequestTimeOut(), partial(self._onTimeout, cb, requestType, ctx))
self._cooldowns.process(requestType, cooldown)
if not allowDelay:
if self._cooldowns.validate(requestType, cooldown):
self._doRequestError(ctx, 'cooldown', callback)
else:
_doRequest()
else:
self._rqQueue.append((requestType, ctx, _doRequest))
self._doNextRequest()
else:
self._doRequestError(ctx, 'handler not found', callback)
return
def isInCooldown(self, requestTypeID):
return self._cooldowns.isInProcess(requestTypeID)
def getCooldownTime(self, requestTypeID):
return self._cooldowns.getTime(requestTypeID)
def isProcessing(self, requestTypeID):
return requestTypeID in self._waiters
def hasHandler(self, requestTypeID):
return self._getHandlerByRequestType(requestTypeID) is not None
def _doNextRequest(self, adjustCooldown = None):
if len(self._rqQueue) and self._rqCallbackID is None:
requestType, ctx, request = self._rqQueue.pop(0)
cooldownLeft = self._cooldowns.getTime(requestType)
if cooldownLeft:
self._loadDelayedRequest(cooldownLeft, ctx, request)
else:
request()
elif adjustCooldown and self._rqCallbackID is not None:
self._loadDelayedRequest(adjustCooldown, self._rqCtx, self._rqHandler)
return
def _getHandlerByRequestType(self, requestTypeID):
raise NotImplementedError
def _getRequestTimeOut(self):
return 30.0
def _callbackWrapper(self, requestType, callback, cooldown, *args):
callbackID = self._waiters.pop(requestType, None)
if callbackID is not None:
safeCancelCallback(callbackID)
self._cooldowns.adjust(requestType, cooldown)
if callback:
callback(*args)
self._doNextRequest(adjustCooldown=cooldown)
return
def _clearWaiters(self):
if self._waiters is not None:
while len(self._waiters):
_, callbackID = self._waiters.popitem()
safeCancelCallback(callbackID)
return
def _onTimeout(self, cb, requestType, ctx):
LOG_ERROR('Request timed out', self, requestType, ctx)
self._doRequestError(ctx, 'time out', cb)
def _doRequestError(self, ctx, msg, callback = None):
if self._requester:
self._requester._stopProcessing(ctx, msg, callback)
LOG_ERROR(msg, ctx)
return False
def _loadDelayedRequest(self, seconds, ctx, request):
self._clearDelayedRequest()
self._rqCtx = ctx
self._rqHandler = request
self._rqCtx.startProcessing()
self._rqCallbackID = BigWorld.callback(seconds, request)
def _clearDelayedRequest(self):
if self._rqCallbackID is not None:
safeCancelCallback(self._rqCallbackID)
self._rqCallbackID = None
if self._rqCtx is not None:
self._rqCtx.stopProcessing()
self._rqCtx = None
if self._rqHandler is not None:
self._rqHandler = None
return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\utils\requesters\requestscontroller.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:57:03 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
95ce44a1ea35c2f7d70624237f2a1f58ee321bed
|
05d84ca54c3866a546dc9eb03bf904552845b114
|
/main/migrations/0005_auto_20181110_0140.py
|
30b6a19f4a470da99c33289b5c2a8bf8c9fb0dfe
|
[] |
no_license
|
chriscauley/ur-conf
|
43ba6793995b4a17308844c6ccaeb7e92f50eb1e
|
17819b750117ce3cb8808d4a6c8702b5234c8dbb
|
refs/heads/master
| 2022-12-13T08:38:05.026612
| 2020-05-11T13:10:53
| 2020-05-11T13:10:53
| 155,024,014
| 0
| 0
| null | 2022-12-09T20:29:49
| 2018-10-28T01:13:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 368
|
py
|
# Generated by Django 2.1.2 on 2018-11-10 01:40
from django.db import migrations
class Migration(migrations.Migration):

    dependencies = [
        ('main', '0004_auto_20181109_1624'),
    ]

    operations = [
        migrations.RenameField(
            model_name='achievement',
            old_name='description',
            new_name='text',
        ),
    ]
|
[
"chris@lablackey.com"
] |
chris@lablackey.com
|
cf90adffa041ea62709451a2f6cf519949fce841
|
b677894966f2ae2d0585a31f163a362e41a3eae0
|
/ns3/ns-3.26/src/olsr/examples/wscript
|
35a0d90e4cb07c3aaf83654d02db5ca1a0ebcde7
|
[
"LicenseRef-scancode-free-unknown",
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
cyliustack/clusim
|
667a9eef2e1ea8dad1511fd405f3191d150a04a8
|
cbedcf671ba19fded26e4776c0e068f81f068dfd
|
refs/heads/master
| 2022-10-06T20:14:43.052930
| 2022-10-01T19:42:19
| 2022-10-01T19:42:19
| 99,692,344
| 7
| 3
|
Apache-2.0
| 2018-07-04T10:09:24
| 2017-08-08T12:51:33
|
Python
|
UTF-8
|
Python
| false
| false
| 466
|
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
def build(bld):
    obj = bld.create_ns3_program('simple-point-to-point-olsr',
                                 ['point-to-point', 'internet', 'olsr', 'applications', 'wifi'])
    obj.source = 'simple-point-to-point-olsr.cc'

    obj = bld.create_ns3_program('olsr-hna',
                                 ['core', 'mobility', 'wifi', 'csma', 'olsr'])
    obj.source = 'olsr-hna.cc'
|
[
"you@example.com"
] |
you@example.com
|
|
8a90f1562daaf6f29ced5e9afb0f9c2d898e3578
|
7a5a3eb831825fb0c0e80957278d95332e5f2258
|
/core/RIG/Control/mirrorCtlShp/mirrorCtlShp.py
|
cf45e216ce822ca1791c86f01caf11de51c3ea90
|
[] |
no_license
|
lefan2016/MPToolkit
|
e76cee2be10558dd97f228789d0e05bca4deebfc
|
5b3d1cf7a83b9eeda57919722e353ada7a4f7f6b
|
refs/heads/master
| 2020-04-23T11:05:02.816596
| 2015-12-17T06:26:32
| 2015-12-17T07:08:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,722
|
py
|
#========================================
# author: changlong.zang
# mail: zclongpop@163.com
# date: Tue, 05 May 2015 11:44:00
#========================================
import os.path, re, pymel.core
import maya.cmds as mc
from mpUtils import scriptTool, uiTool, mayaTool
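# Maya rigging tool: the MirrorControlShp UI drives mirrorControlShape(), which duplicates
# NURBS control shapes from one side (L/R), flips them across the chosen axis, and
# re-parents them onto the matching control on the other side.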
#--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
windowClass, baseClass = uiTool.loadUi(os.path.join(scriptTool.getScriptPath(), 'mirrorCtlShp.ui'))
class MirrorControlShp(windowClass, baseClass):
def __init__(self, parent=uiTool.getMayaWindow()):
if uiTool.windowExists('mirrorControlShapeUI'):return
super(MirrorControlShp, self).__init__(parent)
self.setupUi(self)
self.show()
def on_btn_mirror_clicked(self, click=None):
if click == None:return
controlType = str(self.fld_controlType.text())
flipAxis = 'X'
if self.rdn_filpX.isChecked():
flipAxis = 'X'
elif self.rdn_filpY.isChecked():
flipAxis = 'Y'
else:
flipAxis = 'Z'
if self.rdn_lefttoright.isChecked():
mirrorControlShape(controlType, 'L', 'R', flipAxis)
else:
mirrorControlShape(controlType, 'R', 'L', flipAxis)
@mayaTool.undo_decorator
def mirrorControlShape(typ, source, targent, flipAxis):
if len(typ) == 0:return
if source not in 'LR':return
if source == targent:return
#- get source side controls
all_controls = ' '.join(mc.listRelatives(mc.ls(type='nurbsCurve'), p=True, path=True))
matched_controls = re.findall('\S*%s_\w+_%s_\d+'%(source, typ), all_controls)
for ctl in matched_controls:
#- get targent control
targentControl = re.sub('%s_'%source, '%s_'%targent, ctl)
if not mc.objExists(targentControl):continue
#- duplicate shape
tempx = mc.duplicate(ctl, po=True)
mc.parent(mc.listRelatives(ctl, s=True, path=True), tempx, s=True, add=True)
#- make Temp
Temp = pymel.core.PyNode(mc.duplicate(tempx, rc=True)[0])
for a in 'trs':
for b in 'xyz':
attr = a + b
mc.setAttr('%s.%s'%(Temp, attr), l=False, k=True, cb=False)
#- close max min value controler
mc.transformLimits(Temp.name(), etx=(0, 0),ety=(0, 0),etz=(0, 0),erx=(0, 0),ery=(0, 0),erz=(0, 0))
mc.parent(Temp.name(), w=True)
#- filp
grp = mc.createNode('transform')
sourcePosi = mc.xform(ctl, q=True, ws=True, rp=True)
targenPosi = mc.xform(targentControl, q=True, ws=True, rp=True)
midPoint = [(sourcePosi[0] + targenPosi[0]) / 2,
(sourcePosi[0] + targenPosi[0]) / 2,
(sourcePosi[0] + targenPosi[0]) / 2]
mc.move(midPoint[0], midPoint[1], midPoint[2], grp, a=True)
mc.parent(Temp.name(), grp)
mc.setAttr('%s.s%s'%(grp, flipAxis.lower()), -1)
#- freeze transformations
mc.parent(Temp.name(), targentControl)
mc.makeIdentity(Temp.name(), apply=True, t=True, r=True, s=True)
#- get original shapes
originalShapes = mc.listRelatives(targentControl, s=True, path=True, type='nurbsCurve')
#- parent new shapes
shapes = mc.listRelatives(Temp.name(), s=True, path=True, type='nurbsCurve')
for shp in shapes:
mc.setAttr('%s.ovc'%shp, mc.getAttr('%s.ovc'%originalShapes[0]))
mc.delete(originalShapes)
mc.parent(shapes, targentControl, s=True, r=True)
for shp in shapes:
mc.rename(shp, '%sShape'%targentControl)
#- delete temp
mc.delete(tempx, Temp.name(), grp)
|
[
"zclongpop123@163.com"
] |
zclongpop123@163.com
|
35a1716ba124832d9905c592d9e96d33a27971ee
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/dialogflow/v2/dialogflow-v2-py/google/cloud/dialogflow_v2/services/intents/pagers.py
|
f5030b36777fe129d348bab6f6e1d7c6da9472c0
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,626
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.dialogflow_v2.types import intent
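# Synchronous and asynchronous pagers that wrap ListIntentsResponse objects and
# transparently request further pages while iterating over ``intents``.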
class ListIntentsPager:
"""A pager for iterating through ``list_intents`` requests.
This class thinly wraps an initial
:class:`google.cloud.dialogflow_v2.types.ListIntentsResponse` object, and
provides an ``__iter__`` method to iterate through its
``intents`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListIntents`` requests and continue to iterate
through the ``intents`` field on the
corresponding responses.
All the usual :class:`google.cloud.dialogflow_v2.types.ListIntentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., intent.ListIntentsResponse],
request: intent.ListIntentsRequest,
response: intent.ListIntentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.dialogflow_v2.types.ListIntentsRequest):
The initial request object.
response (google.cloud.dialogflow_v2.types.ListIntentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = intent.ListIntentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[intent.ListIntentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[intent.Intent]:
for page in self.pages:
yield from page.intents
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListIntentsAsyncPager:
"""A pager for iterating through ``list_intents`` requests.
This class thinly wraps an initial
:class:`google.cloud.dialogflow_v2.types.ListIntentsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``intents`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListIntents`` requests and continue to iterate
through the ``intents`` field on the
corresponding responses.
All the usual :class:`google.cloud.dialogflow_v2.types.ListIntentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[intent.ListIntentsResponse]],
request: intent.ListIntentsRequest,
response: intent.ListIntentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.dialogflow_v2.types.ListIntentsRequest):
The initial request object.
response (google.cloud.dialogflow_v2.types.ListIntentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = intent.ListIntentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[intent.ListIntentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[intent.Intent]:
async def async_generator():
async for page in self.pages:
for response in page.intents:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
6d4f64ec2045276b56dba14ee6b4ad2b1dff62c4
|
dab891f601d33dbab869672eb035f6bb1c79c8d8
|
/dirs/crons.py
|
683aabcdf865b6fbc0b22a2872b37ecf2551f851
|
[] |
no_license
|
codeforcauseorg-archive/Py-Boot-2021
|
c8835ed4af2b2cdd1f80d5dff9a12676ef613ea6
|
6734e7f9b0a8e3c1eb46de11d095fae1c4c49ec7
|
refs/heads/main
| 2023-07-17T23:14:47.204035
| 2021-08-24T06:26:02
| 2021-08-24T06:26:02
| 392,699,382
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from crontab import CronTab
cron = CronTab(user='ganga')
job = cron.new(command='python file-dispenser.py')
job.minute.every(1)
cron.write()
|
[
"anujgargcse@gmail.com"
] |
anujgargcse@gmail.com
|
9ad6be4c78e74806f22c40d8736604e560f03069
|
59f635ca237f829c9f420ea7e97e150845e7c131
|
/src/fuzzyLogicRule/variable/variable.py
|
36d970ec8f610c82ef214c5a451c40efefa22bff
|
[] |
no_license
|
Lukeeeeee/FuzzyInferenceWithDRL
|
38256d4d5f01b552779765a5016ef444977114a4
|
5dbe4ded544d779a81d72917f2cef03c67b0c4d6
|
refs/heads/master
| 2021-03-24T02:48:17.175094
| 2017-09-12T01:19:12
| 2017-09-12T01:19:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
class Variable(object):
    def __init__(self, name, mf, range):
        self.name = name
        self.mf = mf
        self.linguistic_label = []
        for mf_i in mf:
            self.linguistic_label.append(mf_i.name)
        self.upper_range = range[1]
        self.lower_range = range[0]
        self._value = {}
        self._degree = {}
        for linguistic_i in self.linguistic_label:
            self.degree[linguistic_i] = 0.0

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, new_val):
        self._value = new_val

    @property
    def degree(self):
        return self._degree

    @degree.setter
    def degree(self, new_degree):
        self._degree = new_degree
|
[
"lukedong123@gmail.com"
] |
lukedong123@gmail.com
|
eb15b4125424c6865b42ca535067bf7e590e2d1e
|
c697122555fa82d993f37c89f17ba98dd5fe502f
|
/src/pyams_table/tests/__init__.py
|
7432d6657437b20da080ab490faf7c03d172abd7
|
[
"ZPL-2.1"
] |
permissive
|
Py-AMS/pyams-table
|
d9cdc9019a9aebc3e685eef243214f0ebc65301c
|
5b94a4cb2fa8874372f5ea40a202940881ba86b9
|
refs/heads/master
| 2021-07-11T11:31:36.772976
| 2021-03-09T12:49:10
| 2021-03-09T12:49:10
| 234,533,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
#
# Copyright (c) 2015-2019 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""
Generic test cases for pyams_table doctests
"""
__docformat__ = 'restructuredtext'
import os
import sys
def get_package_dir(value):
"""Get package directory"""
package_dir = os.path.split(value)[0]
if package_dir not in sys.path:
sys.path.append(package_dir)
return package_dir
|
[
"thierry.florac@onf.fr"
] |
thierry.florac@onf.fr
|
73a8edbc10cf9a95bdf7fe0998a45479bc51ee6a
|
2e9e994e17456ed06970dccb55c18dc0cad34756
|
/atcoder/abc/075/D/D.py
|
bb36bb97e08ed60220e3bd3641780bbfbf444e97
|
[] |
no_license
|
ksomemo/Competitive-programming
|
a74e86b5e790c6e68e9642ea9e5332440cb264fc
|
2a12f7de520d9010aea1cd9d61b56df4a3555435
|
refs/heads/master
| 2020-12-02T06:46:13.666936
| 2019-05-27T04:08:01
| 2019-05-27T04:08:01
| 96,894,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
def main():
    N, K = map(int, input().split())
    x, y = zip(*(
        map(int, input().split())
        for _ in range(N)
    ))
    f(N, K, x, y)


def f(N, K, x, y):
    """
    Constraints
    * 2 <= K <= N <= 50
    * -10^9 <= xi, yi <= 10^9 (1 <= i <= N)
    * xi != xj (1 <= i < j <= N)
    * yi != yj (1 <= i < j <= N)
    * All input values are integers. (added at 21:50)

    Consider rectangles whose sides are parallel to the X or Y axis and whose
    interior contains at least K of the N points.
    -> find the minimum area.

    From the constraints:
    brute-forcing the whole xy range would TLE;
    the points never coincide and there are at most 50 of them,
    -> so computing from the point coordinates should be fast enough.

    From example 1:
    when all points are included,
    abs(x_max-x_min) * abs(y_max-y_min)
    => note added later:
    I understood this but never drew a picture, so I missed that points lie on
    the rectangle's edges rather than at its corners...
    and ended up combining points (I should have noticed it is a combination
    of coordinates instead).

    From example 2:
    a single point gives 1, but a 1*1 rectangle can contain up to 4 points
    -> in this example, any 2 or more of the 4 points give area 1
    (x_2nd-x_min)*(y_max-y_min)
    -> the points excluding x_max are contained
    -> 50*50 candidates are not enough

    y
    ↑
    |*...
    |.*..
    |...*
    |*...
    ------→x
    (0,0)
    (0,4)
    (1,3)
    (3,1)

    Number of rectangles from connecting pairs of points: O(N*N)
    Cost of counting the points inside each rectangle: O(N)
    => O(N^3) => 50^3 => 1.25 * 10^5 => OK

    Areas measured from a position that must contain a point
    => it does not have to start there; that would only make it larger
    =>
    """
    ans = abs(max(x) - min(x)) * abs(max(y) - min(y))
    if K == N:
        print(ans)
        return

    def r(x, y, x_start=0, y_start=0):
        for xi in range(x_start, N):
            for yi in range(y_start, N):
                yield x[xi], y[yi], xi, yi

    # for x1, y1 in zip(x, y):
    #     for x2, y2 in zip(x, y):
    # for x1, y1 in r(x, y):
    #     for x2, y2 in r(x, y):
    sorted_x = sorted(x)
    sorted_y = sorted(y)
    for min_x, min_y, xi, yi in r(sorted_x, sorted_y):
        for max_x, max_y, _, _ in r(sorted_x, sorted_y, x_start=xi+1, y_start=yi+1):
            # s = abs(x1-x2) * abs(y1-y2)
            # if s == 0:
            #     continue
            # min_x = min(x1, x2)
            # max_x = max(x1, x2)
            # min_y = min(y1, y2)
            # max_y = max(y1, y2)
            contains_count = 0
            for x3, y3 in zip(x, y):
                if min_x <= x3 <= max_x and min_y <= y3 <= max_y:
                    contains_count += 1
            if contains_count >= K:
                s = (max_x - min_x) * (max_y - min_y)
                ans = min(ans, s)
    print(ans)


if __name__ == "__main__":
    main()
|
[
"kntsmy@hotmail.co.jp"
] |
kntsmy@hotmail.co.jp
|
5145be87d31768c82f2e05727c0de10bf98fbc32
|
1a4bc1a11fdb3f714f22f5e0e826b47aa0569de2
|
/projects/project03/tests/q3_1_9.py
|
7ba9201ae87f09d05ffc4fe63d8fde616069e11c
|
[] |
no_license
|
taylorgibson/ma4110-fa21
|
201af7a044fd7d99140c68c48817306c18479610
|
a306e1b6e7516def7de968781f6c8c21deebeaf5
|
refs/heads/main
| 2023-09-05T21:31:44.259079
| 2021-11-18T17:42:15
| 2021-11-18T17:42:15
| 395,439,687
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
test = { 'name': 'q3_1_9',
'points': None,
'suites': [ { 'cases': [ { 'code': ">>> genre_and_distances.take(np.arange(7)).group('Genre').index_by('Genre')[my_assigned_genre][0].item('count') >= 4\nTrue",
'hidden': False,
'locked': False},
{'code': ">>> my_assigned_genre_was_correct == (my_assigned_genre == 'thriller')\nTrue", 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
[
"taylorgibson@gmail.com"
] |
taylorgibson@gmail.com
|
29c53c9dd989b92da644998540979d855bafd45c
|
e262e64415335060868e9f7f73ab8701e3be2f7b
|
/.history/Test002/数据类型_20201205183838.py
|
68110a4f1f601b98f39c8628e07060a8e42e6692
|
[] |
no_license
|
Allison001/developer_test
|
6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63
|
b8e04b4b248b0c10a35e93128a5323165990052c
|
refs/heads/master
| 2023-06-18T08:46:40.202383
| 2021-07-23T03:31:54
| 2021-07-23T03:31:54
| 322,807,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,818
|
py
|
# fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']
# print(fruits.count("apple"))
# a = fruits.index("banana",4)
# print(a)
# fruits.reverse()
# print(fruits)
# fruits.append("daka")
# print(fruits)
# print(fruits.sort)
# a = fruits.pop(0)
# print(a)
# print(fruits)
# number = [1,2,45,3,7,24,3]
# print(number.sort(reverse=True))
# from collections import deque
# queue = deque(["Eric", "John", "Michael"])
# queue.append("Terry")
# queue.append("Graham")
# a= queue.popleft()
# print(a)
# b = queue.popleft()
# print(b)
# print(queue)
# number = [1,2,3,4]
# number.append(5)
# number.append(6)
# print(number)
# number.pop()
# number.pop()
# print(number)
# lista = []
# for i in range(1,10):
# lista.append(i**2)
# print(lista)
# number = list(map(lambda x: x**2, range(1,10)))
# print(number)
# number = [i**2 for i in range(1,10)]
# print(number)
# number1= [(x,y) for x in [1,2,3] for y in [3,1,4] if x != y]
# print(number1)
# lis2 = []
# for x in [1,2,3]:
# for y in [3,1,4]:
# if x != y:
# lis2.append(x,y)
# print(number1)
# ver = [1,2,3]
# lista = [i**2 for i in ver]
# print(lista)
# ver1 = [-1,-2,3,4,-5]
# list2 = [i**2 for i in ver1 if i>0]
# print(list2)
# list3 = [abs(i) for i in ver1]
# print(list3)
# freshfruit = [' banana', ' loganberry ', 'passion fruit ']
# ab = [i.strip() for i in freshfruit]
# print(ab)
# list4 =[(x,x**2) for x in range(10)]
# print(list4)
# ver =[[1,2,3],[4,5,6],[7,8,9]]
# list5 = [y for i in ver for y in i]
# print(list5)
# from math import pi
# pia = 1.1323123
# for i in range(6):
# print(round(pia,i))
# list6 = [round(pia,i) for i in range(6)]
# print(list6)
# swap rows and columns
row_col = [
[1,4,7],
[2,5,8],
[3,6,9]
]
list9 = []
for i in row_col:
|
[
"zhangyingxbba@gmail.com"
] |
zhangyingxbba@gmail.com
|
fb9cbb94cc255f67675688194b2756a15c9c4dca
|
9e20dd7cd0105122f0e164aa0966fc37691d5042
|
/card.py
|
be56121910a6aa12020005a8ca6c92c4d01710da
|
[] |
no_license
|
RocketMirror/AtCoder_Practice
|
c650fd3ea1322ecda535e40f6ab63251085cecbe
|
32e4c346b65d670ba6642f815b1a04e8b7c5f241
|
refs/heads/master
| 2023-07-09T14:25:41.052647
| 2021-08-08T17:40:33
| 2021-08-08T17:40:33
| 394,016,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
n = int(input())
dic = {}
for _ in range(n):
    s = str(input())
    if s in dic.keys():
        dic[s] += 1
    else:
        dic[s] = 1
m = int(input())
for _ in range(m):
    t = str(input())
    if t in dic.keys():
        dic[t] -= 1
    else:
        dic[t] = -1
dic = sorted(dic.values(), reverse=True)
if dic[0] < 0:
    print(0)
else:
    print(dic[0])
|
[
"matsuedanaoki@matsuedanaokinoMacBook-Air.local"
] |
matsuedanaoki@matsuedanaokinoMacBook-Air.local
|
ca6d6152b3ba3adb92fb1a3b4b6d72c5875271d5
|
ec5e4dacb30800828ae4d68f9d87db523293ab65
|
/fb_post/views/create_comment/tests/test_case_01.py
|
d3a4ca4366ac755f7266092eef1c459667e60275
|
[] |
no_license
|
raviteja1766/fb_post_learning
|
54022066ba727220433cb72c43458f9cb6164b24
|
889718fc8f138888aea1b66455fa68c000a02091
|
refs/heads/master
| 2022-11-08T17:39:40.275922
| 2020-06-25T11:19:28
| 2020-06-25T11:19:28
| 274,897,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
"""
# TODO: Update test case description
"""
from django_swagger_utils.utils.test import CustomAPITestCase
from . import APP_NAME, OPERATION_NAME, REQUEST_METHOD, URL_SUFFIX
REQUEST_BODY = """
{
"content": "string"
}
"""
TEST_CASE = {
    "request": {
        "path_params": {"post_id": "1234"},
        "query_params": {},
        "header_params": {},
        "securities": {"oauth": {"tokenUrl": "http://auth.ibtspl.com/oauth2/", "flow": "password", "scopes": ["superuser"], "type": "oauth2"}},
        "body": REQUEST_BODY,
    },
}

class TestCase01CreateCommentAPITestCase(CustomAPITestCase):
    app_name = APP_NAME
    operation_name = OPERATION_NAME
    request_method = REQUEST_METHOD
    url_suffix = URL_SUFFIX
    test_case_dict = TEST_CASE

    def test_case(self):
        self.default_test_case()  # Returns response object.
        # Which can be used for further response object checks.
        # Add database state checks here.
|
[
"ravitejak125@gmail.com"
] |
ravitejak125@gmail.com
|
8d2be2f7615f7caa3925f9200b966752a6b53993
|
f878260bbca242a15cd3b069ed40f5c0704a771b
|
/datacat/db/__init__.py
|
7eb57874459ae91af67887207204bc5d2d170520
|
[] |
no_license
|
rshk-archive/datacat-poc-140825
|
2485c561746ea70852fdbfd9b46eee97edc6cda7
|
626ad8b36655c69e93093be1d74cfb503ec9ba19
|
refs/heads/master
| 2020-06-01T04:16:32.752067
| 2014-09-26T14:02:03
| 2014-09-26T14:02:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,146
|
py
|
from collections import MutableMapping
import json
import functools
from flask import g
import psycopg2
import psycopg2.extras
from werkzeug.local import LocalProxy
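# Database layer: psycopg2 connection factory, schema create/drop helpers, per-request
# cached connections (via flask.g), and a MutableMapping wrapper around the ``info``
# key/value table.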
def connect(database, user=None, password=None, host='localhost', port=5432):
conn = psycopg2.connect(database=database, user=user, password=password,
host=host, port=port)
conn.cursor_factory = psycopg2.extras.DictCursor
conn.autocommit = False
return conn
def create_tables(conn):
"""
Create database schema for a given connection.
"""
# We need to be in autocommit mode (i.e. out of transactions)
# in order to create tables / do administrative stuff..
if not conn.autocommit:
raise ValueError("Was expecting a connection with autocommit on")
# ------------------------------------------------------------
# See this: http://stackoverflow.com/questions/18404055
# for creating indices on JSON field items.
#
# We will need to allow defining such indices in the configuration
# but maybe a plugin should be used to handle that..
# ------------------------------------------------------------
with conn.cursor() as cur:
cur.execute("""
CREATE TABLE info (
key CHARACTER VARYING (256) PRIMARY KEY,
value TEXT);
CREATE TABLE dataset (
id SERIAL PRIMARY KEY,
configuration JSON,
ctime TIMESTAMP WITHOUT TIME ZONE,
mtime TIMESTAMP WITHOUT TIME ZONE);
CREATE TABLE resource (
id SERIAL PRIMARY KEY,
metadata JSON,
auto_metadata JSON,
mimetype CHARACTER VARYING (128),
data_oid INTEGER,
ctime TIMESTAMP WITHOUT TIME ZONE,
mtime TIMESTAMP WITHOUT TIME ZONE,
hash VARCHAR(128));
""")
def drop_tables(conn):
if not conn.autocommit:
raise ValueError("Was expecting a connection with autocommit on")
with conn.cursor() as cur:
cur.execute("""
DROP TABLE info;
DROP TABLE dataset;
DROP TABLE resource;
""")
def _cached(key_name):
def decorator(func):
@functools.wraps(func)
def wrapped():
if not hasattr(g, key_name):
setattr(g, key_name, func())
return getattr(g, key_name)
return wrapped
return decorator
# def get_db():
# from flask import current_app
# if not hasattr(g, 'database'):
# g.database = connect(**current_app.config['DATABASE'])
# g.database.autocommit = False
# return g.database
# def get_admin_db():
# from flask import current_app
# if not hasattr(g, 'admin_database'):
# g.admin_database = connect(**current_app.config['DATABASE'])
# g.admin_database.autocommit = True
# return g.admin_database
@_cached('_database')
def get_db():
from flask import current_app
c = connect(**current_app.config['DATABASE'])
c.autocommit = False
return c
@_cached('_admin_database')
def get_admin_db():
from flask import current_app
c = connect(**current_app.config['DATABASE'])
c.autocommit = True
return c
class DbInfoDict(MutableMapping):
def __init__(self, db):
self._db = db
def __getitem__(self, key):
with self._db.cursor() as cur:
cur.execute("""
SELECT * FROM info WHERE "key" = %s;
""", (key,))
row = cur.fetchone()
if row is None:
raise KeyError(key)
return json.loads(row['value'])
def __setitem__(self, key, value):
# Note that the update would be void if anybody deleted
# the key between the two queries! -- but we can be optimistic
# as key deletes are quite infrequent..
value = json.dumps(value)
try:
with self._db, self._db.cursor() as cur:
cur.execute("""
INSERT INTO info (key, value) VALUES (%s, %s)
""", (key, value))
except psycopg2.IntegrityError:
with self._db, self._db.cursor() as cur:
cur.execute("""
UPDATE info SET value=%s WHERE key=%s
""", (value, key))
def __delitem__(self, key):
with self._db, self._db.cursor() as cur:
cur.execute("""
DELETE FROM info WHERE key=%s
""", (key,))
def __iter__(self):
with self._db.cursor() as cur:
cur.execute("SELECT key FROM info;")
for row in cur:
yield row['key']
def iteritems(self):
with self._db.cursor() as cur:
cur.execute("SELECT key, value FROM info;")
for row in cur:
yield row['key'], json.loads(row['value'])
def __len__(self):
with self._db.cursor() as cur:
cur.execute("SELECT count(*) AS count FROM info;")
row = cur.fetchone()
return row['count']
db = LocalProxy(get_db)
admin_db = LocalProxy(get_admin_db)
db_info = LocalProxy(lambda: DbInfoDict(get_db()))
|
[
"redshadow@hackzine.org"
] |
redshadow@hackzine.org
|
13d04fcb0c47f0b5eaa3044125faa9f70fe6f6cf
|
f441d86d1de8e1d75057f5c8c92ae012c2e35b92
|
/GEOS_Util/coupled_diagnostics/verification/levitus/__init__.py
|
23c96548ce4b269997fde00dd498a1783bf51537
|
[] |
no_license
|
ddlddl58/GMAO_Shared
|
95f992e12b926cf9ec98163d6c62bac78e754efa
|
e16ddde5c8fab83429d312f5cff43643d9f84c94
|
refs/heads/master
| 2021-05-20T20:46:26.035810
| 2020-04-01T20:32:10
| 2020-04-01T20:32:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
import os
import scipy as sp
from my_lib import dset, grid
import datetime
from dateutil import rrule
__all__=['ctl']
class Ctl(dset.NCDset):
    def __init__(self):
        name = 'Levitus'
        oceanval = os.environ.get('OCEANVAL',
                                  '/discover/nobackup/projects/gmao/oceanval/verification')
        flist = [oceanval + '/levitus/levitus_grd.nc']
        t = rrule.rrule(rrule.MONTHLY, dtstart=datetime.date(0001, 1, 1), count=12)
        time = sp.array(t[:], dtype='|O')
        # super(Ctl,self).__init__(flist,levname='depth',\
        #                          time=time,name=name,undef=10e11)
        super(Ctl, self).__init__(flist,
                                  time=time, name=name, undef=10e11)
        # Something wrong with levels in a datafile
        lev = sp.array((0, 10, 20, 30, 50, 75, 100, 125, 150, 200, 250, 300, 400, 500, 600,
                        700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1750, 2000, 2500,
                        3000, 3500, 4000, 4500, 5000, 5500))
        lon = self.grid['lon']; lat = self.grid['lat']
        self.grid = grid.Grid(lon=lon, lat=lat, lev=lev)

    def fromfile(self, varname, iind=slice(None), jind=slice(None), kind=slice(None),
                 tind=slice(None), dtype=sp.float32):
        var = super(Ctl, self).fromfile(varname, iind=iind, jind=jind, kind=kind,
                                        tind=tind, maskandscale=False, dtype=dtype)
        # Apply land mask
        var.data = sp.ma.masked_invalid(var.data)
        return var

ctl = Ctl()
|
[
"yury.v.vikhliaev@nasa.gov"
] |
yury.v.vikhliaev@nasa.gov
|
a135db2c9afff8bc59d6785b2b646725fb751016
|
d1376938086c65e8b7d5d0d9618278fd8dfb1cfd
|
/Python/Scripts/j1.py
|
4611b83b23d87d43cd126568360077f5a97b5f03
|
[] |
no_license
|
DataEdgeSystems/Project
|
a6d235b1c0800a9ffb19e6163e69f07918ebce29
|
09ec31b385c7be62275079a7fb1826377d861365
|
refs/heads/master
| 2022-11-20T18:39:22.850454
| 2020-07-17T06:46:16
| 2020-07-17T06:46:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
import turtle
turtle.write("welcome")
turtle.showturtle()
turtle.forward(300)
turtle.down()
turtle.right(90)
turtle.showturtle()
turtle.color("green")
turtle.circle(50)
turtle.home()
turtle.goto(100,100)
turtle.showturtle()
|
[
"ripattna@gmail.com"
] |
ripattna@gmail.com
|
e845609c5e19bc9814ba6a8e189484b2eb4d5df5
|
0a07d0f59cd7eb8ce817c74cc114d177f56306e3
|
/06_nested_loops/exercise/01_number_pyramid.py
|
3a08d4af43ad1af6f05e9b5e8569beb0b3d261cf
|
[] |
no_license
|
M0673N/Programming-Basics-with-Python
|
10c777cec5ed4fcbf9f18dc7c81daa3c7bd406ad
|
cd23423d49f7fb0423d9f87c5b23dce3275bac21
|
refs/heads/main
| 2023-05-27T16:13:11.329033
| 2021-06-06T19:00:24
| 2021-06-06T19:00:24
| 360,933,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
n = int(input())
number = 1
for row in range(1, n + 1):
    for col in range(1, row + 1):
        if number == n + 1:
            break
        print(number, end=" ")
        number += 1
    if number == n + 1:
        break
    print()
|
[
"m0673n@abv.bg"
] |
m0673n@abv.bg
|
e04a8b7c58fa1ab8782199327a6a27fa183ded49
|
7832e7dc8f1583471af9c08806ce7f1117cd228a
|
/aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/StartArchiveSQLLogRequest.py
|
dea0e8656104d0cc61b24edf93aa28bcc7e5a2f3
|
[
"Apache-2.0"
] |
permissive
|
dianplus/aliyun-openapi-python-sdk
|
d6494850ddf0e66aaf04607322f353df32959725
|
6edf1ed02994245dae1d1b89edc6cce7caa51622
|
refs/heads/master
| 2023-04-08T11:35:36.216404
| 2017-11-02T12:01:15
| 2017-11-02T12:01:15
| 109,257,597
| 0
| 0
|
NOASSERTION
| 2023-03-23T17:59:30
| 2017-11-02T11:44:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,742
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class StartArchiveSQLLogRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'Rds', '2014-08-15', 'StartArchiveSQLLog', 'rds')

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_Database(self):
        return self.get_query_params().get('Database')

    def set_Database(self, Database):
        self.add_query_param('Database', Database)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_EndTime(self):
        return self.get_query_params().get('EndTime')

    def set_EndTime(self, EndTime):
        self.add_query_param('EndTime', EndTime)

    def get_DBInstanceId(self):
        return self.get_query_params().get('DBInstanceId')

    def set_DBInstanceId(self, DBInstanceId):
        self.add_query_param('DBInstanceId', DBInstanceId)

    def get_StartTime(self):
        return self.get_query_params().get('StartTime')

    def set_StartTime(self, StartTime):
        self.add_query_param('StartTime', StartTime)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_User(self):
        return self.get_query_params().get('User')

    def set_User(self, User):
        self.add_query_param('User', User)

    def get_QueryKeywords(self):
        return self.get_query_params().get('QueryKeywords')

    def set_QueryKeywords(self, QueryKeywords):
        self.add_query_param('QueryKeywords', QueryKeywords)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
b0489bd24caeaaf0ce9996ca35046cfccf718050
|
ac8b725681e25177c5de3daf58afe00135241d0f
|
/leetcode/0136_single_number.py
|
ce6423738f0aa2d3b5644e971dda3130faedfdde
|
[
"MIT"
] |
permissive
|
jacquerie/leetcode
|
7af100ea1d7292c8c3da34210cf04d891be5561b
|
0cb213b9c7bcb6efa11210e9ebc291befb560bb9
|
refs/heads/master
| 2022-05-19T22:19:46.284065
| 2022-03-27T02:41:58
| 2022-03-27T02:41:58
| 129,323,741
| 3
| 0
|
MIT
| 2021-01-04T01:41:50
| 2018-04-12T23:51:56
|
Python
|
UTF-8
|
Python
| false
| false
| 330
|
py
|
# -*- coding: utf-8 -*-
class Solution:
    def singleNumber(self, nums):
        result = 0
        for num in nums:
            result ^= num
        return result


if __name__ == "__main__":
    solution = Solution()
    assert 1 == solution.singleNumber([2, 2, 1])
    assert 4 == solution.singleNumber([4, 1, 2, 1, 2])
|
[
"jacopo.notarstefano@gmail.com"
] |
jacopo.notarstefano@gmail.com
|
322be270a39cddcf47218ccadcbd9963f2929efc
|
6c512b7d2ae4b1ad713a57f74a4816e1291ba7a1
|
/python_3/experiments/expr_generate_random_email_addr.py
|
ffdb0114212f3743389a9430fcc54266bf9dc738
|
[
"MIT"
] |
permissive
|
duttashi/applied-machine-learning
|
451389e8f27931f32132a148e93effa7c6352536
|
ff3267b97d9dd7122400754798e06fb493daa40a
|
refs/heads/master
| 2021-12-17T19:12:39.531717
| 2021-12-04T09:36:46
| 2021-12-04T09:36:46
| 169,368,684
| 0
| 2
|
MIT
| 2021-12-04T09:36:47
| 2019-02-06T07:19:08
|
R
|
UTF-8
|
Python
| false
| false
| 1,002
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 09:02:51 2021
Objective: To generate a random list of email addresses and write to file
Reference: https://codereview.stackexchange.com/questions/58269/generating-random-email-addresses
@author: Ashish
"""
import random, string
domains = ["hotmail.com", "gmail.com", "aol.com", "mail.com", "mail.kz", "yahoo.com"]
letters = string.ascii_lowercase[:12]
def get_random_domain(domains):
    return random.choice(domains)

def get_random_name(letters, length):
    return "".join(random.choice(letters) for i in range(length))

def generate_random_emails(nb, length):
    return [
        get_random_name(letters, length) + "@" + get_random_domain(domains)
        for i in range(nb)
    ]

def main():
    # 7 refers to the number of chars in the username part of the email id
    # 100 refers to the number of email addresses required
    print(generate_random_emails(100, 7))

if __name__ == "__main__":
    main()
|
[
"ashish.dutt8@gmail.com"
] |
ashish.dutt8@gmail.com
|
1f4a96aadea9ab8facaad238c1994ab40c709058
|
f771e83756436594a145bd7b80e5e5d8bca53268
|
/djangocms_baseplugins/spacer/migrations/0001_initial.py
|
966d6afda4ff196593e37d0468afda831e2ebd4b
|
[
"MIT"
] |
permissive
|
bnzk/djangocms-baseplugins
|
b76ed75460fbeacb62366935824d2bcfac52b25e
|
98e390482aa4facc35efe2412ff1603d85e2c8ba
|
refs/heads/develop
| 2023-06-17T23:55:41.574828
| 2023-06-09T09:22:01
| 2023-06-09T09:22:01
| 68,296,521
| 2
| 0
|
MIT
| 2023-04-17T09:18:11
| 2016-09-15T13:32:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,206
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-09 11:32
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('cms', '0016_auto_20160608_1535'),
    ]

    operations = [
        migrations.CreateModel(
            name='Spacer',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True,
                                                       on_delete=django.db.models.deletion.CASCADE,
                                                       parent_link=True, primary_key=True,
                                                       related_name='spacer_spacer',
                                                       serialize=False, to='cms.CMSPlugin')),
                ('title',
                 models.CharField(blank=True, default='', max_length=256, verbose_name='Title')),
                ('published', models.BooleanField(default=True, verbose_name='Published?')),
                ('published_from_date', models.DateTimeField(blank=True, default=None, null=True,
                                                             verbose_name='Published from')),
                ('published_until_date', models.DateTimeField(blank=True, default=None, null=True,
                                                              verbose_name='Published until')),
                ('in_menu', models.BooleanField(default=False, verbose_name='In Menu?')),
                ('layout',
                 models.CharField(blank=True, default='', max_length=64, verbose_name='Layout')),
                ('background', models.CharField(blank=True, default='', max_length=64,
                                                verbose_name='Background')),
                ('color',
                 models.CharField(blank=True, default='', max_length=64, verbose_name='Color')),
                ('anchor', models.SlugField(blank=True, default='', verbose_name='Anchor')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
|
[
"bnzk@bnzk.ch"
] |
bnzk@bnzk.ch
|
9e54269c162580b6585706bcfc401a469f121965
|
807305b8aefbd7aac4f44c67deed06c059ca02d9
|
/tests/databases/value/test_database.py
|
3f26650ec4063233ba8ff239621aca52b79828e1
|
[
"MIT"
] |
permissive
|
supramolecular-toolkit/stk
|
c40103b4820c67d110cbddc7be30d9b58d85f7af
|
46f70cd000890ca7c2312cc0fdbab306565f1400
|
refs/heads/master
| 2022-11-27T18:22:25.187588
| 2022-11-16T13:23:11
| 2022-11-16T13:23:11
| 129,884,045
| 22
| 5
|
MIT
| 2019-08-19T18:16:41
| 2018-04-17T09:58:28
|
Python
|
UTF-8
|
Python
| false
| false
| 862
|
py
|
def test_database(case_data):
    """
    Test a database.

    Parameters
    ----------
    case_data : :class:`.CaseData`
        A test case. Holds the database to test and the value to put
        into it.

    Returns
    -------
    None : :class:`NoneType`

    """
    _test_database(
        database=case_data.database,
        molecule=case_data.molecule,
        value=case_data.value,
    )


def _test_database(database, molecule, value):
    """
    Test a database.

    Parameters
    ----------
    database : :class:`.ValueDatabase`
        The database to test.

    molecule : :class:`.Molecule`
        The molecule to test.

    value : :class:`object`
        The value to put into the database.

    Returns
    -------
    None : :class:`NoneType`

    """
    database.put(molecule, value)
    assert database.get(molecule) == value
|
[
"noreply@github.com"
] |
supramolecular-toolkit.noreply@github.com
|
f223475d9486a080b7b1aca585647310abe8c018
|
2e6248663931cac90404e7ed63cb905ff1854b90
|
/sycomore/rf_spoiling.py
|
eb11c03f22a8b95749edcaed5c904f5849b3e1c5
|
[] |
no_license
|
ruojianhua1/sycomore-web
|
30b2a69843667b07a55af620ba78864bc227fd4d
|
a3fb38d7548939c3fa33ab838aa658cc0b89a9a9
|
refs/heads/master
| 2023-03-22T03:59:07.543015
| 2020-01-22T09:10:06
| 2020-01-22T09:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
import numpy
import sycomore
from sycomore.units import *
def rf_spoiling(
        model, flip_angle, TE, TR, slice_thickness, phase_step, repetitions):
    t_readout = TR - TE
    G_readout = (2*numpy.pi*rad / (sycomore.gamma*slice_thickness))/(TR-TE)
    echoes = numpy.zeros(repetitions, dtype=complex)
    for r in range(0, repetitions):
        phase = (phase_step * 1/2*(r+1)*r)
        model.apply_pulse(flip_angle, phase)
        model.apply_time_interval(TE)
        echoes[r] = model.echo*numpy.exp(-1j*phase.convert_to(rad))
        model.apply_time_interval(t_readout, G_readout)
    return echoes

def compute_ideal_spoiling(species, flip_angle, TR):
    alpha = flip_angle.convert_to(rad)
    E1 = numpy.exp((-TR/species.T1))
    signal = numpy.sin(alpha)*(1-E1)/(1-numpy.cos(alpha)*E1)
    return float(signal)
|
[
"lamy@unistra.fr"
] |
lamy@unistra.fr
|
51b36e9e923120f85678fa88d62f02b2e0c61d2c
|
1e11d6f9245c55e21edfb24f4340d52e3f7f327f
|
/dillo/migrations/0059_image_entity.py
|
15048319182249db28d4ac935bc231ce24a67f27
|
[] |
no_license
|
armadillica/dillo
|
996e8462f4f76349ecc49ecb08cdd6c8c66e072b
|
960aed85f8438109bed9883321891305e1db8b10
|
refs/heads/main
| 2023-08-04T06:45:34.570071
| 2023-06-04T00:07:57
| 2023-06-04T00:07:57
| 30,461,275
| 79
| 18
| null | 2023-08-02T00:22:40
| 2015-02-07T16:17:43
|
Python
|
UTF-8
|
Python
| false
| false
| 508
|
py
|
# Generated by Django 2.2.14 on 2021-04-11 20:57
import dillo.models.mixins
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('dillo', '0058_rename_icon_to_image'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='image',
            field=models.ImageField(blank=True, help_text='A preview image for the entity', upload_to=dillo.models.mixins.get_upload_to_hashed_path),
        ),
    ]
|
[
"francesco.siddi@gmail.com"
] |
francesco.siddi@gmail.com
|
139dfac249b6f7b80b7a95457f89dea57fcbe4b2
|
ffb05b145989e01da075e2a607fb291955251f46
|
/pypers/oxford/metatracer.py
|
253fb7ce096b902041cfe72b29f2096d1d49f442
|
[] |
no_license
|
micheles/papers
|
a5e7f2fa0cf305cd3f8face7c7ecc0db70ce7cc7
|
be9070f8b7e8192b84a102444b1238266bdc55a0
|
refs/heads/master
| 2023-06-07T16:46:46.306040
| 2018-07-14T04:17:51
| 2018-07-14T04:17:51
| 32,264,461
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# metatracer.py
import inspect
from decorators import decorator
@decorator
def traced(meth, *args, **kw):
    cls = meth.__cls__
    modname = meth.__module__ or cls.__module__
    print "calling %s.%s.%s" % (modname, cls.__name__, meth.__name__)
    return meth(*args, **kw)

class MetaTracer(type):
    def __init__(cls, name, bases, dic):
        super(MetaTracer, cls).__init__(name, bases, dic)
        for k, v in dic.iteritems():
            if inspect.isfunction(v):
                v.__cls__ = cls  # so we know in which class v was defined
                setattr(cls, k, traced(v))
|
[
"michele.simionato@gmail.com"
] |
michele.simionato@gmail.com
|
5d3eb74722808ffd0192f32a87c42dc32849375a
|
409829dfa1c9758ac67190fe76fea3746106bbad
|
/setup.py
|
3b7e08ef66822de90ff5fd52631aa58f7305cdb6
|
[
"MIT"
] |
permissive
|
eagleflo/python-sc2
|
916d1df613190dbc5da0883d2c3c6275350c8f88
|
1bd24e0b7d3200df7fb7ef02256753c45fea0b32
|
refs/heads/master
| 2020-04-01T05:16:42.683300
| 2018-10-13T10:33:31
| 2018-10-13T10:33:31
| 152,896,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
from setuptools import setup, find_packages
from pipenv.project import Project
from pipenv.utils import convert_deps_to_pip
pfile = Project(chdir=False).parsed_pipfile
requirements = convert_deps_to_pip(pfile['packages'], r=False)
test_requirements = convert_deps_to_pip(pfile['dev-packages'], r=False)
setup(
name = "sc2",
packages = find_packages(),
version = "0.10.1",
description = "A StarCraft II API Client for Python 3",
license="MIT",
author = "Hannes Karppila",
author_email = "hannes.karppila@gmail.com",
url = "https://github.com/Dentosal/python-sc2",
keywords = ["StarCraft", "StarCraft 2", "StarCraft II", "AI", "Bot"],
setup_requires=["pipenv"],
install_requires=requirements,
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Topic :: Games/Entertainment",
"Topic :: Games/Entertainment :: Real Time Strategy",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
]
)
|
[
"hannes.karppila@gmail.com"
] |
hannes.karppila@gmail.com
|
6c8c160c9634381508ee010a7ceeae9177ffd6ec
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/112_5.py
|
dc004ce6416dbe458336c86465138549e9215466
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,837
|
py
|
Python – Filter unequal elements of two lists at the same index
Sometimes, while working with Python data, we need to extract the values that
differ across multiple lists at the same index. This kind of problem can come
up in many domains. Let's discuss certain ways in which it can be solved.
**Method #1 : Using loop + zip()**
The combination of the above functions can be used to solve this problem. Here
we pair up the same-index elements using zip() and then check for
dissimilarity with a conditional statement inside a loop.
# Python3 code to demonstrate working of
# Unequal Equi-index elements
# using loop + zip()

# initialize lists
test_list1 = ["a", "b", "c", "d"]
test_list2 = ["g", "b", "s", "d"]

# printing original lists
print("The original list 1 : " + str(test_list1))
print("The original list 2 : " + str(test_list2))

# Unequal Equi-index elements
# using loop + zip()
res = []
for i, j in zip(test_list1, test_list2):
    if i != j:
        res.append(i)

# printing result
print("Unequal index elements in lists : " + str(res))
**Output :**
The original list 1 : ['a', 'b', 'c', 'd']
The original list 2 : ['g', 'b', 's', 'd']
Unequal index elements in lists : ['a', 'c']
**Method #2 : Using zip() + list comprehension**
A combination of these functionalities can also be used to solve this problem.
This uses the same method as above, just compressed into a shorthand with a
list comprehension.
# Python3 code to demonstrate working of
# Unequal Equi-index elements
# using list comprehension + zip()

# initialize lists
test_list1 = ["a", "b", "c", "d"]
test_list2 = ["g", "b", "s", "d"]

# printing original lists
print("The original list 1 : " + str(test_list1))
print("The original list 2 : " + str(test_list2))

# Unequal Equi-index elements
# using list comprehension + zip()
res = [i for i, j in zip(test_list1, test_list2) if i != j]

# printing result
print("Unequal index elements in lists : " + str(res))
**Output :**
The original list 1 : ['a', 'b', 'c', 'd']
The original list 2 : ['g', 'b', 's', 'd']
Unequal index elements in lists : ['a', 'c']
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
0b5f1d72d0ce03d8535066049aea2e6e91ad8bbf
|
965e163df916b01d647953f2b1431d265683f6ca
|
/test/test_helpers.py
|
e839094edc9afd8bc16d27b5c41e69a3377b702d
|
[
"MIT"
] |
permissive
|
expressvpn/expressvpn_leak_testing
|
6505c39228d396caff0c2df3777009c6fbdf3127
|
9e4cee899ac04f7820ac351fa55efdc0c01370ba
|
refs/heads/master
| 2023-08-18T06:33:33.931040
| 2021-10-11T03:02:50
| 2021-10-11T03:02:50
| 112,572,905
| 244
| 48
|
MIT
| 2021-01-19T16:02:18
| 2017-11-30T06:18:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,912
|
py
|
import sys
import unittest
import mock
from parameterized import parameterized
from xv_leak_tools.helpers import current_os
from xv_leak_tools.helpers import merge_two_dicts
from xv_leak_tools.helpers import other_oses
class TestOSHelpers(unittest.TestCase):

    def test_current_os(self):
        for plat in ['linux', 'linux2']:
            with mock.patch.object(sys, 'platform', plat):
                self.assertEqual(current_os(), 'linux')
        with mock.patch.object(sys, 'platform', 'darwin'):
            self.assertEqual(current_os(), 'macos')
        for plat in ['win32', 'cygwin']:
            with mock.patch.object(sys, 'platform', plat):
                self.assertEqual(current_os(), 'windows')
        with mock.patch.object(sys, 'platform', 'unknown'):
            with self.assertRaises(Exception):
                current_os()

    def test_other_oses(self):
        plat_and_others = [
            ('linux', ['windows', 'macos']),
            ('darwin', ['windows', 'linux']),
            ('win32', ['linux', 'macos']),
        ]
        for plat, others in plat_and_others:
            with mock.patch.object(sys, 'platform', plat):
                self.assertEqual(set(others), set(other_oses()))

class TestMergeTwoDicts(unittest.TestCase):
    DICT1 = {'a': 1, 'b': 2, 'c': 3}
    DICT2 = {'d': 4, 'e': 5, 'f': 6}
    DICT3 = {'a': 4, 'b': 5, 'c': 6}
    DICT4 = {'c': 4, 'd': 5, 'e': 6}
    MERGED1_2 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}
    MERGED1_4 = {'a': 1, 'b': 2, 'c': 4, 'd': 5, 'e': 6}

    @parameterized.expand([
        (DICT1, DICT2, MERGED1_2),
        (DICT1, DICT3, DICT3),
        ({}, DICT1, DICT1),
        (DICT1, {}, DICT1),
        (DICT1, DICT4, MERGED1_4),
    ])
    def test_merge_two_dicts(self, dict1, dict2, merged):
        self.assertEqual(merge_two_dicts(dict1, dict2), merged)

if __name__ == '__main__':
    unittest.main()
|
[
"leakproofing@expressvpn.com"
] |
leakproofing@expressvpn.com
|
38e53983d55b9890e6298bca5e315690dd08829a
|
9e3f1fc1935ac1dcdce7c8d39776de14ec49b5e6
|
/aptechapp/apps.py
|
bc8776a83e61e36a73718deed4ee603c13447ab8
|
[] |
no_license
|
irchriscott/Aptech-Connect-Web
|
f03072bcc790ef1d72b6e609fa99656f0e45fcba
|
7ebe1919ffacd9e826e2d86c778f4546072738e9
|
refs/heads/master
| 2020-05-02T07:19:05.867858
| 2019-03-29T12:46:22
| 2019-03-29T12:46:22
| 177,814,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class AptechappConfig(AppConfig):
name = 'aptechapp'
|
[
"irchristianscott@gmail.com"
] |
irchristianscott@gmail.com
|
a31878733c748d9fc436e92e0c85e1f95a4a753b
|
3cef51c71fdb19d326bea2a76bce03b6e04a69cb
|
/2016/22 Grid Computing/solutionb.py
|
476a47fa34b3476665f34fb9ac2b6ae703eda0a0
|
[] |
no_license
|
janezd/advent-of-code
|
fe2819c667a2f9309fe3c50e5c234a98855f26b8
|
700b09894eb6b8de4324304e99be17ca664a7c5b
|
refs/heads/master
| 2021-01-10T08:53:31.721953
| 2017-12-26T10:33:11
| 2017-12-26T10:33:11
| 48,594,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,091
|
py
|
import numpy as np
import re
from hashlib import sha1
class Hashable:
def __init__(self, a):
self.a = a
self._hash = int(sha1(np.ascontiguousarray(self.a)).hexdigest(), 16)
print(self._hash)
def __hash__(self):
return self._hash
def __eq__(self, other):
# compare the wrapped arrays rather than the wrapper object itself
return np.array_equal(self.a, other.a)
data = []
x_prev = -1
re_node = re.compile("/dev/grid/node-x(\d+)-y(\d+) +(\d+)T +(\d+)T +(\d+)T")
for line in open("input.txt"):
mo = re_node.search(line)
if mo:
x, y, *c = [int(x) for x in mo.groups()]
if x != x_prev:
data.append([])
x_prev = x
data[-1].append(c)
for x in data:
print([t for t in x][:7])
sizex, sizey = len(data), len(data[0])
check_next = [(np.array(data, dtype=np.uint16), len(data) - 1, 0)]
steps = 0
seen = set()
while check_next:
steps += 1
print(steps, len(check_next))
to_check = check_next
check_next = []
while to_check:
data, xpos, ypos = to_check.pop()
for x in range(sizex):
for y in range(sizey):
for nx, ny in (x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1):
if 0 <= nx < sizex and 0 <= ny < sizey and \
data[x, y, 1] <= data[nx, ny, 2]:
print("{}, {} -> {}, {}".format(x, y, nx, ny))
exit()
new = data.copy()
new[nx, ny, 2] -= new[x, y, 1]
new[nx, ny, 1] += new[x, y, 1]
new[x, y, 1] = 0
new[x, y, 2] = new[x, y, 0]
nposx = nx if x == xpos else xpos
nposy = ny if y == ypos else ypos
if nposx == nposy == 0:
print(steps)
exit()
mat = (Hashable(new), nposx, nposy)
if mat not in seen:
check_next.append((new, nposx, nposy))
seen.add(mat)
|
[
"janez.demsar@fri.uni-lj.si"
] |
janez.demsar@fri.uni-lj.si
|
8a745f4689ea9fda6a7da7c444c8f3eef0616059
|
52f4426d2776871cc7f119de258249f674064f78
|
/misc/algorithm/shortest_path/floyd_warshall.py
|
69f03a396a9041e4d03aae9d449722638becbffb
|
[] |
no_license
|
namhyun-gu/algorithm
|
8ad98d336366351e715465643dcdd9f04eeb0ad2
|
d99c44f9825576c16aaca731888e0c32f2ae6e96
|
refs/heads/master
| 2023-06-06T02:28:16.514422
| 2021-07-02T10:34:03
| 2021-07-02T10:34:03
| 288,646,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
from math import inf
from typing import List
graph: List[List[int]] = [
[0, 7, inf, inf, 3, 10, inf],
[7, 0, 4, 10, 2, 6, inf],
[inf, 4, 0, 2, inf, inf, inf],
[inf, 10, 2, 0, 11, 9, 4],
[3, 2, inf, 11, 0, inf, 5],
[10, 6, inf, 9, inf, 0, inf],
[inf, inf, inf, 4, 5, inf, 0],
]
def floyd_warshall() -> List[List[int]]:
vertex_size = len(graph)
w = [[0 for i in range(vertex_size)] for j in range(vertex_size)]
for i in range(vertex_size):
for j in range(vertex_size):
w[i][j] = graph[i][j]
for k in range(vertex_size):
for i in range(vertex_size):
for j in range(vertex_size):
w[i][j] = min(w[i][j], w[i][k] + w[k][j])
return w
if __name__ == "__main__":
print("--- floyd_warshall ---")
dists = floyd_warshall()
for i in range(len(dists)):
for j in range(len(dists)):
print(dists[i][j], end="\t")
print()
|
[
"mnhan0403@gmail.com"
] |
mnhan0403@gmail.com
|
d174d21032804d359d6c11940c1d34010ee9f1d4
|
1915774790a77a630c00e70738ac41a315f5a2cb
|
/doorscalc/migrations/0038_auto_20190828_0818.py
|
700027d8f8f6d981755119764f7adca3d7b95bad
|
[] |
no_license
|
coconutcake/hajduktools
|
842948646d2e8d3368b4d420d73bba981d649d43
|
6f9e678a1168195d77d1163bc9145205d03bb141
|
refs/heads/master
| 2020-07-02T20:02:19.914649
| 2019-09-13T17:44:05
| 2019-09-13T17:44:05
| 201,648,138
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# Generated by Django 2.1.11 on 2019-08-28 06:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('doorscalc', '0037_auto_20190822_1052'),
]
operations = [
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(blank=True, choices=[('Pending', 'Pending'), ('Accepted', 'Accepted'), ('Producing', 'Producing')], default='Pending', help_text='Status zamówienia', max_length=50, null=True, verbose_name='Status'),
),
]
|
[
"contact@mign.pl"
] |
contact@mign.pl
|
adfd67c20156cb8b8ad74e8c8a720e9e2e48f5eb
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/QiPr3M5tsqfsbYcCQ_6.py
|
971a4a4b867272607ab5fce9aa8d69b8c201cd28
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
def square_digits(n):
curdigit = 0
final = ""
while n > 0:
curdigit = n % 10
n = n // 10
final = str(curdigit ** 2) + final
final = int(final)
print(final)
return final
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
e30f9dfb96f688439caf3db1cd6882c6de1688f0
|
81c6278a9d50e04794eb4b0fe35ec0d595ca60d1
|
/cerulean/test/test_torque_scheduler.py
|
cee58c7bb4fb372013b4807428628b71862ece59
|
[
"Apache-2.0"
] |
permissive
|
romulogoncalves/cerulean
|
c5c028328873d49b326d5bba1520c6de41b23f16
|
c61b4848a46f93e47653a474771ee6c443727c79
|
refs/heads/master
| 2020-08-02T09:31:15.982745
| 2019-01-21T09:26:38
| 2019-01-21T09:26:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
from cerulean import JobDescription
from cerulean.torque_scheduler import (_get_field_from_qstat_xml,
_job_desc_to_job_script,
_seconds_to_time)
def test_job_script_name() -> None:
job_desc = JobDescription()
job_desc.name = 'test_name'
script = _job_desc_to_job_script(job_desc)
assert '#PBS -N test_name' in script
def test_job_script_working_directory() -> None:
# Note: doesn't test that it works, that's what test_scheduler is for
job_desc = JobDescription()
job_desc.working_directory = '/home/user/workdir'
script = _job_desc_to_job_script(job_desc)
assert '/home/user/workdir' in script
def test_job_script_command_args() -> None:
# Note: doesn't test that it works, that's what test_scheduler is for
job_desc = JobDescription()
job_desc.command = 'echo'
job_desc.arguments = ['-n', 'Hello world', 'testing']
script = _job_desc_to_job_script(job_desc)
assert "echo -n Hello world testing" in script
def test_job_script_stdout_file() -> None:
# Note: doesn't test that it works, that's what test_scheduler is for
job_desc = JobDescription()
job_desc.stdout_file = '/home/user/test.out'
script = _job_desc_to_job_script(job_desc)
assert '/home/user/test.out' in script
def test_job_script_stderr_file() -> None:
# Note: doesn't test that it works, that's what test_scheduler is for
job_desc = JobDescription()
job_desc.stderr_file = '/home/user/test.err'
script = _job_desc_to_job_script(job_desc)
assert '/home/user/test.err' in script
def test_job_script_queue_name() -> None:
# Note: doesn't test that it works, that's what test_scheduler is for
job_desc = JobDescription()
job_desc.queue_name = 'testing_queue'
script = _job_desc_to_job_script(job_desc)
assert '#PBS -q testing_queue' in script
def test_job_script_time_reserved() -> None:
# Note: doesn't test that it works, that's what test_scheduler is for
job_desc = JobDescription()
job_desc.time_reserved = 70
script = _job_desc_to_job_script(job_desc)
assert '00:00:01:10' in script
def test_job_script_num_nodes() -> None:
# Note: doesn't test that it works, that's what test_scheduler is for
job_desc = JobDescription()
job_desc.num_nodes = 42
script = _job_desc_to_job_script(job_desc)
assert 'nodes=42' in script
def test_job_script_processes_per_node() -> None:
job_desc = JobDescription()
job_desc.mpi_processes_per_node = 4
script = _job_desc_to_job_script(job_desc)
assert 'ppn=4' in script
def test_job_script_extra_scheduler_options() -> None:
job_desc = JobDescription()
job_desc.extra_scheduler_options = '-p 10'
script = _job_desc_to_job_script(job_desc)
assert '#PBS -p 10' in script
def test_seconds_to_time() -> None:
time = (2 * 24 * 60 * 60) + (13 * 60 * 60) + (7 * 60) + 48
time_str = _seconds_to_time(time)
assert time_str == '02:13:07:48'
time_str = _seconds_to_time(2)
assert time_str == '00:00:00:02'
|
[
"l.veen@esciencecenter.nl"
] |
l.veen@esciencecenter.nl
|
e9f18e7cf838a0d9ad2b3ff178bfda18e06dd1ec
|
ceb75e50d77b962edbe866b0640271cdd4721be9
|
/hash.py
|
d986345513156fa9df62d089ac9bfcc250367c63
|
[] |
no_license
|
rocket3989/hashCode2020
|
9394dc52e66c085e14fdd6f4152d6605f426b79a
|
6fd287381c3b32813eb3f63558d3afd83bd0ebbf
|
refs/heads/master
| 2021-01-08T11:14:36.956131
| 2020-02-27T02:16:50
| 2020-02-27T02:16:50
| 242,014,611
| 3
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,693
|
py
|
B, L, D = [int(x) for x in input().split()]
score = [int(x) for x in input().split()]
freq = [0 for i in range(B)]
from heapq import heappop, heappush, heapify
import random
calls = 0
class library:
def __init__(self, count, time, rate, books):
self.count = count
self.time = time
self.rate = rate
self.books = books
# random.shuffle(self.books)
def bookOrder(self):
bookList = []
for book in self.books:
bookList.append((-score[book], book))
bookList.sort()
for i in range(len(bookList)):
bookList[i] = bookList[i][1]
self.books = bookList
def heuristic(self):
count = (D - self.time) * self.rate
if count <= 0: return 0
self.bookOrder()
sumOf = 0
for book in self.books:
if seen[book]: continue
sumOf += score[book]
count -= 1
if count == 0: break
return (sumOf) / self.time
libs = []
h = []
seen = [False for b in range(B)]
for l in range(L):
N, T, M = [int(x) for x in input().split()] # books, time, books/day
ids = [int(x) for x in input().split()] # ids of books
for i in ids:
freq[i] += 1
libs.append(library(N, T, M, ids))
for l in range(L):
h.append((-libs[l].heuristic(), l))
heapify(h)
sumOf = 0
output = []
while len(h):
val, l = heappop(h)
val = -val
newScore = libs[l].heuristic()
if libs[l].time >= D:
continue
if val > newScore:
heappush(h, (-newScore, l))
continue
if D < 0: break
D -= libs[l].time
findable = D * libs[l].rate
books = libs[l].books
out = []
for book in books:
if seen[book]: continue
seen[book] = True
out.append(book)
sumOf += score[book]
findable -= 1
if findable == 0: break
if len(out) == 0:
D += libs[l].time
continue
else:
output.append((l, len(out)))
output.append(out)
for book in libs[l].books:
freq[book] -= 1
print(sumOf)
print(len(output) // 2)
for line in output:
print(*line)
"""
python3 hash.py < a.txt > ao.txt && python3 hash.py < b.txt > bo.txt && python3 hash.py < c.txt > co.txt && python3 hash.py < d.txt > do.txt && python3 hash.py < e.txt > eo.txt && python3 hash.py < f.txt > fo.txt
"""
|
[
"rocket3989@gmail.com"
] |
rocket3989@gmail.com
|
f8a0b1b30cd967fa9b3450c9cfb4207194c8ec86
|
8c680fc22c005aadc84deff33850d819e943124a
|
/open-test-data/rfc4475-sip-torture-test/unksm2.dat.py
|
a19935571b6edd931465bffe6ed24bb66262a1db
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bobjects/BobStack
|
abc824b8513d826cbc725e8fbd3837ae8e03d646
|
c177b286075044832f44baf9ace201780c8b4320
|
refs/heads/master
| 2020-04-06T05:12:35.041273
| 2017-10-08T23:39:35
| 2017-10-08T23:39:35
| 50,403,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
messageString = (
'REGISTER sip:example.com SIP/2.0\r\n'
'To: isbn:2983792873\r\n'
'From: <http://www.example.com>;tag=3234233\r\n'
'Call-ID: unksm2.daksdj@hyphenated-host.example.com\r\n'
'CSeq: 234902 REGISTER\r\n'
'Max-Forwards: 70\r\n'
'Via: SIP/2.0/UDP 192.0.2.21:5060;branch=z9hG4bKkdjuw\r\n'
'Contact: <name:John_Smith>\r\n'
'l: 0\r\n'
'\r\n'
)
|
[
"bob@bobjectsinc.com"
] |
bob@bobjectsinc.com
|
cdeb4e8690477000342cd59097c9cc23be076db6
|
4e5b233311bdb5872fd5f862f90b3c6e56f56bc2
|
/Day_16_CoffeeMachine.py
|
d342630146136aa15eca126c5e01696d1bebea25
|
[] |
no_license
|
asher-lab/100daysofpython
|
02b7c6703b82683f824e3a5b21d41233141827df
|
046f16fddb1073195f320e0f0f6e091c35f10314
|
refs/heads/main
| 2023-07-01T17:55:57.654898
| 2021-08-11T02:38:59
| 2021-08-11T02:38:59
| 388,103,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
from menu import Menu, MenuItem
from coffee_maker import CoffeeMaker
from money_machine import MoneyMachine
#declaring objects
menu = Menu()
coffee_maker = CoffeeMaker()
money_machine = MoneyMachine()
is_on = True
while is_on:
options = menu.get_items()
choice = input(f"What would you like? {options}:")
if choice == "off":
is_on = False
elif choice == "report":
coffee_maker.report()
money_machine.report()
else:
drink = menu.find_drink(choice)
if coffee_maker.is_resource_sufficient(drink):
if money_machine.make_payment(drink.cost):
coffee_maker.make_coffee(drink)
|
[
"noreply@github.com"
] |
asher-lab.noreply@github.com
|
bc9ae930f9b3a8b12ccb821e89bb277f81c1a19d
|
76d3ec49d75ca5cef755e612a97ddcbc34f62bf3
|
/data_integration/parallel_tasks/sql.py
|
724456ec4dcef6e187d023a7fe4298906fd58d8a
|
[
"MIT"
] |
permissive
|
ierosodin/data-integration
|
8f7e9500bda03c8316061452bc502d68cfcd33da
|
f3ee414e8b8994e5b740a374c0594e40862ff6e9
|
refs/heads/master
| 2021-01-02T14:58:10.752745
| 2020-02-13T00:58:58
| 2020-02-13T00:58:58
| 239,665,119
| 0
| 0
|
MIT
| 2020-02-11T03:11:37
| 2020-02-11T03:11:36
| null |
UTF-8
|
Python
| false
| false
| 3,315
|
py
|
import inspect
import re
import typing
from mara_page import _, html
from .. import config, pipelines
from ..commands import sql
class ParallelExecuteSQL(pipelines.ParallelTask, sql._SQLCommand):
def __init__(self, id: str, description: str, parameter_function: typing.Callable, parameter_placeholders: [str],
max_number_of_parallel_tasks: int = None, sql_statement: str = None, file_name: str = None,
commands_before: [pipelines.Command] = None, commands_after: [pipelines.Command] = None,
db_alias: str = None, echo_queries: bool = True, timezone: str = None,
replace: {str: str} = None) -> None:
if (not (sql_statement or file_name)) or (sql_statement and file_name):
raise ValueError('Please provide either sql_statement or file_name (but not both)')
pipelines.ParallelTask.__init__(self, id=id, description=description,
max_number_of_parallel_tasks=max_number_of_parallel_tasks,
commands_before=commands_before, commands_after=commands_after)
sql._SQLCommand.__init__(self, sql_statement, file_name, replace)
self.parameter_function = parameter_function
self.parameter_placeholders = parameter_placeholders
self._db_alias = db_alias
self.timezone = timezone
self.echo_queries = echo_queries
@property
def db_alias(self):
return self._db_alias or config.default_db_alias()
def add_parallel_tasks(self, sub_pipeline: 'pipelines.Pipeline') -> None:
parameters = self.parameter_function()
if not isinstance(parameters, list) or not all(isinstance(item, tuple) for item in parameters):
raise ValueError(f'parameter function should return a list of tuples, got "{repr(parameters)}"')
for parameter_tuple in parameters:
id = '-'.join([re.sub('[^0-9a-z\-_]+', '', str(x).lower().replace('-', '_')) for x in parameter_tuple])
replace = self.replace.copy()
for placeholder, param in zip(self.parameter_placeholders, parameter_tuple):
replace[placeholder] = param
sub_pipeline.add(pipelines.Task(
id=id, description=f'Execute SQL for parameters {repr(parameter_tuple)}',
commands=[
sql.ExecuteSQL(sql_file_name=self.sql_file_name, db_alias=self.db_alias,
echo_queries=self.echo_queries, timezone=self.timezone, replace=replace)
if self.sql_file_name else
sql.ExecuteSQL(sql_statement=self.sql_statement, db_alias=self.db_alias,
echo_queries=self.echo_queries, timezone=self.timezone, replace=replace)]))
def html_doc_items(self) -> [(str, str)]:
return [('db', _.tt[self.db_alias])] \
+ sql._SQLCommand.html_doc_items(self, self.db_alias) \
+ [('parameter function', html.highlight_syntax(inspect.getsource(self.parameter_function), 'python')),
('parameter placeholders', _.tt[repr(self.parameter_placeholders)]),
('echo queries', _.tt[str(self.echo_queries)]),
('timezone', _.tt[self.timezone or ''])]
|
[
"martin.loetzsch@gmail.com"
] |
martin.loetzsch@gmail.com
|
0384d7dfb9a7e53271b18c52096a581c20d45dc2
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_46/71.py
|
24e9421799bdf1037f657a345661908082b4f683
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
input_file = "A-large.in";
output_file = "0.out";
input = open(input_file,"r");
output = open(output_file,"w+");
num_tests = int(input.readline());
for test in range(num_tests):
size = int(input.readline());
matrix = []
a = []
for i in range(size):
gg = input.readline()
matrix.append(gg.strip());
a.append(len(gg.rstrip('\n0')))
if test != 2:
#continue
pass
#print(matrix)
n = 0
for i in range(size):
idx = i + 1
found = i;
for j in range(i,size):
if (a[j] <= idx):
found = j
break
# swap
if i != found:
s = a[i:found]
a[i] = a[found]
a[i+1:found+1] = s
n = n + found - i
#print("{0} -> {1}".format(i,found))
print("Case #{0}: {1}".format(test+1,n));
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
81fa627e293605e74746676da48b221cdaa59e9d
|
ce0a3a73c7825f7327b8319fb2593b6b01659bb0
|
/webtest03/evaluation/migrations/0002_auto_20180802_0151.py
|
5bb7bf88cabc2d1779cc24878b96578183d001a5
|
[] |
no_license
|
soccergame/deeplearning
|
28b0a6ed85df12e362b3a451050fab5a2a994be7
|
cbc65d3eba453992a279cfd96a9d3640d8fe6b9f
|
refs/heads/master
| 2020-03-28T22:38:26.085464
| 2018-08-31T11:22:39
| 2018-08-31T11:22:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-02 01:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('evaluation', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='media',
name='name',
field=models.CharField(max_length=50),
),
]
|
[
"18811442380@163.com"
] |
18811442380@163.com
|
66322e287c6b4586776e85303fbc8f20198de35f
|
cf6c67790393b4d1ad75f5f30fef00455bc9a541
|
/icgc_dataobjects/run.py
|
c71e438d674f2c7beed3bc51313bfcf68ef4a785
|
[
"MIT"
] |
permissive
|
ohsu-comp-bio/icgc_dataobjects
|
fe3b231838875c9dadb9eacb17004fc59790689c
|
3cbb676d2e1a9a21e03f3cfb8a3853752aa97916
|
refs/heads/master
| 2022-10-09T01:04:14.259700
| 2017-05-05T20:28:50
| 2017-05-05T20:28:50
| 90,410,194
| 0
| 0
|
MIT
| 2022-09-23T20:52:26
| 2017-05-05T19:33:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,301
|
py
|
#!/usr/bin/env python
"""
Proxy front end to the dcc server
"""
import os
from flask import request, jsonify, Response, abort, Flask
from flask_cors import CORS
# our utilities
import dcc_proxy
def _configure_app():
""" set app wide config """
# start the app
app = Flask(__name__)
# allow cross site access
CORS(app)
# after commit, publish
return app
# main configuration
app = _configure_app()
# https://github.com/ohsu-comp-bio/data-object-schemas/blob/feature/gdc/proto/data_objects.proto
@app.route('/api/v1/data/object/search', methods=['POST'])
def data_object_search():
"""
ga4gh::data-object-schemas data/object/search
"""
app.logger.debug(request.data)
return dcc_proxy.data_object_search()
# https://github.com/ohsu-comp-bio/data-object-schemas/blob/feature/gdc/proto/data_objects.proto
@app.route('/api/v1/data/object/<path:id>', methods=['GET'])
def data_object_get(id):
"""
ga4gh::data-object-schemas data/object
"""
return dcc_proxy.data_object_get(id)
# https://github.com/ohsu-comp-bio/data-object-schemas/blob/feature/gdc/proto/data_objects.proto
@app.route('/api/v1/data/object', methods=['POST'])
def data_object_post():
"""
ga4gh::data-object-schemas data/object
"""
return dcc_proxy.data_object_post()
# https://github.com/ohsu-comp-bio/data-object-schemas/blob/feature/gdc/proto/data_objects.proto
@app.route('/api/v1/datasets', methods=['POST'])
def datasets_post():
"""
ga4gh::data-object-schemas data/object
"""
return dcc_proxy.datasets_post()
# https://github.com/ohsu-comp-bio/data-object-schemas/blob/feature/gdc/proto/data_objects.proto
@app.route('/api/v1/datasets/<path:id>', methods=['GET'])
def datasets_get_one(id):
"""
ga4gh::data-object-schemas data/object
"""
return dcc_proxy.datasets_get_one(id)
# Private util functions
# print useful information at startup
app.logger.debug('URL map {}'.format(app.url_map))
# Entry point of app
if __name__ == '__main__': # pragma: no cover
debug = 'API_DEBUG' in os.environ # TODO does eve override?
api_port = int(os.environ.get('API_PORT', '5000'))
api_host = os.environ.get('API_TARGET', '0.0.0.0')
app.run(debug=debug, port=api_port, host=api_host, threaded=True)
|
[
"brian@bwalsh.com"
] |
brian@bwalsh.com
|
e8fc1d761894c544fc9f36d9aa38ca5900bef47d
|
3fda3ff2e9334433554b6cf923506f428d9e9366
|
/hipeac/migrations/0014_auto_20190121_1724.py
|
ebf27a1920573587d68f431147ce919777e91b00
|
[
"MIT"
] |
permissive
|
CreativeOthman/hipeac
|
12adb61099886a6719dfccfa5ce26fdec8951bf9
|
2ce98da17cac2c6a87ec88df1b7676db4c200607
|
refs/heads/master
| 2022-07-20T10:06:58.771811
| 2020-05-07T11:39:13
| 2020-05-07T11:44:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# Generated by Django 2.1.5 on 2019-01-21 16:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("hipeac", "0013_auto_20190121_1650"),
]
operations = [
migrations.AlterField(
model_name="video", name="youtube_id", field=models.CharField(max_length=40, unique=True),
),
]
|
[
"eneko.illarramendi@ugent.be"
] |
eneko.illarramendi@ugent.be
|
62761c4213e8d3fd11ac2734a38d39fed0be7f54
|
44b6bc41fe8e424196f98dbc5b2f050c1f9645f8
|
/platforms/windows/dos/34480.py
|
15fb0b06f9a6f2300b2fa265f1402a0d1a77a6aa
|
[] |
no_license
|
angeloobeta/exploit-database
|
21283dd8549f47836a35af6f3ea7b63b8dba11ea
|
43f3d9e94c01a7f51e30561a96214af231dd9d36
|
refs/heads/master
| 2021-08-08T21:07:38.794539
| 2017-11-11T05:01:28
| 2017-11-11T05:01:28
| 110,380,452
| 0
| 1
| null | 2017-11-11T21:09:05
| 2017-11-11T21:09:04
| null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
source: http://www.securityfocus.com/bid/42473/info
Xilisoft Video Converter is prone to a buffer-overflow vulnerability because it fails to perform adequate boundary checks on user-supplied data.
Attackers may leverage this issue to execute arbitrary code in the context of the application. Failed attacks will cause denial-of-service conditions.
Xilisoft Video Converter 3.1.8.0720b is vulnerable; other versions may also be affected.
################PoC Start##############################################
print "\nXilisoft Video Converter Wizard 3 ogg file processing DoS"
#Download from
# http://www.downloadatoz.com/xilisoft-video-converter/order.php?download=xilisoft-video-converter&url=downloadatoz.com/xilisoft-video-converter/wizard.html/__xilisoft-video-converter__d1
#http://www.downloadatoz.com/xilisoft-video-converter/wizard.html
buff = "D" * 8400
try:
oggfile = open("XilVC_ogg_crash.ogg","w")
oggfile.write(buff)
oggfile.close()
print "[+]Successfully created ogg file\n"
print "[+]Coded by Praveen Darshanam\n"
except:
print "[+]Cannot create File\n"
################PoC End################################################
|
[
"info@exploit-db.com"
] |
info@exploit-db.com
|
5f7a9b2612c79430d4c1e5242715bc642e2d3fac
|
f64e31cb76909a6f7fb592ad623e0a94deec25ae
|
/leetcode/p1143_longest_common_subsequence.py
|
85183f69cab74bceb6e18858c63272d6a774702d
|
[] |
no_license
|
weak-head/leetcode
|
365d635cb985e1d154985188f6728c18cab1f877
|
9a20e1835652f5e6c33ef5c238f622e81f84ca26
|
refs/heads/main
| 2023-05-11T14:19:58.205709
| 2023-05-05T20:57:13
| 2023-05-05T20:57:13
| 172,853,059
| 0
| 1
| null | 2022-12-09T05:22:32
| 2019-02-27T05:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
from functools import lru_cache
def lcs_bu_optimized(a, b):
"""
Dynamic Programming, bottom-up
Optimized for space
Time: O(a * b)
Space: O(min(a, b))
a - length of 'a'
b - length of 'b'
"""
if a < b:
a, b = b, a
pre = [0] * (len(b) + 1)
cur = [0] * (len(b) + 1)
for r in range(1, len(a) + 1):
for c in range(1, len(b) + 1):
if a[r - 1] == b[c - 1]:
cur[c] = pre[c - 1] + 1
else:
cur[c] = max(pre[c], cur[c - 1])
pre, cur = cur, pre
return pre[-1]
def lcs_bu(a, b):
"""
Dynamic Programming, bottom-up
Could be optimized for space
Time: O(a * b)
Space: O(a * b)
a - length of 'a'
b - length of 'b'
"""
m = [[0 for _ in range(len(b) + 1)] for _ in range(len(a) + 1)]
for r in range(1, len(a) + 1):
for c in range(1, len(b) + 1):
if a[r - 1] == b[c - 1]:
m[r][c] = m[r - 1][c - 1] + 1
else:
m[r][c] = max(m[r][c - 1], m[r - 1][c])
return m[-1][-1]
def lcs_td(a, b):
"""
Dynamic programming, top-down
Time: O(a * b)
Space: O(a * b)
a - length of 'a'
b - length of 'b'
"""
@lru_cache(None)
def lcs(a, b):
if not a or not b:
return 0
if a[-1] == b[-1]:
return 1 + lcs(a[:-1], b[:-1])
else:
return max(lcs(a, b[:-1]), lcs(a[:-1], b))
return lcs(a, b)
|
[
"zinchenko@live.com"
] |
zinchenko@live.com
|
1270e18c86fefef7504af98a1f66c65d422ca3a5
|
bdbeca3fdc7ea3ef2f1b66aab9ea50e15cfbc553
|
/django_sample/urls.py
|
5153d54fffd528e7e5124cdc3fe449615124944d
|
[] |
no_license
|
socialcomputing/django_sample
|
d99b35b4d02ef98e319efec6ef7feee3b29211d7
|
ded880a08afdbdd4b899112a7c6d74f07d7c176f
|
refs/heads/master
| 2021-01-13T01:58:08.970181
| 2013-10-24T12:05:54
| 2013-10-24T12:05:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'english_learner.views.home', name='home'),
# url(r'^english_learner/', include('english_learner.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
|
[
"roman.prokofyev@gmail.com"
] |
roman.prokofyev@gmail.com
|
3a21cb6bea3daf9fdb83c7cc3b6c5ea806fcbf2c
|
01d92ca39cd4836aaef67e2efcf88a44671c7213
|
/code_pack_22/1_threading/locking_with_context.py
|
d2ee717504e82208c6cb39cf3f7507afc038a607
|
[] |
no_license
|
manuelpereira292/py3_bootcamp
|
247f411b80f09c46aeeba90a96e6a5d3fd329f2c
|
1988553394cb993db82c39993ed397e497bd5ae8
|
refs/heads/master
| 2022-08-20T02:25:51.265204
| 2020-05-15T22:26:27
| 2020-05-15T22:26:27
| 263,367,513
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
import threading
total = 0
lock = threading.Lock()
def update_total(amount):
"""
Updates the total by the given amount
"""
global total
with lock:
total += amount
print (total)
if __name__ == '__main__':
for i in range(10):
my_thread = threading.Thread(
target=update_total, args=(5,))
my_thread.start()
|
[
"manuelpereira292@gmail.com"
] |
manuelpereira292@gmail.com
|
9bf24903265c1bc14d6e5a9f7216a605a464b36c
|
b2f84608cc28c492430e972028fa0e178865c78c
|
/source_py2/combi/_python_toolbox/third_party/unittest2/__init__.py
|
49e2cceae502115ad7682c8ecd14fb5abac2f8b0
|
[
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
cool-RR/combi
|
54efa752403a4acb6933475102702e43de93c81d
|
9c5c143a792ffd8fb38b6470f926268c8bacbc31
|
refs/heads/master
| 2021-09-23T10:02:52.984204
| 2021-09-18T08:45:57
| 2021-09-18T08:45:57
| 25,787,956
| 24
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,548
|
py
|
"""
unittest2
unittest2 is a backport of the new features added to the unittest testing
framework in Python 2.7 and beyond. It is tested to run on Python 2.4 - 2.7.
To use unittest2 instead of unittest simply replace ``import unittest`` with
``import python_toolbox.third_party.unittest2``.
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', '__version__', 'collector']
__version__ = '1.0.1'
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
from combi._python_toolbox.third_party.unittest2.collector import collector
from combi._python_toolbox.third_party.unittest2.result import TestResult
from combi._python_toolbox.third_party.unittest2.case import (
TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure
)
from combi._python_toolbox.third_party.unittest2.suite import BaseTestSuite, TestSuite
from combi._python_toolbox.third_party.unittest2.loader import (
TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases
)
from combi._python_toolbox.third_party.unittest2.main import TestProgram, main
from combi._python_toolbox.third_party.unittest2.runner import TextTestRunner, TextTestResult
try:
from combi._python_toolbox.third_party.unittest2.signals import (
installHandler, registerResult, removeResult, removeHandler
)
except ImportError:
# Compatibility with platforms that don't have the signal module
pass
else:
__all__.extend(['installHandler', 'registerResult', 'removeResult',
'removeHandler'])
# deprecated
_TextTestResult = TextTestResult
# There are no tests here, so don't try to run anything discovered from
# introspecting the symbols (e.g. FunctionTestCase). Instead, all our
# tests come from within unittest.test.
def load_tests(loader, tests, pattern):
import os.path
# top level directory cached on loader instance
this_dir = os.path.dirname(__file__)
return loader.discover(start_dir=this_dir, pattern=pattern)
__unittest = True
def load_tests(loader, tests, pattern):
# All our tests are in test/ - the test objects found in unittest2 itself
# are base classes not intended to be executed. This load_tests intercepts
# discovery to prevent that.
import python_toolbox.third_party.unittest2.test
result = loader.suiteClass()
for path in unittest2.test.__path__:
result.addTests(loader.discover(path, pattern=pattern))
return result
|
[
"ram@rachum.com"
] |
ram@rachum.com
|
021f1314d0644e555809568baade395ac69141b5
|
0a973640f0b02d7f3cf9211fcce33221c3a50c88
|
/.history/src/easy-money_20210129090305.py
|
ef65231461d111f8f831f2b4447d7863c8308ec4
|
[] |
no_license
|
JiajunChen123/IPO_under_review_crawler
|
5468b9079950fdd11c5e3ce45af2c75ccb30323c
|
031aac915ebe350ec816c05a29b5827fde588567
|
refs/heads/main
| 2023-02-26T08:23:09.622725
| 2021-02-04T10:11:16
| 2021-02-04T10:11:16
| 332,619,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,586
|
py
|
# 东方财富网 首发申报
from datetime import datetime,timedelta
from urllib.parse import urlencode
import pandas as pd
import requests
import re
import time
base_url = 'https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
query = {'type': 'NS',
'sty' : 'NSFR',
'st' : '1',
'sr' : '-1',
'p' : '1',
'ps' : '5000',
'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt' : '1',
'rt' : '53721774'
}
def date_gen():
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text,'html.parser')
dateList = [i.text for i in soup.findAll('option')]
yield dateList
def get_eastmoneyData(dateList):
query = {'type': 'NS',
'sty' : 'NSFR',
'st' : '1',
'sr' : '-1',
'p' : '1',
'ps' : '5000',
'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt' : '1',
'rt' : '53721774'
}
main_data = []
for date in dateList:
query['fd'] = date  # use the current date from the loop, not the whole list
# start = datetime.strptime('2017-01-05','%Y-%m-%d').date()
# while start < datetime.today().date():
# query['fd'] = start
url = base_url + urlencode(query)
# yield url
# start += timedelta(days=7)
rs = requests.get(url,headers=headers)
if rs.text == '':
continue
js = rs.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(js[:-1])
main_data.extend(data)
time.sleep(2)
temp = [i.split(',') for i in main_data]
columns = ['会计师事务所','保荐代表人','保荐机构','xxx','律师事务所','日期','所属行业','板块','是否提交财务自查报告',
'注册地','类型','机构名称','签字会计师','签字律师','时间戳','简称']
df = pd.DataFrame(temp,columns=columns)
df['文件链接'] = df['时间戳'].apply(lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf")
df = df[['机构名称', '类型', '板块', '注册地', '保荐机构','保荐代表人', '律师事务所', '签字律师','会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业','日期','xxx', '时间戳', '保荐机构','文件链接']]
# df = df[df['板块'] != '创业板']
df.to_csv('C:/Users/chen/Desktop/IPO_info/eastmoney_pre_data.csv',index=False,encoding='utf-8-sig')
# for i in ['2','4']:
# query = {'type': 'NS',
# 'sty' : 'NSSH',
# 'st' : '1',
# 'sr' : '-1',
# 'p' : '1',
# 'ps' : '5000',
# 'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
# 'mkt' : i,
# 'rt' : '53723990'
# }
# url = base_url + urlencode(query)
# rss = requests.get(url,headers=headers)
# jss = rss.text.split('var KIBhynDx={pages:1,data:')[1]
# data = eval(jss[:-1])
# temp = [j.split(',') for j in data]
# columns = ['时间戳','yyy','公司代码','机构名称','详情链接','申报日期','上会日期','申购日期','上市日期','9','拟发行数量','发行前总股本','发行后总股本','13','占发行后总股本比例','当前状态','上市地点','主承销商','承销方式','发审委委员','网站','简称']
# df = pd.DataFrame(temp,columns=columns)
# df['文件链接'] = df['时间戳'].apply(lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf")
# df['详情链接'] = df['公司代码'].apply(lambda x: "data.eastmoney.com/xg/gh/detail/" + x + ".html")
# df = df[['机构名称', '当前状态', '上市地点', '拟发行数量', '申报日期','上会日期', '申购日期', '上市日期', '主承销商','承销方式', '9', '发行前总股本','发行后总股本','13','占发行后总股本比例','发审委委员','网站','公司代码','yyy','时间戳', '简称', '详情链接','文件链接']]
# df.to_csv('C:/Users/chen/Desktop/IPO_info/easymoney_data_{}_onmeeting.csv'.format(i),index=False,encoding='utf-8-sig')
from urllib.parse import urlencode
import time
import requests
from bs4 import BeautifulSoup
import pandas as pd
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
base_url = 'https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?'
zzsc_dict = {}
for date in dateList:
query = {'type': 'NS',
'sty' : 'NSSE',
'st' : '1',
'sr' : '-1',
'p' : '1',
'ps' : '500',
'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt' : '4',
'stat':'zzsc',
'fd' : date,
'rt' : '53727636'
}
url = base_url + urlencode(query)
rss = requests.get(url,headers=headers)
if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
continue
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
time.sleep(2)
zzsc = pd.DataFrame(zzsc_dict.items(),columns = ['机构名称','决定终止审查时间'])
zzsc.to_csv('C:/Users/chen/Desktop/IPO_info/zzsc.csv',encoding='utf-8-sig',index=False)
|
[
"chenjiajun.jason@outlook.com"
] |
chenjiajun.jason@outlook.com
|
304e3957bdc086d84bb32f2b58268c58ea73ca28
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Cheese Boys/cheeseboys/sprites/speech.py
|
5e1567b82b17412d9134367a27d554ca30c1da16
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:c6cecd706830ed02d31a0263b2cde423305efa564853d608ed6bd0df573421f1
size 5969
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
b611f93e5bc12f60c52864354fd9bc2e6658c7e2
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5751500831719424_1/Python/knabbers/game.py
|
381bdefdea24efb577b9752009b3a139eb774628
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,449
|
py
|
def solve(file):
f = open(file,'r')
lines = f.readlines()
nCases = int(lines[0])
outf = open("output.txt",'w')
i = 1 # line number
n = 1 # case number
while nCases > 0:
#print("case: " + str(n))
line = lines[i].replace('\n', "").split(" ")
N = int(line[0])
s = []
c = []
repl = []
repn = []
for j in range(0,N):
line = lines[i+1+j].replace('\n', "").split(" ")
s.append(line[0])
c.append(0)
repl.append('a')
repn.append(-1)
#print (s)
result = "nothing"
moves = 0
imp = False
while True:
for j in range(0,len(s)):
l = s[j][c[j]]
count = 0
while c[j] < len(s[j]) and s[j][c[j]] == l:
count += 1
c[j]+=1
repl[j] = l
repn[j] = count
#print(repl)
#print(repn)
l = repl[0]
for j in range(1,len(repl)):
if repl[j] != l:
imp = True
#print(imp)
if imp==True:
break
repn.sort()
med = repn[int((len(repn)-1)/2)]
#print(med)
for j in range(0,len(repn)):
off = repn[j]-med
if off < 0:
off = -off
moves += off
#print(moves)
#check done
notdone = False
done = False
for j in range(0,len(c)):
if c[j]==len(s[j]):
done = True
else:
notdone = True
#print(done)
#print(notdone)
if (done == True and notdone == True):
imp = True
break
if (done == True and notdone == False):
break
if (imp == True):
result = "Fegla Won";
else:
result = str(moves)
outf.write("Case #" + str(n) +": " + result + "\n")
i += N+1
nCases -= 1
n+=1
f.close()
outf.close()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
b064f1e3de40121d067b76ce1f86adb8ee3c312d
|
70410a0fd7fa9004b1d789ca165b21a6bba80b85
|
/polls/models.py
|
09cec8b62f0c474d5fc67415413ab19d3abdaebd
|
[] |
no_license
|
Sukhrobjon/django-starter-tutorial
|
a310907a132d86ae9a7d0ba757642955b0f49569
|
9b6a6f7a8d98b1b85fe24d64d3ca3b7cf12a3932
|
refs/heads/master
| 2023-04-29T12:05:05.800364
| 2019-07-14T04:17:10
| 2019-07-14T04:17:10
| 189,487,751
| 0
| 0
| null | 2023-04-21T20:37:03
| 2019-05-30T21:57:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
"""Return string representation of question text."""
return self.question_text
def was_published_recently(self):
""" Return questions are posted at least one day ago from current day"""
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
""" Return string representation of the text of the choice """
return self.choice_text
|
[
"sgolibbo@mail.ccsf.edu"
] |
sgolibbo@mail.ccsf.edu
|
7e0c8d8062192d16e4bffd7fe377b23e38eb03c8
|
9469e6459a56566ee84731b023eb18bcc14417e6
|
/powerapp_pocket/signals.py
|
7a41a308f503b8d894e44053cff82286ef5e5733
|
[] |
no_license
|
Doist/powerapp-pocket
|
b5adca6b768f98ed61299e5436018535309de60c
|
920beb7892527f8302c8ea53a2e1edf161ccae91
|
refs/heads/master
| 2021-01-25T12:07:27.533750
| 2015-06-04T23:47:33
| 2015-06-04T23:47:33
| 35,607,506
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
# -*- coding: utf-8 -*-
from logging import getLogger
from django.dispatch.dispatcher import receiver
from .apps import AppConfig
from .tasks import process_item
from powerapp.core.todoist_utils import get_personal_project
logger = getLogger(__name__)
PROJECT_NAME = 'Pocket task list'
@receiver(AppConfig.signals.todoist_task_added)
@receiver(AppConfig.signals.todoist_task_updated)
def on_task_added_edited(sender, integration=None, obj=None, **kw):
project = get_personal_project(integration, PROJECT_NAME)
if obj['project_id'] == project['id'] and not obj['checked']:
process_item.delay(integration.id, obj['content'], obj['id'])
|
[
"roman.imankulov@gmail.com"
] |
roman.imankulov@gmail.com
|
6dde344d937bbde2680824f18e8a250bc34d3438
|
7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0
|
/0601-0700/0606-Construct String from Binary Tree/0606-Construct String from Binary Tree.py
|
671610319831f8a2afe833ffb4c97b48a252abe7
|
[
"MIT"
] |
permissive
|
jiadaizhao/LeetCode
|
be31bd0db50cc6835d9c9eff8e0175747098afc6
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
refs/heads/master
| 2021-11-05T04:38:47.252590
| 2021-10-31T09:54:53
| 2021-10-31T09:54:53
| 99,655,604
| 52
| 28
|
MIT
| 2020-10-02T12:47:47
| 2017-08-08T05:57:26
|
C++
|
UTF-8
|
Python
| false
| false
| 467
|
py
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def tree2str(self, t: TreeNode) -> str:
if t is None:
return ''
left = '({})'.format(self.tree2str(t.left)) if t.left or t.right else ''
right = '({})'.format(self.tree2str(t.right)) if t.right else ''
return '{}{}{}'.format(str(t.val), left, right)
|
[
"jiadaizhao@gmail.com"
] |
jiadaizhao@gmail.com
|
3e1f295e6825076419cc4b9a199f74184b8640d6
|
b6cdef81a572e02c0cbd795a8fb6bbc74f99d627
|
/market/urls.py
|
638b0bfb0222ca32b774f2061af49d1e112a6482
|
[
"MIT"
] |
permissive
|
sodatta/Stocks-Screener
|
4afbdd68c1e80dafece50e3e0b967af35dd83c07
|
0b8da91da40b715beaf3a79163b1bdf6ea3be3b9
|
refs/heads/master
| 2023-07-27T13:14:47.798403
| 2021-05-03T20:04:51
| 2021-05-03T20:04:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
from django.urls import path
from market.views import *
app_name = "market"
urlpatterns = [
path('list', list_stocks, name='list_stocks'),
path('create', create_stock, name='create_stock'),
path('update/<int:pk>', update_stock, name='update_stock'),
path('delete/<int:pk>', delete_stock, name='delete_stock'),
path('<int:pk>', view_stock, name='view_stock'),
path('stock_weekly/<int:pk>', view_stock_weekly, name='view_stock_weekly'),
path('stock_monthly/<int:pk>', view_stock_monthly, name='view_stock_monthly'),
path('sectors', sectors, name='sectors'),
]
|
[
"mohammedshokr2014@gmail.com"
] |
mohammedshokr2014@gmail.com
|
0235ff898c02b1ea6cffade6cdbd16bf275f62db
|
054ddbc1fa0e1b1d0a999bbe877591e942aa0f12
|
/python/02-python高级-2/07-调试程序.py
|
fe40b41d92b7c298c62ebdd30a2a6a9627d046fd
|
[] |
no_license
|
VinceBy/newone
|
66c8cf77159344c7d2ec196233d58a412e1c3073
|
ffc6a0d9ccbdb3f66c4995834f01e3bc2df0415d
|
refs/heads/master
| 2022-02-22T23:00:21.720497
| 2019-07-09T08:47:25
| 2019-07-09T08:47:25
| 195,958,240
| 0
| 0
| null | 2022-02-12T09:19:32
| 2019-07-09T07:42:20
|
Python
|
UTF-8
|
Python
| false
| false
| 337
|
py
|
#coding=utf-8
import pdb
def add3Nums(a1,a2,a3):
result = a1+a2+a3
return result
def get3NumsAvarage(s1,s2):
s3 = s1 + s2 + s1
result = 0
result = add3Nums(s1,s2,s3)/3
return result
if __name__ == '__main__':
a = 11
# pdb.set_trace()
b = 12
final = get3NumsAvarage(a,b)
print final
|
[
"1260787968@qq.com"
] |
1260787968@qq.com
|
1dfa288faab5484bb37471149f7533c066d2365c
|
b37e40e4dc4ad3fc9c317b68284fb86955a64bf5
|
/a1/venv/lib/python3.6/site-packages/probability/distributions/functions/discrete_function_1d.py
|
604d6e50deadbe409568e07b3f193e7efeeefe6d
|
[] |
no_license
|
bateikoEd/Text-mining
|
89c57dec1b6ffcae82a49fc6a23acab89d7ca60e
|
ccd3f50b694b90269450e5304c504fac5a117f59
|
refs/heads/master
| 2021-01-14T18:02:35.642837
| 2021-01-13T14:04:56
| 2021-01-13T14:04:56
| 242,695,063
| 0
| 1
| null | 2020-05-20T09:46:22
| 2020-02-24T09:28:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
from matplotlib.axes import Axes
from pandas import Series
from scipy.stats import rv_discrete
from typing import Iterable, overload
from probability.distributions.mixins.plottable_mixin import PlottableMixin
from probability.plots import new_axes
class DiscreteFunction1d(object):
def __init__(self, distribution: rv_discrete, method_name: str, name: str,
parent: PlottableMixin):
self._distribution = distribution
self._method_name: str = method_name
self._name: str = name
self._method = getattr(distribution, method_name)
self._parent: PlottableMixin = parent
@overload
def at(self, k: int) -> int:
pass
@overload
def at(self, k: Iterable[int]) -> Series:
pass
def at(self, k):
"""
Evaluation of the function for each value of k.
"""
if isinstance(k, int):
return self._method(k)
elif isinstance(k, Iterable):
return Series(index=k, data=self._method(k), name=self._name)
def plot(self, k: Iterable[int], color: str = 'C0', kind: str = 'bar', ax: Axes = None,
**kwargs) -> Axes:
"""
Plot the function.
:param k: Range of values of k to plot p(k) over.
:param color: Optional color for the series.
:param kind: Kind of plot e.g. 'bar', 'line'.
:param ax: Optional matplotlib axes to plot on.
:param kwargs: Additional arguments for the matplotlib plot function.
"""
data: Series = self.at(k)
ax = ax or new_axes()
# special kwargs
vlines = None
if 'vlines' in kwargs.keys():
vlines = kwargs.pop('vlines')
if self._name == 'PMF':
data.plot(kind=kind, label=self._parent.label, color=color,
ax=ax, **kwargs)
elif self._name == 'CDF':
data.plot(kind='line', label=self._parent.label, color=color,
drawstyle='steps-post', ax=ax,
**kwargs)
else:
raise ValueError('plot not implemented for {}'.format(self._name))
if vlines:
ax.vlines(x=k, ymin=0, ymax=data.values, color=color)
ax.set_xlabel(self._parent.x_label)
ax.set_ylabel(self._name)
return ax
|
[
"bateiko0713@gmail.com"
] |
bateiko0713@gmail.com
|
28d07593900fbe9c33f4f5c559e0ca97f30e0ba5
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/DatadigitalFincloudFinsaasDigitalrmbSendSyncResponse.py
|
230307b7cea48730702e6bdd98d81bd253790e93
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class DatadigitalFincloudFinsaasDigitalrmbSendSyncResponse(AlipayResponse):
def __init__(self):
super(DatadigitalFincloudFinsaasDigitalrmbSendSyncResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(DatadigitalFincloudFinsaasDigitalrmbSendSyncResponse, self).parse_response_content(response_content)
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
7158e87783927fb9354ce9b8d81f6f0ec4788c5e
|
2fcc860bcc2c76f91e899de880e4c6d8a4b1abfb
|
/spider/users/tests/test_forms.py
|
caca2bfdb6e0549bfa0f222376dd815227564165
|
[
"MIT"
] |
permissive
|
dcopm999/spider
|
54de2e10def4db662ce15f4e4fd3682927f2999b
|
889d2ee6d3d5f54ccb1f22a645244bc7395bb202
|
refs/heads/master
| 2022-12-24T23:15:32.343792
| 2020-10-02T14:33:45
| 2020-10-02T14:33:45
| 300,559,008
| 0
| 0
|
MIT
| 2020-10-02T14:33:46
| 2020-10-02T09:03:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
import pytest
from spider.users.forms import UserCreationForm
from spider.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
|
[
"dcopm999@gmail.com"
] |
dcopm999@gmail.com
|
f3ccf27cfb27169b20dc78aba448e82e482efb89
|
a7371301cea503bacc82619fde651aac85f2738b
|
/mccabe_group/scripts/test/modify_water_xml.py
|
171abb566e3b394533209315a25f7a6a686931f5
|
[] |
no_license
|
PTC-CMC/McCabeGroup
|
b9931a022d0af52aeb2891b548c6294ebc0ab7b7
|
19e0578e91fc82cc24e974b9cc2e3b6a6722d36b
|
refs/heads/master
| 2021-06-11T05:05:53.034326
| 2020-03-24T01:40:05
| 2020-03-24T01:40:05
| 128,414,852
| 1
| 3
| null | 2020-03-06T17:46:44
| 2018-04-06T15:47:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,192
|
py
|
import pdb
from lxml import etree
import numpy as np
#KJ_TO_KCAL = 0.239
#NM_TO_A = 10
# Units from the chodera group were in agreement with foyer/openmm units
# leave everything in terms of kj and nm, but I'm too lazy to modify the
# functions below, so i'm leaving the conversions here
KJ_TO_KCAL = 1
NM_TO_A = 1
# This code is supposed to modify the charmm36.xml code from the Chodera group
# In a format suitable for Foyer
# Note that this xml has some additional atomtypes specified from Tim's parameters
#tree = etree.parse('charmm36_nowaters.xml')
tree = etree.parse('waters_ions_default.xml')
root = tree.getroot()
# All the relevant XML elements
atomTypes = root.findall('AtomTypes')[0]
harmonicBondForce = root.findall('HarmonicBondForce')[0]
harmonicAngleForce = root.findall('HarmonicAngleForce')[0]
nonbondedForce = root.findall('NonbondedForce')[0]
lennardJonesForce = root.findall("LennardJonesForce")[0]
new_root = etree.Element('ForceField')
# Atomtypes
for type_element in atomTypes:
# Need to add underscores for all elements and definitions
# This is to avoid having to use SMARTS to atomtype a system
# Similar to CG methodology for forcefields
if type_element.attrib['name'] != 'ZN':
type_element.attrib['def'] = "[_{}]".format(type_element.attrib['name'])
type_element.attrib['element'] = "_{}".format(type_element.attrib['name'])
else:
atomTypes.remove(type_element)
new_root.append(atomTypes)
# Bonds
for bond_element in harmonicBondForce:
# Do unit conversions for them all
bond_element.attrib['k'] = "{:15.5f}".format( KJ_TO_KCAL * (NM_TO_A **-2) *\
float(bond_element.attrib['k'])).strip()
bond_element.attrib['length']="{:7.4f}".format(NM_TO_A * \
float(bond_element.attrib['length'])).strip()
new_root.append(harmonicBondForce)
# Angles
for angle_element in harmonicAngleForce:
angle_element.attrib['k'] = "{:15.5f}".format(KJ_TO_KCAL * \
float(angle_element.attrib['k'])).strip()
angle_element.attrib['angle'] = "{:15.5f}".format(\
float(angle_element.attrib['angle'])).strip()
new_root.append(harmonicAngleForce)
# LJ force terms move into the nonbondedforce terms
for nonbond_element in nonbondedForce:
# Look through each nonbonded force
if nonbond_element.tag != "UseAttributeFromResidue" and \
nonbond_element.attrib['type'] != "ZN":
for lj_element in lennardJonesForce:
# Find the lennard jones force with the associated type
if nonbond_element.tag=="Atom" and lj_element.tag=="Atom":
if nonbond_element.attrib['type'] == lj_element.attrib['type']:
nonbond_element.attrib['sigma'] = lj_element.attrib['sigma']
nonbond_element.attrib['epsilon'] = lj_element.attrib['epsilon']
nonbond_element.attrib['charge'] = "0.0"
else:
nonbondedForce.remove(nonbond_element)
new_root.append(nonbondedForce)
# Construct tree and save
new_tree = etree.ElementTree(new_root)
new_tree.write("foyer_water.xml", pretty_print=True)
|
[
"ahy3nz@virginia.edu"
] |
ahy3nz@virginia.edu
|
677aa3639fe10a720b5b170c8406f7544d2a8ab4
|
64832dd1e64cfc6de83c5abf099461eda50f648c
|
/major/migrations/0006_auto_20190212_1502.py
|
9a52e37c1930c929474d4d021feb9c920e6e2d76
|
[] |
no_license
|
showzvan/mysite
|
d7e79e16eb8c2d3598d00d1a96fa0b1940765913
|
32826c83915a2f95440c04ed20a7800d2c343ac1
|
refs/heads/master
| 2020-04-27T17:05:19.143296
| 2019-03-08T09:09:45
| 2019-03-08T09:09:45
| 174,504,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
# Generated by Django 2.1.5 on 2019-02-12 15:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('major', '0005_auto_20190212_1457'),
]
operations = [
migrations.AlterModelOptions(
name='majors',
options={'ordering': ['major_name'], 'verbose_name': '专业信息', 'verbose_name_plural': '专业信息'},
),
]
|
[
"1183005957@qq.com"
] |
1183005957@qq.com
|
3d9532f5a3b3bbc62b89f781bbac4a6bf43b70d0
|
4b0e7c654249cc098ed2230f16e5a19a9329ca4b
|
/Day3/whileExample.py
|
3d9dde6bc49db6e3f2a7ceffb0e1a7139ec1ea48
|
[] |
no_license
|
aravindanath/rio_de_janeiro
|
5a1d41e4ff4201f2b6be2f81437376c01ae30238
|
43a6c0f704ff5d55256705da13057d1532510f16
|
refs/heads/master
| 2020-07-24T13:40:55.655457
| 2019-12-02T02:47:16
| 2019-12-02T02:47:16
| 207,945,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
empty=[]
tup= (1,2,3,55.3,3)
x = 0
while x<=10:
print(x)
empty.append(x)
x=x+3
# break
else:
print("Out of loop!..")
print(empty)
empty.sort(reverse=True)
print(empty)
empty.pop()
print(empty)
|
[
"aravindanath86@gmail.com"
] |
aravindanath86@gmail.com
|
05f77bf55f362c947875830112ac1b7baf13a757
|
118f14634ea34b6301f076cc0137272e2a683322
|
/store/migrations/0002_store.py
|
5faf770281abebb25befe598b0d8fbf90754fb2f
|
[
"MIT"
] |
permissive
|
vollov/ecomstore
|
10fce347c100fcbaa86687a5738dcf57dba05fc9
|
ba9d7e6d74b29de2e3e9411a481248afdc172a40
|
refs/heads/master
| 2021-01-09T21:54:21.976982
| 2015-10-28T20:31:07
| 2015-10-28T20:31:07
| 45,086,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Store',
fields=[
('id', models.CharField(default=uuid.uuid4, max_length=64, serialize=False, verbose_name='Activation key', primary_key=True)),
('name', models.CharField(max_length=60, unique=True, null=True)),
('code', models.CharField(max_length=10, unique=True, null=True)),
('currency_rate', models.DecimalField(default=5.0, max_digits=9, decimal_places=2, blank=True)),
('tax_rate', models.DecimalField(default=0.13, max_digits=9, decimal_places=2, blank=True)),
('agent_share', models.DecimalField(default=0.4, max_digits=9, decimal_places=2, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
],
),
]
|
[
"dike.zhang@gmail.com"
] |
dike.zhang@gmail.com
|
a826b762da02774c07c94c4213076ad10aa14a12
|
ee9a391b2023ec3d02dfa44a00e9b219a56c1890
|
/exercises/timers_ex1.py
|
f93d350c0d02c0795a340ef5fbc0a32cbaea678c
|
[
"Unlicense"
] |
permissive
|
kotalbert/interprog1
|
b12b5f281bd93440818275b6da6a987e206e1cec
|
a49ecef14453839518f1e8a6551fb3af493b1c2c
|
refs/heads/master
| 2021-05-29T01:51:13.758768
| 2015-03-12T15:38:34
| 2015-03-12T15:38:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
"""
An Introduction to Interactive Programming in Python (Part 1)
Practice exercises for timers # 1.
Counter to console.
"""
# Counter ticks
###################################################
# Student should add code where relevant to the following.
import simpleguitk as simplegui
counter = 0
# Timer handler
def tick():
global counter
print counter
counter += 1
# create timer
timer = simplegui.create_timer(1000, tick)
frame = simplegui.create_frame("Ticks.", 200, 200)
timer.start()
frame.start()
|
[
"pawel@daniluk.waw.pl"
] |
pawel@daniluk.waw.pl
|
85ed587604b914ef8a8d2922a8737395eb48e553
|
946ff3fa181aa5ebb3e4f8d4bb7c15b6f6fe63a9
|
/tests/algorithms/conftest.py
|
5686332d238c67925b0c84ee05c1ed34a4376821
|
[
"BSD-3-Clause"
] |
permissive
|
VIDA-NYU/openclean-metanome
|
9908e725d482fab12903e9e87307c5ddf06590c8
|
37948eb25142ed4ba884fc07bfe0cad5666769e8
|
refs/heads/master
| 2023-06-04T15:42:02.600499
| 2021-06-16T08:38:54
| 2021-06-16T08:38:54
| 294,754,491
| 1
| 0
|
BSD-3-Clause
| 2021-06-16T08:38:54
| 2020-09-11T17:02:45
|
Python
|
UTF-8
|
Python
| false
| false
| 641
|
py
|
# This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
"""Fixtures for Metanome algorithm unit tests."""
import pandas as pd
import pytest
from openclean.data.types import Column
@pytest.fixture
def dataset():
"""Simple pandas data frame with one row and three columns."""
return pd.DataFrame(
data=[[1, 2, 3]],
columns=[
Column(colid=1, name='A'),
Column(colid=2, name='B'),
Column(colid=3, name='C')
]
)
|
[
"heiko.muller@gmail.com"
] |
heiko.muller@gmail.com
|
f672cb05e879429500f154f105cba25f6049575f
|
87e6c47bf745f0b00de2dd9bfe4d4c202cd25def
|
/projects/vcf/__main__.py
|
6d2f55cc310ca95d19a815a07c0850ab7de4803d
|
[
"Apache-2.0"
] |
permissive
|
Scsabiii/vcf-automation
|
16b32fabf63b63022bcf95f12d22e62d11bd5478
|
b76bba2b7d490bc7c1581c9f8d75c4052baaa4e9
|
refs/heads/master
| 2023-06-11T20:36:50.321149
| 2021-07-07T08:50:40
| 2021-07-07T08:50:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
"""An OpenStack Python Pulumi program"""
import pulumi
from pulumi_openstack import Provider
from vcf import ManagementStack, WorkloadStack
# stack
config = pulumi.Config()
stack_name = pulumi.get_stack()
stack_type = config.require("stackType")
###################################################################################
# ccadmin/cloud_admin and ccadmin/master provider
###################################################################################
openstack_config = pulumi.Config("openstack")
auth_url = openstack_config.require("authUrl")
region = openstack_config.require("region")
user_name = openstack_config.require("userName")
password = openstack_config.require_secret("password")
provider_cloud_admin = Provider(
"cloud_admin",
user_name=user_name,
password=password,
auth_url=auth_url,
insecure=True,
project_domain_name="ccadmin",
user_domain_name="ccadmin",
tenant_name="cloud_admin",
)
provider_ccadmin_master = Provider(
"ccadmin_master",
user_name=user_name,
password=password,
auth_url=auth_url,
insecure=True,
project_domain_name="ccadmin",
user_domain_name="ccadmin",
tenant_name="master",
)
###################################################################################
# provision
###################################################################################
if stack_type == "management":
ms = ManagementStack(provider_cloud_admin, provider_ccadmin_master)
ms.provision()
exit(0)
if stack_type == "workload":
ws = WorkloadStack(provider_cloud_admin, provider_ccadmin_master)
ws.provision()
exit(0)
|
[
"chuan137@gmail.com"
] |
chuan137@gmail.com
|
043e2c0866d46c3ddd37eb76fe44483a7ca5e30b
|
aa4024b6a846d2f6032a9b79a89d2e29b67d0e49
|
/UMLRT2Kiltera_MM/StateMachineElement.py
|
9e700e1b1b320c32953b3d9ae77e4000af6b9adb
|
[
"MIT"
] |
permissive
|
levilucio/SyVOLT
|
41311743d23fdb0b569300df464709c4954b8300
|
0f88827a653f2e9d3bb7b839a5253e74d48379dc
|
refs/heads/master
| 2023-08-11T22:14:01.998341
| 2023-07-21T13:33:36
| 2023-07-21T13:33:36
| 36,246,850
| 3
| 2
|
MIT
| 2023-07-21T13:33:39
| 2015-05-25T18:15:26
|
Python
|
UTF-8
|
Python
| false
| false
| 3,875
|
py
|
"""
__StateMachineElement.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sat Aug 30 18:23:40 2014
_____________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3String import *
from graph_StateMachineElement import *
class StateMachineElement(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['NamedElement', 'MetaModelElement_S']
self.graphClass_ = graph_StateMachineElement
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.cardinality=ATOM3String('1', 20)
self.cardinality=ATOM3String('1', 20)
self.cardinality=ATOM3String('1', 20)
self.classtype=ATOM3String('t_', 20)
self.classtype=ATOM3String('t_', 20)
self.classtype=ATOM3String('t_', 20)
self.name=ATOM3String('s_', 20)
self.name=ATOM3String('s_', 20)
self.name=ATOM3String('s_', 20)
self.generatedAttributes = {'cardinality': ('ATOM3String', ),
'cardinality': ('ATOM3String', ),
'cardinality': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'name': ('ATOM3String', ),
'name': ('ATOM3String', ),
'name': ('ATOM3String', ) }
self.realOrder = ['cardinality','cardinality','cardinality','classtype','classtype','classtype','name','name','name']
self.directEditing = [1,1,1,1,1,1,1,1,1]
def clone(self):
cloneObject = StateMachineElement( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
|
[
"levi"
] |
levi
|
385b947796cd7f565fd783fac4dea3070265ab44
|
f9609ff4f2bbea570f3cb4cd3f9fe6b3595d4145
|
/commands/cmd_pick.py
|
bc62e4f118aaf1f658d5fc3efcd20ce0ae76c432
|
[] |
no_license
|
VladThePaler/PythonWars-1996
|
2628bd2fb302faacc91688ad942799537c974f50
|
d8fbc27d90f1deb9755c0ad0e1cf2c110f406e28
|
refs/heads/master
| 2023-05-08T19:51:28.586440
| 2021-05-14T04:19:17
| 2021-05-14T04:19:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,025
|
py
|
# PythonWars copyright © 2020, 2021 by Paul Penner. All rights reserved.
# In order to use this codebase you must comply with all licenses.
#
# Original Diku Mud copyright © 1990, 1991 by Sebastian Hammer,
# Michael Seifert, Hans Henrik Stærfeldt, Tom Madsen, and Katja Nyboe.
#
# Merc Diku Mud improvements copyright © 1992, 1993 by Michael
# Chastain, Michael Quan, and Mitchell Tse.
#
# GodWars improvements copyright © 1995, 1996 by Richard Woolcock.
#
# ROM 2.4 is copyright 1993-1998 Russ Taylor. ROM has been brought to
# you by the ROM consortium: Russ Taylor (rtaylor@hypercube.org),
# Gabrielle Taylor (gtaylor@hypercube.org), and Brian Moore (zump@rom.org).
#
# Ported to Python by Davion of MudBytes.net using Miniboa
# (https://code.google.com/p/miniboa/).
#
# In order to use any part of this Merc Diku Mud, you must comply with
# both the original Diku license in 'license.doc' as well the Merc
# license in 'license.txt'. In particular, you may not remove either of
# these copyright notices.
#
# Much time and thought has gone into this software, and you are
# benefiting. We hope that you share your changes too. What goes
# around, comes around.
import const
import game_utils
import handler_game
import handler_room
import instance
import interp
import merc
import state_checks
def cmd_pick(ch, argument):
argument, arg = game_utils.read_word(argument)
if not arg:
ch.send("Pick what?\n")
return
ch.wait_state(const.skill_table["pick lock"].beats)
# look for guards
for gch_id in ch.in_room.people:
gch = instance.characters[gch_id]
if gch.is_npc() and gch.is_awake() and ch.level + 5 < gch.level:
handler_game.act("$N is standing too close to the lock.", ch, None, gch, merc.TO_CHAR)
return
if not ch.is_npc() and game_utils.number_percent() > ch.learned["pick lock"]:
ch.send("You failed.\n")
return
item = ch.get_item_here(arg)
if item:
# 'pick object'
if item.item_type != merc.ITEM_CONTAINER:
ch.send("That's not a container.\n")
return
if not state_checks.is_set(item.value[1], merc.CONT_CLOSED):
ch.send("It's not closed.\n")
return
        if item.value[2] < 0:
ch.send("It can't be unlocked.\n")
return
if not state_checks.is_set(item.value[1], merc.CONT_LOCKED):
ch.send("It's already unlocked.\n")
return
if state_checks.is_set(item.value[1], merc.CONT_PICKPROOF):
ch.send("You failed.\n")
return
state_checks.remove_bit(item.value[1], merc.CONT_LOCKED)
ch.send("*Click*\n")
handler_game.act("$n picks $p.", ch, item, None, merc.TO_ROOM)
return
door = handler_room.find_door(ch, arg)
if door >= 0:
# 'pick door'
pexit = ch.in_room.exit[door]
if not pexit.exit_info.is_set(merc.EX_CLOSED):
ch.send("It's not closed.\n")
return
if pexit.key < 0:
ch.send("It can't be picked.\n")
return
if not pexit.exit_info.is_set(merc.EX_LOCKED):
ch.send("It's already unlocked.\n")
return
if pexit.exit_info.is_set(merc.EX_PICKPROOF):
ch.send("You failed.\n")
return
pexit.exit_info.rem_bit(merc.EX_LOCKED)
ch.send("*Click*\n")
handler_game.act("$n picks the $d.", ch, None, pexit.keyword, merc.TO_ROOM)
# pick the other side
to_room = instance.rooms[pexit.to_room]
if to_room and to_room.exit[merc.rev_dir[door]] != 0 and to_room.exit[merc.rev_dir[door]].to_room == ch.in_room:
to_room.exit[merc.rev_dir[door]].exit_info.rem_bit(merc.EX_LOCKED)
interp.register_command(
interp.CmdType(
name="pick",
cmd_fun=cmd_pick,
position=merc.POS_SITTING, level=0,
log=merc.LOG_NORMAL, show=True,
default_arg=""
)
)
|
[
"jindrak@gmail.com"
] |
jindrak@gmail.com
|
74bf53139b3f8bfc328dc22a1fe0d0984f23aa1e
|
48983b88ebd7a81bfeba7abd6f45d6462adc0385
|
/HakerRank/algorithms/warmup/staircase.py
|
f66d8b95f9711713c81d74b72fc3f7ae9288baed
|
[] |
no_license
|
lozdan/oj
|
c6366f450bb6fed5afbaa5573c7091adffb4fa4f
|
79007879c5a3976da1e4713947312508adef2e89
|
refs/heads/master
| 2018-09-24T01:29:49.447076
| 2018-06-19T14:33:37
| 2018-06-19T14:33:37
| 109,335,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
# author: Daniel Lozano
# source: HackerRank ( https://www.hackerrank.com )
# problem name: Algorithms: Warmup: Staircase
# problem url: https://www.hackerrank.com/challenges/staircase/problem
# date: 7/10/2017
n = int(input())
for i in range(n):
print(" " * (n-1-i) + '#' * (i+1))
|
[
"lozanodaniel02@gmail.com"
] |
lozanodaniel02@gmail.com
|
9ac452966e223226c3e1db9ec7adaaf16dfac7a3
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_34/108.py
|
8e333fdaa30e655bb563ae427b87024a7b16ab27
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
#!/usr/bin/env python
def main():
dict = [];
fin = open("A-large.in","r");
fout = open("output.out","w");
st = [];
st = (fin.readline().split());
L = int(st[0]);
# print L;
D = int(st[1]);
N = int(st[2]);
for i in range(D):
dict.append(fin.readline());
flag = 0;
for i in range(N):
str = fin.readline()[:-1];
word = [];
# print str.__len__();
for j in range(str.__len__()):
if str[j] == '(':
flag = 1;
word.append(set());
elif str[j] == ')':
flag = 0;
else:
if flag == 0:
word.append(set(str[j]));
else:
word[word.__len__()-1].add(str[j]);
# print word.__len__();
if word.__len__() != L:
fout.write("Case #{0}: 0\n".format(i+1));
continue;
ans = 0;
for d in range(D):
success = 1;
for j in range(L):
if dict[d][j] not in word[j]:
success = 0;
break;
if success == 1:
ans += 1;
fout.write("Case #{0}: {1}\n".format(i+1, ans));
if __name__ == '__main__':
main();
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
c79ad8ea925dbcbafa6913dc553dcbc36d642ec6
|
8ac3fe3d861a222210912a02effea2110456d052
|
/django_for_beginners/project_5_newspaper_app/project_5_newspaper_app/wsgi.py
|
cca475eb69cb94b0610ce991e98ef571d9d83dc4
|
[
"MIT"
] |
permissive
|
rednafi/django-unchained
|
40446960f52f0c905a6ba3e318154ca11a31188b
|
0f71c8d056699496d4af3ab049f9b2f9d057486b
|
refs/heads/master
| 2022-12-10T10:11:52.906880
| 2020-09-01T17:43:58
| 2020-09-01T17:43:58
| 282,356,752
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
"""
WSGI config for project_5_newspaper_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_5_newspaper_app.settings")
application = get_wsgi_application()
|
[
"redowan.nafi@gmail.com"
] |
redowan.nafi@gmail.com
|
bea4cd0cbd5c164f7b157b32e2a4113a59ad3e38
|
228ebc9fb20f25dd3ed2a6959aac41fd31314e64
|
/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py
|
bd52a0e808e5760a15e7a8f060d804b2c5a3c921
|
[
"Apache-2.0"
] |
permissive
|
orionnye/python-aiplatform
|
746e3df0c75025582af38223829faeb2656dc653
|
e3ea683bf754832340853a15bdb0a0662500a70f
|
refs/heads/main
| 2023-08-03T06:14:50.689185
| 2021-09-24T03:24:14
| 2021-09-24T03:24:14
| 410,091,957
| 1
| 0
|
Apache-2.0
| 2021-09-24T20:21:01
| 2021-09-24T20:21:00
| null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition",
manifest={"AutoMlTextClassification", "AutoMlTextClassificationInputs",},
)
class AutoMlTextClassification(proto.Message):
r"""A TrainingJob that trains and uploads an AutoML Text
Classification Model.
Attributes:
inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextClassificationInputs):
The input parameters of this TrainingJob.
"""
inputs = proto.Field(
proto.MESSAGE, number=1, message="AutoMlTextClassificationInputs",
)
class AutoMlTextClassificationInputs(proto.Message):
r"""
Attributes:
multi_label (bool):
"""
multi_label = proto.Field(proto.BOOL, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
orionnye.noreply@github.com
|
c00fd188b7a82678a3bc54928e908cf3ac38606d
|
0fa3ad9a3d14c4b7a6cb44833795449d761b3ffd
|
/day13_all/day13/exercise01.py
|
3be55a1b970b4e3020ba5ee3075394ff7c75dee3
|
[] |
no_license
|
dalaAM/month-01
|
3426f08237a895bd9cfac029117c70b50ffcc013
|
e4b4575ab31c2a2962e7c476166b4c3fbf253eab
|
refs/heads/master
| 2022-11-22T23:49:43.037014
| 2020-07-24T07:37:35
| 2020-07-24T07:37:35
| 282,154,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
"""
Override the __str__ method of the classes below to practice method overriding.
"""
class Employee:
def __init__(self, eid, did, name, money):
self.eid = eid
self.did = did
self.name = name
self.money = money
def __str__(self):
return f"{self.name}的员工编号是{self.eid},部门编号是{self.did},月薪是{self.money}"
class Department:
def __init__(self, did, title):
self.did = did
self.title = title
def __str__(self):
return f"{self.title}的编号是{self.did},"
class Skill:
def __init__(self, name="", atk_rate=0, cost_sp=0, duration=0):
self.name = name
self.atk_rate = atk_rate
self.cost_sp = cost_sp
self.duration = duration
def __str__(self):
return f"{self.name}的攻击率{self.atk_rate}需要sp{self.cost_sp},持续时间{self.duration}"
e01 = Employee(1001, 9002, "师父", 60000)
print(e01) # e01.__str__()
d01 = Department(9001, "教学部")
print(d01)
s01 = Skill("乾坤大挪移", 1, -10)
print(s01)
|
[
"1105504468@qq.com"
] |
1105504468@qq.com
|
240fb61b081a081b9f3c81c6e1a9ad742e59c0bf
|
1bfad01139237049eded6c42981ee9b4c09bb6de
|
/RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/bgp/ipv4multicast.py
|
2baf3dbdd4ebc6ddbc3f29808a704fecd9fbf9e5
|
[
"MIT"
] |
permissive
|
kakkotetsu/IxNetwork
|
3a395c2b4de1488994a0cfe51bca36d21e4368a5
|
f9fb614b51bb8988af035967991ad36702933274
|
refs/heads/master
| 2020-04-22T09:46:37.408010
| 2019-02-07T18:12:20
| 2019-02-07T18:12:20
| 170,284,084
| 0
| 0
|
MIT
| 2019-02-12T08:51:02
| 2019-02-12T08:51:01
| null |
UTF-8
|
Python
| false
| false
| 7,158
|
py
|
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Ipv4Multicast(Base):
"""The Ipv4Multicast class encapsulates a system managed ipv4Multicast node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the Ipv4Multicast property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
"""
_SDM_NAME = 'ipv4Multicast'
def __init__(self, parent):
super(Ipv4Multicast, self).__init__(parent)
@property
def AsPath(self):
"""Indicates the local IP address of the BGP router.
Returns:
str
"""
return self._get_attribute('asPath')
@property
def BlockOffset(self):
"""The label block offset (VBO) is the value used to help define this specific label block uniquely as a subset of all of the possible labels.
Returns:
number
"""
return self._get_attribute('blockOffset')
@property
def BlockSize(self):
"""The size of the label block, in bytes.
Returns:
number
"""
return self._get_attribute('blockSize')
@property
def ControlWordEnabled(self):
"""If true, the route label uses a control word, as part of the extended community information. (One of the control flags.)
Returns:
bool
"""
return self._get_attribute('controlWordEnabled')
@property
def IpPrefix(self):
"""The route IP prefix.
Returns:
str
"""
return self._get_attribute('ipPrefix')
@property
def LabelBase(self):
"""The first label to be assigned to the FEC.
Returns:
number
"""
return self._get_attribute('labelBase')
@property
def LocalPreference(self):
"""Indicates the value of the local preference attribute.
Returns:
number
"""
return self._get_attribute('localPreference')
@property
def MaxLabel(self):
"""The last label to use.
Returns:
number
"""
return self._get_attribute('maxLabel')
@property
def MultiExitDiscriminator(self):
"""A metric field of the route file.
Returns:
number
"""
return self._get_attribute('multiExitDiscriminator')
@property
def Neighbor(self):
"""The descriptive identifier for the BGP neighbor.
Returns:
str
"""
return self._get_attribute('neighbor')
@property
def NextHop(self):
"""A 4-octet IP address which indicates the next hop.
Returns:
str
"""
return self._get_attribute('nextHop')
@property
def OriginType(self):
"""An indication of where the route entry originated.
Returns:
str
"""
return self._get_attribute('originType')
@property
def PrefixLength(self):
"""The length of the route IP prefix, in bytes.
Returns:
number
"""
return self._get_attribute('prefixLength')
@property
def RouteDistinguisher(self):
"""The route distinguisher for the route, for use with IPv4 and IPv6 MPLS VPN address types.
Returns:
str
"""
return self._get_attribute('routeDistinguisher')
@property
def SeqDeliveryEnabled(self):
"""Indicates if sequential delivery is enabled.
Returns:
bool
"""
return self._get_attribute('seqDeliveryEnabled')
@property
def SiteId(self):
"""The site ID.
Returns:
number
"""
return self._get_attribute('siteId')
def find(self, AsPath=None, BlockOffset=None, BlockSize=None, ControlWordEnabled=None, IpPrefix=None, LabelBase=None, LocalPreference=None, MaxLabel=None, MultiExitDiscriminator=None, Neighbor=None, NextHop=None, OriginType=None, PrefixLength=None, RouteDistinguisher=None, SeqDeliveryEnabled=None, SiteId=None):
"""Finds and retrieves ipv4Multicast data from the server.
All named parameters support regex and can be used to selectively retrieve ipv4Multicast data from the server.
By default the find method takes no parameters and will retrieve all ipv4Multicast data from the server.
Args:
AsPath (str): Indicates the local IP address of the BGP router.
BlockOffset (number): The label block offset (VBO) is the value used to help define this specific label block uniquely as a subset of all of the possible labels.
BlockSize (number): The size of the label block, in bytes.
ControlWordEnabled (bool): If true, the route label uses a control word, as part of the extended community information. (One of the control flags.)
IpPrefix (str): The route IP prefix.
LabelBase (number): The first label to be assigned to the FEC.
LocalPreference (number): Indicates the value of the local preference attribute.
MaxLabel (number): The last label to use.
MultiExitDiscriminator (number): A metric field of the route file.
Neighbor (str): The descriptive identifier for the BGP neighbor.
NextHop (str): A 4-octet IP address which indicates the next hop.
OriginType (str): An indication of where the route entry originated.
PrefixLength (number): The length of the route IP prefix, in bytes.
RouteDistinguisher (str): The route distinguisher for the route, for use with IPv4 and IPv6 MPLS VPN address types.
SeqDeliveryEnabled (bool): Indicates if sequential delivery is enabled.
SiteId (number): The site ID.
Returns:
self: This instance with matching ipv4Multicast data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of ipv4Multicast data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the ipv4Multicast data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
|
[
"hubert.gee@keysight.com"
] |
hubert.gee@keysight.com
|
4d10320d891c2f97f5555759930874aabd3c98d3
|
0c42cb64dfe3ec9a046fc95bd26543faa4e0aa6b
|
/users/forms.py
|
3785a73dcb9b70433314f4a6da334bb0885e8f3b
|
[] |
no_license
|
shahjalalh/zed
|
38bf0e4ad2eea4b066b6be55e6a7f1aefffc00e5
|
7bc9dc5b5e1921204c2b7cf72afe43d798953599
|
refs/heads/master
| 2022-12-12T10:17:19.797791
| 2021-01-27T15:31:48
| 2021-01-27T15:31:48
| 171,094,577
| 0
| 0
| null | 2021-01-27T15:32:54
| 2019-02-17T07:35:58
|
CSS
|
UTF-8
|
Python
| false
| false
| 3,605
|
py
|
from django import forms
from users.models import User
from django.contrib.auth.forms import UserCreationForm
from allauth.account.forms import LoginForm, SignupForm
# login form
class CustomLoginForm(LoginForm):
def __init__(self, *args, **kwargs):
super(CustomLoginForm, self).__init__(*args, **kwargs)
self.fields['login'].widget = forms.TextInput(attrs={'type': 'text', 'class': 'form-control form-control-user', 'placeholder': 'Username', 'autofocus':''})
self.fields['password'].widget = forms.PasswordInput(attrs={'class': 'form-control form-control-user', 'placeholder': 'Password'})
self.fields['remember'].widget = forms.CheckboxInput(attrs={'class': 'custom-control-input'})
def login(self, *args, **kwargs):
# Add your own processing here.
# You must return the original result.
return super(CustomLoginForm, self).login(*args, **kwargs)
class Meta:
model = User
fields = {'username', 'password'}
# for all forms
class CustomSignupForm(SignupForm):
def __init__(self, *args, **kwargs):
super(CustomSignupForm, self).__init__(*args, **kwargs)
self.fields['username'].widget = forms.TextInput(attrs={'type': 'text', 'class': 'form-control form-control-user', 'placeholder': 'Username', 'autofocus':''})
self.fields['email'].widget = forms.TextInput(attrs={'type': 'email', 'class': 'form-control form-control-user', 'placeholder': 'E-mail address', 'required':''})
self.fields['password1'].widget = forms.PasswordInput(attrs={'class': 'form-control form-control-user', 'placeholder': 'Password'})
self.fields['password2'].widget = forms.PasswordInput(attrs={'class': 'form-control form-control-user', 'placeholder': 'Password (again)'})
def save(self, request):
# Ensure you call the parent classes save.
# .save() returns a User object.
user = super(CustomSignupForm, self).save(request)
# Add your own processing here.
# You must return the original result.
return user
class Meta:
model = User
fields = {'username', 'email', 'password1', 'password2'}
# Profile Edit Form
class UserDetailUpdateForm(forms.ModelForm):
first_name = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class': 'form-control form-control-user', 'placeholder': 'First Name'}))
    last_name = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class': 'form-control form-control-user', 'placeholder': 'Last Name'}))
email = forms.CharField(max_length=254, widget=forms.EmailInput(attrs={'class': 'form-control form-control-user'}))
portfolio_site = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control form-control-user'}))
birth_date = forms.DateField(widget=forms.DateInput(attrs={'class': 'form-control form-control-user'}))
    location = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class': 'form-control form-control-user', 'placeholder': 'Location'}))
bio = forms.CharField(max_length=200, widget=forms.Textarea(attrs={'class': 'form-control form-control-user'}), help_text='Write here your message!')
phone = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'class': 'form-control form-control-user'}))
profession = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'class': 'form-control form-control-user'}))
class Meta:
model = User
fields = {'first_name', 'last_name', 'email', 'portfolio_site', 'birth_date', 'location', 'bio'}
|
[
"shahjalal.tipu@gmail.com"
] |
shahjalal.tipu@gmail.com
|
860481e131fd0dc2c26a1d2ead6ea3a7a31f40ac
|
e5cf5fd657b28d1c01d8fd954a911d72526e3112
|
/rflx/OLD/rflx_fun.py
|
6b410b651f85f51fbb1669486baa8c55b88da077
|
[] |
no_license
|
parkermac/ptools
|
6b100f13a44ff595de03705a6ebf14a2fdf80291
|
a039261cd215fe13557baee322a5cae3e976c9fd
|
refs/heads/master
| 2023-01-09T11:04:16.998228
| 2023-01-02T19:09:18
| 2023-01-02T19:09:18
| 48,205,248
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,582
|
py
|
"""
Functions for the rflx code.
"""
import numpy as np
# function to create Sin and Sout
def get_Sio_chatwin(Qr, Socn, ds, nx):
L0 = 50e3 # length of salt intrusion for Qr = Qr0
Qr0 = 1e3 # m3/s
L= L0 * (Qr/Qr0)**(-1/3) # length of salt intrusion (m)
a = Socn/(L**1.5)
alpha = ds/L
x = np.linspace((alpha/(2*a))**2,L,nx)
Sin = a*x**1.5 + alpha*x/2
Sout = a*x**1.5 - alpha*x/2
return Sin, Sout, x, L
def qa_calc(Qin, Qout, Sin, Sout, I):
# Use I = 0 to ignore the second river part
# calculate Efflux-Reflux fractions (a21 and a34), at xm
q1 = Qin[1:]
q3 = Qin[:-1]
q2 = -Qout[1:]
q4 = -Qout[:-1]
s1 = Sin[1:]
s3 = Sin[:-1]
s2 = Sout[1:]
s4 = Sout[:-1]
# notation follows Cokelet and Stewart (1985)
a21 = (q2/q1)*(s2-s4)/(s1-s4) # efflux - up
a34 = (q3/q4)*(s1-s3)/(s1-s4) # reflux - down
# add the upwelling required for the second river
if I > 0:
a21x = np.zeros_like(a21)
a21x[I] = (q1[I]-q1[I-1])/q1[I]
a21 = a21 + a21x
return q1, q2, q3, q4, a21, a34
def c_calc(csp, cdp, info_tup, riv=0, ocn=0, riv2=0, do_age=False):
# unpack some parameters
NS, NX, NT, dt, dvs, dvd, q1, q2, q3, q4, a21, a34, o_Qr, I = info_tup
NTs = int(NT/NS) # save interval, in timesteps
if do_age:
# arrays for age calculation
ccsp = csp.copy()
ccdp = cdp.copy()
# arrays to save in
csa = np.nan * np.ones((NS,NX-1))
cda = np.nan * np.ones((NS,NX-1))
# time series to save in
c_tot = np.nan * np.ones(NT) # net amount of tracer
t_vec = np.nan * np.ones(NT) # time (s)
# these will be time series of the tracer flux at open boundaries
f1_vec = np.nan * np.ones(NT)
f2_vec = np.nan * np.ones(NT)
f3_vec = np.nan * np.ones(NT)
f4_vec = np.nan * np.ones(NT)
friv2_vec = np.nan * np.ones(NT)
#
riv_mask = np.zeros(NX-1)
riv_mask[I] = 1
tta = 0 # index for periodic saves
for tt in range(NT):
# boundary conditions
c4 = np.concatenate(([riv], csp[:-1])) # river
c1 = np.concatenate((cdp[1:], [ocn])) # ocean
if do_age:
cc4 = np.concatenate(([riv], ccsp[:-1])) # river
cc1 = np.concatenate((ccdp[1:], [ocn])) # ocean
# save time series entries
t_vec[tt] = tt*dt
c_tot[tt] = np.sum(csp*dvs + cdp*dvd)
f1_vec[tt] = q1[-1]*c1[-1]
f2_vec[tt] = - q2[-1]*csp[-1]
f3_vec[tt] = - q3[0]*cdp[0]
f4_vec[tt] = q4[0]*c4[0]
friv2_vec[tt] = o_Qr*riv2
# update fields
cs = csp + (dt/dvs)*(q4*c4 + a21*q1*cdp - a34*q4*csp - q2*csp + o_Qr*riv2*riv_mask)
cd = cdp + (dt/dvs)*(q1*c1 - a21*q1*cdp + a34*q4*csp - q3*cdp)
if do_age:
# ageing versions
ccs = ccsp + (dt/dvs)*(q4*cc4 + a21*q1*ccdp - a34*q4*ccsp - q2*ccsp + o_Qr*riv2*riv_mask) + dt*csp/86400
ccd = ccdp + (dt/dvs)*(q1*cc1 - a21*q1*ccdp + a34*q4*ccsp - q3*ccdp) + dt*cdp/86400
ccsp = ccs.copy()
ccdp = ccd.copy()
csp = cs.copy()
cdp = cd.copy()
if (np.mod(tt, NTs) == 0) and tta < NS:
# periodic save
csa[tta,:] = cs
cda[tta,:] = cd
tta += 1
# work on time series
T = t_vec/86400 # time axis in days
# pack things
f_tup = (T, c_tot, f1_vec, f2_vec, f3_vec, f4_vec, friv2_vec)
if do_age:
age_tup = (cs, cd, ccs, ccd)
return csa, cda, f_tup, age_tup
else:
return csa, cda, f_tup
|
[
"p.maccready@gmail.com"
] |
p.maccready@gmail.com
|
737148ea7829692220c67ae1a0891bebe51dbb7c
|
be84cb7f6d239e72ffe0bd727124ced688560e83
|
/zhiyou/items.py
|
ee8914e431897f6604ba475639a0cd5daa8e2752
|
[] |
no_license
|
liuyuqiong88/youji_spider
|
6fded24514256907e463377ecf0abb48bca2c71e
|
2c2ae28a4e3654d46c025b3d1e736d5db7ca8d2d
|
refs/heads/master
| 2020-03-17T12:51:22.029216
| 2018-05-16T03:34:21
| 2018-05-16T03:34:21
| 133,606,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ZhiyouItem(scrapy.Item):
# define the fields for your item here like:
data_source = scrapy.Field()
time_stamp = scrapy.Field()
company_name = scrapy.Field()
category = scrapy.Field()
num = scrapy.Field()
industry = scrapy.Field()
desc = scrapy.Field()
good = scrapy.Field()
salary = scrapy.Field()
fancing_info = scrapy.Field()
address = scrapy.Field()
contact = scrapy.Field()
qq = scrapy.Field()
pass
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
fac4b28d499e22ce687f715e8ecc4a4f5f132391
|
362224f8a23387e8b369b02a6ff8690c200a2bce
|
/django/django_orm/wall/login_app/migrations/0003_auto_20210511_1328.py
|
2728d4dbaa480b108c62108ed84f8255c853d617
|
[] |
no_license
|
Helenyixuanwang/python_stack
|
ac94c7c532655bf47592a8453738daac10f220ad
|
97fbc77e3971b5df1fe3e79652b294facf8d6cee
|
refs/heads/main
| 2023-06-11T02:17:27.277551
| 2021-06-21T17:01:09
| 2021-06-21T17:01:09
| 364,336,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
# Generated by Django 2.2 on 2021-05-11 20:28
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('login_app', '0002_comment_message'),
]
operations = [
migrations.AddField(
model_name='comment',
name='comment',
field=models.TextField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='message',
name='message',
field=models.TextField(default=django.utils.timezone.now),
preserve_default=False,
),
]
|
[
"wangyixuan@msn.com"
] |
wangyixuan@msn.com
|
8c6e52f21979f9c706821df3c1e10416bad935da
|
910d4dd8e56e9437cf09dd8b9c61167673140a1f
|
/0219 stack 2/부분집합.py
|
9cc5973c23a43a26be7da70e2af42e568605cd34
|
[] |
no_license
|
nopasanadamindy/Algorithms
|
10825b212395680401b200a37ab4fde9085bc61f
|
44b82d2f129c4cc6e811b651c0202a18719689cb
|
refs/heads/master
| 2022-09-28T11:39:54.630487
| 2020-05-29T09:49:56
| 2020-05-29T09:49:56
| 237,923,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# Print all subsets of {1, 2, 3}
N = 3
A = [0 for _ in range(N)] # stores whether each element is included (0 or 1)
# print(A)
data = [1, 2, 3]
def printSet(n):
    for i in range(n): # print the elements of this subset
        if A[i] ==1: # A[i] == 1 means the element is included, so print it
print(data[i], end =" ")
print()
def powerset(n, k): # n: number of elements, k: current depth
if n == k: # Basis Part
printSet(n)
else: # Inductive Part
        A[k] = 1 # include element k
        powerset(n, k + 1) # decide inclusion of the next element
        A[k] = 0 # exclude element k
        powerset(n, k + 1) # decide inclusion of the next element
powerset(N, 0)
|
[
"iuui21@snu.ac.kr"
] |
iuui21@snu.ac.kr
|
30651ba1a8f4ce79c71012e118c0a9a7428f5387
|
e951c686fc947efd10ff41069b43a7b875672c33
|
/server/network_monitor_web_server/db/mysql_relevant/sql_str/monitor_detail.py
|
92f774d13f2e381df4822faef8d8b106510b5dba
|
[] |
no_license
|
JasonBourne-sxy/host-web
|
f48c794f2eb2ec7f8a5148620b6ca3f9b062b924
|
649d1a61ac15182b55c17e47c126d98d9b956b44
|
refs/heads/master
| 2022-12-20T09:21:57.947230
| 2019-10-28T07:23:41
| 2019-10-28T07:23:41
| 208,742,511
| 1
| 0
| null | 2022-12-11T06:57:55
| 2019-09-16T07:56:38
|
Python
|
UTF-8
|
Python
| false
| false
| 354
|
py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: monitor_detail
Description :
Author : 'li'
date: 2019/9/27
-------------------------------------------------
Change Activity:
2019/9/27:
-------------------------------------------------
"""
__author__ = 'li'
|
[
"marjey_lee@163.com"
] |
marjey_lee@163.com
|
6be47ea1b25a7f616323e96ebf21cd91bbb04755
|
8e39a4f4ae1e8e88d3b2d731059689ad5b201a56
|
/media-libs/mesa/mesa-10.2.6.py
|
0a7a0fdb1972e3f63a51df926253636303e94fa2
|
[] |
no_license
|
wdysln/new
|
d5f5193f81a1827769085932ab7327bb10ef648e
|
b643824b26148e71859a1afe4518fe05a79d333c
|
refs/heads/master
| 2020-05-31T00:12:05.114056
| 2016-01-04T11:38:40
| 2016-01-04T11:38:40
| 37,287,357
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,869
|
py
|
metadata = """
summary @ Mesa 3-D graphics libraries and include files
homepage @ http://mesa3d.sourceforge.net/
license @ custom
src_url @ http://ftp.osuosl.org/pub/blfs/conglomeration/MesaLib/MesaLib-$version.tar.bz2
arch @ ~x86_64
"""
depends = """
build @ x11-libs/libXdamage x11-libs/libXext x11-libs/libXfixes
x11-libs/libXxf86vm x11-libs/libdrm sys-libs/talloc x11-proto/dri3proto
x11-proto/glproto x11-libs/libXt x11-misc/makedepend x11-proto/presentproto x11-libs/libxshmfence
runtime @ x11-libs/libXdamage x11-libs/libXext x11-libs/libXfixes
x11-libs/libXxf86vm x11-libs/libdrm sys-libs/talloc
x11-proto/glproto x11-libs/libXt x11-misc/makedepend
"""
srcdir = "Mesa-%s" % version
def configure():
"""conf("--with-dri-driverdir=/usr/lib/xorg/modules/dri \
--with-gallium-drivers=r300,r600,nouveau,svga,swrast \
--enable-gallium-llvm \
--enable-gallium-egl --enable-shared-glapi\
--enable-glx-tls \
--with-driver=dri \
--enable-xcb \
--with-state-trackers=dri,glx \
--disable-glut \
--enable-gles1 \
--enable-gles2 \
--enable-egl \
--disable-gallium-egl")
#LOLWUT# sed("configs/autoconf", "(PYTHON_FLAGS) = .*", r"\1 = -t")
#autoreconf("-vif")
#conf("--enable-pic \
# --disable-xcb \
# --enable-glx-tls \
# --disable-gl-osmesa \
# --disable-egl \
# --disable-glw \
# --disable-glut \
# --enable-gallium \
# --enable-gallium-llvm \
# --disable-gallium-svga \
# --disable-gallium-i915 \
# --disable-gallium-i965 \
# --enable-gallium-radeon \
# --enable-gallium-r600 \
# --enable-gallium-nouveau \
# --with-driver=dri \
# --with-dri-driverdir=/usr/lib/xorg/modules/dri \
# --with-dri-drivers=i810,i915,i965,mach64,nouveau,r128,r200,r600,radeon,sis,tdfx \
# --with-state-trackers=dri,glx")
"""
autoreconf("-vif")
conf("--with-dri-driverdir=/usr/lib/xorg/modules/dri",
"--with-gallium-drivers=nouveau,svga,swrast",
"--with-dri-drivers=i915,i965,r200,radeon,nouveau,swrast",
"--with-egl-platforms=x11,drm",
"--disable-gallium-llvm",
"--enable-gallium-egl",
"--enable-shared-glapi",
"--enable-glx-tls",
"--enable-dri",
"--enable-glx",
"--enable-osmesa",
"--enable-gles1",
"--enable-gles2",
"--enable-egl",
"--enable-texture-float",
"--enable-xa")
def build():
export("PYTHONDONTWRITEBYTECODE", "1")
make()
def install():
raw_install("DESTDIR=%s" % install_dir)
|
[
"zirkovandersen@gmail.com"
] |
zirkovandersen@gmail.com
|
9e3a392c6349b84aba9f6ca914893cfcbd83c8b2
|
73f9ce203129a8a5b3742655ab36bb0014ebf30b
|
/example/test.py
|
32cb2da78bf4cec3f3e51de4b3b902d3b6be2bb3
|
[
"MIT"
] |
permissive
|
masterliuf/akshare
|
bd20c6999253a8e4ccc7949a8d89c4fc5559b3bf
|
15907161341041dce0fd7a7bdcad7bda4b999187
|
refs/heads/master
| 2022-07-15T22:57:19.249644
| 2022-06-21T01:26:12
| 2022-06-21T01:26:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,819
|
py
|
import akshare
import empyrical
def test():
print('测试及功能展示: ')
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n一个品种在时间轴上的展期收益率')
df = akshare.get_roll_yield_bar(type_method='date', var='RB', start_day='20181206', end_day='20181210', plot=False)
print(df)
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n一个品种在不同交割标的上的价格比较')
df = akshare.get_roll_yield_bar(type_method='symbol', var='RB', date='20181210', plot=False)
print(df)
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n多个品种在某天的展期收益率横截面比较')
df = akshare.get_roll_yield_bar(type_method='var', date='20181210', plot=False)
print(df)
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n特定两个标的的展期收益率')
df = akshare.get_roll_yield(date='20181210', var='IF', symbol1='IF1812', symbol2='IF1901')
print(df)
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n特定品种、特定时段的交易所注册仓单')
df = akshare.get_receipt(start_day='20181207', end_day='20181210', vars_list=['CU', 'NI'])
print(df)
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n特定日期的现货价格及基差')
df = akshare.get_spot_price('20181210')
print(df)
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n特定品种、特定时段的现货价格及基差')
df = akshare.get_spot_price_daily(start_day='20181210', end_day='20181210', vars_list=['CU', 'RB'])
print(df)
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n特定品种、特定时段的会员持仓排名求和')
df = akshare.get_rank_sum_daily(start_day='20181210', end_day='20181210', vars_list=['IF', 'C'])
print(df)
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n大商所会员持仓排名细节;郑商所、上期所、中金所分别改成get_czce_rank_table、get_shfe_rank_table、get_cffex_rank_table')
df = akshare.get_dce_rank_table('20181210')
print(df)
# ----------------------------------------------------------------------
print('\n' + '-' * 80 + '\n日线行情获取')
df = akshare.get_futures_daily(start_day='20181210', end_day='20181210', market='DCE', index_bar=True)
print(df)
if __name__ == '__main__':
test()
|
[
"jindaxiang@163.com"
] |
jindaxiang@163.com
|
9971c8b78a57215b996b334a79046ce2255c989c
|
1acafe9b0497db2a481828a0505ceb042a80e43b
|
/tree/stack.py
|
f0de889ef2a86a55f572bd1b888554abc397cca7
|
[] |
no_license
|
weiweiECNU/pythonDataStructure
|
fbe6dc1579deb85483b805ff416a61a513e41dea
|
971a2f74423eec581bf6134c6aa21719209608ee
|
refs/heads/master
| 2020-06-13T21:36:37.844642
| 2019-08-07T10:55:01
| 2019-08-07T10:55:01
| 194,793,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
class Stack:
def __init__(self):
        '''Stack() creates a new empty stack. It needs no parameters and returns an empty stack.'''
self.items = []
def push(self, item):
        '''push(item) adds a new item to the top of the stack. It takes item as a parameter and returns nothing.'''
self.items.append(item)
def pop(self):
        '''pop() removes the item at the top of the stack. It needs no parameters and returns the item. The stack is modified.'''
return self.items.pop()
def peek(self):
"""返回栈顶的项,不删除它。它不需要参数。堆栈不被修改。"""
return self.items[-1]
def isEmpty(self):
"""测试看栈是否为空。它不需要参数,返回一个布尔值。"""
if len(self.items) == 0:
return True
else:
return False
def size(self):
"""返回栈的项目数。它不需要参数,返回一个整数。"""
return len(self.items)
def __str__(self):
return str(self.items)
|
[
"weiweiwill995@gmail.com"
] |
weiweiwill995@gmail.com
|
67e7fa2072ca379da2b2785d764631b9a9e688bd
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/scattergl/marker/colorbar/_tick0.py
|
6ae415fecf9370dbd6eba565777c31f610d94a67
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
import _plotly_utils.basevalidators
class Tick0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="tick0", parent_name="scattergl.marker.colorbar", **kwargs
):
super(Tick0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"tickmode": "linear"}),
**kwargs
)
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
9cf5b65f60eebae66537830916a502043fbbc1cf
|
a27cde0fa2415f2cb296369dd9bfab65f655164c
|
/5/5-11.py
|
43e4de162f8e7246940f7518e5bd2bcb8b928de6
|
[] |
no_license
|
paalso/hse_python_course
|
c47ea010ba4173fefbcbb6b97fc3c74a84a9fd12
|
9e9d2001143afbd873152833dafb9682b5bae824
|
refs/heads/master
| 2021-06-25T05:12:37.368806
| 2020-12-21T09:48:37
| 2020-12-21T09:48:37
| 187,812,620
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 891
|
py
|
# https://www.coursera.org/learn/python-osnovy-programmirovaniya/programming/NY8PQ/diofantovo-uravnieniie-2/submission
# The lost card
# A board game uses cards numbered from 1 to N. One card
# has been lost. Find it, knowing the numbers of the remaining cards.
#
# Input / output format
#
# Given the number N, followed by the N-1 numbers of the remaining cards (distinct numbers from 1 to N),
# the program must print the number of the lost card.
n = int(input())
expected_sum = n * (n + 1) // 2
actual_sum = 0
for k in range(1, n):
actual_sum += int(input())
missed_card = expected_sum - actual_sum
print(missed_card)
|
[
"40a.pvl@gmail.com"
] |
40a.pvl@gmail.com
|
9703bc9d35531437a484a615653587eca9a9b406
|
62553a9743257f06dc4a77d57c85aa99bfd51c0f
|
/FN/nn_tutorial/explanation_tf.py
|
3e07932c7f5ba130571686a91d88f60905167d10
|
[
"Apache-2.0"
] |
permissive
|
tetsuoh0103/baby-steps-of-rl-ja
|
21a855e0f0e38ba48de5c6d13522372fd4de0d24
|
6e0f44b0906cb28ac883546d3d8a30d21d5895b5
|
refs/heads/master
| 2020-11-23T22:42:02.127637
| 2020-01-03T03:16:46
| 2020-01-03T03:16:46
| 227,851,226
| 0
| 0
|
Apache-2.0
| 2019-12-13T13:55:09
| 2019-12-13T13:55:08
| null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
import numpy as np
import tensorflow as tf
# Placeholders and sessions are graph-mode (TF1-style) APIs; disable eager execution so this also runs under TF2.
tf.compat.v1.disable_eager_execution()
# Weight (row=4 x col=2).
a = tf.Variable(np.random.rand(4, 2))
# Bias (row=4 x col=1).
b = tf.Variable(np.random.rand(4, 1))
# Input(x) (row=2 x col=1).
x = tf.compat.v1.placeholder(tf.float64, shape=(2, 1))
# Output(y) (row=4 x col=1).
y = tf.matmul(a, x) + b
with tf.compat.v1.Session() as sess:
    # Initialize variables.
    init = tf.compat.v1.global_variables_initializer()
sess.run(init)
# Make input to x.
x_value = np.random.rand(2, 1)
# Execute culculation.
y_output = sess.run(y, feed_dict={x: x_value})
print(y_output.shape) # Will be (4, 1)
|
[
"icoxfog417@yahoo.co.jp"
] |
icoxfog417@yahoo.co.jp
|
515ec4e7f1567a0c9deee0a837eb0859fd622ebc
|
4c32103014a7893a59b15210aab7d76422c542b1
|
/generank/api/models/news_feed.py
|
c1f82436adac02478a68ebf7c11b7c4efb451645
|
[
"MIT"
] |
permissive
|
shuchenliu/mygenerank-api
|
cfd49e3d4786de7cd97da9848b50f3f894929b55
|
3c36cb733816c9aa305f02773487f35e194b6566
|
refs/heads/master
| 2020-03-25T12:14:04.248263
| 2018-08-08T23:44:46
| 2018-08-08T23:44:46
| 143,765,629
| 0
| 0
| null | 2018-08-06T18:12:20
| 2018-08-06T18:12:19
| null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
import uuid
from django.conf import settings
from django.db import models
from django.utils import timezone
class Item(models.Model):
""" An model that represents an item in the global news feed. These models
will have various extensions added to them with additional data depending
on their source.
"""
SOURCES = {
'reddit': 0,
}
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
source = models.IntegerField()
link = models.URLField()
title = models.CharField(max_length=300)
image = models.URLField(null=True, blank=True)
description = models.CharField(max_length=450, null=True, blank=True)
created_on = models.DateTimeField(default=timezone.now)
def __str__(self):
return '<Item: %s>' % self.title
|
[
"brian@brianschrader.com"
] |
brian@brianschrader.com
|
07a96c972f722c090d0379879a87e26635e8403c
|
5537eec7f43098d216d2b550678c8d10b2a26f09
|
/venv/tower/lib/python2.7/site-packages/azure/mgmt/compute/models/os_disk_image.py
|
465c5d3ec4e5434147ab908f71e2b2aba896e679
|
[] |
no_license
|
wipro-sdx/Automation
|
f0ae1512b8d9d491d7bacec94c8906d06d696407
|
a8c46217d0fbe51a71597b5db87cbe98ed19297a
|
refs/heads/master
| 2021-07-08T11:09:05.314435
| 2018-05-02T07:18:54
| 2018-05-02T07:18:54
| 131,812,982
| 0
| 1
| null | 2020-07-23T23:22:33
| 2018-05-02T07:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OSDiskImage(Model):
"""Contains the os disk image information.
:param operating_system: the operating system of the osDiskImage.
Possible values include: 'Windows', 'Linux'
:type operating_system: str or :class:`OperatingSystemTypes
<azure.mgmt.compute.models.OperatingSystemTypes>`
"""
_validation = {
'operating_system': {'required': True},
}
_attribute_map = {
'operating_system': {'key': 'operatingSystem', 'type': 'OperatingSystemTypes'},
}
def __init__(self, operating_system):
self.operating_system = operating_system
|
[
"admin@example.com"
] |
admin@example.com
|
97fbfcb2881b5f3663fae1b99173fae2a50d54bb
|
085406a6754c33957ca694878db9bbe37f84b970
|
/Django_02/Django_02/wsgi.py
|
30b5f8d71d360b56789738de45e22ceadcedb05e
|
[] |
no_license
|
dewlytg/Python-example
|
82157958da198ce42014e678dfe507c72ed67ef0
|
1e179e4037eccd9fefabefd252b060564a2eafce
|
refs/heads/master
| 2021-01-01T18:36:08.868861
| 2019-01-18T10:39:08
| 2019-01-18T10:39:08
| 98,375,528
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
WSGI config for Django_02 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Django_02.settings")
application = get_wsgi_application()
|
[
"gang.tang@cutt.com"
] |
gang.tang@cutt.com
|
341333740509c486def6c606e86184f76da82b95
|
b3b066a566618f49ae83c81e963543a9b956a00a
|
/Unsupervised Learning in Python/03_Decorrelating your data and dimension reduction/01_Correlated data in nature.py
|
47f750e21c91db0e713455c2d355c1df22d6f1fb
|
[] |
no_license
|
ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020
|
666c4129c3f0b5d759b511529a365dfd36c12f1a
|
f3d20b788c8ef766e7c86c817e6c2ef7b69520b8
|
refs/heads/master
| 2022-12-22T21:09:13.955273
| 2020-09-30T01:16:05
| 2020-09-30T01:16:05
| 289,991,534
| 2
| 0
| null | 2020-08-24T17:15:43
| 2020-08-24T17:15:42
| null |
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
'''
Correlated data in nature
You are given an array grains giving the width and length of samples of grain. You suspect that width and length will be correlated. To confirm this, make a scatter plot of width vs length and measure their Pearson correlation.
INSTRUCTIONS
100XP
Import:
matplotlib.pyplot as plt.
pearsonr from scipy.stats.
Assign column 0 of grains to width and column 1 of grains to length.
Make a scatter plot with width on the x-axis and length on the y-axis.
Use the pearsonr() function to calculate the Pearson correlation of width and length.
'''
# Perform the necessary imports
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
# Assign the 0th column of grains: width
width = grains[:,0]
# Assign the 1st column of grains: length
length = grains[:,1]
# Scatter plot width vs length
plt.scatter(width, length)
plt.axis('equal')
plt.show()
# Calculate the Pearson correlation
correlation, pvalue = pearsonr(width, length)
# Display the correlation
print(correlation)
#========================================================#
# DEVELOPER #
# BasitAminBhatti #
# Github #
# https://github.com/basitaminbhatti #
#========================================================#
|
[
"Your-Email"
] |
Your-Email
|
c83b9b05ea3394c8d408353555f98b20d69ba9e7
|
6a1390ec579dc16ef20255517a2fe566c0e514d5
|
/try_conv3d.py
|
551a298994cdbc8366961880b2c6a9abc931fe74
|
[
"BSD-3-Clause"
] |
permissive
|
imandr/cconv
|
71f6b24502ba9249de08ed9618951d8363bf8b43
|
45e1e404b11070fd823bf5345875f447d9574c2f
|
refs/heads/master
| 2021-08-01T07:59:42.878692
| 2021-07-27T22:49:02
| 2021-07-27T22:49:02
| 186,833,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
import numpy as np
from cconv import convolve_3d
def convolve_xw(inp, w, mode):
    # inp: (nb, nx, ny, nz, nc_in)
    # w: (nx, ny, nz, nc_in, nc_out)
    # returns (nb, x, y, z, nc_out)
mode = 0 if mode == 'valid' else 1
inp = inp.transpose((0,4,1,2,3)) # -> [mb, ic, x, y, z]
w = w.transpose((4,3,0,1,2)) # -> [oc, ic, x, y, z]
#print "convolve_xw: convolve_3d..."
return convolve_3d(inp, w, mode).transpose((0,2,3,4,1)) # -> [mb, x,y,z, oc]
def convolve_xy(x, y):
    # x: (nb, nx, ny, nz, nc_in)
    # y: (nb, mx, my, mz, nc_out) (mx,my,mz) < (nx,ny,nz)
    # returns (fx, fy, fz, nc_in, nc_out)
x = x.transpose((4,0,1,2,3)) # -> [ic, mb, x, y, z]
y = y.transpose((4,0,1,2,3)) # -> [oc, mb, x, y, z]
#print "convolve_xy: convolve_3d..."
return convolve_3d(x, y, 0).transpose((2,3,4,0,1)) # -> [x,y,z,ic,oc]
image = np.ones((1,4,3,2,1))
filter = np.ones((2,2,2,1,1))
#filter[0,0,0,:,:] = 1
filtered = convolve_xw(image, filter, 'valid')
print filtered
|
[
"igorvm@gmail.com"
] |
igorvm@gmail.com
|
8550417b7422369930a90a9291698978faa2d2eb
|
d1d633abb313c235b4c178ccf2939537bd4232b0
|
/team_app/tests/test_team.py
|
92afcfe0235389acab16d1b0b7799c2fa986da3a
|
[
"MIT"
] |
permissive
|
dcopm999/initpy
|
ee82229bf5ac311d83297d484012a7048c015a02
|
26aa6bda25947f67ab842d298ce2f121b89616bf
|
refs/heads/master
| 2022-12-01T23:45:35.446225
| 2022-06-09T04:58:26
| 2022-06-09T04:58:26
| 195,928,228
| 0
| 1
|
MIT
| 2019-07-15T04:13:26
| 2019-07-09T03:47:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,710
|
py
|
from django.contrib.auth.models import User
from django.test import TestCase
from team_app import models
# Create your tests here.
class TeamTestCase(TestCase):
fixtures = ['auth.json', 'team.json']
def test_team_str(self):
team = models.TeamModel.objects.get(id=1)
self.assertEqual(team.__str__(), 'Павел Таньчев')
def test_skill_str(self):
skill = models.SkillModel.objects.get(id=1)
self.assertEqual(skill.__str__(), 'Системный администратор Linux')
def test_contact_str(self):
contact = models.ContactModel.objects.get(id=1)
self.assertEqual(contact.__str__(), '998909199375 998909199375 dcopm999@gmail.com')
def test_service_str(self):
service = models.ServiceModel.objects.get(id=1)
self.assertEqual(service.__str__(), 'Разработка Web приложений')
def test_service_item_str(self):
item = models.ServiceItemModel.objects.get(id=1)
self.assertEqual(item.__str__(), 'Django Web framework')
def test_about_str(self):
about = models.AboutModel.objects.get(id=1)
self.assertEqual(about.__str__(), 'Наши услуги')
def test_about_item_str(self):
item = models.AboutItemModel.objects.get(id=1)
self.assertEqual(item.__str__(), 'Автоматизация бизнес процессов.')
def test_certs_str(self):
cert = models.CertificatesModel.objects.get(id=1)
self.assertEqual(cert.__str__(), 'Павел Таньчев')
def test_feedback_str(self):
feed = models.FeedbackModel.objects.get(id=1)
self.assertEqual(feed.__str__(), 'test@test.test')
|
[
"dcopm999@gmail.com"
] |
dcopm999@gmail.com
|
81b4b87bad12384a7baf4cb6c743426e8c881ed7
|
08e039046e2b3c526b5fd2169e02d5c5bbe253c5
|
/0x03-python-data_structures/5-no_c.py
|
0e51fd28731f21eb9788cffa39f09fc7c0575fa4
|
[] |
no_license
|
VinneyJ/alx-higher_level_programming
|
22a976a22583334aff1f0c4120fb81117905e35b
|
0ea8719ec5f28c76faf06bb5e67c14abb71fa3d0
|
refs/heads/main
| 2023-07-31T15:44:30.390103
| 2021-10-01T21:27:31
| 2021-10-01T21:27:31
| 361,816,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
#!/usr/bin/python3
def no_c(my_string):
letter = ""
for x in range(0, len(my_string)):
if my_string[x] != 'C' and my_string[x] != 'c':
letter = letter + my_string[x]
return letter
|
[
"vincentjayden49@gmail.com"
] |
vincentjayden49@gmail.com
|
999664ee9f32896f3b0d3260623259b784d7764a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_117/1065.py
|
7fe863e98eee741f39826ab2ec8ae886728b8035
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
inputFile = open("B-large.in")
line = inputFile.readline()[:-1]
number = int(line)


def validCut(mow, lawn):
    for i in range(len(lawn)):
        theseCuts = []
        for cutLength in lawn[i]:
            if cutLength not in theseCuts:
                theseCuts.append(cutLength)
        theseCuts.sort()
        for thisCut in theseCuts:
            cols = []
            for j in range(len(lawn[i])):
                if lawn[i][j] == thisCut:
                    cols.append(j)
            if (not checkRow(thisCut, i, lawn)) and (not checkCol(thisCut, cols, lawn)):
                print "Case #" + str(mow + 1) + ": NO"
                return
    print "Case #" + str(mow + 1) + ": YES"
    return


def checkRow(cut, row, lawn):
    if len(lawn[row]) == 0:
        return True
    for j in range(len(lawn[row])):
        if lawn[row][j] > cut:
            return False
    return True


def checkCol(cut, cols, lawn):
    if len(lawn) == 1:
        return True
    for col in cols:
        for i in range(len(lawn)):
            if lawn[i][col] > cut:
                return False
    return True


for mow in range(number):
    line = inputFile.readline()[:-1]
    dimensions = line.split(" ")
    rows = int(dimensions[0])
    cols = int(dimensions[1])
    lawn = []
    for row in range(rows):
        line = inputFile.readline()[:-1]
        thisRow = line.split(" ")
        lawn.append(thisRow)
    for row in range(rows):
        for col in range(cols):
            lawn[row][col] = int(lawn[row][col])
    validCut(mow, lawn)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
53e916a2354173ad94fe2ffd901c9249e8a2572a
|
539af892f18d9a63f1d6966a15a100f004a5a908
|
/next-greater-number.py
|
cd9e250103832cc5ad5f3643d483ac75584f541a
|
[] |
no_license
|
woofan/leetcode
|
5f0e7cfbcd9d0fddd7b25c7a96896e6a24ccbd15
|
4a13026b6e04a71d5da56c7c35ac58877b27f69b
|
refs/heads/master
| 2022-12-05T17:27:00.979915
| 2022-11-09T08:58:01
| 2022-11-09T08:58:01
| 226,093,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
def nextGreaterElement(nums1, nums2):
    res = [-1] * len(nums1)
    for i in range(len(nums1)):
        for j in range(nums2.index(nums1[i]), len(nums2)):
            if nums2[j] > nums1[i]:
                res[i] = nums2[j]
                break
        if j == len(nums2):
            res[i] = -1
    return res


print(nextGreaterElement([4, 1, 2], [1, 3, 4, 2]))
|
[
"495464616@qq.com"
] |
495464616@qq.com
|
d1b6d651f777c45464dcbacff005184aa87ae11f
|
8da91c26d423bacbeee1163ac7e969904c7e4338
|
/pyvisdk/do/vim_esx_cl_ifcoeadapterlist_fcoe_adapter_device.py
|
3330a86d5a6754317ae63e082cfb7c861561ad0d
|
[] |
no_license
|
pexip/os-python-infi-pyvisdk
|
5d8f3a3858cdd61fb76485574e74ae525cdc7e25
|
1aadea0afbc306d09f6ecb9af0e683dbbf961d20
|
refs/heads/master
| 2023-08-28T02:40:28.789786
| 2020-07-16T04:00:53
| 2020-07-16T04:00:53
| 10,032,240
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
import logging

from pyvisdk.exceptions import InvalidArgumentError

# This module is NOT auto-generated
# Inspired by decompiled Java classes from vCenter's internalvim25stubs.jar
# Unless stated otherwise, the methods and attributes were not used by esxcli,
# and thus not tested

log = logging.getLogger(__name__)


def VimEsxCLIfcoeadapterlistFcoeAdapterDevice(vim, *args, **kwargs):
    obj = vim.client.factory.create('{urn:vim25}VimEsxCLIfcoeadapterlistFcoeAdapterDevice')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))

    required = [ ]
    optional = [ 'AdapterName', 'FCFMAC', 'PhysicalNIC', 'SourceMAC', 'UserPriority', 'VLANid', 'VNPortMAC' ]

    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
[
"jmb@pexip.com"
] |
jmb@pexip.com
|
0b4cd98072362cc8682d11ed73c4125e6c758daa
|
f707303e4dfe383cf82c23a6bb42ccfdc4cfdb67
|
/pandas-ml-common/pandas_ml_common_test/unit/utils/test_random.py
|
6a133b3025ab3ed9c52849c9e9d31683a5abd8f8
|
[
"MIT"
] |
permissive
|
jcoffi/pandas-ml-quant
|
1830ec256f8c09c04f1aa77e2eecfba07d34fe68
|
650a8e8f77bc4d71136518d1c7ee65c194a99cf0
|
refs/heads/master
| 2023-08-31T06:45:38.060737
| 2021-09-09T04:44:35
| 2021-09-09T04:44:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
from unittest import TestCase

import numpy as np

from pandas_ml_common.utils import normalize_probabilities


class TestRandumUtils(TestCase):

    def test_probs_normalisation(self):
        p1 = [0.2, 0.8]
        p2 = [2, 8]

        np.testing.assert_equal(np.array(p1), normalize_probabilities(p1))
        np.testing.assert_equal(np.array(p1), normalize_probabilities(p2))
|
[
"kic@kic.kic"
] |
kic@kic.kic
|
e967581074ee231c67a7f10c06ca6d07b6452aba
|
51f6443116ef09aa91cca0ac91387c1ce9cb445a
|
/Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula165/aula165/wsgi.py
|
6a1760d3a621190f05ff162edb5cb0e32301a322
|
[
"MIT"
] |
permissive
|
DanilooSilva/Cursos_de_Python
|
f449f75bc586f7cb5a7e43000583a83fff942e53
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
refs/heads/main
| 2023-07-30T02:11:27.002831
| 2021-10-01T21:52:15
| 2021-10-01T21:52:15
| 331,683,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for aula165 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aula165.settings')
application = get_wsgi_application()
|
[
"dno.gomesps@gmail.com"
] |
dno.gomesps@gmail.com
|
427be95a3e10919c7f60362ba818390acf5a3f9b
|
1b7929f6e521042595fa5d8b04753b09fcf4825c
|
/webdev/public_catering/hello_py.py
|
cf5c547298ec0d27dc37cca15384c843c4bae85b
|
[] |
no_license
|
xyzza/convert_hg
|
a73b8c5b3681decd75540570ff6ba0c7cf1ce576
|
9cfd9b34fcebbb2c0583b6bb19a98f28870c293d
|
refs/heads/master
| 2021-01-10T21:07:04.499530
| 2014-04-15T13:09:36
| 2014-04-15T13:09:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
#coding:utf-8
"""
This is new python module for some stuff
ok, i'll fix it!
"""
import os
import sys
print "hello module 01 in task 003"
|
[
"devnull@localhost"
] |
devnull@localhost
|
771f82b1529867ea4e13963c6cf6ef6d6932fb22
|
16d4474e7777da03aef6ead78e112edf8931d131
|
/core/migrations/0007_category_direct_link.py
|
e10d5e9b585a815fcb128bd6f83b63203f61be65
|
[] |
no_license
|
BijoySingh/HomePageMaker
|
f91dd03bc1e2b6a46893b8738f87686d105968b8
|
ba366dfd9514b10cc6283bb4e120037f8229fc1e
|
refs/heads/master
| 2020-12-06T23:51:51.122187
| 2017-01-30T23:31:38
| 2017-01-30T23:31:38
| 66,867,578
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-01 06:56
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0006_content_cover'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='direct_link',
            field=models.CharField(blank=True, default='', max_length=256, null=True),
        ),
    ]
|
[
"bijoysingh693@gmail.com"
] |
bijoysingh693@gmail.com
|